path: root/src/cmd/compile/internal/gc/escape.go
author    Russ Cox <rsc@golang.org>  2020-11-22 09:59:15 -0500
committer Russ Cox <rsc@golang.org>  2020-11-25 17:30:39 +0000
commit    acb4d1cef14529585266df1868045f80e37ae081 (patch)
tree      a82e843b647d06f304cd8c6d5927fe0854be88d3 /src/cmd/compile/internal/gc/escape.go
parent    41ab6689edb1f51001feab0928e598050e2f6d32 (diff)
download  go-git-acb4d1cef14529585266df1868045f80e37ae081.tar.gz
[dev.regabi] cmd/compile: use Node getters and setters [generated]
Now that we have all the getters and setters defined, use them
and unexport all the actual Node fields. This is the next step
toward replacing Node with an interface.

[git-generate]
cd src/cmd/compile/internal/gc
rf '
ex . ../ir ../ssa {
	import "cmd/compile/internal/ir"
	import "cmd/compile/internal/types"
	import "cmd/internal/src"

	var n, x *ir.Node
	var op ir.Op
	var t *types.Type
	var f *ir.Func
	var m *ir.Name
	var s *types.Sym
	var p src.XPos
	var i int64
	var e uint16
	var nodes ir.Nodes

	n.Op = op -> n.SetOp(op)
	n.Left = x -> n.SetLeft(x)
	n.Right = x -> n.SetRight(x)
	n.Orig = x -> n.SetOrig(x)
	n.Type = t -> n.SetType(t)
	n.Func = f -> n.SetFunc(f)
	n.Name = m -> n.SetName(m)
	n.Sym = s -> n.SetSym(s)
	n.Pos = p -> n.SetPos(p)
	n.Xoffset = i -> n.SetXoffset(i)
	n.Esc = e -> n.SetEsc(e)

	n.Ninit.Append -> n.PtrNinit().Append
	n.Ninit.AppendNodes -> n.PtrNinit().AppendNodes
	n.Ninit.MoveNodes -> n.PtrNinit().MoveNodes
	n.Ninit.Prepend -> n.PtrNinit().Prepend
	n.Ninit.Set -> n.PtrNinit().Set
	n.Ninit.Set1 -> n.PtrNinit().Set1
	n.Ninit.Set2 -> n.PtrNinit().Set2
	n.Ninit.Set3 -> n.PtrNinit().Set3
	&n.Ninit -> n.PtrNinit()
	n.Ninit = nodes -> n.SetNinit(nodes)

	n.Nbody.Append -> n.PtrNbody().Append
	n.Nbody.AppendNodes -> n.PtrNbody().AppendNodes
	n.Nbody.MoveNodes -> n.PtrNbody().MoveNodes
	n.Nbody.Prepend -> n.PtrNbody().Prepend
	n.Nbody.Set -> n.PtrNbody().Set
	n.Nbody.Set1 -> n.PtrNbody().Set1
	n.Nbody.Set2 -> n.PtrNbody().Set2
	n.Nbody.Set3 -> n.PtrNbody().Set3
	&n.Nbody -> n.PtrNbody()
	n.Nbody = nodes -> n.SetNbody(nodes)

	n.List.Append -> n.PtrList().Append
	n.List.AppendNodes -> n.PtrList().AppendNodes
	n.List.MoveNodes -> n.PtrList().MoveNodes
	n.List.Prepend -> n.PtrList().Prepend
	n.List.Set -> n.PtrList().Set
	n.List.Set1 -> n.PtrList().Set1
	n.List.Set2 -> n.PtrList().Set2
	n.List.Set3 -> n.PtrList().Set3
	&n.List -> n.PtrList()
	n.List = nodes -> n.SetList(nodes)

	n.Rlist.Append -> n.PtrRlist().Append
	n.Rlist.AppendNodes -> n.PtrRlist().AppendNodes
	n.Rlist.MoveNodes -> n.PtrRlist().MoveNodes
	n.Rlist.Prepend -> n.PtrRlist().Prepend
	n.Rlist.Set -> n.PtrRlist().Set
	n.Rlist.Set1 -> n.PtrRlist().Set1
	n.Rlist.Set2 -> n.PtrRlist().Set2
	n.Rlist.Set3 -> n.PtrRlist().Set3
	&n.Rlist -> n.PtrRlist()
	n.Rlist = nodes -> n.SetRlist(nodes)
}

ex . ../ir ../ssa {
	import "cmd/compile/internal/ir"
	var n *ir.Node

	n.Op -> n.GetOp()
	n.Left -> n.GetLeft()
	n.Right -> n.GetRight()
	n.Orig -> n.GetOrig()
	n.Type -> n.GetType()
	n.Func -> n.GetFunc()
	n.Name -> n.GetName()
	n.Sym -> n.GetSym()
	n.Pos -> n.GetPos()
	n.Xoffset -> n.GetXoffset()
	n.Esc -> n.GetEsc()

	avoid (*ir.Node).PtrNinit
	avoid (*ir.Node).PtrNbody
	avoid (*ir.Node).PtrList
	avoid (*ir.Node).PtrRlist

	n.Ninit -> n.GetNinit()
	n.Nbody -> n.GetNbody()
	n.List -> n.GetList()
	n.Rlist -> n.GetRlist()
}
'

cd ../ir
rf '
mv Node.Op Node.op
mv Node.GetOp Node.Op
mv Node.Left Node.left
mv Node.GetLeft Node.Left
mv Node.Right Node.right
mv Node.GetRight Node.Right
mv Node.Orig Node.orig
mv Node.GetOrig Node.Orig
mv Node.Type Node.typ
mv Node.GetType Node.Type
mv Node.Func Node.fn
mv Node.GetFunc Node.Func
mv Node.Name Node.name
mv Node.GetName Node.Name

# All uses are in other Node methods already.
mv Node.E Node.e

mv Node.Sym Node.sym
mv Node.GetSym Node.Sym
mv Node.Pos Node.pos
mv Node.GetPos Node.Pos
mv Node.Esc Node.esc
mv Node.GetEsc Node.Esc

# While we are here, rename Xoffset to more idiomatic Offset.
mv Node.Xoffset Node.offset
mv Node.GetXoffset Node.Offset
mv Node.SetXoffset Node.SetOffset

# While we are here, rename Ninit, Nbody to more idiomatic Init, Body.
mv Node.Ninit Node.init
mv Node.GetNinit Node.Init
mv Node.PtrNinit Node.PtrInit
mv Node.SetNinit Node.SetInit
mv Node.Nbody Node.body
mv Node.GetNbody Node.Body
mv Node.PtrNbody Node.PtrBody
mv Node.SetNbody Node.SetBody
mv Node.List Node.list
mv Node.GetList Node.List
mv Node.Rlist Node.rlist
mv Node.GetRlist Node.Rlist

# Unexport these
mv Node.SetHasOpt Node.setHasOpt
mv Node.SetHasVal Node.setHasVal
'

Change-Id: I9894f633375c5237a29b6d6d7b89ba181b56ca3a
Reviewed-on: https://go-review.googlesource.com/c/go/+/273009
Trust: Russ Cox <rsc@golang.org>
Run-TryBot: Russ Cox <rsc@golang.org>
Reviewed-by: Matthew Dempsky <mdempsky@google.com>
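For orientation, here is a minimal, self-contained sketch of the accessor
pattern this CL converges on. Node and Op below are toy stand-ins, not the
real cmd/compile/internal/ir definitions; they only mirror the shape of the
generated getters and setters:

    package main

    import "fmt"

    type Op int

    const (
    	OXXX Op = iota
    	ODCLFUNC
    )

    // Node mirrors the post-CL shape of ir.Node: the fields are
    // unexported, so callers must go through the accessors. That
    // indirection is what makes a later switch from a concrete
    // struct to an interface possible.
    type Node struct {
    	op  Op
    	esc uint16
    }

    func (n *Node) Op() Op          { return n.op }
    func (n *Node) SetOp(op Op)     { n.op = op }
    func (n *Node) Esc() uint16     { return n.esc }
    func (n *Node) SetEsc(e uint16) { n.esc = e }

    func main() {
    	n := &Node{}
    	n.SetOp(ODCLFUNC) // was: n.Op = ir.ODCLFUNC
    	n.SetEsc(1)       // was: n.Esc = 1
    	fmt.Println(n.Op() == ODCLFUNC, n.Esc()) // was: n.Op, n.Esc
    }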
Diffstat (limited to 'src/cmd/compile/internal/gc/escape.go')
-rw-r--r--  src/cmd/compile/internal/gc/escape.go | 518
1 file changed, 259 insertions(+), 259 deletions(-)
diff --git a/src/cmd/compile/internal/gc/escape.go b/src/cmd/compile/internal/gc/escape.go
index a0aa516d9a..866bdf8a6f 100644
--- a/src/cmd/compile/internal/gc/escape.go
+++ b/src/cmd/compile/internal/gc/escape.go
@@ -149,7 +149,7 @@ func init() {
// escFmt is called from node printing to print information about escape analysis results.
func escFmt(n *ir.Node, short bool) string {
text := ""
- switch n.Esc {
+ switch n.Esc() {
case EscUnknown:
break
@@ -165,7 +165,7 @@ func escFmt(n *ir.Node, short bool) string {
}
default:
- text = fmt.Sprintf("esc(%d)", n.Esc)
+ text = fmt.Sprintf("esc(%d)", n.Esc())
}
if e, ok := n.Opt().(*EscLocation); ok && e.loopDepth != 0 {
@@ -181,7 +181,7 @@ func escFmt(n *ir.Node, short bool) string {
// functions.
func escapeFuncs(fns []*ir.Node, recursive bool) {
for _, fn := range fns {
- if fn.Op != ir.ODCLFUNC {
+ if fn.Op() != ir.ODCLFUNC {
base.Fatalf("unexpected node: %v", fn)
}
}
@@ -203,10 +203,10 @@ func escapeFuncs(fns []*ir.Node, recursive bool) {
}
func (e *Escape) initFunc(fn *ir.Node) {
- if fn.Op != ir.ODCLFUNC || fn.Esc != EscFuncUnknown {
+ if fn.Op() != ir.ODCLFUNC || fn.Esc() != EscFuncUnknown {
base.Fatalf("unexpected node: %v", fn)
}
- fn.Esc = EscFuncPlanned
+ fn.SetEsc(EscFuncPlanned)
if base.Flag.LowerM > 3 {
ir.Dump("escAnalyze", fn)
}
@@ -215,27 +215,27 @@ func (e *Escape) initFunc(fn *ir.Node) {
e.loopDepth = 1
// Allocate locations for local variables.
- for _, dcl := range fn.Func.Dcl {
- if dcl.Op == ir.ONAME {
+ for _, dcl := range fn.Func().Dcl {
+ if dcl.Op() == ir.ONAME {
e.newLoc(dcl, false)
}
}
}
func (e *Escape) walkFunc(fn *ir.Node) {
- fn.Esc = EscFuncStarted
+ fn.SetEsc(EscFuncStarted)
// Identify labels that mark the head of an unstructured loop.
- ir.InspectList(fn.Nbody, func(n *ir.Node) bool {
- switch n.Op {
+ ir.InspectList(fn.Body(), func(n *ir.Node) bool {
+ switch n.Op() {
case ir.OLABEL:
- n.Sym.Label = ir.AsTypesNode(nonlooping)
+ n.Sym().Label = ir.AsTypesNode(nonlooping)
case ir.OGOTO:
// If we visited the label before the goto,
// then this is a looping label.
- if n.Sym.Label == ir.AsTypesNode(nonlooping) {
- n.Sym.Label = ir.AsTypesNode(looping)
+ if n.Sym().Label == ir.AsTypesNode(nonlooping) {
+ n.Sym().Label = ir.AsTypesNode(looping)
}
}
@@ -244,7 +244,7 @@ func (e *Escape) walkFunc(fn *ir.Node) {
e.curfn = fn
e.loopDepth = 1
- e.block(fn.Nbody)
+ e.block(fn.Body())
}
// Below we implement the methods for walking the AST and recording
@@ -288,9 +288,9 @@ func (e *Escape) stmt(n *ir.Node) {
fmt.Printf("%v:[%d] %v stmt: %v\n", base.FmtPos(base.Pos), e.loopDepth, funcSym(e.curfn), n)
}
- e.stmts(n.Ninit)
+ e.stmts(n.Init())
- switch n.Op {
+ switch n.Op() {
default:
base.Fatalf("unexpected stmt: %v", n)
@@ -301,16 +301,16 @@ func (e *Escape) stmt(n *ir.Node) {
// TODO(mdempsky): Handle dead code?
case ir.OBLOCK:
- e.stmts(n.List)
+ e.stmts(n.List())
case ir.ODCL:
// Record loop depth at declaration.
- if !ir.IsBlank(n.Left) {
- e.dcl(n.Left)
+ if !ir.IsBlank(n.Left()) {
+ e.dcl(n.Left())
}
case ir.OLABEL:
- switch ir.AsNode(n.Sym.Label) {
+ switch ir.AsNode(n.Sym().Label) {
case nonlooping:
if base.Flag.LowerM > 2 {
fmt.Printf("%v:%v non-looping label\n", base.FmtPos(base.Pos), n)
@@ -323,109 +323,109 @@ func (e *Escape) stmt(n *ir.Node) {
default:
base.Fatalf("label missing tag")
}
- n.Sym.Label = nil
+ n.Sym().Label = nil
case ir.OIF:
- e.discard(n.Left)
- e.block(n.Nbody)
- e.block(n.Rlist)
+ e.discard(n.Left())
+ e.block(n.Body())
+ e.block(n.Rlist())
case ir.OFOR, ir.OFORUNTIL:
e.loopDepth++
- e.discard(n.Left)
- e.stmt(n.Right)
- e.block(n.Nbody)
+ e.discard(n.Left())
+ e.stmt(n.Right())
+ e.block(n.Body())
e.loopDepth--
case ir.ORANGE:
// for List = range Right { Nbody }
e.loopDepth++
- ks := e.addrs(n.List)
- e.block(n.Nbody)
+ ks := e.addrs(n.List())
+ e.block(n.Body())
e.loopDepth--
// Right is evaluated outside the loop.
k := e.discardHole()
if len(ks) >= 2 {
- if n.Right.Type.IsArray() {
+ if n.Right().Type().IsArray() {
k = ks[1].note(n, "range")
} else {
k = ks[1].deref(n, "range-deref")
}
}
- e.expr(e.later(k), n.Right)
+ e.expr(e.later(k), n.Right())
case ir.OSWITCH:
- typesw := n.Left != nil && n.Left.Op == ir.OTYPESW
+ typesw := n.Left() != nil && n.Left().Op() == ir.OTYPESW
var ks []EscHole
- for _, cas := range n.List.Slice() { // cases
- if typesw && n.Left.Left != nil {
- cv := cas.Rlist.First()
+ for _, cas := range n.List().Slice() { // cases
+ if typesw && n.Left().Left() != nil {
+ cv := cas.Rlist().First()
k := e.dcl(cv) // type switch variables have no ODCL.
- if cv.Type.HasPointers() {
- ks = append(ks, k.dotType(cv.Type, cas, "switch case"))
+ if cv.Type().HasPointers() {
+ ks = append(ks, k.dotType(cv.Type(), cas, "switch case"))
}
}
- e.discards(cas.List)
- e.block(cas.Nbody)
+ e.discards(cas.List())
+ e.block(cas.Body())
}
if typesw {
- e.expr(e.teeHole(ks...), n.Left.Right)
+ e.expr(e.teeHole(ks...), n.Left().Right())
} else {
- e.discard(n.Left)
+ e.discard(n.Left())
}
case ir.OSELECT:
- for _, cas := range n.List.Slice() {
- e.stmt(cas.Left)
- e.block(cas.Nbody)
+ for _, cas := range n.List().Slice() {
+ e.stmt(cas.Left())
+ e.block(cas.Body())
}
case ir.OSELRECV:
- e.assign(n.Left, n.Right, "selrecv", n)
+ e.assign(n.Left(), n.Right(), "selrecv", n)
case ir.OSELRECV2:
- e.assign(n.Left, n.Right, "selrecv", n)
- e.assign(n.List.First(), nil, "selrecv", n)
+ e.assign(n.Left(), n.Right(), "selrecv", n)
+ e.assign(n.List().First(), nil, "selrecv", n)
case ir.ORECV:
// TODO(mdempsky): Consider e.discard(n.Left).
e.exprSkipInit(e.discardHole(), n) // already visited n.Ninit
case ir.OSEND:
- e.discard(n.Left)
- e.assignHeap(n.Right, "send", n)
+ e.discard(n.Left())
+ e.assignHeap(n.Right(), "send", n)
case ir.OAS, ir.OASOP:
- e.assign(n.Left, n.Right, "assign", n)
+ e.assign(n.Left(), n.Right(), "assign", n)
case ir.OAS2:
- for i, nl := range n.List.Slice() {
- e.assign(nl, n.Rlist.Index(i), "assign-pair", n)
+ for i, nl := range n.List().Slice() {
+ e.assign(nl, n.Rlist().Index(i), "assign-pair", n)
}
case ir.OAS2DOTTYPE: // v, ok = x.(type)
- e.assign(n.List.First(), n.Right, "assign-pair-dot-type", n)
- e.assign(n.List.Second(), nil, "assign-pair-dot-type", n)
+ e.assign(n.List().First(), n.Right(), "assign-pair-dot-type", n)
+ e.assign(n.List().Second(), nil, "assign-pair-dot-type", n)
case ir.OAS2MAPR: // v, ok = m[k]
- e.assign(n.List.First(), n.Right, "assign-pair-mapr", n)
- e.assign(n.List.Second(), nil, "assign-pair-mapr", n)
+ e.assign(n.List().First(), n.Right(), "assign-pair-mapr", n)
+ e.assign(n.List().Second(), nil, "assign-pair-mapr", n)
case ir.OAS2RECV: // v, ok = <-ch
- e.assign(n.List.First(), n.Right, "assign-pair-receive", n)
- e.assign(n.List.Second(), nil, "assign-pair-receive", n)
+ e.assign(n.List().First(), n.Right(), "assign-pair-receive", n)
+ e.assign(n.List().Second(), nil, "assign-pair-receive", n)
case ir.OAS2FUNC:
- e.stmts(n.Right.Ninit)
- e.call(e.addrs(n.List), n.Right, nil)
+ e.stmts(n.Right().Init())
+ e.call(e.addrs(n.List()), n.Right(), nil)
case ir.ORETURN:
- results := e.curfn.Type.Results().FieldSlice()
- for i, v := range n.List.Slice() {
+ results := e.curfn.Type().Results().FieldSlice()
+ for i, v := range n.List().Slice() {
e.assign(ir.AsNode(results[i].Nname), v, "return", n)
}
case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER, ir.OCLOSE, ir.OCOPY, ir.ODELETE, ir.OPANIC, ir.OPRINT, ir.OPRINTN, ir.ORECOVER:
e.call(nil, n, nil)
case ir.OGO, ir.ODEFER:
- e.stmts(n.Left.Ninit)
- e.call(nil, n.Left, n)
+ e.stmts(n.Left().Init())
+ e.call(nil, n.Left(), n)
case ir.ORETJMP:
// TODO(mdempsky): What do? esc.go just ignores it.
@@ -451,7 +451,7 @@ func (e *Escape) expr(k EscHole, n *ir.Node) {
if n == nil {
return
}
- e.stmts(n.Ninit)
+ e.stmts(n.Init())
e.exprSkipInit(k, n)
}
@@ -468,13 +468,13 @@ func (e *Escape) exprSkipInit(k EscHole, n *ir.Node) {
uintptrEscapesHack := k.uintptrEscapesHack
k.uintptrEscapesHack = false
- if uintptrEscapesHack && n.Op == ir.OCONVNOP && n.Left.Type.IsUnsafePtr() {
+ if uintptrEscapesHack && n.Op() == ir.OCONVNOP && n.Left().Type().IsUnsafePtr() {
// nop
- } else if k.derefs >= 0 && !n.Type.HasPointers() {
+ } else if k.derefs >= 0 && !n.Type().HasPointers() {
k = e.discardHole()
}
- switch n.Op {
+ switch n.Op() {
default:
base.Fatalf("unexpected expr: %v", n)
@@ -488,61 +488,61 @@ func (e *Escape) exprSkipInit(k EscHole, n *ir.Node) {
e.flow(k, e.oldLoc(n))
case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT:
- e.discard(n.Left)
+ e.discard(n.Left())
case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.ODIV, ir.OMOD, ir.OLSH, ir.ORSH, ir.OAND, ir.OANDNOT, ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE, ir.OANDAND, ir.OOROR:
- e.discard(n.Left)
- e.discard(n.Right)
+ e.discard(n.Left())
+ e.discard(n.Right())
case ir.OADDR:
- e.expr(k.addr(n, "address-of"), n.Left) // "address-of"
+ e.expr(k.addr(n, "address-of"), n.Left()) // "address-of"
case ir.ODEREF:
- e.expr(k.deref(n, "indirection"), n.Left) // "indirection"
+ e.expr(k.deref(n, "indirection"), n.Left()) // "indirection"
case ir.ODOT, ir.ODOTMETH, ir.ODOTINTER:
- e.expr(k.note(n, "dot"), n.Left)
+ e.expr(k.note(n, "dot"), n.Left())
case ir.ODOTPTR:
- e.expr(k.deref(n, "dot of pointer"), n.Left) // "dot of pointer"
+ e.expr(k.deref(n, "dot of pointer"), n.Left()) // "dot of pointer"
case ir.ODOTTYPE, ir.ODOTTYPE2:
- e.expr(k.dotType(n.Type, n, "dot"), n.Left)
+ e.expr(k.dotType(n.Type(), n, "dot"), n.Left())
case ir.OINDEX:
- if n.Left.Type.IsArray() {
- e.expr(k.note(n, "fixed-array-index-of"), n.Left)
+ if n.Left().Type().IsArray() {
+ e.expr(k.note(n, "fixed-array-index-of"), n.Left())
} else {
// TODO(mdempsky): Fix why reason text.
- e.expr(k.deref(n, "dot of pointer"), n.Left)
+ e.expr(k.deref(n, "dot of pointer"), n.Left())
}
- e.discard(n.Right)
+ e.discard(n.Right())
case ir.OINDEXMAP:
- e.discard(n.Left)
- e.discard(n.Right)
+ e.discard(n.Left())
+ e.discard(n.Right())
case ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR, ir.OSLICESTR:
- e.expr(k.note(n, "slice"), n.Left)
+ e.expr(k.note(n, "slice"), n.Left())
low, high, max := n.SliceBounds()
e.discard(low)
e.discard(high)
e.discard(max)
case ir.OCONV, ir.OCONVNOP:
- if checkPtr(e.curfn, 2) && n.Type.IsUnsafePtr() && n.Left.Type.IsPtr() {
+ if checkPtr(e.curfn, 2) && n.Type().IsUnsafePtr() && n.Left().Type().IsPtr() {
// When -d=checkptr=2 is enabled, treat
// conversions to unsafe.Pointer as an
// escaping operation. This allows better
// runtime instrumentation, since we can more
// easily detect object boundaries on the heap
// than the stack.
- e.assignHeap(n.Left, "conversion to unsafe.Pointer", n)
- } else if n.Type.IsUnsafePtr() && n.Left.Type.IsUintptr() {
- e.unsafeValue(k, n.Left)
+ e.assignHeap(n.Left(), "conversion to unsafe.Pointer", n)
+ } else if n.Type().IsUnsafePtr() && n.Left().Type().IsUintptr() {
+ e.unsafeValue(k, n.Left())
} else {
- e.expr(k, n.Left)
+ e.expr(k, n.Left())
}
case ir.OCONVIFACE:
- if !n.Left.Type.IsInterface() && !isdirectiface(n.Left.Type) {
+ if !n.Left().Type().IsInterface() && !isdirectiface(n.Left().Type()) {
k = e.spill(k, n)
}
- e.expr(k.note(n, "interface-converted"), n.Left)
+ e.expr(k.note(n, "interface-converted"), n.Left())
case ir.ORECV:
- e.discard(n.Left)
+ e.discard(n.Left())
case ir.OCALLMETH, ir.OCALLFUNC, ir.OCALLINTER, ir.OLEN, ir.OCAP, ir.OCOMPLEX, ir.OREAL, ir.OIMAG, ir.OAPPEND, ir.OCOPY:
e.call([]EscHole{k}, n, nil)
@@ -552,13 +552,13 @@ func (e *Escape) exprSkipInit(k EscHole, n *ir.Node) {
case ir.OMAKESLICE:
e.spill(k, n)
- e.discard(n.Left)
- e.discard(n.Right)
+ e.discard(n.Left())
+ e.discard(n.Right())
case ir.OMAKECHAN:
- e.discard(n.Left)
+ e.discard(n.Left())
case ir.OMAKEMAP:
e.spill(k, n)
- e.discard(n.Left)
+ e.discard(n.Left())
case ir.ORECOVER:
// nop
@@ -583,15 +583,15 @@ func (e *Escape) exprSkipInit(k EscHole, n *ir.Node) {
}
paramK := e.tagHole(ks, ir.AsNode(m.Nname), m.Type.Recv())
- e.expr(e.teeHole(paramK, closureK), n.Left)
+ e.expr(e.teeHole(paramK, closureK), n.Left())
case ir.OPTRLIT:
- e.expr(e.spill(k, n), n.Left)
+ e.expr(e.spill(k, n), n.Left())
case ir.OARRAYLIT:
- for _, elt := range n.List.Slice() {
- if elt.Op == ir.OKEY {
- elt = elt.Right
+ for _, elt := range n.List().Slice() {
+ if elt.Op() == ir.OKEY {
+ elt = elt.Right()
}
e.expr(k.note(n, "array literal element"), elt)
}
@@ -600,89 +600,89 @@ func (e *Escape) exprSkipInit(k EscHole, n *ir.Node) {
k = e.spill(k, n)
k.uintptrEscapesHack = uintptrEscapesHack // for ...uintptr parameters
- for _, elt := range n.List.Slice() {
- if elt.Op == ir.OKEY {
- elt = elt.Right
+ for _, elt := range n.List().Slice() {
+ if elt.Op() == ir.OKEY {
+ elt = elt.Right()
}
e.expr(k.note(n, "slice-literal-element"), elt)
}
case ir.OSTRUCTLIT:
- for _, elt := range n.List.Slice() {
- e.expr(k.note(n, "struct literal element"), elt.Left)
+ for _, elt := range n.List().Slice() {
+ e.expr(k.note(n, "struct literal element"), elt.Left())
}
case ir.OMAPLIT:
e.spill(k, n)
// Map keys and values are always stored in the heap.
- for _, elt := range n.List.Slice() {
- e.assignHeap(elt.Left, "map literal key", n)
- e.assignHeap(elt.Right, "map literal value", n)
+ for _, elt := range n.List().Slice() {
+ e.assignHeap(elt.Left(), "map literal key", n)
+ e.assignHeap(elt.Right(), "map literal value", n)
}
case ir.OCLOSURE:
k = e.spill(k, n)
// Link addresses of captured variables to closure.
- for _, v := range n.Func.ClosureVars.Slice() {
- if v.Op == ir.OXXX { // unnamed out argument; see dcl.go:/^funcargs
+ for _, v := range n.Func().ClosureVars.Slice() {
+ if v.Op() == ir.OXXX { // unnamed out argument; see dcl.go:/^funcargs
continue
}
k := k
- if !v.Name.Byval() {
+ if !v.Name().Byval() {
k = k.addr(v, "reference")
}
- e.expr(k.note(n, "captured by a closure"), v.Name.Defn)
+ e.expr(k.note(n, "captured by a closure"), v.Name().Defn)
}
case ir.ORUNES2STR, ir.OBYTES2STR, ir.OSTR2RUNES, ir.OSTR2BYTES, ir.ORUNESTR:
e.spill(k, n)
- e.discard(n.Left)
+ e.discard(n.Left())
case ir.OADDSTR:
e.spill(k, n)
// Arguments of OADDSTR never escape;
// runtime.concatstrings makes sure of that.
- e.discards(n.List)
+ e.discards(n.List())
}
}
// unsafeValue evaluates a uintptr-typed arithmetic expression looking
// for conversions from an unsafe.Pointer.
func (e *Escape) unsafeValue(k EscHole, n *ir.Node) {
- if n.Type.Etype != types.TUINTPTR {
- base.Fatalf("unexpected type %v for %v", n.Type, n)
+ if n.Type().Etype != types.TUINTPTR {
+ base.Fatalf("unexpected type %v for %v", n.Type(), n)
}
- e.stmts(n.Ninit)
+ e.stmts(n.Init())
- switch n.Op {
+ switch n.Op() {
case ir.OCONV, ir.OCONVNOP:
- if n.Left.Type.IsUnsafePtr() {
- e.expr(k, n.Left)
+ if n.Left().Type().IsUnsafePtr() {
+ e.expr(k, n.Left())
} else {
- e.discard(n.Left)
+ e.discard(n.Left())
}
case ir.ODOTPTR:
if isReflectHeaderDataField(n) {
- e.expr(k.deref(n, "reflect.Header.Data"), n.Left)
+ e.expr(k.deref(n, "reflect.Header.Data"), n.Left())
} else {
- e.discard(n.Left)
+ e.discard(n.Left())
}
case ir.OPLUS, ir.ONEG, ir.OBITNOT:
- e.unsafeValue(k, n.Left)
+ e.unsafeValue(k, n.Left())
case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.ODIV, ir.OMOD, ir.OAND, ir.OANDNOT:
- e.unsafeValue(k, n.Left)
- e.unsafeValue(k, n.Right)
+ e.unsafeValue(k, n.Left())
+ e.unsafeValue(k, n.Right())
case ir.OLSH, ir.ORSH:
- e.unsafeValue(k, n.Left)
+ e.unsafeValue(k, n.Left())
// RHS need not be uintptr-typed (#32959) and can't meaningfully
// flow pointers anyway.
- e.discard(n.Right)
+ e.discard(n.Right())
default:
e.exprSkipInit(e.discardHole(), n)
}
@@ -711,7 +711,7 @@ func (e *Escape) addr(n *ir.Node) EscHole {
k := e.heapHole()
- switch n.Op {
+ switch n.Op() {
default:
base.Fatalf("unexpected addr: %v", n)
case ir.ONAME:
@@ -720,22 +720,22 @@ func (e *Escape) addr(n *ir.Node) EscHole {
}
k = e.oldLoc(n).asHole()
case ir.ODOT:
- k = e.addr(n.Left)
+ k = e.addr(n.Left())
case ir.OINDEX:
- e.discard(n.Right)
- if n.Left.Type.IsArray() {
- k = e.addr(n.Left)
+ e.discard(n.Right())
+ if n.Left().Type().IsArray() {
+ k = e.addr(n.Left())
} else {
- e.discard(n.Left)
+ e.discard(n.Left())
}
case ir.ODEREF, ir.ODOTPTR:
e.discard(n)
case ir.OINDEXMAP:
- e.discard(n.Left)
- e.assignHeap(n.Right, "key of map put", n)
+ e.discard(n.Left())
+ e.assignHeap(n.Right(), "key of map put", n)
}
- if !n.Type.HasPointers() {
+ if !n.Type().HasPointers() {
k = e.discardHole()
}
@@ -755,11 +755,11 @@ func (e *Escape) assign(dst, src *ir.Node, why string, where *ir.Node) {
// Filter out some no-op assignments for escape analysis.
ignore := dst != nil && src != nil && isSelfAssign(dst, src)
if ignore && base.Flag.LowerM != 0 {
- base.WarnfAt(where.Pos, "%v ignoring self-assignment in %S", funcSym(e.curfn), where)
+ base.WarnfAt(where.Pos(), "%v ignoring self-assignment in %S", funcSym(e.curfn), where)
}
k := e.addr(dst)
- if dst != nil && dst.Op == ir.ODOTPTR && isReflectHeaderDataField(dst) {
+ if dst != nil && dst.Op() == ir.ODOTPTR && isReflectHeaderDataField(dst) {
e.unsafeValue(e.heapHole().note(where, why), src)
} else {
if ignore {
@@ -777,11 +777,11 @@ func (e *Escape) assignHeap(src *ir.Node, why string, where *ir.Node) {
// should contain the holes representing where the function callee's
// results flows; where is the OGO/ODEFER context of the call, if any.
func (e *Escape) call(ks []EscHole, call, where *ir.Node) {
- topLevelDefer := where != nil && where.Op == ir.ODEFER && e.loopDepth == 1
+ topLevelDefer := where != nil && where.Op() == ir.ODEFER && e.loopDepth == 1
if topLevelDefer {
// force stack allocation of defer record, unless
// open-coded defers are used (see ssa.go)
- where.Esc = EscNever
+ where.SetEsc(EscNever)
}
argument := func(k EscHole, arg *ir.Node) {
@@ -797,66 +797,66 @@ func (e *Escape) call(ks []EscHole, call, where *ir.Node) {
e.expr(k.note(call, "call parameter"), arg)
}
- switch call.Op {
+ switch call.Op() {
default:
- base.Fatalf("unexpected call op: %v", call.Op)
+ base.Fatalf("unexpected call op: %v", call.Op())
case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER:
fixVariadicCall(call)
// Pick out the function callee, if statically known.
var fn *ir.Node
- switch call.Op {
+ switch call.Op() {
case ir.OCALLFUNC:
- switch v := staticValue(call.Left); {
- case v.Op == ir.ONAME && v.Class() == ir.PFUNC:
+ switch v := staticValue(call.Left()); {
+ case v.Op() == ir.ONAME && v.Class() == ir.PFUNC:
fn = v
- case v.Op == ir.OCLOSURE:
- fn = v.Func.Nname
+ case v.Op() == ir.OCLOSURE:
+ fn = v.Func().Nname
}
case ir.OCALLMETH:
- fn = methodExprName(call.Left)
+ fn = methodExprName(call.Left())
}
- fntype := call.Left.Type
+ fntype := call.Left().Type()
if fn != nil {
- fntype = fn.Type
+ fntype = fn.Type()
}
if ks != nil && fn != nil && e.inMutualBatch(fn) {
- for i, result := range fn.Type.Results().FieldSlice() {
+ for i, result := range fn.Type().Results().FieldSlice() {
e.expr(ks[i], ir.AsNode(result.Nname))
}
}
if r := fntype.Recv(); r != nil {
- argument(e.tagHole(ks, fn, r), call.Left.Left)
+ argument(e.tagHole(ks, fn, r), call.Left().Left())
} else {
// Evaluate callee function expression.
- argument(e.discardHole(), call.Left)
+ argument(e.discardHole(), call.Left())
}
- args := call.List.Slice()
+ args := call.List().Slice()
for i, param := range fntype.Params().FieldSlice() {
argument(e.tagHole(ks, fn, param), args[i])
}
case ir.OAPPEND:
- args := call.List.Slice()
+ args := call.List().Slice()
// Appendee slice may flow directly to the result, if
// it has enough capacity. Alternatively, a new heap
// slice might be allocated, and all slice elements
// might flow to heap.
appendeeK := ks[0]
- if args[0].Type.Elem().HasPointers() {
+ if args[0].Type().Elem().HasPointers() {
appendeeK = e.teeHole(appendeeK, e.heapHole().deref(call, "appendee slice"))
}
argument(appendeeK, args[0])
if call.IsDDD() {
appendedK := e.discardHole()
- if args[1].Type.IsSlice() && args[1].Type.Elem().HasPointers() {
+ if args[1].Type().IsSlice() && args[1].Type().Elem().HasPointers() {
appendedK = e.heapHole().deref(call, "appended slice...")
}
argument(appendedK, args[1])
@@ -867,26 +867,26 @@ func (e *Escape) call(ks []EscHole, call, where *ir.Node) {
}
case ir.OCOPY:
- argument(e.discardHole(), call.Left)
+ argument(e.discardHole(), call.Left())
copiedK := e.discardHole()
- if call.Right.Type.IsSlice() && call.Right.Type.Elem().HasPointers() {
+ if call.Right().Type().IsSlice() && call.Right().Type().Elem().HasPointers() {
copiedK = e.heapHole().deref(call, "copied slice")
}
- argument(copiedK, call.Right)
+ argument(copiedK, call.Right())
case ir.OPANIC:
- argument(e.heapHole(), call.Left)
+ argument(e.heapHole(), call.Left())
case ir.OCOMPLEX:
- argument(e.discardHole(), call.Left)
- argument(e.discardHole(), call.Right)
+ argument(e.discardHole(), call.Left())
+ argument(e.discardHole(), call.Right())
case ir.ODELETE, ir.OPRINT, ir.OPRINTN, ir.ORECOVER:
- for _, arg := range call.List.Slice() {
+ for _, arg := range call.List().Slice() {
argument(e.discardHole(), arg)
}
case ir.OLEN, ir.OCAP, ir.OREAL, ir.OIMAG, ir.OCLOSE:
- argument(e.discardHole(), call.Left)
+ argument(e.discardHole(), call.Left())
}
}
@@ -936,8 +936,8 @@ func (e *Escape) tagHole(ks []EscHole, fn *ir.Node, param *types.Field) EscHole
// should be incorporated directly into the flow graph instead of
// relying on its escape analysis tagging.
func (e *Escape) inMutualBatch(fn *ir.Node) bool {
- if fn.Name.Defn != nil && fn.Name.Defn.Esc < EscFuncTagged {
- if fn.Name.Defn.Esc == EscFuncUnknown {
+ if fn.Name().Defn != nil && fn.Name().Defn.Esc() < EscFuncTagged {
+ if fn.Name().Defn.Esc() == EscFuncUnknown {
base.Fatalf("graph inconsistency")
}
return true
@@ -1053,9 +1053,9 @@ func (e *Escape) later(k EscHole) EscHole {
// canonicalNode returns the canonical *Node that n logically
// represents.
func canonicalNode(n *ir.Node) *ir.Node {
- if n != nil && n.Op == ir.ONAME && n.Name.IsClosureVar() {
- n = n.Name.Defn
- if n.Name.IsClosureVar() {
+ if n != nil && n.Op() == ir.ONAME && n.Name().IsClosureVar() {
+ n = n.Name().Defn
+ if n.Name().IsClosureVar() {
base.Fatalf("still closure var")
}
}
@@ -1067,8 +1067,8 @@ func (e *Escape) newLoc(n *ir.Node, transient bool) *EscLocation {
if e.curfn == nil {
base.Fatalf("e.curfn isn't set")
}
- if n != nil && n.Type != nil && n.Type.NotInHeap() {
- base.ErrorfAt(n.Pos, "%v is incomplete (or unallocatable); stack allocation disallowed", n.Type)
+ if n != nil && n.Type() != nil && n.Type().NotInHeap() {
+ base.ErrorfAt(n.Pos(), "%v is incomplete (or unallocatable); stack allocation disallowed", n.Type())
}
n = canonicalNode(n)
@@ -1080,8 +1080,8 @@ func (e *Escape) newLoc(n *ir.Node, transient bool) *EscLocation {
}
e.allLocs = append(e.allLocs, loc)
if n != nil {
- if n.Op == ir.ONAME && n.Name.Curfn != e.curfn {
- base.Fatalf("curfn mismatch: %v != %v", n.Name.Curfn, e.curfn)
+ if n.Op() == ir.ONAME && n.Name().Curfn != e.curfn {
+ base.Fatalf("curfn mismatch: %v != %v", n.Name().Curfn, e.curfn)
}
if n.HasOpt() {
@@ -1115,13 +1115,13 @@ func (e *Escape) flow(k EscHole, src *EscLocation) {
}
if dst.escapes && k.derefs < 0 { // dst = &src
if base.Flag.LowerM >= 2 || logopt.Enabled() {
- pos := base.FmtPos(src.n.Pos)
+ pos := base.FmtPos(src.n.Pos())
if base.Flag.LowerM >= 2 {
fmt.Printf("%s: %v escapes to heap:\n", pos, src.n)
}
explanation := e.explainFlow(pos, dst, src, k.derefs, k.notes, []*logopt.LoggedOpt{})
if logopt.Enabled() {
- logopt.LogOpt(src.n.Pos, "escapes", "escape", ir.FuncName(e.curfn), fmt.Sprintf("%v escapes to heap", src.n), explanation)
+ logopt.LogOpt(src.n.Pos(), "escapes", "escape", ir.FuncName(e.curfn), fmt.Sprintf("%v escapes to heap", src.n), explanation)
}
}
@@ -1218,11 +1218,11 @@ func (e *Escape) walkOne(root *EscLocation, walkgen uint32, enqueue func(*EscLoc
if l.isName(ir.PPARAM) {
if (logopt.Enabled() || base.Flag.LowerM >= 2) && !l.escapes {
if base.Flag.LowerM >= 2 {
- fmt.Printf("%s: parameter %v leaks to %s with derefs=%d:\n", base.FmtPos(l.n.Pos), l.n, e.explainLoc(root), derefs)
+ fmt.Printf("%s: parameter %v leaks to %s with derefs=%d:\n", base.FmtPos(l.n.Pos()), l.n, e.explainLoc(root), derefs)
}
explanation := e.explainPath(root, l)
if logopt.Enabled() {
- logopt.LogOpt(l.n.Pos, "leak", "escape", ir.FuncName(e.curfn),
+ logopt.LogOpt(l.n.Pos(), "leak", "escape", ir.FuncName(e.curfn),
fmt.Sprintf("parameter %v leaks to %s with derefs=%d", l.n, e.explainLoc(root), derefs), explanation)
}
}
@@ -1235,11 +1235,11 @@ func (e *Escape) walkOne(root *EscLocation, walkgen uint32, enqueue func(*EscLoc
if addressOf && !l.escapes {
if logopt.Enabled() || base.Flag.LowerM >= 2 {
if base.Flag.LowerM >= 2 {
- fmt.Printf("%s: %v escapes to heap:\n", base.FmtPos(l.n.Pos), l.n)
+ fmt.Printf("%s: %v escapes to heap:\n", base.FmtPos(l.n.Pos()), l.n)
}
explanation := e.explainPath(root, l)
if logopt.Enabled() {
- logopt.LogOpt(l.n.Pos, "escape", "escape", ir.FuncName(e.curfn), fmt.Sprintf("%v escapes to heap", l.n), explanation)
+ logopt.LogOpt(l.n.Pos(), "escape", "escape", ir.FuncName(e.curfn), fmt.Sprintf("%v escapes to heap", l.n), explanation)
}
}
l.escapes = true
@@ -1267,7 +1267,7 @@ func (e *Escape) walkOne(root *EscLocation, walkgen uint32, enqueue func(*EscLoc
// explainPath prints an explanation of how src flows to the walk root.
func (e *Escape) explainPath(root, src *EscLocation) []*logopt.LoggedOpt {
visited := make(map[*EscLocation]bool)
- pos := base.FmtPos(src.n.Pos)
+ pos := base.FmtPos(src.n.Pos())
var explanation []*logopt.LoggedOpt
for {
// Prevent infinite loop.
@@ -1309,19 +1309,19 @@ func (e *Escape) explainFlow(pos string, dst, srcloc *EscLocation, derefs int, n
if logopt.Enabled() {
var epos src.XPos
if notes != nil {
- epos = notes.where.Pos
+ epos = notes.where.Pos()
} else if srcloc != nil && srcloc.n != nil {
- epos = srcloc.n.Pos
+ epos = srcloc.n.Pos()
}
explanation = append(explanation, logopt.NewLoggedOpt(epos, "escflow", "escape", ir.FuncName(e.curfn), flow))
}
for note := notes; note != nil; note = note.next {
if print {
- fmt.Printf("%s: from %v (%v) at %s\n", pos, note.where, note.why, base.FmtPos(note.where.Pos))
+ fmt.Printf("%s: from %v (%v) at %s\n", pos, note.where, note.why, base.FmtPos(note.where.Pos()))
}
if logopt.Enabled() {
- explanation = append(explanation, logopt.NewLoggedOpt(note.where.Pos, "escflow", "escape", ir.FuncName(e.curfn),
+ explanation = append(explanation, logopt.NewLoggedOpt(note.where.Pos(), "escflow", "escape", ir.FuncName(e.curfn),
fmt.Sprintf(" from %v (%v)", note.where, note.why)))
}
}
@@ -1336,7 +1336,7 @@ func (e *Escape) explainLoc(l *EscLocation) string {
// TODO(mdempsky): Omit entirely.
return "{temp}"
}
- if l.n.Op == ir.ONAME {
+ if l.n.Op() == ir.ONAME {
return fmt.Sprintf("%v", l.n)
}
return fmt.Sprintf("{storage for %v}", l.n)
@@ -1360,7 +1360,7 @@ func (e *Escape) outlives(l, other *EscLocation) bool {
//
// var u int // okay to stack allocate
// *(func() *int { return &u }()) = 42
- if containsClosure(other.curfn, l.curfn) && l.curfn.Func.ClosureCalled {
+ if containsClosure(other.curfn, l.curfn) && l.curfn.Func().ClosureCalled {
return false
}
@@ -1395,7 +1395,7 @@ func (e *Escape) outlives(l, other *EscLocation) bool {
// containsClosure reports whether c is a closure contained within f.
func containsClosure(f, c *ir.Node) bool {
- if f.Op != ir.ODCLFUNC || c.Op != ir.ODCLFUNC {
+ if f.Op() != ir.ODCLFUNC || c.Op() != ir.ODCLFUNC {
base.Fatalf("bad containsClosure: %v, %v", f, c)
}
@@ -1406,8 +1406,8 @@ func containsClosure(f, c *ir.Node) bool {
// Closures within function Foo are named like "Foo.funcN..."
// TODO(mdempsky): Better way to recognize this.
- fn := f.Func.Nname.Sym.Name
- cn := c.Func.Nname.Sym.Name
+ fn := f.Func().Nname.Sym().Name
+ cn := c.Func().Nname.Sym().Name
return len(cn) > len(fn) && cn[:len(fn)] == fn && cn[len(fn)] == '.'
}
@@ -1417,7 +1417,7 @@ func (l *EscLocation) leakTo(sink *EscLocation, derefs int) {
// into the escape analysis tag, then record a return leak.
if sink.isName(ir.PPARAMOUT) && sink.curfn == l.curfn {
// TODO(mdempsky): Eliminate dependency on Vargen here.
- ri := int(sink.n.Name.Vargen) - 1
+ ri := int(sink.n.Name().Vargen) - 1
if ri < numEscResults {
// Leak to result parameter.
l.paramEsc.AddResult(ri, derefs)
@@ -1432,11 +1432,11 @@ func (l *EscLocation) leakTo(sink *EscLocation, derefs int) {
func (e *Escape) finish(fns []*ir.Node) {
// Record parameter tags for package export data.
for _, fn := range fns {
- fn.Esc = EscFuncTagged
+ fn.SetEsc(EscFuncTagged)
narg := 0
for _, fs := range &types.RecvsParams {
- for _, f := range fs(fn.Type).Fields().Slice() {
+ for _, f := range fs(fn.Type()).Fields().Slice() {
narg++
f.Note = e.paramTag(fn, narg, f)
}
@@ -1453,21 +1453,21 @@ func (e *Escape) finish(fns []*ir.Node) {
// Update n.Esc based on escape analysis results.
if loc.escapes {
- if n.Op != ir.ONAME {
+ if n.Op() != ir.ONAME {
if base.Flag.LowerM != 0 {
- base.WarnfAt(n.Pos, "%S escapes to heap", n)
+ base.WarnfAt(n.Pos(), "%S escapes to heap", n)
}
if logopt.Enabled() {
- logopt.LogOpt(n.Pos, "escape", "escape", ir.FuncName(e.curfn))
+ logopt.LogOpt(n.Pos(), "escape", "escape", ir.FuncName(e.curfn))
}
}
- n.Esc = EscHeap
+ n.SetEsc(EscHeap)
addrescapes(n)
} else {
- if base.Flag.LowerM != 0 && n.Op != ir.ONAME {
- base.WarnfAt(n.Pos, "%S does not escape", n)
+ if base.Flag.LowerM != 0 && n.Op() != ir.ONAME {
+ base.WarnfAt(n.Pos(), "%S does not escape", n)
}
- n.Esc = EscNone
+ n.SetEsc(EscNone)
if loc.transient {
n.SetTransient(true)
}
@@ -1476,7 +1476,7 @@ func (e *Escape) finish(fns []*ir.Node) {
}
func (l *EscLocation) isName(c ir.Class) bool {
- return l.n != nil && l.n.Op == ir.ONAME && l.n.Class() == c
+ return l.n != nil && l.n.Op() == ir.ONAME && l.n.Class() == c
}
const numEscResults = 7
@@ -1608,10 +1608,10 @@ const (
// funcSym returns fn.Func.Nname.Sym if no nils are encountered along the way.
func funcSym(fn *ir.Node) *types.Sym {
- if fn == nil || fn.Func.Nname == nil {
+ if fn == nil || fn.Func().Nname == nil {
return nil
}
- return fn.Func.Nname.Sym
+ return fn.Func().Nname.Sym()
}
// Mark labels that have no backjumps to them as not increasing e.loopdepth.
@@ -1639,11 +1639,11 @@ func isSliceSelfAssign(dst, src *ir.Node) bool {
// when we evaluate it for dst and for src.
// dst is ONAME dereference.
- if dst.Op != ir.ODEREF && dst.Op != ir.ODOTPTR || dst.Left.Op != ir.ONAME {
+ if dst.Op() != ir.ODEREF && dst.Op() != ir.ODOTPTR || dst.Left().Op() != ir.ONAME {
return false
}
// src is a slice operation.
- switch src.Op {
+ switch src.Op() {
case ir.OSLICE, ir.OSLICE3, ir.OSLICESTR:
// OK.
case ir.OSLICEARR, ir.OSLICE3ARR:
@@ -1656,18 +1656,18 @@ func isSliceSelfAssign(dst, src *ir.Node) bool {
// Pointer to an array is OK since it's not stored inside b directly.
// For slicing an array (not pointer to array), there is an implicit OADDR.
// We check that to determine non-pointer array slicing.
- if src.Left.Op == ir.OADDR {
+ if src.Left().Op() == ir.OADDR {
return false
}
default:
return false
}
// slice is applied to ONAME dereference.
- if src.Left.Op != ir.ODEREF && src.Left.Op != ir.ODOTPTR || src.Left.Left.Op != ir.ONAME {
+ if src.Left().Op() != ir.ODEREF && src.Left().Op() != ir.ODOTPTR || src.Left().Left().Op() != ir.ONAME {
return false
}
// dst and src reference the same base ONAME.
- return dst.Left == src.Left.Left
+ return dst.Left() == src.Left().Left()
}
// isSelfAssign reports whether assignment from src to dst can
@@ -1687,15 +1687,15 @@ func isSelfAssign(dst, src *ir.Node) bool {
//
// These assignments do not change assigned object lifetime.
- if dst == nil || src == nil || dst.Op != src.Op {
+ if dst == nil || src == nil || dst.Op() != src.Op() {
return false
}
- switch dst.Op {
+ switch dst.Op() {
case ir.ODOT, ir.ODOTPTR:
// Safe trailing accessors that are permitted to differ.
case ir.OINDEX:
- if mayAffectMemory(dst.Right) || mayAffectMemory(src.Right) {
+ if mayAffectMemory(dst.Right()) || mayAffectMemory(src.Right()) {
return false
}
default:
@@ -1703,7 +1703,7 @@ func isSelfAssign(dst, src *ir.Node) bool {
}
// The expression prefix must be both "safe" and identical.
- return samesafeexpr(dst.Left, src.Left)
+ return samesafeexpr(dst.Left(), src.Left())
}
// mayAffectMemory reports whether evaluation of n may affect the program's
@@ -1716,18 +1716,18 @@ func mayAffectMemory(n *ir.Node) bool {
//
// We're ignoring things like division by zero, index out of range,
// and nil pointer dereference here.
- switch n.Op {
+ switch n.Op() {
case ir.ONAME, ir.OCLOSUREVAR, ir.OLITERAL, ir.ONIL:
return false
// Left+Right group.
case ir.OINDEX, ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.OLSH, ir.ORSH, ir.OAND, ir.OANDNOT, ir.ODIV, ir.OMOD:
- return mayAffectMemory(n.Left) || mayAffectMemory(n.Right)
+ return mayAffectMemory(n.Left()) || mayAffectMemory(n.Right())
// Left group.
case ir.ODOT, ir.ODOTPTR, ir.ODEREF, ir.OCONVNOP, ir.OCONV, ir.OLEN, ir.OCAP,
ir.ONOT, ir.OBITNOT, ir.OPLUS, ir.ONEG, ir.OALIGNOF, ir.OOFFSETOF, ir.OSIZEOF:
- return mayAffectMemory(n.Left)
+ return mayAffectMemory(n.Left())
default:
return true
@@ -1737,39 +1737,39 @@ func mayAffectMemory(n *ir.Node) bool {
// heapAllocReason returns the reason the given Node must be heap
// allocated, or the empty string if it doesn't.
func heapAllocReason(n *ir.Node) string {
- if n.Type == nil {
+ if n.Type() == nil {
return ""
}
// Parameters are always passed via the stack.
- if n.Op == ir.ONAME && (n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT) {
+ if n.Op() == ir.ONAME && (n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT) {
return ""
}
- if n.Type.Width > maxStackVarSize {
+ if n.Type().Width > maxStackVarSize {
return "too large for stack"
}
- if (n.Op == ir.ONEW || n.Op == ir.OPTRLIT) && n.Type.Elem().Width >= maxImplicitStackVarSize {
+ if (n.Op() == ir.ONEW || n.Op() == ir.OPTRLIT) && n.Type().Elem().Width >= maxImplicitStackVarSize {
return "too large for stack"
}
- if n.Op == ir.OCLOSURE && closureType(n).Size() >= maxImplicitStackVarSize {
+ if n.Op() == ir.OCLOSURE && closureType(n).Size() >= maxImplicitStackVarSize {
return "too large for stack"
}
- if n.Op == ir.OCALLPART && partialCallType(n).Size() >= maxImplicitStackVarSize {
+ if n.Op() == ir.OCALLPART && partialCallType(n).Size() >= maxImplicitStackVarSize {
return "too large for stack"
}
- if n.Op == ir.OMAKESLICE {
- r := n.Right
+ if n.Op() == ir.OMAKESLICE {
+ r := n.Right()
if r == nil {
- r = n.Left
+ r = n.Left()
}
if !smallintconst(r) {
return "non-constant size"
}
- if t := n.Type; t.Elem().Width != 0 && r.Int64Val() >= maxImplicitStackVarSize/t.Elem().Width {
+ if t := n.Type(); t.Elem().Width != 0 && r.Int64Val() >= maxImplicitStackVarSize/t.Elem().Width {
return "too large for stack"
}
}
@@ -1782,7 +1782,7 @@ func heapAllocReason(n *ir.Node) string {
// Storage is allocated as necessary to allow the address
// to be taken.
func addrescapes(n *ir.Node) {
- switch n.Op {
+ switch n.Op() {
default:
// Unexpected Op, probably due to a previous type error. Ignore.
@@ -1796,13 +1796,13 @@ func addrescapes(n *ir.Node) {
// if this is a tmpname (PAUTO), it was tagged by tmpname as not escaping.
// on PPARAM it means something different.
- if n.Class() == ir.PAUTO && n.Esc == EscNever {
+ if n.Class() == ir.PAUTO && n.Esc() == EscNever {
break
}
// If a closure reference escapes, mark the outer variable as escaping.
- if n.Name.IsClosureVar() {
- addrescapes(n.Name.Defn)
+ if n.Name().IsClosureVar() {
+ addrescapes(n.Name().Defn)
break
}
@@ -1823,13 +1823,13 @@ func addrescapes(n *ir.Node) {
// then we're analyzing the inner closure but we need to move x to the
// heap in f, not in the inner closure. Flip over to f before calling moveToHeap.
oldfn := Curfn
- Curfn = n.Name.Curfn
- if Curfn.Op == ir.OCLOSURE {
- Curfn = Curfn.Func.Decl
+ Curfn = n.Name().Curfn
+ if Curfn.Op() == ir.OCLOSURE {
+ Curfn = Curfn.Func().Decl
panic("can't happen")
}
ln := base.Pos
- base.Pos = Curfn.Pos
+ base.Pos = Curfn.Pos()
moveToHeap(n)
Curfn = oldfn
base.Pos = ln
@@ -1840,8 +1840,8 @@ func addrescapes(n *ir.Node) {
// escape--the pointer inside x does, but that
// is always a heap pointer anyway.
case ir.ODOT, ir.OINDEX, ir.OPAREN, ir.OCONVNOP:
- if !n.Left.Type.IsSlice() {
- addrescapes(n.Left)
+ if !n.Left().Type().IsSlice() {
+ addrescapes(n.Left())
}
}
}
@@ -1861,21 +1861,21 @@ func moveToHeap(n *ir.Node) {
// Allocate a local stack variable to hold the pointer to the heap copy.
// temp will add it to the function declaration list automatically.
- heapaddr := temp(types.NewPtr(n.Type))
- heapaddr.Sym = lookup("&" + n.Sym.Name)
- heapaddr.Orig.Sym = heapaddr.Sym
- heapaddr.Pos = n.Pos
+ heapaddr := temp(types.NewPtr(n.Type()))
+ heapaddr.SetSym(lookup("&" + n.Sym().Name))
+ heapaddr.Orig().SetSym(heapaddr.Sym())
+ heapaddr.SetPos(n.Pos())
// Unset AutoTemp to persist the &foo variable name through SSA to
// liveness analysis.
// TODO(mdempsky/drchase): Cleaner solution?
- heapaddr.Name.SetAutoTemp(false)
+ heapaddr.Name().SetAutoTemp(false)
// Parameters have a local stack copy used at function start/end
// in addition to the copy in the heap that may live longer than
// the function.
if n.Class() == ir.PPARAM || n.Class() == ir.PPARAMOUT {
- if n.Xoffset == types.BADWIDTH {
+ if n.Offset() == types.BADWIDTH {
base.Fatalf("addrescapes before param assignment")
}
@@ -1883,28 +1883,28 @@ func moveToHeap(n *ir.Node) {
// Preserve a copy so we can still write code referring to the original,
// and substitute that copy into the function declaration list
// so that analyses of the local (on-stack) variables use it.
- stackcopy := NewName(n.Sym)
- stackcopy.Type = n.Type
- stackcopy.Xoffset = n.Xoffset
+ stackcopy := NewName(n.Sym())
+ stackcopy.SetType(n.Type())
+ stackcopy.SetOffset(n.Offset())
stackcopy.SetClass(n.Class())
- stackcopy.Name.Param.Heapaddr = heapaddr
+ stackcopy.Name().Param.Heapaddr = heapaddr
if n.Class() == ir.PPARAMOUT {
// Make sure the pointer to the heap copy is kept live throughout the function.
// The function could panic at any point, and then a defer could recover.
// Thus, we need the pointer to the heap copy always available so the
// post-deferreturn code can copy the return value back to the stack.
// See issue 16095.
- heapaddr.Name.SetIsOutputParamHeapAddr(true)
+ heapaddr.Name().SetIsOutputParamHeapAddr(true)
}
- n.Name.Param.Stackcopy = stackcopy
+ n.Name().Param.Stackcopy = stackcopy
// Substitute the stackcopy into the function variable list so that
// liveness and other analyses use the underlying stack slot
// and not the now-pseudo-variable n.
found := false
- for i, d := range Curfn.Func.Dcl {
+ for i, d := range Curfn.Func().Dcl {
if d == n {
- Curfn.Func.Dcl[i] = stackcopy
+ Curfn.Func().Dcl[i] = stackcopy
found = true
break
}
@@ -1917,16 +1917,16 @@ func moveToHeap(n *ir.Node) {
if !found {
base.Fatalf("cannot find %v in local variable list", n)
}
- Curfn.Func.Dcl = append(Curfn.Func.Dcl, n)
+ Curfn.Func().Dcl = append(Curfn.Func().Dcl, n)
}
// Modify n in place so that uses of n now mean indirection of the heapaddr.
n.SetClass(ir.PAUTOHEAP)
- n.Xoffset = 0
- n.Name.Param.Heapaddr = heapaddr
- n.Esc = EscHeap
+ n.SetOffset(0)
+ n.Name().Param.Heapaddr = heapaddr
+ n.SetEsc(EscHeap)
if base.Flag.LowerM != 0 {
- base.WarnfAt(n.Pos, "moved to heap: %v", n)
+ base.WarnfAt(n.Pos(), "moved to heap: %v", n)
}
}
@@ -1947,7 +1947,7 @@ func (e *Escape) paramTag(fn *ir.Node, narg int, f *types.Field) string {
return fmt.Sprintf("arg#%d", narg)
}
- if fn.Nbody.Len() == 0 {
+ if fn.Body().Len() == 0 {
// Assume that uintptr arguments must be held live across the call.
// This is most important for syscall.Syscall.
// See golang.org/issue/13372.
@@ -1969,7 +1969,7 @@ func (e *Escape) paramTag(fn *ir.Node, narg int, f *types.Field) string {
// External functions are assumed unsafe, unless
// //go:noescape is given before the declaration.
- if fn.Func.Pragma&ir.Noescape != 0 {
+ if fn.Func().Pragma&ir.Noescape != 0 {
if base.Flag.LowerM != 0 && f.Sym != nil {
base.WarnfAt(f.Pos, "%v does not escape", name())
}
@@ -1983,7 +1983,7 @@ func (e *Escape) paramTag(fn *ir.Node, narg int, f *types.Field) string {
return esc.Encode()
}
- if fn.Func.Pragma&ir.UintptrEscapes != 0 {
+ if fn.Func().Pragma&ir.UintptrEscapes != 0 {
if f.Type.IsUintptr() {
if base.Flag.LowerM != 0 {
base.WarnfAt(f.Pos, "marking %v as escaping uintptr", name())
@@ -2028,7 +2028,7 @@ func (e *Escape) paramTag(fn *ir.Node, narg int, f *types.Field) string {
}
for i := 0; i < numEscResults; i++ {
if x := esc.Result(i); x >= 0 {
- res := fn.Type.Results().Field(i).Sym
+ res := fn.Type().Results().Field(i).Sym
base.WarnfAt(f.Pos, "leaking param: %v to result %v level=%d", name(), res, x)
}
}