Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
57 changes: 49 additions & 8 deletions core/opcodeCompiler/compiler/MIRInterpreter.go
Original file line number Diff line number Diff line change
Expand Up @@ -2801,18 +2801,39 @@ func (it *MIRInterpreter) EnsureMemorySize(size uint64) {

// readMem returns a defensive copy of memory[off : off+sz].
//
// The requested range is clamped to the currently allocated memory:
// an off+sz that wraps around uint64, or extends past len(it.memory),
// is truncated, and a fully out-of-range request yields nil.
// NOTE(review): unlike the historical implementation (which grew memory via
// ensureMemSize before slicing), this no longer guarantees len(result) == sz;
// callers relying on EVM zero-extension semantics must grow memory first —
// confirm call sites.
func (it *MIRInterpreter) readMem(off, sz *uint256.Int) []byte {
	o := off.Uint64()
	sReq := sz.Uint64()
	memLen := uint64(len(it.memory))
	// Compute the exclusive high index safely: o+sReq may wrap around uint64.
	hi := o + sReq
	if hi < o || hi > memLen {
		hi = memLen
	}
	if o > hi {
		// Offset itself is beyond allocated memory: nothing to read.
		return nil
	}
	return append([]byte(nil), it.memory[o:hi]...)
}

// readMemView returns a view (subslice) of the internal memory without
// allocating. The returned slice is only valid until the next memory growth.
//
// The requested range is clamped to the currently allocated memory: an off+sz
// that wraps around uint64, or extends past len(it.memory), is truncated, and
// a fully out-of-range request yields nil. The view may therefore be shorter
// than sz; callers needing exactly sz bytes must grow memory beforehand.
func (it *MIRInterpreter) readMemView(off, sz *uint256.Int) []byte {
	o := off.Uint64()
	sReq := sz.Uint64()
	memLen := uint64(len(it.memory))
	// o+sReq may wrap around uint64; clamp the high bound into [0, memLen].
	hi := o + sReq
	if hi < o || hi > memLen {
		hi = memLen
	}
	if o > hi {
		return nil
	}
	return it.memory[o:hi]
}

func (it *MIRInterpreter) readMem32(off *uint256.Int) []byte {
Expand Down Expand Up @@ -2850,8 +2871,28 @@ func (it *MIRInterpreter) memCopy(dest, src, length *uint256.Int) {
// readMemCopy allocates a new buffer of size sz and copies from memory at off
func (it *MIRInterpreter) readMemCopy(off, sz *uint256.Int) []byte {
o := off.Uint64()
s := sz.Uint64()
it.ensureMemSize(o + s)
sReq := sz.Uint64()
// Clamp copy length to available memory to avoid oversize allocations/slicing
memLen := uint64(len(it.memory))
var s uint64
if o >= memLen {
s = 0
} else {
rem := memLen - o
if sReq < rem {
s = sReq
} else {
s = rem
}
}
// Hard-cap to a reasonable bound to avoid pathological allocations
const maxCopy = 64 * 1024 * 1024 // 64 MiB
if s > maxCopy {
s = maxCopy
}
if s == 0 {
return nil
}
out := make([]byte, s)
copy(out, it.memory[o:o+s])
return out
Expand Down
158 changes: 142 additions & 16 deletions core/opcodeCompiler/compiler/opcodeParser.go
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@ import (
"os"

"github.com/ethereum/go-ethereum/common"
"github.com/holiman/uint256"
)

// debugDumpBB logs a basic block and its MIR instructions for diagnostics.
Expand Down Expand Up @@ -94,6 +95,105 @@ func debugDumpMIR(m *MIR) {
parserDebugWarn(" MIR op", fields...)
}

// tryResolveUint64ConstPC attempts to resolve a Value into a constant uint64
// by recursively evaluating a small subset of MIR operations when all inputs
// are constants. It is used by the builder to conservatively identify
// PHI-derived JUMP/JUMPI targets. The evaluation is bounded by 'budget' to
// avoid pathological recursion.
//
// Returns (pc, true) only when the value provably folds to a uint64 constant;
// any overflow, unsupported op, or missing operand yields (0, false).
func tryResolveUint64ConstPC(v *Value, budget int) (uint64, bool) {
	if v == nil || budget <= 0 {
		return 0, false
	}
	if v.kind == Konst {
		if v.u != nil {
			u, overflow := v.u.Uint64WithOverflow()
			if overflow {
				// A PC that does not fit in uint64 cannot be a valid jump
				// target; truncating silently could register a bogus target.
				return 0, false
			}
			return u, true
		}
		// Fallback to the raw payload bytes.
		tmp := uint256.NewInt(0).SetBytes(v.payload)
		u, overflow := tmp.Uint64WithOverflow()
		if overflow {
			return 0, false
		}
		return u, true
	}
	if v.kind != Variable || v.def == nil {
		return 0, false
	}
	// evalOp resolves operand k of the defining MIR to a uint256 constant.
	evalOp := func(k int) (*uint256.Int, bool) {
		if k < 0 || k >= len(v.def.operands) || v.def.operands[k] == nil {
			return nil, false
		}
		if u64, ok := tryResolveUint64ConstPC(v.def.operands[k], budget-1); ok {
			return uint256.NewInt(0).SetUint64(u64), true
		}
		return nil, false
	}
	switch v.def.op {
	case MirPHI:
		// A PHI is a constant only if every alternative folds to the same
		// constant value.
		var have bool
		var out uint64
		for _, alt := range v.def.operands {
			if alt == nil {
				return 0, false
			}
			u, ok := tryResolveUint64ConstPC(alt, budget-1)
			if !ok {
				return 0, false
			}
			if !have {
				out = u
				have = true
			} else if out != u {
				return 0, false
			}
		}
		if have {
			return out, true
		}
		return 0, false
	case MirAND, MirOR, MirXOR, MirADD, MirSUB, MirSHL, MirSHR, MirSAR, MirBYTE:
		// Binary ops with constant operands.
		a, okA := evalOp(0)
		b, okB := evalOp(1)
		if !okA || !okB {
			return 0, false
		}
		tmp := uint256.NewInt(0)
		switch v.def.op {
		case MirAND:
			tmp.And(a, b)
		case MirOR:
			tmp.Or(a, b)
		case MirXOR:
			tmp.Xor(a, b)
		case MirADD:
			tmp.Add(a, b)
		case MirSUB:
			tmp.Sub(a, b)
		case MirSHL:
			// EVM: a shift amount >= 256 (or not fitting uint64) yields zero.
			if shift, overflow := b.Uint64WithOverflow(); overflow || shift >= 256 {
				tmp.Clear()
			} else {
				tmp.Lsh(a, uint(shift))
			}
		case MirSHR:
			if shift, overflow := b.Uint64WithOverflow(); overflow || shift >= 256 {
				tmp.Clear()
			} else {
				tmp.Rsh(a, uint(shift))
			}
		case MirSAR:
			// BUGFIX: SAR is an arithmetic shift; logical Rsh would be wrong
			// for values with the top bit set. Shift >= 256 fully sign-extends.
			if shift, overflow := b.Uint64WithOverflow(); overflow || shift >= 256 {
				tmp.SRsh(a, 256)
			} else {
				tmp.SRsh(a, uint(shift))
			}
		case MirBYTE:
			// EVM BYTE(n, x): extract the nth byte of x, big-endian
			// (index 0 is the most significant byte). Operand 0 is n,
			// operand 1 is x.
			n, overflow := a.Uint64WithOverflow()
			if overflow || n >= 32 {
				tmp.Clear()
			} else {
				// BUGFIX: the bytes must come from the value operand b,
				// not from the index operand a.
				buf := b.Bytes32()
				tmp.SetUint64(uint64(buf[n]))
			}
		}
		u, overflow := tmp.Uint64WithOverflow()
		if overflow {
			return 0, false
		}
		return u, true
	default:
		return 0, false
	}
}

// debugDumpBBFull logs a BB header and all MIRs with operand stack values.
func debugDumpBBFull(where string, bb *MIRBasicBlock) {
if bb == nil {
Expand Down Expand Up @@ -264,8 +364,6 @@ func (c *CFG) createBB(pc uint, parent *MIRBasicBlock) *MIRBasicBlock {
}

func (c *CFG) reachEndBB() {
	// Intentionally a no-op: nothing is done when the end of a basic block
	// is reached here.
	// NOTE(review): presumably kept as an extension point for end-of-block
	// validation (e.g. checking backward-only children) — confirm before removing.
}

// GenerateMIRCFG generates a MIR Control Flow Graph for the given bytecode
Expand Down Expand Up @@ -297,6 +395,8 @@ func GenerateMIRCFG(hash common.Hash, code []byte) (*CFG, error) {
}
processedUnique := 0

// Guard map to limit repeated rebuild-triggered enqueues for the same block
rebuildCounts := make(map[*MIRBasicBlock]int)
for unprcessedBBs.Size() != 0 {
if processedUnique >= maxBasicBlocks {
parserDebugWarn("MIR CFG build budget reached", "blocks", processedUnique)
Expand Down Expand Up @@ -375,16 +475,24 @@ func GenerateMIRCFG(hash common.Hash, code []byte) (*CFG, error) {
// If exit changed, propagate to children and enqueue them
newExit := curBB.ExitStack()
if !stacksEqual(prevExit, newExit) {
for _, ch := range curBB.Children() {
if ch == nil {
continue
}
prevIncoming := prevIncomingByChild[ch]
if !stacksEqual(prevIncoming, newExit) {
ch.AddIncomingStack(curBB, newExit)
if !ch.queued {
ch.queued = true
unprcessedBBs.Push(ch)
rebuildCounts[curBB]++
if rebuildCounts[curBB] > 16 {
// Suppress further enqueues to break potential oscillation; runtime backfill will handle unresolved edges.
parserDebugWarn("MIR CFG: suppressing child enqueue due to repeated exit oscillation", "bb", curBB.blockNum, "firstPC", curBB.firstPC, "count", rebuildCounts[curBB])
} else {
for _, ch := range curBB.Children() {
if ch == nil {
continue
}
prevIncoming := prevIncomingByChild[ch]
if !stacksEqual(prevIncoming, newExit) {
ch.AddIncomingStack(curBB, newExit)
// update snapshot to avoid immediate re-enqueue due to stale prev
prevIncomingByChild[ch] = newExit
if !ch.queued {
ch.queued = true
unprcessedBBs.Push(ch)
}
}
}
}
Expand Down Expand Up @@ -1097,7 +1205,13 @@ func (c *CFG) buildBasicBlock(curBB *MIRBasicBlock, valueStack *ValueStack, memo
} else if ov.kind == Variable && ov.def != nil && ov.def.op == MirPHI {
visitPhi(ov.def)
} else {
unknown = true
// Try a conservative constant evaluation of this operand
if tpc, ok := tryResolveUint64ConstPC(ov, 16); ok {
parserDebugWarn("==buildBasicBlock== phi.target.eval", "pc", tpc)
targetSet[tpc] = true
} else {
unknown = true
}
}
}
}
Expand Down Expand Up @@ -1202,8 +1316,11 @@ func (c *CFG) buildBasicBlock(curBB *MIRBasicBlock, valueStack *ValueStack, memo
// Ensure the linear fallthrough block (i+1) is created and queued for processing,
// so its pc is mapped even if no edge comes from this JUMP (useful for future targets).
if _, ok := c.pcToBlock[uint(i+1)]; !ok {
fall := c.createBB(uint(i+1), nil)
fall := c.createBB(uint(i+1), curBB)
fall.SetInitDepthMax(depth)
// Seed modeling so building this block later doesn't underflow on DUP/SWAP
fall.SetParents([]*MIRBasicBlock{curBB})
fall.AddIncomingStack(curBB, curBB.ExitStack())
if !fall.queued {
fall.queued = true
parserDebugWarn("==buildBasicBlock== MIR JUMP fallthrough BB queued", "curbb", curBB.blockNum, "curBB.firstPC", curBB.firstPC,
Expand All @@ -1213,6 +1330,9 @@ func (c *CFG) buildBasicBlock(curBB *MIRBasicBlock, valueStack *ValueStack, memo
} else {
if fall, ok2 := c.pcToBlock[uint(i+1)]; ok2 {
fall.SetInitDepthMax(depth)
// Likewise, seed parent/incoming stack to avoid orphan modeling
fall.SetParents([]*MIRBasicBlock{curBB})
fall.AddIncomingStack(curBB, curBB.ExitStack())
if !fall.queued {
fall.queued = true
parserDebugWarn("==buildBasicBlock== MIR JUMP fallthrough BB queued", "curbb", curBB.blockNum, "curBB.firstPC", curBB.firstPC,
Expand Down Expand Up @@ -1269,8 +1389,14 @@ func (c *CFG) buildBasicBlock(curBB *MIRBasicBlock, valueStack *ValueStack, memo
parserDebugWarn("==buildBasicBlock== MIR JUMPI target is PHI", "bb", curBB.blockNum, "pc", i, "targetpc", tpc)
targetSet[tpc] = true
} else {
unknown = true
break
// Attempt a small constant evaluation; if fails, mark unknown
if tpc, ok := tryResolveUint64ConstPC(ov, 16); ok {
parserDebugWarn("==buildBasicBlock== MIR JUMPI target eval", "bb", curBB.blockNum, "pc", i, "targetpc", tpc)
targetSet[tpc] = true
} else {
unknown = true
break
}
}
}
if unknown || len(targetSet) == 0 {
Expand Down
Loading