Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 3 additions & 2 deletions configs/example/idealkmhv3.py
Original file line number Diff line number Diff line change
Expand Up @@ -78,8 +78,9 @@ def setKmhV3IdealParams(args, system):

# branch predictor
if args.bp_type == 'DecoupledBPUWithBTB':
cpu.branchPred.ftq_size = 256
cpu.branchPred.fsq_size = 256
cpu.branchPred.ftq_size = 64
cpu.branchPred.fsq_size = 64
cpu.branchPred.enable2Fetch = True

# l1 cache per core
if args.caches:
Expand Down
5 changes: 3 additions & 2 deletions configs/example/kmhv3.py
Original file line number Diff line number Diff line change
Expand Up @@ -92,8 +92,8 @@ def setKmhV3Params(args, system):

# branch predictor
if args.bp_type == 'DecoupledBPUWithBTB':
cpu.branchPred.ftq_size = 256
cpu.branchPred.fsq_size = 256
cpu.branchPred.ftq_size = 64
cpu.branchPred.fsq_size = 64

cpu.branchPred.mbtb.resolvedUpdate = True
cpu.branchPred.tage.resolvedUpdate = True
Expand All @@ -107,6 +107,7 @@ def setKmhV3Params(args, system):
cpu.branchPred.ittage.enabled = True
cpu.branchPred.mgsc.enabled = False
cpu.branchPred.ras.enabled = True
cpu.branchPred.enable2Fetch = True

# l1 cache per core
if args.caches:
Expand Down
48 changes: 36 additions & 12 deletions src/cpu/o3/fetch.cc
Original file line number Diff line number Diff line change
Expand Up @@ -813,13 +813,34 @@ Fetch::lookupAndUpdateNextPC(const DynInstPtr &inst, PCStateBase &next_pc)
run_out = fall_thru >= stream.predEndPC;
}

bool do_2fetch = false;

// Track how many dynamic instructions were fetched for this (legacy) FTQ/FSQ entry.
ftqEntryFetchedInsts[tid]++;
if (run_out) {
if (predict_taken && dbpbtb->is2FetchEnabled() && dbpbtb->ftqHasNext()) {
const Addr target_pc = stream.predBranchInfo.target;
const auto &next_stream = dbpbtb->ftqNext();
const Addr span = next_stream.predEndPC - stream.startPC;
const unsigned max_bytes = dbpbtb->getMaxFetchBytesPerCycle();
const bool target_in_buffer =
target_pc >= fetchBuffer[tid].startPC && target_pc + 4 <= fetchBuffer[tid].startPC + fetchBufferSize;

if (target_pc == next_stream.startPC && span <= max_bytes && target_in_buffer) {
do_2fetch = true;
DPRINTF(DecoupleBP,
"2Fetch: extend in-cycle to next FSQ entry (cur [%#lx, %#lx), next [%#lx, %#lx), span=%lu, "
"max=%u)\n",
stream.startPC, stream.predEndPC, next_stream.startPC, next_stream.predEndPC, span, max_bytes);
}
}
Comment on lines +821 to +836
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

Limit 2-fetch extension to once per cycle.

At Line 821, do_2fetch can be re-enabled on each taken boundary, and Line 2025 keeps iterating in the same cycle. This can chain beyond two FSQ entries in one cycle, which breaks the intended “2-fetch” cap and can exceed the per-cycle byte budget semantics.

💡 Proposed fix (add a per-cycle one-shot guard)
-bool Fetch::lookupAndUpdateNextPC(const DynInstPtr &inst, PCStateBase &next_pc)
+bool Fetch::lookupAndUpdateNextPC(const DynInstPtr &inst, PCStateBase &next_pc,
+                                  bool &used2FetchExtension)

-        if (predict_taken && dbpbtb->is2FetchEnabled() && dbpbtb->ftqHasNext()) {
+        if (!used2FetchExtension &&
+            predict_taken && dbpbtb->is2FetchEnabled() && dbpbtb->ftqHasNext()) {
             ...
             if (target_pc == next_stream.startPC && span <= max_bytes && target_in_buffer) {
                 do_2fetch = true;
+                used2FetchExtension = true;
                 ...
             }
         }
-bool Fetch::processSingleInstruction(ThreadID tid, PCStateBase &pc,
-                                     StaticInstPtr &curMacroop)
+bool Fetch::processSingleInstruction(ThreadID tid, PCStateBase &pc,
+                                     StaticInstPtr &curMacroop,
+                                     bool &used2FetchExtension)
{
    ...
-    stopFetchThisCycle = lookupAndUpdateNextPC(instruction, *next_pc);
+    stopFetchThisCycle =
+        lookupAndUpdateNextPC(instruction, *next_pc, used2FetchExtension);
 void Fetch::performInstructionFetch(ThreadID tid)
 {
     ...
     bool stopFetchThisCycle = false;
+    bool used2FetchExtension = false;
     ...
-            stopFetchThisCycle = processSingleInstruction(tid, pc_state, curMacroop);
+            stopFetchThisCycle =
+                processSingleInstruction(tid, pc_state, curMacroop,
+                                         used2FetchExtension);

Also applies to: 2004-2025

🤖 Prompt for AI Agents
Verify each finding against the current code and only fix it if needed.

In `@src/cpu/o3/fetch.cc` around lines 821-836: the 2-fetch extension can be
re-enabled multiple times in a single cycle; add a per-cycle one-shot guard so
do_2fetch is only allowed once per cycle: introduce a boolean flag (e.g.,
twoFetchTakenThisCycle) that is cleared at the start of the fetch cycle and
checked before setting do_2fetch in the block that currently tests predict_taken
&& dbpbtb->is2FetchEnabled() && dbpbtb->ftqHasNext(), then set the flag when you
set do_2fetch = true; apply the same guard update to the other symmetric 2-fetch
site around the loop (the block referenced at lines ~2004-2025) so the extension
cannot chain beyond one extra FTQ entry per cycle and still obey the per-cycle
byte budget.


dbpbtb->consumeFetchTarget(ftqEntryFetchedInsts[tid]);
ftqEntryFetchedInsts[tid] = 0;
fetchBuffer[tid].valid = false;
DPRINTF(DecoupleBP, "Used up fetch targets.\n");
if (!do_2fetch) {
fetchBuffer[tid].valid = false;
DPRINTF(DecoupleBP, "Used up fetch targets.\n");
}
}

inst->setLoopIteration(currentLoopIter);
Expand All @@ -845,7 +866,7 @@ Fetch::lookupAndUpdateNextPC(const DynInstPtr &inst, PCStateBase &next_pc)
++fetchStats.predictedBranches;
}

return predict_taken;
return predict_taken && !do_2fetch;
}

bool
Expand Down Expand Up @@ -1857,6 +1878,8 @@ Fetch::checkMemoryNeeds(ThreadID tid, const PCStateBase &this_pc,
fetch_pc + 4 > fetchBuffer[tid].startPC + fetchBufferSize) {
DPRINTF(Fetch, "[tid:%i] PC %#x outside fetch buffer range [%#x, %#x), stalling on ICache\n",
tid, fetch_pc, fetchBuffer[tid].startPC, fetchBuffer[tid].startPC + fetchBufferSize);
// Force issuing a new I-cache request.
fetchBuffer[tid].valid = false;
return StallReason::IcacheStall;
}

Expand All @@ -1879,7 +1902,7 @@ Fetch::processSingleInstruction(ThreadID tid, PCStateBase &pc,
StaticInstPtr &curMacroop)
{
auto *dec_ptr = decoder[tid];
bool predictedBranch = false;
bool stopFetchThisCycle = false;
bool newMacroop = false;

// Create a copy of the current PC state to calculate the next PC.
Expand Down Expand Up @@ -1936,16 +1959,17 @@ Fetch::processSingleInstruction(ThreadID tid, PCStateBase &pc,
set(next_pc, pc);

// Handle branch prediction and update next_pc for both modes
predictedBranch = lookupAndUpdateNextPC(instruction, *next_pc);
stopFetchThisCycle = lookupAndUpdateNextPC(instruction, *next_pc);
const bool predictedTaken = instruction->readPredTaken();

if (predictedBranch) {
if (predictedTaken) {
DPRINTF(Fetch, "[tid:%i] Branch detected with PC = %s, target = %s\n",
instruction->threadNumber, pc, *next_pc);
}

if (isTraceMode()) {
assert(traceFetch);
traceFetch->postBranchPredict(tid, instruction, traceForThisInst, pc, *next_pc, predictedBranch);
traceFetch->postBranchPredict(tid, instruction, traceForThisInst, pc, *next_pc, predictedTaken);
}

// A new macro-op also begins if the PC changes discontinuously.
Expand All @@ -1959,7 +1983,7 @@ Fetch::processSingleInstruction(ThreadID tid, PCStateBase &pc,
// Update the main PC state for the next instruction.
set(pc, *next_pc);

return predictedBranch;
return stopFetchThisCycle;
}

void
Expand All @@ -1977,15 +2001,15 @@ Fetch::performInstructionFetch(ThreadID tid)
StaticInstPtr &curMacroop = macroop[tid];

// Control flags for main fetch loop
bool predictedBranch = false;
bool stopFetchThisCycle = false;

DPRINTF(Fetch, "[tid:%i] Adding instructions to queue to decode.\n", tid);

// Main instruction fetch loop - process until fetch width or other limits
// For decoupled frontend (including trace mode), check FTQ availability
StallReason stall = StallReason::NoStall;
while (numInst < fetchWidth && fetchQueue[tid].size() < fetchQueueSize &&
!predictedBranch && !ftqEmpty() && !waitForVsetvl) {
!stopFetchThisCycle && !ftqEmpty() && !waitForVsetvl) {

// Check memory needs and supply bytes to decoder if required
stall = checkMemoryNeeds(tid, pc_state, curMacroop);
Expand All @@ -1998,7 +2022,7 @@ Fetch::performInstructionFetch(ThreadID tid)
// into multiple micro-ops.
do {
// Process a single instruction, from decoding to PC update.
predictedBranch = processSingleInstruction(tid, pc_state, curMacroop);
stopFetchThisCycle = processSingleInstruction(tid, pc_state, curMacroop);

} while (curMacroop &&
numInst < fetchWidth &&
Expand All @@ -2017,7 +2041,7 @@ Fetch::performInstructionFetch(ThreadID tid)
}

// Log why fetch stopped
if (predictedBranch) {
if (stopFetchThisCycle) {
DPRINTF(Fetch, "[tid:%i] Done fetching, predicted branch instruction encountered.\n", tid);
} else if (numInst >= fetchWidth) {
DPRINTF(Fetch, "[tid:%i] Done fetching, reached fetch bandwidth for this cycle.\n", tid);
Expand Down
5 changes: 3 additions & 2 deletions src/cpu/o3/fetch.hh
Original file line number Diff line number Diff line change
Expand Up @@ -354,7 +354,8 @@ class Fetch
* Looks up the branch predictor, gets a prediction, and updates the PC.
* @param inst The dynamic instruction object.
* @param next_pc The PC state to update with the prediction.
* @return true if a branch was predicted taken.
* @return true if fetch should stop this cycle due to a predicted-taken
* branch (2Fetch may override and return false).
*/
bool lookupAndUpdateNextPC(const DynInstPtr &inst, PCStateBase &next_pc);

Expand Down Expand Up @@ -564,7 +565,7 @@ class Fetch
* @param tid The thread ID of the instruction.
* @param pc The current program counter state (will be updated).
* @param curMacroop The current macro-op being processed (if any).
* @return true if a branch was predicted.
* @return true if fetch should stop this cycle.
*/
bool
processSingleInstruction(ThreadID tid, PCStateBase &pc,
Expand Down
3 changes: 3 additions & 0 deletions src/cpu/pred/BranchPredictor.py
Original file line number Diff line number Diff line change
Expand Up @@ -1178,3 +1178,6 @@ class DecoupledBPUWithBTB(BranchPredictor):

bpDBSwitches = VectorParam.String([], "Enable which traces in the form of database")
resolveBlockThreshold = Param.Unsigned(8, "Consecutive resolve dequeue failures before blocking prediction once")

enable2Fetch = Param.Bool(False, "Enable 2fetch feature")
maxFetchBytesPerCycle = Param.Unsigned(64, "Maximum fetch bytes per cycle for 2fetch")
71 changes: 40 additions & 31 deletions src/cpu/pred/btb/decoupled_bpred.cc
Original file line number Diff line number Diff line change
Expand Up @@ -48,6 +48,8 @@ DecoupledBPUWithBTB::DecoupledBPUWithBTB(const DecoupledBPUWithBTBParams &p)
numStages(p.numStages),
historyManager(16), // TODO: fix this
resolveBlockThreshold(p.resolveBlockThreshold),
enable2Fetch(p.enable2Fetch),
maxFetchBytesPerCycle(p.maxFetchBytesPerCycle),
dbpBtbStats(this, p.numStages, p.fsq_size, maxInstsNum)
{
if (bpDBSwitches.size() > 0) {
Expand Down Expand Up @@ -132,44 +134,51 @@ DecoupledBPUWithBTB::tick()
return;
}

// 1. Request new prediction if FSQ not full and we are idle
if (bpuState == BpuState::IDLE && !targetQueueFull()) {
if (blockPredictionPending) {
DPRINTF(Override, "Prediction blocked to prioritize resolve update\n");
dbpBtbStats.predictionBlockedForUpdate++;
blockPredictionPending = false;
} else {
requestNewPrediction();
bpuState = BpuState::PREDICTOR_DONE;
int predsRemainsToBeMade = enableTwoTaken ? 2 : 1;
unsigned tempNumOverrideBubbles = 0;
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟡 Minor

Remove unused variable tempNumOverrideBubbles.

This variable is declared but never used anywhere in the function. It appears to be leftover code from development.

🧹 Proposed fix
     int predsRemainsToBeMade = enableTwoTaken ? 2 : 1;
-    unsigned tempNumOverrideBubbles = 0;
 
     while (predsRemainsToBeMade > 0) {
🤖 Prompt for AI Agents
In `@src/cpu/pred/btb/decoupled_bpred.cc` at line 140: the local variable
tempNumOverrideBubbles is declared but never used; remove the unused declaration
of tempNumOverrideBubbles from the scope where it's defined (the function
containing the line "unsigned tempNumOverrideBubbles = 0;") to clean up dead
code and avoid compiler warnings, ensuring no other references to
tempNumOverrideBubbles remain in the function.


while (predsRemainsToBeMade > 0) {
// 1. Request new prediction if FSQ not full and we are idle
if (bpuState == BpuState::IDLE && !targetQueueFull()) {
if (blockPredictionPending) {
DPRINTF(Override, "Prediction blocked to prioritize resolve update\n");
dbpBtbStats.predictionBlockedForUpdate++;
blockPredictionPending = false;
} else {
requestNewPrediction();
bpuState = BpuState::PREDICTOR_DONE;
}
}
}

// 2. Handle pending prediction if available
if (bpuState == BpuState::PREDICTOR_DONE) {
DPRINTF(Override, "Generating final prediction for PC %#lx\n", s0PC);
numOverrideBubbles = generateFinalPredAndCreateBubbles();
bpuState = BpuState::PREDICTION_OUTSTANDING;
// 2. Handle pending prediction if available
if (bpuState == BpuState::PREDICTOR_DONE) {
DPRINTF(Override, "Generating final prediction for PC %#lx\n", s0PC);
numOverrideBubbles = generateFinalPredAndCreateBubbles();
bpuState = BpuState::PREDICTION_OUTSTANDING;

// Clear each predictor's output
for (int i = 0; i < numStages; i++) {
predsOfEachStage[i].btbEntries.clear();
// Clear each predictor's output
for (int i = 0; i < numStages; i++) {
predsOfEachStage[i].btbEntries.clear();
}
}
}

if (bpuState == BpuState::PREDICTION_OUTSTANDING && numOverrideBubbles > 0) {
tage->dryRunCycle(s0PC);
}
if (bpuState == BpuState::PREDICTION_OUTSTANDING && numOverrideBubbles > 0) {
tage->dryRunCycle(s0PC);
}

// check if:
// 1. FSQ has space
// 2. there's no bubble
// 3. PREDICTION_OUTSTANDING
if (validateFSQEnqueue()) {
// Create new FSQ entry with the current prediction
processNewPrediction();
// check if:
// 1. FSQ has space
// 2. there's no bubble
// 3. PREDICTION_OUTSTANDING
if (validateFSQEnqueue()) {
// Create new FSQ entry with the current prediction
processNewPrediction();

DPRINTF(Override, "FSQ entry enqueued, prediction state reset\n");
bpuState = BpuState::IDLE;
DPRINTF(Override, "FSQ entry enqueued, prediction state reset\n");
bpuState = BpuState::IDLE;
}

predsRemainsToBeMade--;
Comment on lines +137 to +181
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟠 Major

Block prediction should skip all iterations when resolve backpressure is set.
In two‑taken mode, blockPredictionPending is cleared in the first iteration, so the second iteration can still issue a prediction in the same tick. That undermines the intended “prioritize resolve update” block.

Proposed fix (exit the loop when prediction is blocked)
         if (bpuState == BpuState::IDLE && !targetQueueFull()) {
             if (blockPredictionPending) {
                 DPRINTF(Override, "Prediction blocked to prioritize resolve update\n");
                 dbpBtbStats.predictionBlockedForUpdate++;
                 blockPredictionPending = false;
+                break; // block predictions for the rest of this tick
             } else {
                 requestNewPrediction();
                 bpuState = BpuState::PREDICTOR_DONE;
             }
         }
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
int predsRemainsToBeMade = enableTwoTaken ? 2 : 1;
unsigned tempNumOverrideBubbles = 0;
while (predsRemainsToBeMade > 0) {
// 1. Request new prediction if FSQ not full and we are idle
if (bpuState == BpuState::IDLE && !targetQueueFull()) {
if (blockPredictionPending) {
DPRINTF(Override, "Prediction blocked to prioritize resolve update\n");
dbpBtbStats.predictionBlockedForUpdate++;
blockPredictionPending = false;
} else {
requestNewPrediction();
bpuState = BpuState::PREDICTOR_DONE;
}
}
}
// 2. Handle pending prediction if available
if (bpuState == BpuState::PREDICTOR_DONE) {
DPRINTF(Override, "Generating final prediction for PC %#lx\n", s0PC);
numOverrideBubbles = generateFinalPredAndCreateBubbles();
bpuState = BpuState::PREDICTION_OUTSTANDING;
// 2. Handle pending prediction if available
if (bpuState == BpuState::PREDICTOR_DONE) {
DPRINTF(Override, "Generating final prediction for PC %#lx\n", s0PC);
numOverrideBubbles = generateFinalPredAndCreateBubbles();
bpuState = BpuState::PREDICTION_OUTSTANDING;
// Clear each predictor's output
for (int i = 0; i < numStages; i++) {
predsOfEachStage[i].btbEntries.clear();
// Clear each predictor's output
for (int i = 0; i < numStages; i++) {
predsOfEachStage[i].btbEntries.clear();
}
}
}
if (bpuState == BpuState::PREDICTION_OUTSTANDING && numOverrideBubbles > 0) {
tage->dryRunCycle(s0PC);
}
if (bpuState == BpuState::PREDICTION_OUTSTANDING && numOverrideBubbles > 0) {
tage->dryRunCycle(s0PC);
}
// check if:
// 1. FSQ has space
// 2. there's no bubble
// 3. PREDICTION_OUTSTANDING
if (validateFSQEnqueue()) {
// Create new FSQ entry with the current prediction
processNewPrediction();
// check if:
// 1. FSQ has space
// 2. there's no bubble
// 3. PREDICTION_OUTSTANDING
if (validateFSQEnqueue()) {
// Create new FSQ entry with the current prediction
processNewPrediction();
DPRINTF(Override, "FSQ entry enqueued, prediction state reset\n");
bpuState = BpuState::IDLE;
DPRINTF(Override, "FSQ entry enqueued, prediction state reset\n");
bpuState = BpuState::IDLE;
}
predsRemainsToBeMade--;
int predsRemainsToBeMade = enableTwoTaken ? 2 : 1;
unsigned tempNumOverrideBubbles = 0;
while (predsRemainsToBeMade > 0) {
// 1. Request new prediction if FSQ not full and we are idle
if (bpuState == BpuState::IDLE && !targetQueueFull()) {
if (blockPredictionPending) {
DPRINTF(Override, "Prediction blocked to prioritize resolve update\n");
dbpBtbStats.predictionBlockedForUpdate++;
blockPredictionPending = false;
break; // block predictions for the rest of this tick
} else {
requestNewPrediction();
bpuState = BpuState::PREDICTOR_DONE;
}
}
// 2. Handle pending prediction if available
if (bpuState == BpuState::PREDICTOR_DONE) {
DPRINTF(Override, "Generating final prediction for PC %#lx\n", s0PC);
numOverrideBubbles = generateFinalPredAndCreateBubbles();
bpuState = BpuState::PREDICTION_OUTSTANDING;
// Clear each predictor's output
for (int i = 0; i < numStages; i++) {
predsOfEachStage[i].btbEntries.clear();
}
}
if (bpuState == BpuState::PREDICTION_OUTSTANDING && numOverrideBubbles > 0) {
tage->dryRunCycle(s0PC);
}
// check if:
// 1. FSQ has space
// 2. there's no bubble
// 3. PREDICTION_OUTSTANDING
if (validateFSQEnqueue()) {
// Create new FSQ entry with the current prediction
processNewPrediction();
DPRINTF(Override, "FSQ entry enqueued, prediction state reset\n");
bpuState = BpuState::IDLE;
}
predsRemainsToBeMade--;
🧰 Tools
🪛 Cppcheck (2.19.0)

[error] 138-138: Shifting 64-bit value by 64 bits is undefined behaviour

(shiftTooManyBits)

🤖 Prompt for AI Agents
In `@src/cpu/pred/btb/decoupled_bpred.cc` around lines 135-179: when
blockPredictionPending is true, the loop clears it and continues, so a second
prediction can be issued in the same tick; change that behavior to exit the
prediction loop immediately to honor the "prioritize resolve update"
backpressure. In the while loop that uses predsRemainsToBeMade (and
enableTwoTaken), update the branch that currently does DPRINTF(Override) /
dbpBtbStats.predictionBlockedForUpdate++ / blockPredictionPending = false so
that after logging and incrementing the stat you break out of the loop (or set
predsRemainsToBeMade = 0) instead of clearing blockPredictionPending; this
ensures requestNewPrediction(), requestNewPrediction() / bpuState transitions,
generateFinalPredAndCreateBubbles(), validateFSQEnqueue(), and
processNewPrediction() cannot run for subsequent iterations when a block is
pending.

}

// Decrement override bubbles counter
Expand Down
18 changes: 18 additions & 0 deletions src/cpu/pred/btb/decoupled_bpred.hh
Original file line number Diff line number Diff line change
Expand Up @@ -143,8 +143,13 @@ class DecoupledBPUWithBTB : public BPredUnit
unsigned resolveDequeueFailCounter{0};
const unsigned resolveBlockThreshold;

const bool enable2Fetch;
const unsigned maxFetchBytesPerCycle;

unsigned numOverrideBubbles{0};

bool enableTwoTaken{true};

bool validateFSQEnqueue();

void processNewPrediction();
Expand Down Expand Up @@ -211,6 +216,13 @@ class DecoupledBPUWithBTB : public BPredUnit
return fetchTargetQueue[id - fetchTargetBaseId];
}

const FetchTarget&
getTarget(FetchTargetId id) const
{
assert(hasTarget(id));
return fetchTargetQueue[id - fetchTargetBaseId];
}

FetchTargetId
frontTargetId() const
{
Expand Down Expand Up @@ -453,6 +465,12 @@ class DecoupledBPUWithBTB : public BPredUnit
FetchTargetId ftqHeadId() const { assert(ftqHasHead()); return fetchHeadFtqId; }
const FetchTarget &ftqHead() { assert(ftqHasHead()); return getTarget(fetchHeadFtqId); }

bool ftqHasNext() const { return hasTarget(fetchHeadFtqId + 1); }
const FetchTarget &ftqNext() const { assert(ftqHasNext()); return getTarget(fetchHeadFtqId + 1); }

bool is2FetchEnabled() const { return enable2Fetch; }
unsigned getMaxFetchBytesPerCycle() const { return maxFetchBytesPerCycle; }

void dumpFsq(const char *when);

// Dummy overriding
Expand Down