Diffstat (limited to 'src/cpu/minor/fetch1.cc')
-rw-r--r--  src/cpu/minor/fetch1.cc  273
1 file changed, 189 insertions, 84 deletions
diff --git a/src/cpu/minor/fetch1.cc b/src/cpu/minor/fetch1.cc
index d19d7b042..f4f120534 100644
--- a/src/cpu/minor/fetch1.cc
+++ b/src/cpu/minor/fetch1.cc
@@ -57,7 +57,7 @@ Fetch1::Fetch1(const std::string &name_,
Latch<BranchData>::Output inp_,
Latch<ForwardLineData>::Input out_,
Latch<BranchData>::Output prediction_,
- Reservable &next_stage_input_buffer) :
+ std::vector<InputBuffer<ForwardLineData>> &next_stage_input_buffer) :
Named(name_),
cpu(cpu_),
inp(inp_),
@@ -68,11 +68,8 @@ Fetch1::Fetch1(const std::string &name_,
lineSnap(params.fetch1LineSnapWidth),
maxLineWidth(params.fetch1LineWidth),
fetchLimit(params.fetch1FetchLimit),
- state(FetchWaitingForPC),
- pc(0),
- streamSeqNum(InstId::firstStreamSeqNum),
- predictionSeqNum(InstId::firstPredictionSeqNum),
- blocked(false),
+ fetchInfo(params.numThreads),
+ threadPriority(0),
requests(name_ + ".requests", "lines", params.fetch1FetchLimit),
transfers(name_ + ".transfers", "lines", params.fetch1FetchLimit),
icacheState(IcacheRunning),
@@ -114,32 +111,67 @@ Fetch1::Fetch1(const std::string &name_,
}
}
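The scalar state removed above (pc, state, streamSeqNum, predictionSeqNum, blocked) moves into a per-thread Fetch1ThreadInfo record, declared in fetch1.hh rather than in this file. A minimal sketch of the fields this diff relies on, with plain stand-ins for the gem5 types:

    // Sketch only: the real struct in src/cpu/minor/fetch1.hh uses
    // TheISA::PCState and gem5's InstSeqNum rather than these stand-ins.
    #include <cstdint>

    enum FetchState { FetchHalted, FetchWaitingForPC, FetchRunning };

    struct Fetch1ThreadInfo
    {
        FetchState state = FetchWaitingForPC;
        uint64_t pc = 0;               // stands in for TheISA::PCState
        uint64_t streamSeqNum = 1;     // InstId::firstStreamSeqNum
        uint64_t predictionSeqNum = 1; // InstId::firstPredictionSeqNum
        bool blocked = false;          // no room in Fetch2's input buffer
        bool wakeupGuard = false;      // suppresses a stale suspend
    };

fetchInfo(params.numThreads) then provides one such record per hardware thread.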
+inline ThreadID
+Fetch1::getScheduledThread()
+{
+ /* Select thread via policy. */
+ std::vector<ThreadID> priority_list;
+
+ switch (cpu.threadPolicy) {
+ case Enums::SingleThreaded:
+ priority_list.push_back(0);
+ break;
+ case Enums::RoundRobin:
+ priority_list = cpu.roundRobinPriority(threadPriority);
+ break;
+ case Enums::Random:
+ priority_list = cpu.randomPriority();
+ break;
+ default:
+ panic("Unknown fetch policy");
+ }
+
+ for (auto tid : priority_list) {
+ if (cpu.getContext(tid)->status() == ThreadContext::Active &&
+ !fetchInfo[tid].blocked &&
+ fetchInfo[tid].state == FetchRunning) {
+ threadPriority = tid;
+ return tid;
+ }
+ }
+
+ return InvalidThreadID;
+}
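getScheduledThread walks a policy-ordered list of thread IDs and picks the first one that is active, unblocked, and in FetchRunning. A standalone sketch of the round-robin case; the rotate-from-last behaviour of cpu.roundRobinPriority() is an assumption here, since its real implementation lives on MinorCPU and is not part of this diff:

    #include <cstdint>
    #include <iostream>
    #include <vector>

    using ThreadID = int16_t;

    // Assumed behaviour: threads ordered starting after the last one
    // scheduled, e.g. last=1 with 4 threads -> [2, 3, 0, 1].
    std::vector<ThreadID> roundRobinPriority(ThreadID last, ThreadID nthreads)
    {
        std::vector<ThreadID> order;
        for (ThreadID i = 1; i <= nthreads; i++)
            order.push_back((last + i) % nthreads);
        return order;
    }

    int main()
    {
        ThreadID threadPriority = 1;  // thread fetched from last cycle
        std::vector<bool> runnable = {true, false, true, true};

        // First runnable thread in rotated order wins, as in
        // Fetch1::getScheduledThread.
        for (ThreadID tid : roundRobinPriority(threadPriority, 4)) {
            if (runnable[tid]) {
                threadPriority = tid;  // remembered for the next cycle
                break;
            }
        }
        std::cout << "fetch from thread " << threadPriority << "\n"; // 2
    }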
+
void
-Fetch1::fetchLine()
+Fetch1::fetchLine(ThreadID tid)
{
+ /* Reference the currently used thread state. */
+ Fetch1ThreadInfo &thread = fetchInfo[tid];
+
/* If line_offset != 0, a request is pushed for the remainder of the
* line. */
/* Use a lower, sizeof(MachInst) aligned address for the fetch */
- Addr aligned_pc = pc.instAddr() & ~((Addr) lineSnap - 1);
+ Addr aligned_pc = thread.pc.instAddr() & ~((Addr) lineSnap - 1);
unsigned int line_offset = aligned_pc % lineSnap;
unsigned int request_size = maxLineWidth - line_offset;
/* Fill in the line's id */
- InstId request_id(0 /* thread */,
- streamSeqNum, predictionSeqNum,
+ InstId request_id(tid,
+ thread.streamSeqNum, thread.predictionSeqNum,
lineSeqNum);
- FetchRequestPtr request = new FetchRequest(*this, request_id, pc);
+ FetchRequestPtr request = new FetchRequest(*this, request_id, thread.pc);
DPRINTF(Fetch, "Inserting fetch into the fetch queue "
"%s addr: 0x%x pc: %s line_offset: %d request_size: %d\n",
- request_id, aligned_pc, pc, line_offset, request_size);
+ request_id, aligned_pc, thread.pc, line_offset, request_size);
- request->request.setContext(cpu.threads[0]->getTC()->contextId());
+ request->request.setContext(cpu.threads[tid]->getTC()->contextId());
request->request.setVirt(0 /* asid */,
aligned_pc, request_size, Request::INST_FETCH, cpu.instMasterId(),
/* I've no idea why we need the PC, but give it */
- pc.instAddr());
+ thread.pc.instAddr());
DPRINTF(Fetch, "Submitting ITLB request\n");
numFetchesInITLB++;
@@ -165,12 +197,12 @@ Fetch1::fetchLine()
* reliable 'new' PC if the next line has a new stream sequence number. */
#if THE_ISA == ALPHA_ISA
/* Restore the low bits of the PC used as address space flags */
- Addr pc_low_bits = pc.instAddr() &
+ Addr pc_low_bits = thread.pc.instAddr() &
((Addr) (1 << sizeof(TheISA::MachInst)) - 1);
- pc.set(aligned_pc + request_size + pc_low_bits);
+ thread.pc.set(aligned_pc + request_size + pc_low_bits);
#else
- pc.set(aligned_pc + request_size);
+ thread.pc.set(aligned_pc + request_size);
#endif
}
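The address arithmetic above is unchanged apart from reading thread.pc instead of the old scalar pc. A worked example with assumed parameter values (fetch1LineSnapWidth and fetch1LineWidth both 64; the real values come from the MinorCPU params):

    #include <cstdint>
    #include <iostream>

    using Addr = uint64_t;

    int main()
    {
        const Addr lineSnap = 64;          // fetch1LineSnapWidth (assumed)
        const unsigned maxLineWidth = 64;  // fetch1LineWidth (assumed)

        Addr pc = 0x100c;  // thread.pc.instAddr()

        // Round the PC down to the previous lineSnap boundary.
        Addr aligned_pc = pc & ~(lineSnap - 1);              // 0x1000
        unsigned line_offset = aligned_pc % lineSnap;        // 0 by construction
        unsigned request_size = maxLineWidth - line_offset;  // 64

        std::cout << std::hex << "aligned_pc=0x" << aligned_pc << std::dec
                  << " offset=" << line_offset
                  << " size=" << request_size << "\n";
    }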
@@ -454,46 +486,58 @@ operator <<(std::ostream &os, Fetch1::FetchState state)
void
Fetch1::changeStream(const BranchData &branch)
{
+ Fetch1ThreadInfo &thread = fetchInfo[branch.threadId];
+
updateExpectedSeqNums(branch);
/* Start fetching again if we were stopped */
switch (branch.reason) {
case BranchData::SuspendThread:
- DPRINTF(Fetch, "Suspending fetch: %s\n", branch);
- state = FetchWaitingForPC;
+ {
+ if (thread.wakeupGuard) {
+ DPRINTF(Fetch, "Not suspending fetch due to guard: %s\n",
+ branch);
+ } else {
+ DPRINTF(Fetch, "Suspending fetch: %s\n", branch);
+ thread.state = FetchWaitingForPC;
+ }
+ }
break;
case BranchData::HaltFetch:
DPRINTF(Fetch, "Halting fetch\n");
- state = FetchHalted;
+ thread.state = FetchHalted;
break;
default:
DPRINTF(Fetch, "Changing stream on branch: %s\n", branch);
- state = FetchRunning;
+ thread.state = FetchRunning;
break;
}
- pc = branch.target;
+ thread.pc = branch.target;
}
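Condensed, changeStream is a small per-thread state machine keyed on the branch reason, with wakeupGuard vetoing a suspend that raced with a wakeup. A sketch:

    enum FetchState { FetchHalted, FetchWaitingForPC, FetchRunning };
    enum Reason { SuspendThread, HaltFetch, OtherStreamChange };

    // Mirrors the switch in Fetch1::changeStream.
    FetchState nextState(FetchState cur, Reason why, bool wakeupGuard)
    {
        switch (why) {
          case SuspendThread:
            // A guarded thread ignores the (stale) suspend request.
            return wakeupGuard ? cur : FetchWaitingForPC;
          case HaltFetch:
            return FetchHalted;
          default:
            return FetchRunning;
        }
    }

In every case the thread's PC is then retargeted to branch.target.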
void
Fetch1::updateExpectedSeqNums(const BranchData &branch)
{
+ Fetch1ThreadInfo &thread = fetchInfo[branch.threadId];
+
DPRINTF(Fetch, "Updating streamSeqNum from: %d to %d,"
" predictionSeqNum from: %d to %d\n",
- streamSeqNum, branch.newStreamSeqNum,
- predictionSeqNum, branch.newPredictionSeqNum);
+ thread.streamSeqNum, branch.newStreamSeqNum,
+ thread.predictionSeqNum, branch.newPredictionSeqNum);
/* Change the stream */
- streamSeqNum = branch.newStreamSeqNum;
+ thread.streamSeqNum = branch.newStreamSeqNum;
/* Update the prediction. Note that it's possible for this to
* actually set the prediction to an *older* value if new
* predictions have been discarded by execute */
- predictionSeqNum = branch.newPredictionSeqNum;
+ thread.predictionSeqNum = branch.newPredictionSeqNum;
}
void
Fetch1::processResponse(Fetch1::FetchRequestPtr response,
ForwardLineData &line)
{
+ Fetch1ThreadInfo &thread = fetchInfo[response->id.threadId];
PacketPtr packet = response->packet;
/* Pass the prefetch abort (if any) on to Fetch2 in a ForwardLineData
@@ -514,7 +558,7 @@ Fetch1::processResponse(Fetch1::FetchRequestPtr response,
* can't (currently) selectively remove this stream from the queues */
DPRINTF(Fetch, "Stopping line fetch because of fault: %s\n",
response->fault->name());
- state = Fetch1::FetchWaitingForPC;
+ thread.state = Fetch1::FetchWaitingForPC;
} else {
line.adoptPacketData(packet);
/* Null the response's packet to prevent the response from trying to
@@ -532,61 +576,86 @@ Fetch1::evaluate()
assert(line_out.isBubble());
- blocked = !nextStageReserve.canReserve();
+ for (ThreadID tid = 0; tid < cpu.numThreads; tid++)
+ fetchInfo[tid].blocked = !nextStageReserve[tid].canReserve();
- /* Are we changing stream? Look to the Execute branches first, then
- * to predicted changes of stream from Fetch2 */
- /* @todo, find better way to express ignoring branch predictions */
- if (execute_branch.isStreamChange() &&
- execute_branch.reason != BranchData::BranchPrediction)
- {
- if (state == FetchHalted) {
- if (execute_branch.reason == BranchData::WakeupFetch) {
- DPRINTF(Fetch, "Waking up fetch: %s\n", execute_branch);
+ /** Are both branches from later stages valid and for the same thread? */
+ if (execute_branch.threadId != InvalidThreadID &&
+ execute_branch.threadId == fetch2_branch.threadId) {
+
+ Fetch1ThreadInfo &thread = fetchInfo[execute_branch.threadId];
+
+ /* Are we changing stream? Look to the Execute branches first, then
+ * to predicted changes of stream from Fetch2 */
+ if (execute_branch.isStreamChange()) {
+ if (thread.state == FetchHalted) {
+ DPRINTF(Fetch, "Halted, ignoring branch: %s\n", execute_branch);
+ } else {
changeStream(execute_branch);
+ }
+
+ if (!fetch2_branch.isBubble()) {
+ DPRINTF(Fetch, "Ignoring simultaneous prediction: %s\n",
+ fetch2_branch);
+ }
+
+ /* The streamSeqNum tagging in request/response ->req should handle
+ * discarding those requests when we get to them. */
+ } else if (thread.state != FetchHalted && fetch2_branch.isStreamChange()) {
+ /* Handle branch predictions by changing the instruction source
+ * if we're still processing the same stream (as set by streamSeqNum)
+ * as the one of the prediction.
+ */
+ if (fetch2_branch.newStreamSeqNum != thread.streamSeqNum) {
+ DPRINTF(Fetch, "Not changing stream on prediction: %s,"
+ " streamSeqNum mismatch\n",
+ fetch2_branch);
} else {
- DPRINTF(Fetch, "Halted, ignoring branch: %s\n",
- execute_branch);
+ changeStream(fetch2_branch);
}
- } else {
- changeStream(execute_branch);
}
+ } else {
+ /* Fetch2 and Execute branches are for different threads */
+ if (execute_branch.threadId != InvalidThreadID &&
+ execute_branch.isStreamChange()) {
- if (!fetch2_branch.isBubble()) {
- DPRINTF(Fetch, "Ignoring simultaneous prediction: %s\n",
- fetch2_branch);
+ if (fetchInfo[execute_branch.threadId].state == FetchHalted) {
+ DPRINTF(Fetch, "Halted, ignoring branch: %s\n", execute_branch);
+ } else {
+ changeStream(execute_branch);
+ }
}
- /* The streamSeqNum tagging in request/response ->req should handle
- * discarding those requests when we get to them. */
- } else if (state != FetchHalted && fetch2_branch.isStreamChange()) {
- /* Handle branch predictions by changing the instruction source
- * if we're still processing the same stream (as set by streamSeqNum)
- * as the one of the prediction.
- */
- if (fetch2_branch.newStreamSeqNum != streamSeqNum) {
- DPRINTF(Fetch, "Not changing stream on prediction: %s,"
- " streamSeqNum mismatch\n",
- fetch2_branch);
- } else {
- changeStream(fetch2_branch);
+ if (fetch2_branch.threadId != InvalidThreadID &&
+ fetch2_branch.isStreamChange()) {
+
+ if (fetchInfo[fetch2_branch.threadId].state == FetchHalted) {
+ DPRINTF(Fetch, "Halted, ignoring branch: %s\n", fetch2_branch);
+ } else if (fetch2_branch.newStreamSeqNum !=
+ fetchInfo[fetch2_branch.threadId].streamSeqNum) {
+ DPRINTF(Fetch, "Not changing stream on prediction: %s,"
+ " streamSeqNum mismatch\n", fetch2_branch);
+ } else {
+ changeStream(fetch2_branch);
+ }
}
}
- /* Can we fetch? */
- /* The bare minimum requirements for initiating a fetch */
- /* THREAD need to handle multiple threads */
- if (state == FetchRunning && /* We are actually fetching */
- !blocked && /* Space in the Fetch2 inputBuffer */
- /* The thread we're going to fetch for (thread 0), is active */
- cpu.getContext(0)->status() == ThreadContext::Active &&
- numInFlightFetches() < fetchLimit)
- {
- fetchLine();
- /* Take up a slot in the fetch queue */
- nextStageReserve.reserve();
+ if (numInFlightFetches() < fetchLimit) {
+ ThreadID fetch_tid = getScheduledThread();
+
+ if (fetch_tid != InvalidThreadID) {
+ DPRINTF(Fetch, "Fetching from thread %d\n", fetch_tid);
+
+ /* Generate fetch to selected thread */
+ fetchLine(fetch_tid);
+ /* Take up a slot in the fetch queue */
+ nextStageReserve[fetch_tid].reserve();
+ } else {
+ DPRINTF(Fetch, "No active threads available to fetch from\n");
+ }
}
+
/* Halting shouldn't prevent fetches in flight from being processed */
/* Step fetches through the icachePort queues and memory system */
stepQueues();
@@ -599,9 +668,9 @@ Fetch1::evaluate()
Fetch1::FetchRequestPtr response = transfers.front();
if (response->isDiscardable()) {
- nextStageReserve.freeReservation();
+ nextStageReserve[response->id.threadId].freeReservation();
- DPRINTF(Fetch, "Discarding translated fetch at it's for"
+ DPRINTF(Fetch, "Discarding translated fetch as it's for"
" an old stream\n");
/* Wake up next cycle just in case there was some other
@@ -626,19 +695,49 @@ Fetch1::evaluate()
* generate a line output (tested just above) or to initiate a memory
* fetch which will signal activity when it returns/needs stepping
* between queues */
+
+
+ /* This looks hackish. And it is, but there doesn't seem to be a better
+ * way to do this. The signal from commit to suspend fetch takes 1
+ * clock cycle to propagate to fetch. However, a legitimate wakeup
+ * may occur between cycles from the memory system. Thus wakeup guard
+ * prevents us from suspending in that case. */
+
+ for (auto& thread : fetchInfo) {
+ thread.wakeupGuard = false;
+ }
+}
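Untangled from the diff context, the branch-handling policy at the top of evaluate is: a stream change from Execute always outranks a simultaneous Fetch2 prediction for the same thread, predictions are honoured only while their stream sequence number still matches, and branches for different threads are processed independently. A condensed sketch of the same-thread decision:

    #include <cstdint>
    #include <iostream>

    using ThreadID = int16_t;

    struct Branch
    {
        ThreadID tid;             // InvalidThreadID (-1) when no branch
        bool streamChange;
        uint64_t newStreamSeqNum;
    };

    // Which input, if either, changes thread tid's stream this cycle?
    const char *pick(const Branch &execute, const Branch &fetch2,
                     ThreadID tid, uint64_t streamSeqNum, bool halted)
    {
        if (halted)
            return "ignore: thread halted";
        if (execute.tid == tid && execute.streamChange)
            return "take Execute branch";
        if (fetch2.tid == tid && fetch2.streamChange)
            return fetch2.newStreamSeqNum == streamSeqNum
                ? "take Fetch2 prediction"
                : "ignore: streamSeqNum mismatch";
        return "no stream change";
    }

    int main()
    {
        Branch ex {0, true, 5}, f2 {0, true, 4};
        // Both valid for thread 0: Execute wins, prediction ignored.
        std::cout << pick(ex, f2, 0, 4, false) << "\n";
    }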
+
+void
+Fetch1::wakeupFetch(ThreadID tid)
+{
+ ThreadContext *thread_ctx = cpu.getContext(tid);
+ Fetch1ThreadInfo &thread = fetchInfo[tid];
+ thread.pc = thread_ctx->pcState();
+ thread.state = FetchRunning;
+ thread.wakeupGuard = true;
+ DPRINTF(Fetch, "[tid:%d]: Changing stream wakeup %s\n",
+ tid, thread_ctx->pcState());
+
+ cpu.wakeupOnEvent(Pipeline::Fetch1StageId);
}
bool
Fetch1::isDrained()
{
- DPRINTF(Drain, "isDrained %s %s%s\n",
- state,
- (numInFlightFetches() == 0 ? "" : "inFlightFetches "),
- ((*out.inputWire).isBubble() ? "" : "outputtingLine"));
-
- return state == FetchHalted &&
- numInFlightFetches() == 0 &&
- (*out.inputWire).isBubble();
+ bool drained = numInFlightFetches() == 0 && (*out.inputWire).isBubble();
+ for (ThreadID tid = 0; tid < cpu.numThreads; tid++) {
+ Fetch1ThreadInfo &thread = fetchInfo[tid];
+ DPRINTF(Drain, "isDrained[tid:%d]: %s %s%s\n",
+ tid,
+ thread.state == FetchHalted,
+ (numInFlightFetches() == 0 ? "" : "inFlightFetches "),
+ ((*out.inputWire).isBubble() ? "" : "outputtingLine"));
+
+ drained = drained && thread.state == FetchHalted;
+ }
+
+ return drained;
}
void
@@ -649,26 +748,32 @@ Fetch1::FetchRequest::reportData(std::ostream &os) const
bool Fetch1::FetchRequest::isDiscardable() const
{
+ Fetch1ThreadInfo &thread = fetch.fetchInfo[id.threadId];
+
/* Can't discard lines in TLB/memory */
return state != InTranslation && state != RequestIssuing &&
- (id.streamSeqNum != fetch.streamSeqNum ||
- id.predictionSeqNum != fetch.predictionSeqNum);
+ (id.streamSeqNum != thread.streamSeqNum ||
+ id.predictionSeqNum != thread.predictionSeqNum);
}
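With sequence numbers now per thread, a queued line is discardable once either of its ID's sequence numbers no longer matches the owning thread's current values, provided it has already left the TLB and the request-issue stage. A sketch of just the comparison:

    #include <cstdint>
    #include <iostream>

    struct LineId    { int16_t tid; uint64_t stream, prediction; };
    struct ThreadSeq { uint64_t stream, prediction; };

    // Mirrors Fetch1::FetchRequest::isDiscardable after this change.
    bool isDiscardable(const LineId &id, const ThreadSeq &thread,
                       bool inTranslationOrIssuing)
    {
        return !inTranslationOrIssuing &&
            (id.stream != thread.stream ||
             id.prediction != thread.prediction);
    }

    int main()
    {
        ThreadSeq t {6, 3};      // thread 0 has moved on to stream 6
        LineId stale {0, 5, 3};  // line was fetched under stream 5
        std::cout << isDiscardable(stale, t, false) << "\n";  // prints 1
    }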
void
Fetch1::minorTrace() const
{
+ // TODO: Un-bork minorTrace for THREADS
+ // bork bork bork
+ const Fetch1ThreadInfo &thread = fetchInfo[0];
+
std::ostringstream data;
- if (blocked)
+ if (thread.blocked)
data << 'B';
else
(*out.inputWire).reportData(data);
MINORTRACE("state=%s icacheState=%s in_tlb_mem=%s/%s"
- " streamSeqNum=%d lines=%s\n", state, icacheState,
+ " streamSeqNum=%d lines=%s\n", thread.state, icacheState,
numFetchesInITLB, numFetchesInMemorySystem,
- streamSeqNum, data.str());
+ thread.streamSeqNum, data.str());
requests.minorTrace();
transfers.minorTrace();
}