path: root/src/cpu
author     Korey Sewell <ksewell@umich.edu>    2010-01-31 18:25:27 -0500
committer  Korey Sewell <ksewell@umich.edu>    2010-01-31 18:25:27 -0500
commit     a892af7b261e1c48b06ccbded5551e958c778414 (patch)
tree       62f582c53677bb485f791b97ad1ed98ca90c2a10 /src/cpu
parent     0e96798fe0a56936f8590dbd301f2b07a1850e22 (diff)
download   gem5-a892af7b261e1c48b06ccbded5551e958c778414.tar.xz
inorder: don't allow early loads
- Loads were issuing on the same cycle that their address was generated, which is slightly unrealistic. Instead, force address generation to happen on a separate cycle from load initiation.
- Also, mark the stages in the more traditional way (F-D-X-M-W).
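For reference, the following is a minimal, self-contained C++ sketch of the scheduling change, not gem5 code: the simplified ScheduleEntry struct, the printing helper, and the resource/command strings merely mirror the names in the diff, and the stage numbers follow the order of the addStage() calls. It shows how a load's D-cache read moves from the stage that generates the address to the memory stage (initiate) and writeback stage (complete).

// Illustrative sketch only; names mirror the diff but this is not gem5 code.
#include <cstdio>
#include <string>
#include <vector>

struct ScheduleEntry {
    int stageNum;            // pipeline stage the request is issued in
    std::string resource;    // resource/command pair, e.g. "AGEN:GenerateAddr"
};

int main()
{
    // Old schedule (I-E-M-A-W numbering implied by the removed code): the
    // address generation and the D-cache read both start in stage E, so the
    // load effectively issues in the same cycle the address is produced.
    std::vector<ScheduleEntry> oldLoad = {
        {1, "AGEN:GenerateAddr"},
        {1, "DCache:InitiateReadData"},
        {2, "DCache:CompleteReadData"},
    };

    // New schedule (F-D-X-M-W): the address is generated in X, the read is
    // initiated one stage later in M, and completes in W.
    std::vector<ScheduleEntry> newLoad = {
        {2, "AGEN:GenerateAddr"},        // X (execute)
        {3, "DCache:InitiateReadData"},  // M (memory)
        {4, "DCache:CompleteReadData"},  // W (writeback)
    };

    auto print = [](const char *label, const std::vector<ScheduleEntry> &s) {
        std::printf("%s\n", label);
        for (const auto &e : s)
            std::printf("  stage %d: %s\n", e.stageNum, e.resource.c_str());
    };

    print("old load schedule:", oldLoad);
    print("new load schedule:", newLoad);
    return 0;
}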
Diffstat (limited to 'src/cpu')
-rw-r--r--  src/cpu/inorder/pipeline_traits.cc | 49
-rw-r--r--  src/cpu/inorder/pipeline_traits.hh |  3
2 files changed, 29 insertions(+), 23 deletions(-)
diff --git a/src/cpu/inorder/pipeline_traits.cc b/src/cpu/inorder/pipeline_traits.cc
index ed72ab1d0..8ff26dce2 100644
--- a/src/cpu/inorder/pipeline_traits.cc
+++ b/src/cpu/inorder/pipeline_traits.cc
@@ -65,16 +65,18 @@ int getNextPriority(DynInstPtr &inst, int stage_num)
void createFrontEndSchedule(DynInstPtr &inst)
{
- InstStage *I = inst->addStage();
- InstStage *E = inst->addStage();
-
- I->needs(FetchSeq, FetchSeqUnit::AssignNextPC);
- I->needs(ICache, CacheUnit::InitiateFetch);
-
- E->needs(ICache, CacheUnit::CompleteFetch);
- E->needs(Decode, DecodeUnit::DecodeInst);
- E->needs(BPred, BranchPredictor::PredictBranch);
- E->needs(FetchSeq, FetchSeqUnit::UpdateTargetPC);
+ InstStage *F = inst->addStage();
+ InstStage *D = inst->addStage();
+
+ // FETCH
+ F->needs(FetchSeq, FetchSeqUnit::AssignNextPC);
+ F->needs(ICache, CacheUnit::InitiateFetch);
+
+ // DECODE
+ D->needs(ICache, CacheUnit::CompleteFetch);
+ D->needs(Decode, DecodeUnit::DecodeInst);
+ D->needs(BPred, BranchPredictor::PredictBranch);
+ D->needs(FetchSeq, FetchSeqUnit::UpdateTargetPC);
}
bool createBackEndSchedule(DynInstPtr &inst)
@@ -83,45 +85,48 @@ bool createBackEndSchedule(DynInstPtr &inst)
return false;
}
- InstStage *E = inst->currentStage();
+ InstStage *X = inst->addStage();
InstStage *M = inst->addStage();
- InstStage *A = inst->addStage();
InstStage *W = inst->addStage();
+ // EXECUTE
for (int idx=0; idx < inst->numSrcRegs(); idx++) {
if (!idx || !inst->isStore()) {
- E->needs(RegManager, UseDefUnit::ReadSrcReg, idx);
+ X->needs(RegManager, UseDefUnit::ReadSrcReg, idx);
}
}
-
if ( inst->isNonSpeculative() ) {
// skip execution of non speculative insts until later
} else if ( inst->isMemRef() ) {
if ( inst->isLoad() ) {
- E->needs(AGEN, AGENUnit::GenerateAddr);
- E->needs(DCache, CacheUnit::InitiateReadData);
+ X->needs(AGEN, AGENUnit::GenerateAddr);
}
} else if (inst->opClass() == IntMultOp || inst->opClass() == IntDivOp) {
- E->needs(MDU, MultDivUnit::StartMultDiv);
+ X->needs(MDU, MultDivUnit::StartMultDiv);
} else {
- E->needs(ExecUnit, ExecutionUnit::ExecuteInst);
+ X->needs(ExecUnit, ExecutionUnit::ExecuteInst);
}
if (inst->opClass() == IntMultOp || inst->opClass() == IntDivOp) {
- M->needs(MDU, MultDivUnit::EndMultDiv);
+ X->needs(MDU, MultDivUnit::EndMultDiv);
}
+ // MEMORY
if ( inst->isLoad() ) {
- M->needs(DCache, CacheUnit::CompleteReadData);
+ M->needs(DCache, CacheUnit::InitiateReadData);
} else if ( inst->isStore() ) {
M->needs(RegManager, UseDefUnit::ReadSrcReg, 1);
M->needs(AGEN, AGENUnit::GenerateAddr);
M->needs(DCache, CacheUnit::InitiateWriteData);
}
- if ( inst->isStore() ) {
- A->needs(DCache, CacheUnit::CompleteWriteData);
+
+ // WRITEBACK
+ if ( inst->isLoad() ) {
+ W->needs(DCache, CacheUnit::CompleteReadData);
+ } else if ( inst->isStore() ) {
+ W->needs(DCache, CacheUnit::CompleteWriteData);
}
if ( inst->isNonSpeculative() ) {
diff --git a/src/cpu/inorder/pipeline_traits.hh b/src/cpu/inorder/pipeline_traits.hh
index 3c28894e7..ddc8a3ad7 100644
--- a/src/cpu/inorder/pipeline_traits.hh
+++ b/src/cpu/inorder/pipeline_traits.hh
@@ -113,7 +113,8 @@ namespace ThePipeline {
};
struct entryCompare {
- bool operator()(const ScheduleEntry* lhs, const ScheduleEntry* rhs) const
+ bool operator()(const ScheduleEntry* lhs, const ScheduleEntry* rhs)
+ const
{
// Prioritize first by stage number that the resource is needed
if (lhs->stageNum > rhs->stageNum) {
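For context, below is a standalone sketch of the ordering that entryCompare establishes for the resource schedule's priority queue. Only the stage-number comparison visible in the hunk above is taken from the source; the priority field and its tie-break are assumptions added for illustration, and this ScheduleEntry is a simplified stand-in for the real struct.

// Illustrative sketch; not the gem5 implementation.
#include <cstdio>
#include <queue>
#include <vector>

struct ScheduleEntry {
    int stageNum;   // stage in which the resource request is needed
    int priority;   // ordering within a stage (assumed field for this sketch)
};

struct entryCompare {
    bool operator()(const ScheduleEntry *lhs, const ScheduleEntry *rhs)
        const
    {
        // Prioritize first by the stage number in which the resource is
        // needed: entries for earlier stages come out of the queue first.
        if (lhs->stageNum != rhs->stageNum)
            return lhs->stageNum > rhs->stageNum;
        // Within a stage, lower priority values come out first (assumed
        // tie-break for this sketch).
        return lhs->priority > rhs->priority;
    }
};

int main()
{
    ScheduleEntry a{2, 0}, b{1, 1}, c{1, 0};
    std::priority_queue<ScheduleEntry*, std::vector<ScheduleEntry*>,
                        entryCompare> q;
    q.push(&a);
    q.push(&b);
    q.push(&c);
    while (!q.empty()) {
        std::printf("stage %d, priority %d\n",
                    q.top()->stageNum, q.top()->priority);
        q.pop();
    }
    return 0;
}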