author    Nilay Vaish <nilay@cs.wisc.edu>  2015-04-29 22:35:22 -0500
committer Nilay Vaish <nilay@cs.wisc.edu>  2015-04-29 22:35:22 -0500
commit    43335495754abac71377bbd6df0c668b60b22822
tree      62ca271baac3fafb041bf24acaaeef14f6ab8e97 /src/cpu/o3
parent    0dbd696aaef47205c1430b53841423c7d25455ed
cpu: o3: replace issueLatency with bool pipelined
Currently, each op class has a parameter, issueLat, that denotes the number of cycles after which another op of the same class can be issued. In practice this latency is either one cycle (fully pipelined) or equal to the execution latency of the op (not pipelined at all). Because issueLat has type Cycles, it appears as though it could be set to any value. To avoid this confusion, the parameter is renamed to 'pipelined' and given type boolean. If set to true, the op executes in a fully pipelined fashion; otherwise, it executes unpipelined.
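As a concrete illustration of the new parameter, below is a minimal configuration sketch in the style of FuncUnitConfig.py. The class name SlowFpDivider and its count are made up for illustration; FUDesc, OpDesc, opLat and pipelined are the config classes and parameters this patch touches, and the m5.objects import path is assumed.

from m5.objects import FUDesc, OpDesc

class SlowFpDivider(FUDesc):
    # opLat is the execution latency.  pipelined=False keeps the unit busy
    # for the full opLat before another FloatDiv can issue to it -- the
    # case previously written as issueLat == opLat.
    opList = [ OpDesc(opClass='FloatDiv', opLat=12, pipelined=False),
               # Fully pipelined: a new FloatMult can issue every cycle.
               OpDesc(opClass='FloatMult', opLat=4) ]
    count = 1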
Diffstat (limited to 'src/cpu/o3')
-rw-r--r--  src/cpu/o3/FuncUnitConfig.py    9
-rw-r--r--  src/cpu/o3/fu_pool.cc           8
-rw-r--r--  src/cpu/o3/fu_pool.hh           8
-rw-r--r--  src/cpu/o3/inst_queue_impl.hh   5
4 files changed, 14 insertions, 16 deletions
diff --git a/src/cpu/o3/FuncUnitConfig.py b/src/cpu/o3/FuncUnitConfig.py
index 0f5efb776..b8be400b5 100644
--- a/src/cpu/o3/FuncUnitConfig.py
+++ b/src/cpu/o3/FuncUnitConfig.py
@@ -49,7 +49,7 @@ class IntALU(FUDesc):
class IntMultDiv(FUDesc):
opList = [ OpDesc(opClass='IntMult', opLat=3),
- OpDesc(opClass='IntDiv', opLat=20, issueLat=19) ]
+ OpDesc(opClass='IntDiv', opLat=20, pipelined=False) ]
# DIV and IDIV instructions in x86 are implemented using a loop which
# issues division microops. The latency of these microops should really be
@@ -57,7 +57,6 @@ class IntMultDiv(FUDesc):
# of the quotient.
if buildEnv['TARGET_ISA'] in ('x86'):
opList[1].opLat=1
- opList[1].issueLat=1
count=2
@@ -69,8 +68,8 @@ class FP_ALU(FUDesc):
class FP_MultDiv(FUDesc):
opList = [ OpDesc(opClass='FloatMult', opLat=4),
- OpDesc(opClass='FloatDiv', opLat=12, issueLat=12),
- OpDesc(opClass='FloatSqrt', opLat=24, issueLat=24) ]
+ OpDesc(opClass='FloatDiv', opLat=12, pipelined=False),
+ OpDesc(opClass='FloatSqrt', opLat=24, pipelined=False) ]
count = 2
class SIMD_Unit(FUDesc):
@@ -109,6 +108,6 @@ class RdWrPort(FUDesc):
count = 4
class IprPort(FUDesc):
- opList = [ OpDesc(opClass='IprAccess', opLat = 3, issueLat = 3) ]
+ opList = [ OpDesc(opClass='IprAccess', opLat = 3, pipelined = False) ]
count = 1
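For context on how such descriptions are consumed, here is a hypothetical usage sketch, not part of this patch: the o3 CPU takes its functional units through an FUPool, so a custom description like the SlowFpDivider sketch above would be wired in roughly as follows. system.cpu is assumed to be a DerivO3CPU configured elsewhere, and the fuPool and FUList parameter names are taken from the surrounding o3 config files.

from m5.objects import FUPool

# A real configuration would list the default units (IntALU, RdWrPort,
# etc.) alongside the custom one; only the divider is shown here.
system.cpu.fuPool = FUPool(FUList=[ SlowFpDivider() ])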
diff --git a/src/cpu/o3/fu_pool.cc b/src/cpu/o3/fu_pool.cc
index 016b171bc..dab7dbed2 100644
--- a/src/cpu/o3/fu_pool.cc
+++ b/src/cpu/o3/fu_pool.cc
@@ -89,7 +89,7 @@ FUPool::FUPool(const Params *p)
for (int i = 0; i < Num_OpClasses; ++i) {
maxOpLatencies[i] = Cycles(0);
- maxIssueLatencies[i] = Cycles(0);
+ pipelined[i] = true;
}
//
@@ -123,13 +123,13 @@ FUPool::FUPool(const Params *p)
fuPerCapList[(*j)->opClass].addFU(numFU + k);
// indicate that this FU has the capability
- fu->addCapability((*j)->opClass, (*j)->opLat, (*j)->issueLat);
+ fu->addCapability((*j)->opClass, (*j)->opLat, (*j)->pipelined);
if ((*j)->opLat > maxOpLatencies[(*j)->opClass])
maxOpLatencies[(*j)->opClass] = (*j)->opLat;
- if ((*j)->issueLat > maxIssueLatencies[(*j)->opClass])
- maxIssueLatencies[(*j)->opClass] = (*j)->issueLat;
+ if (!(*j)->pipelined)
+ pipelined[(*j)->opClass] = false;
}
numFU++;
diff --git a/src/cpu/o3/fu_pool.hh b/src/cpu/o3/fu_pool.hh
index 2e1b71dad..8b501fc81 100644
--- a/src/cpu/o3/fu_pool.hh
+++ b/src/cpu/o3/fu_pool.hh
@@ -72,8 +72,8 @@ class FUPool : public SimObject
private:
/** Maximum op execution latencies, per op class. */
Cycles maxOpLatencies[Num_OpClasses];
- /** Maximum issue latencies, per op class. */
- Cycles maxIssueLatencies[Num_OpClasses];
+ /** Whether op is pipelined or not. */
+ bool pipelined[Num_OpClasses];
/** Bitvector listing capabilities of this FU pool. */
std::bitset<Num_OpClasses> capabilityList;
@@ -160,8 +160,8 @@ class FUPool : public SimObject
}
/** Returns the issue latency of the given capability. */
- Cycles getIssueLatency(OpClass capability) {
- return maxIssueLatencies[capability];
+ bool isPipelined(OpClass capability) {
+ return pipelined[capability];
}
/** Have all the FUs drained? */
diff --git a/src/cpu/o3/inst_queue_impl.hh b/src/cpu/o3/inst_queue_impl.hh
index fa621ffbf..7d359b992 100644
--- a/src/cpu/o3/inst_queue_impl.hh
+++ b/src/cpu/o3/inst_queue_impl.hh
@@ -825,7 +825,7 @@ InstructionQueue<Impl>::scheduleReadyInsts()
if (idx >= 0)
fuPool->freeUnitNextCycle(idx);
} else {
- Cycles issue_latency = fuPool->getIssueLatency(op_class);
+ bool pipelined = fuPool->isPipelined(op_class);
// Generate completion event for the FU
++wbOutstanding;
FUCompletion *execution = new FUCompletion(issuing_inst,
@@ -834,8 +834,7 @@ InstructionQueue<Impl>::scheduleReadyInsts()
cpu->schedule(execution,
cpu->clockEdge(Cycles(op_latency - 1)));
- // @todo: Enforce that issue_latency == 1 or op_latency
- if (issue_latency > Cycles(1)) {
+ if (!pipelined) {
// If FU isn't pipelined, then it must be freed
// upon the execution completing.
execution->setFreeFU();
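Summarizing the issue-side behaviour the last hunk implements, as a stand-alone sketch in plain Python (not gem5 code):

def cycles_until_fu_reusable(op_lat, pipelined):
    # A pipelined FU accepts another op of the same class on the next
    # cycle; an unpipelined FU is freed only when execution completes.
    return 1 if pipelined else op_lat

assert cycles_until_fu_reusable(op_lat=3,  pipelined=True)  == 1   # IntMult
assert cycles_until_fu_reusable(op_lat=12, pipelined=False) == 12  # FloatDiv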