-rw-r--r--  configs/common/FSConfig.py                 1
-rw-r--r--  configs/common/SysPaths.py                 2
-rw-r--r--  configs/example/fs.py                      4
-rw-r--r--  configs/example/se.py                      3
-rw-r--r--  src/SConscript                             1
-rw-r--r--  src/cpu/checker/thread_context.hh          2
-rw-r--r--  src/cpu/o3/commit_impl.hh                 12
-rw-r--r--  src/cpu/o3/cpu.cc                         71
-rw-r--r--  src/cpu/o3/cpu.hh                         19
-rw-r--r--  src/cpu/o3/fetch_impl.hh                   5
-rw-r--r--  src/cpu/o3/iew_impl.hh                     6
-rw-r--r--  src/cpu/o3/lsq_unit.hh                    40
-rw-r--r--  src/cpu/o3/lsq_unit_impl.hh               24
-rwxr-xr-x  src/cpu/o3/thread_context_impl.hh          6
-rw-r--r--  src/cpu/simple/timing.cc                  48
-rw-r--r--  src/cpu/simple/timing.hh                   1
-rw-r--r--  src/mem/physical.cc                        2
-rw-r--r--  src/python/m5/objects/FUPool.py            6
-rw-r--r--  src/python/m5/objects/FuncUnitConfig.py   41
-rw-r--r--  src/python/m5/objects/O3CPU.py            11
-rw-r--r--  tests/configs/o3-timing-mp.py              3
-rw-r--r--  tests/configs/o3-timing.py                 3
22 files changed, 232 insertions, 79 deletions
diff --git a/configs/common/FSConfig.py b/configs/common/FSConfig.py
index 67a1e5735..470dc8867 100644
--- a/configs/common/FSConfig.py
+++ b/configs/common/FSConfig.py
@@ -30,7 +30,6 @@ import m5
from m5 import makeList
from m5.objects import *
from Benchmarks import *
-from FullO3Config import *
class CowIdeDisk(IdeDisk):
image = CowDiskImage(child=RawDiskImage(read_only=True),
diff --git a/configs/common/SysPaths.py b/configs/common/SysPaths.py
index 2070d11f8..5098c54ce 100644
--- a/configs/common/SysPaths.py
+++ b/configs/common/SysPaths.py
@@ -58,7 +58,7 @@ def system():
if not binary.dir:
binary.dir = joinpath(system.dir, 'binaries')
if not disk.dir:
- disk.dir = joinpath(system.dir, 'disks')
+ disk.dir = joinpath('/n/zamp/z/ktlim/local/clean/linux', 'disks')
if not script.dir:
script.dir = joinpath(system.dir, 'boot')
diff --git a/configs/example/fs.py b/configs/example/fs.py
index 0dadcbe1b..460fb68fb 100644
--- a/configs/example/fs.py
+++ b/configs/example/fs.py
@@ -65,8 +65,8 @@ if args:
sys.exit(1)
if options.detailed:
- cpu = DetailedO3CPU()
- cpu2 = DetailedO3CPU()
+ cpu = DerivO3CPU()
+ cpu2 = DerivO3CPU()
mem_mode = 'timing'
elif options.timing:
cpu = TimingSimpleCPU()
diff --git a/configs/example/se.py b/configs/example/se.py
index d1d19eebc..6a941b9da 100644
--- a/configs/example/se.py
+++ b/configs/example/se.py
@@ -34,7 +34,6 @@ import m5
from m5.objects import *
import os, optparse, sys
m5.AddToPath('../common')
-from FullO3Config import *
parser = optparse.OptionParser()
@@ -86,7 +85,7 @@ if options.detailed:
if options.timing:
cpu = TimingSimpleCPU()
elif options.detailed:
- cpu = DetailedO3CPU()
+ cpu = DerivO3CPU()
else:
cpu = AtomicSimpleCPU()
diff --git a/src/SConscript b/src/SConscript
index 9f88bbeea..e75a36ab5 100644
--- a/src/SConscript
+++ b/src/SConscript
@@ -285,6 +285,7 @@ turbolaser_sources = Split('''
# Syscall emulation (non-full-system) sources
syscall_emulation_sources = Split('''
+ cpu/memtest/memtest.cc
mem/translating_port.cc
mem/page_table.cc
sim/process.cc
diff --git a/src/cpu/checker/thread_context.hh b/src/cpu/checker/thread_context.hh
index 8c0186dae..b2806d40b 100644
--- a/src/cpu/checker/thread_context.hh
+++ b/src/cpu/checker/thread_context.hh
@@ -133,7 +133,7 @@ class CheckerThreadContext : public ThreadContext
void takeOverFrom(ThreadContext *oldContext)
{
actualTC->takeOverFrom(oldContext);
- checkerTC->takeOverFrom(oldContext);
+ checkerTC->copyState(oldContext);
}
void regStats(const std::string &name) { actualTC->regStats(name); }
diff --git a/src/cpu/o3/commit_impl.hh b/src/cpu/o3/commit_impl.hh
index c80e4d8c1..ecf6ed632 100644
--- a/src/cpu/o3/commit_impl.hh
+++ b/src/cpu/o3/commit_impl.hh
@@ -342,12 +342,6 @@ DefaultCommit<Impl>::drain()
{
drainPending = true;
- // If it's already drained, return true.
- if (rob->isEmpty() && !iewStage->hasStoresToWB()) {
- cpu->signalDrained();
- return true;
- }
-
return false;
}
@@ -1218,16 +1212,16 @@ DefaultCommit<Impl>::skidInsert()
for (int inst_num = 0; inst_num < fromRename->size; ++inst_num) {
DynInstPtr inst = fromRename->insts[inst_num];
- int tid = inst->threadNumber;
if (!inst->isSquashed()) {
DPRINTF(Commit, "Inserting PC %#x [sn:%i] [tid:%i] into ",
- "skidBuffer.\n", inst->readPC(), inst->seqNum, tid);
+ "skidBuffer.\n", inst->readPC(), inst->seqNum,
+ inst->threadNumber);
skidBuffer.push(inst);
} else {
DPRINTF(Commit, "Instruction PC %#x [sn:%i] [tid:%i] was "
"squashed, skipping.\n",
- inst->readPC(), inst->seqNum, tid);
+ inst->readPC(), inst->seqNum, inst->threadNumber);
}
}
}
diff --git a/src/cpu/o3/cpu.cc b/src/cpu/o3/cpu.cc
index 7386dfadd..4c9a8e91f 100644
--- a/src/cpu/o3/cpu.cc
+++ b/src/cpu/o3/cpu.cc
@@ -88,7 +88,7 @@ FullO3CPU<Impl>::TickEvent::description()
template <class Impl>
FullO3CPU<Impl>::ActivateThreadEvent::ActivateThreadEvent()
- : Event(&mainEventQueue, CPU_Tick_Pri)
+ : Event(&mainEventQueue, CPU_Switch_Pri)
{
}
@@ -135,7 +135,8 @@ void
FullO3CPU<Impl>::DeallocateContextEvent::process()
{
cpu->deactivateThread(tid);
- cpu->removeThread(tid);
+ if (remove)
+ cpu->removeThread(tid);
}
template <class Impl>
@@ -191,7 +192,11 @@ FullO3CPU<Impl>::FullO3CPU(Params *params)
deferRegistration(params->deferRegistration),
numThreads(number_of_threads)
{
- _status = Idle;
+ if (!deferRegistration) {
+ _status = Running;
+ } else {
+ _status = Idle;
+ }
checker = NULL;
@@ -304,6 +309,9 @@ FullO3CPU<Impl>::FullO3CPU(Params *params)
tid,
bindRegs);
+
+ activateThreadEvent[tid].init(tid, this);
+ deallocateContextEvent[tid].init(tid, this);
}
rename.setRenameMap(renameMap);
@@ -447,13 +455,16 @@ FullO3CPU<Impl>::tick()
if (!tickEvent.scheduled()) {
if (_status == SwitchedOut ||
getState() == SimObject::Drained) {
+ DPRINTF(O3CPU, "Switched out!\n");
// increment stat
lastRunningCycle = curTick;
- } else if (!activityRec.active()) {
+ } else if (!activityRec.active() || _status == Idle) {
+ DPRINTF(O3CPU, "Idle!\n");
lastRunningCycle = curTick;
timesIdled++;
} else {
tickEvent.schedule(curTick + cycles(1));
+ DPRINTF(O3CPU, "Scheduling next tick!\n");
}
}
@@ -512,6 +523,8 @@ FullO3CPU<Impl>::activateThread(unsigned tid)
list<unsigned>::iterator isActive = find(
activeThreads.begin(), activeThreads.end(), tid);
+ DPRINTF(O3CPU, "[tid:%i]: Calling activate thread.\n", tid);
+
if (isActive == activeThreads.end()) {
DPRINTF(O3CPU, "[tid:%i]: Adding to active threads list\n",
tid);
@@ -528,6 +541,8 @@ FullO3CPU<Impl>::deactivateThread(unsigned tid)
list<unsigned>::iterator thread_it =
find(activeThreads.begin(), activeThreads.end(), tid);
+ DPRINTF(O3CPU, "[tid:%i]: Calling deactivate thread.\n", tid);
+
if (thread_it != activeThreads.end()) {
DPRINTF(O3CPU,"[tid:%i]: Removing from active threads list\n",
tid);
@@ -548,7 +563,7 @@ FullO3CPU<Impl>::activateContext(int tid, int delay)
activateThread(tid);
}
- if(lastActivatedCycle < curTick) {
+ if (lastActivatedCycle < curTick) {
scheduleTickEvent(delay);
// Be sure to signal that there's some activity so the CPU doesn't
@@ -563,17 +578,20 @@ FullO3CPU<Impl>::activateContext(int tid, int delay)
}
template <class Impl>
-void
-FullO3CPU<Impl>::deallocateContext(int tid, int delay)
+bool
+FullO3CPU<Impl>::deallocateContext(int tid, bool remove, int delay)
{
// Schedule removal of thread data from CPU
if (delay){
DPRINTF(O3CPU, "[tid:%i]: Scheduling thread context to deallocate "
"on cycle %d\n", tid, curTick + cycles(delay));
- scheduleDeallocateContextEvent(tid, delay);
+ scheduleDeallocateContextEvent(tid, remove, delay);
+ return false;
} else {
deactivateThread(tid);
- removeThread(tid);
+ if (remove)
+ removeThread(tid);
+ return true;
}
}
@@ -582,8 +600,9 @@ void
FullO3CPU<Impl>::suspendContext(int tid)
{
DPRINTF(O3CPU,"[tid: %i]: Suspending Thread Context.\n", tid);
- deactivateThread(tid);
- if (activeThreads.size() == 0)
+ bool deallocated = deallocateContext(tid, false, 1);
+ // If this was the last thread then unschedule the tick event.
+ if ((activeThreads.size() == 1 && !deallocated) || activeThreads.size() == 0)
unscheduleTickEvent();
_status = Idle;
}
@@ -594,7 +613,7 @@ FullO3CPU<Impl>::haltContext(int tid)
{
//For now, this is the same as deallocate
DPRINTF(O3CPU,"[tid:%i]: Halt Context called. Deallocating", tid);
- deallocateContext(tid, 1);
+ deallocateContext(tid, true, 1);
}
template <class Impl>
@@ -682,10 +701,17 @@ FullO3CPU<Impl>::removeThread(unsigned tid)
assert(iew.ldstQueue.getCount(tid) == 0);
// Reset ROB/IQ/LSQ Entries
+
+ // Commented out for now. This should be possible to do by
+ // telling all the pipeline stages to drain first, and then
+ // checking until the drain completes. Once the pipeline is
+ // drained, call resetEntries(). - 10-09-06 ktlim
+/*
if (activeThreads.size() >= 1) {
commit.rob->resetEntries();
iew.resetEntries();
}
+*/
}
@@ -824,7 +850,9 @@ template <class Impl>
void
FullO3CPU<Impl>::resume()
{
+#if FULL_SYSTEM
assert(system->getMemoryMode() == System::Timing);
+#endif
fetch.resume();
decode.resume();
rename.resume();
@@ -935,6 +963,25 @@ FullO3CPU<Impl>::takeOverFrom(BaseCPU *oldCPU)
}
if (!tickEvent.scheduled())
tickEvent.schedule(curTick);
+
+ Port *peer;
+ Port *icachePort = fetch.getIcachePort();
+ if (icachePort->getPeer() == NULL) {
+ peer = oldCPU->getPort("icache_port")->getPeer();
+ icachePort->setPeer(peer);
+ } else {
+ peer = icachePort->getPeer();
+ }
+ peer->setPeer(icachePort);
+
+ Port *dcachePort = iew.getDcachePort();
+ if (dcachePort->getPeer() == NULL) {
+ peer = oldCPU->getPort("dcache_port")->getPeer();
+ dcachePort->setPeer(peer);
+ } else {
+ peer = dcachePort->getPeer();
+ }
+ peer->setPeer(dcachePort);
}
template <class Impl>
diff --git a/src/cpu/o3/cpu.hh b/src/cpu/o3/cpu.hh
index dcdcd1fe6..fe510519c 100644
--- a/src/cpu/o3/cpu.hh
+++ b/src/cpu/o3/cpu.hh
@@ -202,9 +202,12 @@ class FullO3CPU : public BaseO3CPU
class DeallocateContextEvent : public Event
{
private:
- /** Number of Thread to Activate */
+ /** Number of Thread to deactivate */
int tid;
+ /** Should the thread be removed from the CPU? */
+ bool remove;
+
/** Pointer to the CPU. */
FullO3CPU<Impl> *cpu;
@@ -218,12 +221,15 @@ class FullO3CPU : public BaseO3CPU
/** Processes the event, calling activateThread() on the CPU. */
void process();
+ /** Sets whether the thread should also be removed from the CPU. */
+ void setRemove(bool _remove) { remove = _remove; }
+
/** Returns the description of the event. */
const char *description();
};
/** Schedule cpu to deallocate thread context.*/
- void scheduleDeallocateContextEvent(int tid, int delay)
+ void scheduleDeallocateContextEvent(int tid, bool remove, int delay)
{
// Schedule thread to activate, regardless of its current state.
if (deallocateContextEvent[tid].squashed())
@@ -296,9 +302,9 @@ class FullO3CPU : public BaseO3CPU
void suspendContext(int tid);
/** Remove Thread from Active Threads List &&
- * Remove Thread Context from CPU.
+ * Possibly Remove Thread Context from CPU.
*/
- void deallocateContext(int tid, int delay = 1);
+ bool deallocateContext(int tid, bool remove, int delay = 1);
/** Remove Thread from Active Threads List &&
* Remove Thread Context from CPU.
@@ -626,11 +632,6 @@ class FullO3CPU : public BaseO3CPU
/** Pointers to all of the threads in the CPU. */
std::vector<Thread *> thread;
- /** Pointer to the icache interface. */
- MemInterface *icacheInterface;
- /** Pointer to the dcache interface. */
- MemInterface *dcacheInterface;
-
/** Whether or not the CPU should defer its registration. */
bool deferRegistration;
diff --git a/src/cpu/o3/fetch_impl.hh b/src/cpu/o3/fetch_impl.hh
index b3c3caaad..32210f1cd 100644
--- a/src/cpu/o3/fetch_impl.hh
+++ b/src/cpu/o3/fetch_impl.hh
@@ -623,6 +623,11 @@ DefaultFetch<Impl>::fetchCacheLine(Addr fetch_PC, Fault &ret_fault, unsigned tid
// Now do the timing access to see whether or not the instruction
// exists within the cache.
if (!icachePort->sendTiming(data_pkt)) {
+ if (data_pkt->result == Packet::BadAddress) {
+ fault = TheISA::genMachineCheckFault();
+ delete mem_req;
+ memReq[tid] = NULL;
+ }
assert(retryPkt == NULL);
assert(retryTid == -1);
DPRINTF(Fetch, "[tid:%i] Out of MSHRs!\n", tid);
diff --git a/src/cpu/o3/iew_impl.hh b/src/cpu/o3/iew_impl.hh
index b2baae296..ba5260fe2 100644
--- a/src/cpu/o3/iew_impl.hh
+++ b/src/cpu/o3/iew_impl.hh
@@ -600,6 +600,11 @@ template<class Impl>
void
DefaultIEW<Impl>::instToCommit(DynInstPtr &inst)
{
+ // This function should not be called after writebackInsts in a
+ // single cycle. That will cause problems with an instruction
+ // being added to the queue to commit without being processed by
+ // writebackInsts prior to being sent to commit.
+
// First check the time slot that this instruction will write
// to. If there are free write ports at the time, then go ahead
// and write the instruction to that time. If there are not,
@@ -1286,6 +1291,7 @@ DefaultIEW<Impl>::executeInsts()
} else if (fault != NoFault) {
// If the instruction faulted, then we need to send it along to commit
// without the instruction completing.
+ DPRINTF(IEW, "Store has fault! [sn:%lli]\n", inst->seqNum);
// Send this instruction to commit, also make sure iew stage
// realizes there is activity.
diff --git a/src/cpu/o3/lsq_unit.hh b/src/cpu/o3/lsq_unit.hh
index 58945f04e..11a02e7c7 100644
--- a/src/cpu/o3/lsq_unit.hh
+++ b/src/cpu/o3/lsq_unit.hh
@@ -626,18 +626,30 @@ LSQUnit<Impl>::read(Request *req, T &data, int load_idx)
++usedPorts;
- PacketPtr data_pkt = new Packet(req, Packet::ReadReq, Packet::Broadcast);
- data_pkt->dataStatic(load_inst->memData);
-
- LSQSenderState *state = new LSQSenderState;
- state->isLoad = true;
- state->idx = load_idx;
- state->inst = load_inst;
- data_pkt->senderState = state;
-
// if the cache is not blocked, do cache access
if (!lsq->cacheBlocked()) {
+ PacketPtr data_pkt =
+ new Packet(req, Packet::ReadReq, Packet::Broadcast);
+ data_pkt->dataStatic(load_inst->memData);
+
+ LSQSenderState *state = new LSQSenderState;
+ state->isLoad = true;
+ state->idx = load_idx;
+ state->inst = load_inst;
+ data_pkt->senderState = state;
+
if (!dcachePort->sendTiming(data_pkt)) {
+ Packet::Result result = data_pkt->result;
+
+ // Delete state and data packet because a load retry
+ // initiates a pipeline restart; it does not retry.
+ delete state;
+ delete data_pkt;
+
+ if (result == Packet::BadAddress) {
+ return TheISA::genMachineCheckFault();
+ }
+
// If the access didn't succeed, tell the LSQ by setting
// the retry thread id.
lsq->setRetryTid(lsqID);
@@ -664,16 +676,6 @@ LSQUnit<Impl>::read(Request *req, T &data, int load_idx)
return NoFault;
}
- if (data_pkt->result != Packet::Success) {
- DPRINTF(LSQUnit, "LSQUnit: D-cache miss!\n");
- DPRINTF(Activity, "Activity: ld accessing mem miss [sn:%lli]\n",
- load_inst->seqNum);
- } else {
- DPRINTF(LSQUnit, "LSQUnit: D-cache hit!\n");
- DPRINTF(Activity, "Activity: ld accessing mem hit [sn:%lli]\n",
- load_inst->seqNum);
- }
-
return NoFault;
}
diff --git a/src/cpu/o3/lsq_unit_impl.hh b/src/cpu/o3/lsq_unit_impl.hh
index 63ffcece1..3f9db912f 100644
--- a/src/cpu/o3/lsq_unit_impl.hh
+++ b/src/cpu/o3/lsq_unit_impl.hh
@@ -608,9 +608,9 @@ LSQUnit<Impl>::writebackStores()
DPRINTF(LSQUnit, "D-Cache: Writing back store idx:%i PC:%#x "
"to Addr:%#x, data:%#x [sn:%lli]\n",
- storeWBIdx, storeQueue[storeWBIdx].inst->readPC(),
+ storeWBIdx, inst->readPC(),
req->getPaddr(), *(inst->memData),
- storeQueue[storeWBIdx].inst->seqNum);
+ inst->seqNum);
// @todo: Remove this SC hack once the memory system handles it.
if (req->isLocked()) {
@@ -619,10 +619,19 @@ LSQUnit<Impl>::writebackStores()
} else {
if (cpu->lockFlag) {
req->setScResult(1);
+ DPRINTF(LSQUnit, "Store conditional [sn:%lli] succeeded.\n",
+ inst->seqNum);
} else {
req->setScResult(0);
// Hack: Instantly complete this store.
- completeDataAccess(data_pkt);
+// completeDataAccess(data_pkt);
+ DPRINTF(LSQUnit, "Store conditional [sn:%lli] failed. "
+ "Instantly completing it.\n",
+ inst->seqNum);
+ WritebackEvent *wb = new WritebackEvent(inst, data_pkt, this);
+ wb->schedule(curTick + 1);
+ delete state;
+ completeStore(storeWBIdx);
incrStIdx(storeWBIdx);
continue;
}
@@ -633,7 +642,13 @@ LSQUnit<Impl>::writebackStores()
}
if (!dcachePort->sendTiming(data_pkt)) {
+ if (data_pkt->result == Packet::BadAddress) {
+ panic("LSQ sent out a bad address for a completed store!");
+ }
// Need to handle becoming blocked on a store.
+ DPRINTF(IEW, "D-Cache became blocked when writing [sn:%lli], will "
+ "retry later\n",
+ inst->seqNum);
isStoreBlocked = true;
++lsqCacheBlocked;
assert(retryPkt == NULL);
@@ -880,6 +895,9 @@ LSQUnit<Impl>::recvRetry()
assert(retryPkt != NULL);
if (dcachePort->sendTiming(retryPkt)) {
+ if (retryPkt->result == Packet::BadAddress) {
+ panic("LSQ sent out a bad address for a completed store!");
+ }
storePostSend(retryPkt);
retryPkt = NULL;
isStoreBlocked = false;
diff --git a/src/cpu/o3/thread_context_impl.hh b/src/cpu/o3/thread_context_impl.hh
index 25e1db21c..2bc194d53 100755
--- a/src/cpu/o3/thread_context_impl.hh
+++ b/src/cpu/o3/thread_context_impl.hh
@@ -165,14 +165,14 @@ template <class Impl>
void
O3ThreadContext<Impl>::deallocate(int delay)
{
- DPRINTF(O3CPU, "Calling deallocate on Thread Context %d\n",
- getThreadNum());
+ DPRINTF(O3CPU, "Calling deallocate on Thread Context %d delay %d\n",
+ getThreadNum(), delay);
if (thread->status() == ThreadContext::Unallocated)
return;
thread->setStatus(ThreadContext::Unallocated);
- cpu->deallocateContext(thread->readTid(), delay);
+ cpu->deallocateContext(thread->readTid(), true, delay);
}
template <class Impl>
diff --git a/src/cpu/simple/timing.cc b/src/cpu/simple/timing.cc
index 48362c42a..33f673cbc 100644
--- a/src/cpu/simple/timing.cc
+++ b/src/cpu/simple/timing.cc
@@ -103,6 +103,7 @@ TimingSimpleCPU::TimingSimpleCPU(Params *p)
ifetch_pkt = dcache_pkt = NULL;
drainEvent = NULL;
fetchEvent = NULL;
+ previousTick = 0;
changeState(SimObject::Running);
}
@@ -161,6 +162,7 @@ TimingSimpleCPU::resume()
assert(system->getMemoryMode() == System::Timing);
changeState(SimObject::Running);
+ previousTick = curTick;
}
void
@@ -168,6 +170,7 @@ TimingSimpleCPU::switchOut()
{
assert(status() == Running || status() == Idle);
_status = SwitchedOut;
+ numCycles += curTick - previousTick;
// If we've been scheduled to resume but are then told to switch out,
// we'll need to cancel it.
@@ -190,6 +193,27 @@ TimingSimpleCPU::takeOverFrom(BaseCPU *oldCPU)
break;
}
}
+
+ if (_status != Running) {
+ _status = Idle;
+ }
+
+ Port *peer;
+ if (icachePort.getPeer() == NULL) {
+ peer = oldCPU->getPort("icache_port")->getPeer();
+ icachePort.setPeer(peer);
+ } else {
+ peer = icachePort.getPeer();
+ }
+ peer->setPeer(&icachePort);
+
+ if (dcachePort.getPeer() == NULL) {
+ peer = oldCPU->getPort("dcache_port")->getPeer();
+ dcachePort.setPeer(peer);
+ } else {
+ peer = dcachePort.getPeer();
+ }
+ peer->setPeer(&dcachePort);
}
@@ -424,6 +448,9 @@ TimingSimpleCPU::fetch()
// fetch fault: advance directly to next instruction (fault handler)
advanceInst(fault);
}
+
+ numCycles += curTick - previousTick;
+ previousTick = curTick;
}
@@ -454,6 +481,9 @@ TimingSimpleCPU::completeIfetch(Packet *pkt)
delete pkt->req;
delete pkt;
+ numCycles += curTick - previousTick;
+ previousTick = curTick;
+
if (getState() == SimObject::Draining) {
completeDrain();
return;
@@ -533,14 +563,8 @@ TimingSimpleCPU::completeDataAccess(Packet *pkt)
assert(_status == DcacheWaitResponse);
_status = Running;
- if (getState() == SimObject::Draining) {
- completeDrain();
-
- delete pkt->req;
- delete pkt;
-
- return;
- }
+ numCycles += curTick - previousTick;
+ previousTick = curTick;
Fault fault = curStaticInst->completeAcc(pkt, this, traceData);
@@ -552,6 +576,14 @@ TimingSimpleCPU::completeDataAccess(Packet *pkt)
delete pkt;
postExecute();
+
+ if (getState() == SimObject::Draining) {
+ advancePC(fault);
+ completeDrain();
+
+ return;
+ }
+
advanceInst(fault);
}
diff --git a/src/cpu/simple/timing.hh b/src/cpu/simple/timing.hh
index 18e13aeb2..988ddeded 100644
--- a/src/cpu/simple/timing.hh
+++ b/src/cpu/simple/timing.hh
@@ -167,6 +167,7 @@ class TimingSimpleCPU : public BaseSimpleCPU
Packet *dcache_pkt;
int cpu_id;
+ Tick previousTick;
public:
diff --git a/src/mem/physical.cc b/src/mem/physical.cc
index 96d78bd99..7303f278e 100644
--- a/src/mem/physical.cc
+++ b/src/mem/physical.cc
@@ -195,7 +195,7 @@ PhysicalMemory::checkLockedAddrList(Request *req)
void
PhysicalMemory::doFunctionalAccess(Packet *pkt)
{
- assert(pkt->getAddr() + pkt->getSize() < params()->addrRange.size());
+ assert(pkt->getAddr() + pkt->getSize() <= params()->addrRange.size());
if (pkt->isRead()) {
if (pkt->req->isLocked()) {
diff --git a/src/python/m5/objects/FUPool.py b/src/python/m5/objects/FUPool.py
index 4b4be79a6..916183bd7 100644
--- a/src/python/m5/objects/FUPool.py
+++ b/src/python/m5/objects/FUPool.py
@@ -1,6 +1,12 @@
from m5.SimObject import SimObject
from m5.params import *
+from FuncUnit import *
+from FuncUnitConfig import *
class FUPool(SimObject):
type = 'FUPool'
FUList = VectorParam.FUDesc("list of FU's for this pool")
+
+class DefaultFUPool(FUPool):
+ FUList = [ IntALU(), IntMultDiv(), FP_ALU(), FP_MultDiv(), ReadPort(),
+ WritePort(), RdWrPort(), IprPort() ]
diff --git a/src/python/m5/objects/FuncUnitConfig.py b/src/python/m5/objects/FuncUnitConfig.py
new file mode 100644
index 000000000..43d7a4bb7
--- /dev/null
+++ b/src/python/m5/objects/FuncUnitConfig.py
@@ -0,0 +1,41 @@
+from m5.SimObject import SimObject
+from m5.params import *
+from FuncUnit import *
+
+class IntALU(FUDesc):
+ opList = [ OpDesc(opClass='IntAlu') ]
+ count = 6
+
+class IntMultDiv(FUDesc):
+ opList = [ OpDesc(opClass='IntMult', opLat=3),
+ OpDesc(opClass='IntDiv', opLat=20, issueLat=19) ]
+ count=2
+
+class FP_ALU(FUDesc):
+ opList = [ OpDesc(opClass='FloatAdd', opLat=2),
+ OpDesc(opClass='FloatCmp', opLat=2),
+ OpDesc(opClass='FloatCvt', opLat=2) ]
+ count = 4
+
+class FP_MultDiv(FUDesc):
+ opList = [ OpDesc(opClass='FloatMult', opLat=4),
+ OpDesc(opClass='FloatDiv', opLat=12, issueLat=12),
+ OpDesc(opClass='FloatSqrt', opLat=24, issueLat=24) ]
+ count = 2
+
+class ReadPort(FUDesc):
+ opList = [ OpDesc(opClass='MemRead') ]
+ count = 0
+
+class WritePort(FUDesc):
+ opList = [ OpDesc(opClass='MemWrite') ]
+ count = 0
+
+class RdWrPort(FUDesc):
+ opList = [ OpDesc(opClass='MemRead'), OpDesc(opClass='MemWrite') ]
+ count = 4
+
+class IprPort(FUDesc):
+ opList = [ OpDesc(opClass='IprAccess', opLat = 3, issueLat = 3) ]
+ count = 1
+
diff --git a/src/python/m5/objects/O3CPU.py b/src/python/m5/objects/O3CPU.py
index 59b40c6e8..20eef383f 100644
--- a/src/python/m5/objects/O3CPU.py
+++ b/src/python/m5/objects/O3CPU.py
@@ -3,6 +3,7 @@ from m5.proxy import *
from m5 import build_env
from BaseCPU import BaseCPU
from Checker import O3Checker
+from FUPool import *
class DerivO3CPU(BaseCPU):
type = 'DerivO3CPU'
@@ -14,11 +15,13 @@ class DerivO3CPU(BaseCPU):
if build_env['USE_CHECKER']:
if not build_env['FULL_SYSTEM']:
checker = Param.BaseCPU(O3Checker(workload=Parent.workload,
- exitOnError=True,
+ exitOnError=False,
+ updateOnError=True,
warnOnlyOnLoadError=False),
"checker")
else:
- checker = Param.BaseCPU(O3Checker(exitOnError=True, warnOnlyOnLoadError=False), "checker")
+ checker = Param.BaseCPU(O3Checker(exitOnError=False, updateOnError=True,
+ warnOnlyOnLoadError=False), "checker")
checker.itb = Parent.itb
checker.dtb = Parent.dtb
@@ -57,7 +60,7 @@ class DerivO3CPU(BaseCPU):
issueWidth = Param.Unsigned(8, "Issue width")
wbWidth = Param.Unsigned(8, "Writeback width")
wbDepth = Param.Unsigned(1, "Writeback depth")
- fuPool = Param.FUPool("Functional Unit pool")
+ fuPool = Param.FUPool(DefaultFUPool(), "Functional Unit pool")
iewToCommitDelay = Param.Unsigned(1, "Issue/Execute/Writeback to commit "
"delay")
@@ -77,7 +80,7 @@ class DerivO3CPU(BaseCPU):
localHistoryBits = Param.Unsigned(11, "Bits for the local history")
globalPredictorSize = Param.Unsigned(8192, "Size of global predictor")
globalCtrBits = Param.Unsigned(2, "Bits per counter")
- globalHistoryBits = Param.Unsigned(4096, "Bits of history")
+ globalHistoryBits = Param.Unsigned(13, "Bits of history")
choicePredictorSize = Param.Unsigned(8192, "Size of choice predictor")
choiceCtrBits = Param.Unsigned(2, "Bits of choice counters")
diff --git a/tests/configs/o3-timing-mp.py b/tests/configs/o3-timing-mp.py
index 55af8be0d..68631b3d2 100644
--- a/tests/configs/o3-timing-mp.py
+++ b/tests/configs/o3-timing-mp.py
@@ -29,7 +29,6 @@
import m5
from m5.objects import *
m5.AddToPath('../configs/common')
-from FullO3Config import *
# --------------------
# Base L1 Cache
@@ -54,7 +53,7 @@ class L2(BaseCache):
write_buffers = 8
nb_cores = 4
-cpus = [ DetailedO3CPU(cpu_id=i) for i in xrange(nb_cores) ]
+cpus = [ DerivO3CPU(cpu_id=i) for i in xrange(nb_cores) ]
# system simulated
system = System(cpu = cpus, physmem = PhysicalMemory(), membus =
diff --git a/tests/configs/o3-timing.py b/tests/configs/o3-timing.py
index 227e1ba21..0dd7be506 100644
--- a/tests/configs/o3-timing.py
+++ b/tests/configs/o3-timing.py
@@ -29,7 +29,6 @@
import m5
from m5.objects import *
m5.AddToPath('../configs/common')
-from FullO3Config import *
class MyCache(BaseCache):
assoc = 2
@@ -38,7 +37,7 @@ class MyCache(BaseCache):
mshrs = 10
tgts_per_mshr = 5
-cpu = DetailedO3CPU()
+cpu = DerivO3CPU()
cpu.addTwoLevelCacheHierarchy(MyCache(size = '128kB'), MyCache(size = '256kB'),
MyCache(size = '2MB'))
cpu.mem = cpu.dcache