author    Steve Reinhardt <steve.reinhardt@amd.com>  2011-01-07 21:50:29 -0800
committer Steve Reinhardt <steve.reinhardt@amd.com>  2011-01-07 21:50:29 -0800
commit    6f1187943cf78c2fd0334bd7e4372ae79a587fa4
tree      8d0eac2e2f4d55d48245266d3930ad4e7f92030f
parent    c22be9f2f016872b05d65c82055ddc936b4aa075
Replace curTick global variable with accessor functions.
This step makes it easy to replace the accessor functions (which still access a global variable) with ones that access per-thread curTick values.
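The call sites in this patch switch from reading a global variable to calling a thin accessor. For reference, a minimal sketch of what such accessors can look like is shown below; the real definitions live outside src/mem (and are therefore not part of this diff), and the names _curTick and setCurTick() are illustrative assumptions rather than the committed interface.

    #include <cstdint>

    typedef uint64_t Tick;              // gem5's 64-bit tick type

    extern Tick _curTick;               // still a single global at this step

    // Read accessor used throughout the diff below as curTick().
    inline Tick curTick() { return _curTick; }

    // Write accessor (illustrative name) so simulation code can advance time
    // without touching the variable directly.
    inline void setCurTick(Tick t) { _curTick = t; }

Because every call site now goes through curTick(), a later step can swap the backing storage for a per-thread value (for example, thread-local storage) without revisiting the files changed here.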
Diffstat (limited to 'src/mem'):

 -rw-r--r--  src/mem/bridge.cc                          | 10
 -rw-r--r--  src/mem/bus.cc                             | 24
 -rw-r--r--  src/mem/cache/base.cc                      |  2
 -rw-r--r--  src/mem/cache/base.hh                      |  4
 -rw-r--r--  src/mem/cache/blk.hh                       |  2
 -rw-r--r--  src/mem/cache/cache_impl.hh                | 18
 -rw-r--r--  src/mem/cache/mshr.cc                      |  4
 -rw-r--r--  src/mem/cache/mshr.hh                      |  2
 -rw-r--r--  src/mem/cache/mshr_queue.hh                |  2
 -rw-r--r--  src/mem/cache/tags/fa_lru.cc               |  2
 -rw-r--r--  src/mem/cache/tags/iic.cc                  |  6
 -rw-r--r--  src/mem/cache/tags/lru.cc                  |  8
 -rw-r--r--  src/mem/dram.cc                            | 72
 -rw-r--r--  src/mem/mport.cc                           |  2
 -rw-r--r--  src/mem/packet.hh                          |  6
 -rw-r--r--  src/mem/request.hh                         |  8
 -rw-r--r--  src/mem/ruby/eventqueue/RubyEventQueue.hh  |  2
 -rw-r--r--  src/mem/ruby/system/RubyPort.cc            |  4
 -rw-r--r--  src/mem/ruby/system/Sequencer.cc           |  4
 -rw-r--r--  src/mem/ruby/system/System.cc              |  2
 -rw-r--r--  src/mem/tport.cc                           |  8
 -rw-r--r--  src/mem/tport.hh                           |  4

 22 files changed, 98 insertions(+), 98 deletions(-)
diff --git a/src/mem/bridge.cc b/src/mem/bridge.cc
index 668b492e8..4b8325088 100644
--- a/src/mem/bridge.cc
+++ b/src/mem/bridge.cc
@@ -158,7 +158,7 @@ Bridge::BridgePort::nackRequest(PacketPtr pkt)
pkt->setNacked();
//put it on the list to send
- Tick readyTime = curTick + nackDelay;
+ Tick readyTime = curTick() + nackDelay;
PacketBuffer *buf = new PacketBuffer(pkt, readyTime, true);
// nothing on the list, add it and we're done
@@ -221,7 +221,7 @@ Bridge::BridgePort::queueForSendTiming(PacketPtr pkt)
- Tick readyTime = curTick + delay;
+ Tick readyTime = curTick() + delay;
PacketBuffer *buf = new PacketBuffer(pkt, readyTime);
// If we're about to put this packet at the head of the queue, we
@@ -241,7 +241,7 @@ Bridge::BridgePort::trySend()
PacketBuffer *buf = sendQueue.front();
- assert(buf->ready <= curTick);
+ assert(buf->ready <= curTick());
PacketPtr pkt = buf->pkt;
@@ -283,7 +283,7 @@ Bridge::BridgePort::trySend()
if (!sendQueue.empty()) {
buf = sendQueue.front();
DPRINTF(BusBridge, "Scheduling next send\n");
- schedule(sendEvent, std::max(buf->ready, curTick + 1));
+ schedule(sendEvent, std::max(buf->ready, curTick() + 1));
}
} else {
DPRINTF(BusBridge, " unsuccessful\n");
@@ -301,7 +301,7 @@ Bridge::BridgePort::recvRetry()
{
inRetry = false;
Tick nextReady = sendQueue.front()->ready;
- if (nextReady <= curTick)
+ if (nextReady <= curTick())
trySend();
else
schedule(sendEvent, nextReady);
diff --git a/src/mem/bus.cc b/src/mem/bus.cc
index 39399017c..c84d9fc5e 100644
--- a/src/mem/bus.cc
+++ b/src/mem/bus.cc
@@ -142,10 +142,10 @@ Bus::calcPacketTiming(PacketPtr pkt)
// a cycle boundary to take up only the following cycle. Anything
// that happens later will have to "wait" for the end of that
// cycle, and then start using the bus after that.
- if (tickNextIdle < curTick) {
- tickNextIdle = curTick;
+ if (tickNextIdle < curTick()) {
+ tickNextIdle = curTick();
if (tickNextIdle % clock != 0)
- tickNextIdle = curTick - (curTick % clock) + clock;
+ tickNextIdle = curTick() - (curTick() % clock) + clock;
}
Tick headerTime = tickNextIdle + headerCycles * clock;
@@ -181,7 +181,7 @@ void Bus::occupyBus(Tick until)
reschedule(busIdle, tickNextIdle, true);
DPRINTF(Bus, "The bus is now occupied from tick %d to %d\n",
- curTick, tickNextIdle);
+ curTick(), tickNextIdle);
}
/** Function called by the port when the bus is receiving a Timing
@@ -205,7 +205,7 @@ Bus::recvTiming(PacketPtr pkt)
// If the bus is busy, or other devices are in line ahead of the current
// one, put this device on the retry list.
if (!pkt->isExpressSnoop() &&
- (tickNextIdle > curTick ||
+ (tickNextIdle > curTick() ||
(retryList.size() && (!inRetry || src_port != retryList.front()))))
{
addToRetryList(src_port);
@@ -295,7 +295,7 @@ void
Bus::recvRetry(int id)
{
// If there's anything waiting, and the bus isn't busy...
- if (retryList.size() && curTick >= tickNextIdle) {
+ if (retryList.size() && curTick() >= tickNextIdle) {
//retryingPort = retryList.front();
inRetry = true;
DPRINTF(Bus, "Sending a retry to %s\n", retryList.front()->getPeer()->name());
@@ -308,7 +308,7 @@ Bus::recvRetry(int id)
inRetry = false;
//Bring tickNextIdle up to the present
- while (tickNextIdle < curTick)
+ while (tickNextIdle < curTick())
tickNextIdle += clock;
//Burn a cycle for the missed grant.
@@ -318,7 +318,7 @@ Bus::recvRetry(int id)
}
}
//If we weren't able to drain before, we might be able to now.
- if (drainEvent && retryList.size() == 0 && curTick >= tickNextIdle) {
+ if (drainEvent && retryList.size() == 0 && curTick() >= tickNextIdle) {
drainEvent->process();
// Clear the drain event once we're done with it.
drainEvent = NULL;
@@ -435,7 +435,7 @@ Bus::recvAtomic(PacketPtr pkt)
}
// why do we have this packet field and the return value both???
- pkt->finishTime = curTick + response_latency;
+ pkt->finishTime = curTick() + response_latency;
return response_latency;
}
@@ -649,7 +649,7 @@ Bus::drain(Event * de)
//We should check that we're not "doing" anything, and that noone is
//waiting. We might be idle but have someone waiting if the device we
//contacted for a retry didn't actually retry.
- if (retryList.size() || (curTick < tickNextIdle && busIdle.scheduled())) {
+ if (retryList.size() || (curTick() < tickNextIdle && busIdle.scheduled())) {
drainEvent = de;
return 1;
}
@@ -659,8 +659,8 @@ Bus::drain(Event * de)
void
Bus::startup()
{
- if (tickNextIdle < curTick)
- tickNextIdle = (curTick / clock) * clock + clock;
+ if (tickNextIdle < curTick())
+ tickNextIdle = (curTick() / clock) * clock + clock;
}
Bus *
diff --git a/src/mem/cache/base.cc b/src/mem/cache/base.cc
index 70bc51cda..9166e1a09 100644
--- a/src/mem/cache/base.cc
+++ b/src/mem/cache/base.cc
@@ -124,7 +124,7 @@ BaseCache::CachePort::clearBlocked()
mustSendRetry = false;
SendRetryEvent *ev = new SendRetryEvent(this, true);
// @TODO: need to find a better time (next bus cycle?)
- schedule(ev, curTick + 1);
+ schedule(ev, curTick() + 1);
}
}
diff --git a/src/mem/cache/base.hh b/src/mem/cache/base.hh
index 867d77121..e8a644296 100644
--- a/src/mem/cache/base.hh
+++ b/src/mem/cache/base.hh
@@ -434,7 +434,7 @@ class BaseCache : public MemObject
uint8_t flag = 1 << cause;
if (blocked == 0) {
blocked_causes[cause]++;
- blockedCycle = curTick;
+ blockedCycle = curTick();
cpuSidePort->setBlocked();
}
blocked |= flag;
@@ -454,7 +454,7 @@ class BaseCache : public MemObject
blocked &= ~flag;
DPRINTF(Cache,"Unblocking for cause %d, mask=%d\n", cause, blocked);
if (blocked == 0) {
- blocked_cycles[cause] += curTick - blockedCycle;
+ blocked_cycles[cause] += curTick() - blockedCycle;
cpuSidePort->clearBlocked();
}
}
diff --git a/src/mem/cache/blk.hh b/src/mem/cache/blk.hh
index bf78a2268..6be09597c 100644
--- a/src/mem/cache/blk.hh
+++ b/src/mem/cache/blk.hh
@@ -89,7 +89,7 @@ class CacheBlk
/** The current status of this block. @sa CacheBlockStatusBits */
State status;
- /** Which curTick will this block be accessable */
+ /** Which curTick() will this block be accessable */
Tick whenReady;
/**
diff --git a/src/mem/cache/cache_impl.hh b/src/mem/cache/cache_impl.hh
index c5b7ca065..e4e4a3c92 100644
--- a/src/mem/cache/cache_impl.hh
+++ b/src/mem/cache/cache_impl.hh
@@ -412,7 +412,7 @@ Cache<TagStore>::timingAccess(PacketPtr pkt)
// MemDebug::cacheAccess(pkt);
// we charge hitLatency for doing just about anything here
- Tick time = curTick + hitLatency;
+ Tick time = curTick() + hitLatency;
if (pkt->isResponse()) {
// must be cache-to-cache response from upper to lower level
@@ -504,7 +504,7 @@ Cache<TagStore>::timingAccess(PacketPtr pkt)
if (satisfied) {
if (needsResponse) {
pkt->makeTimingResponse();
- cpuSidePort->respond(pkt, curTick+lat);
+ cpuSidePort->respond(pkt, curTick()+lat);
} else {
delete pkt;
}
@@ -532,7 +532,7 @@ Cache<TagStore>::timingAccess(PacketPtr pkt)
noTargetMSHR = mshr;
setBlocked(Blocked_NoTargets);
// need to be careful with this... if this mshr isn't
- // ready yet (i.e. time > curTick_, we don't want to
+ // ready yet (i.e. time > curTick()_, we don't want to
// move it ahead of mshrs that are ready
// mshrQueue.moveToFront(mshr);
}
@@ -816,7 +816,7 @@ template<class TagStore>
void
Cache<TagStore>::handleResponse(PacketPtr pkt)
{
- Tick time = curTick + hitLatency;
+ Tick time = curTick() + hitLatency;
MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
bool is_error = pkt->isError();
@@ -848,7 +848,7 @@ Cache<TagStore>::handleResponse(PacketPtr pkt)
MSHR::Target *initial_tgt = mshr->getTarget();
BlkType *blk = tags->findBlock(pkt->getAddr());
int stats_cmd_idx = initial_tgt->pkt->cmdToIndex();
- Tick miss_latency = curTick - initial_tgt->recvTime;
+ Tick miss_latency = curTick() - initial_tgt->recvTime;
PacketList writebacks;
if (pkt->req->isUncacheable()) {
@@ -1159,7 +1159,7 @@ doTimingSupplyResponse(PacketPtr req_pkt, uint8_t *blk_data,
// invalidate it.
pkt->cmd = MemCmd::ReadRespWithInvalidate;
}
- memSidePort->respond(pkt, curTick + hitLatency);
+ memSidePort->respond(pkt, curTick() + hitLatency);
}
template<class TagStore>
@@ -1430,7 +1430,7 @@ Cache<TagStore>::getNextMSHR()
// (hwpf_mshr_misses)
mshr_misses[pkt->cmdToIndex()][0/*pkt->req->threadId()*/]++;
// Don't request bus, since we already have it
- return allocateMissBuffer(pkt, curTick, false);
+ return allocateMissBuffer(pkt, curTick(), false);
}
}
}
@@ -1461,7 +1461,7 @@ Cache<TagStore>::getTimingPacket()
pkt = new Packet(tgt_pkt);
pkt->cmd = MemCmd::UpgradeFailResp;
pkt->senderState = mshr;
- pkt->firstWordTime = pkt->finishTime = curTick;
+ pkt->firstWordTime = pkt->finishTime = curTick();
handleResponse(pkt);
return NULL;
} else if (mshr->isForwardNoResponse()) {
@@ -1679,7 +1679,7 @@ Cache<TagStore>::MemSidePort::sendPacket()
// @TODO: need to facotr in prefetch requests here somehow
if (nextReady != MaxTick) {
DPRINTF(CachePort, "more packets to send @ %d\n", nextReady);
- schedule(sendEvent, std::max(nextReady, curTick + 1));
+ schedule(sendEvent, std::max(nextReady, curTick() + 1));
} else {
// no more to send right now: if we're draining, we may be done
if (drainEvent && !sendEvent->scheduled()) {
diff --git a/src/mem/cache/mshr.cc b/src/mem/cache/mshr.cc
index 54977346f..292c11c6b 100644
--- a/src/mem/cache/mshr.cc
+++ b/src/mem/cache/mshr.cc
@@ -333,7 +333,7 @@ MSHR::handleSnoop(PacketPtr pkt, Counter _order)
// Actual target device (typ. PhysicalMemory) will delete the
// packet on reception, so we need to save a copy here.
PacketPtr cp_pkt = new Packet(pkt, true);
- targets->add(cp_pkt, curTick, _order, Target::FromSnoop,
+ targets->add(cp_pkt, curTick(), _order, Target::FromSnoop,
downstreamPending && targets->needsExclusive);
++ntargets;
@@ -378,7 +378,7 @@ MSHR::promoteDeferredTargets()
deferredTargets->resetFlags();
order = targets->front().order;
- readyTime = std::max(curTick, targets->front().readyTime);
+ readyTime = std::max(curTick(), targets->front().readyTime);
return true;
}
diff --git a/src/mem/cache/mshr.hh b/src/mem/cache/mshr.hh
index 9b55e70ef..7920ad717 100644
--- a/src/mem/cache/mshr.hh
+++ b/src/mem/cache/mshr.hh
@@ -72,7 +72,7 @@ class MSHR : public Packet::SenderState, public Printable
Target(PacketPtr _pkt, Tick _readyTime, Counter _order,
Source _source, bool _markedPending)
- : recvTime(curTick), readyTime(_readyTime), order(_order),
+ : recvTime(curTick()), readyTime(_readyTime), order(_order),
pkt(_pkt), source(_source), markedPending(_markedPending)
{}
};
diff --git a/src/mem/cache/mshr_queue.hh b/src/mem/cache/mshr_queue.hh
index d8c495679..5a8739fc7 100644
--- a/src/mem/cache/mshr_queue.hh
+++ b/src/mem/cache/mshr_queue.hh
@@ -199,7 +199,7 @@ class MSHRQueue
*/
MSHR *getNextMSHR() const
{
- if (readyList.empty() || readyList.front()->readyTime > curTick) {
+ if (readyList.empty() || readyList.front()->readyTime > curTick()) {
return NULL;
}
return readyList.front();
diff --git a/src/mem/cache/tags/fa_lru.cc b/src/mem/cache/tags/fa_lru.cc
index 4d1c2175f..873883c1b 100644
--- a/src/mem/cache/tags/fa_lru.cc
+++ b/src/mem/cache/tags/fa_lru.cc
@@ -220,7 +220,7 @@ FALRU::findVictim(Addr addr, PacketList &writebacks)
blk->isTouched = true;
if (!warmedUp && tagsInUse.value() >= warmupBound) {
warmedUp = true;
- warmupCycle = curTick;
+ warmupCycle = curTick();
}
}
//assert(check());
diff --git a/src/mem/cache/tags/iic.cc b/src/mem/cache/tags/iic.cc
index 1315a17ee..743c6894f 100644
--- a/src/mem/cache/tags/iic.cc
+++ b/src/mem/cache/tags/iic.cc
@@ -257,8 +257,8 @@ IIC::accessBlock(Addr addr, int &lat, int context_src)
hitDepthTotal += sets[set].depth;
tag_ptr->status |= BlkReferenced;
lat = set_lat;
- if (tag_ptr->whenReady > curTick && tag_ptr->whenReady - curTick > set_lat) {
- lat = tag_ptr->whenReady - curTick;
+ if (tag_ptr->whenReady > curTick() && tag_ptr->whenReady - curTick() > set_lat) {
+ lat = tag_ptr->whenReady - curTick();
}
tag_ptr->refCount += 1;
@@ -437,7 +437,7 @@ IIC::getFreeTag(int set, PacketList & writebacks)
tagsInUse++;
if (!warmedUp && tagsInUse.value() >= warmupBound) {
warmedUp = true;
- warmupCycle = curTick;
+ warmupCycle = curTick();
}
return tag_ptr;
diff --git a/src/mem/cache/tags/lru.cc b/src/mem/cache/tags/lru.cc
index 8a8b0d0d6..25e98d293 100644
--- a/src/mem/cache/tags/lru.cc
+++ b/src/mem/cache/tags/lru.cc
@@ -126,9 +126,9 @@ LRU::accessBlock(Addr addr, int &lat, int context_src)
sets[set].moveToHead(blk);
DPRINTF(CacheRepl, "set %x: moving blk %x to MRU\n",
set, regenerateBlkAddr(tag, set));
- if (blk->whenReady > curTick
- && blk->whenReady - curTick > hitLatency) {
- lat = blk->whenReady - curTick;
+ if (blk->whenReady > curTick()
+ && blk->whenReady - curTick() > hitLatency) {
+ lat = blk->whenReady - curTick();
}
blk->refCount += 1;
}
@@ -180,7 +180,7 @@ LRU::insertBlock(Addr addr, BlkType *blk, int context_src)
blk->isTouched = true;
if (!warmedUp && tagsInUse.value() >= warmupBound) {
warmedUp = true;
- warmupCycle = curTick;
+ warmupCycle = curTick();
}
}
diff --git a/src/mem/dram.cc b/src/mem/dram.cc
index ff01ab1dc..6253f9828 100644
--- a/src/mem/dram.cc
+++ b/src/mem/dram.cc
@@ -414,14 +414,14 @@ DRAMMemory::calculateLatency(PacketPtr pkt)
int SD_BEST_T_WRITE_READ_OBANK = (war_lat -1); /* WAR, row miss/hit, another bank */
int SD_BEST_T_WRITE_WRITE_OBANK = 0; /* WAW, row miss/hit, another bank */
- Tick time_since_last_access = curTick-time_last_access;
+ Tick time_since_last_access = curTick()-time_last_access;
Tick time_last_miss = 0; // used for keeping track of times between activations (page misses)
- //int was_idle = (curTick > busy_until);
+ //int was_idle = (curTick() > busy_until);
bool srow_flag = false;
int timing_correction = 0;
- int was_idle = (curTick > busy_until[current_bank]);
- cycles_nCKE[0] += was_idle ? MIN(curTick-busy_until[current_bank], time_since_last_access) : 0;
+ int was_idle = (curTick() > busy_until[current_bank]);
+ cycles_nCKE[0] += was_idle ? MIN(curTick()-busy_until[current_bank], time_since_last_access) : 0;
// bank is precharged
//active_row[current_bank] == DR_NUM_ROWS
@@ -441,7 +441,7 @@ DRAMMemory::calculateLatency(PacketPtr pkt)
if(all_precharged) {
if(was_idle) {
- cycles_all_precharge_nCKE[0] += MIN(curTick-busy_until[current_bank], time_since_last_access);
+ cycles_all_precharge_nCKE[0] += MIN(curTick()-busy_until[current_bank], time_since_last_access);
cycles_all_precharge_CKE[0] += MIN(0, busy_until[current_bank]-time_last_access);
}
else {
@@ -449,7 +449,7 @@ DRAMMemory::calculateLatency(PacketPtr pkt)
}
} else { // some bank is active
if(was_idle) {
- cycles_bank_active_nCKE[0] += MIN(curTick-busy_until[current_bank], time_since_last_access);
+ cycles_bank_active_nCKE[0] += MIN(curTick()-busy_until[current_bank], time_since_last_access);
}
else {
}
@@ -462,7 +462,7 @@ DRAMMemory::calculateLatency(PacketPtr pkt)
}
- time_last_access = curTick;
+ time_last_access = curTick();
////////////////////////////////////////////////////////////////////////////
if ((mem_type == "SDRAM") && (mem_actpolicy == "open"))
@@ -516,7 +516,7 @@ DRAMMemory::calculateLatency(PacketPtr pkt)
if (memctrlpipe_enable == true)
{
- overlap=(int)(busy_until[current_bank] - curTick);
+ overlap=(int)(busy_until[current_bank] - curTick());
}
else overlap = 0;
@@ -529,7 +529,7 @@ DRAMMemory::calculateLatency(PacketPtr pkt)
corrected_overlap = (int) (overlap/cpu_ratio);
}
- /*fprintf(stderr,"%10.0f %10.0f %4d %4d ",(double)busy_until, (double)curTick, overlap, corrected_overlap); debugging*/
+ /*fprintf(stderr,"%10.0f %10.0f %4d %4d ",(double)busy_until, (double)curTick(), overlap, corrected_overlap); debugging*/
if (cmdIsRead == lastCmdIsRead)/*same command*/
{
@@ -889,25 +889,25 @@ DRAMMemory::calculateLatency(PacketPtr pkt)
{
if (memctrlpipe_enable == true)
{
- busy_until[current_bank]=curTick+lat+
+ busy_until[current_bank]=curTick()+lat+
timing_correction;
}
else
{
- if (busy_until[current_bank] >= curTick)
+ if (busy_until[current_bank] >= curTick())
{
busy_until[current_bank]+=(lat+
timing_correction);
total_arb_latency += (busy_until[current_bank]
- - curTick - lat
+ - curTick() - lat
- timing_correction);
- lat=busy_until[current_bank] - curTick;
+ lat=busy_until[current_bank] - curTick();
}
- else busy_until[current_bank]=curTick+lat+
+ else busy_until[current_bank]=curTick()+lat+
timing_correction;
}
}
- else/*the memory request will be satisfied temp cycles after curTick*/
+ else/*the memory request will be satisfied temp cycles after curTick()*/
{
busy_until[current_bank] +=(lat+
timing_correction);
@@ -1001,7 +1001,7 @@ DRAMMemory::calculateLatency(PacketPtr pkt)
if (memctrlpipe_enable == true)
{
- overlap=(int)(busy_until[current_bank] - curTick);
+ overlap=(int)(busy_until[current_bank] - curTick());
}
else overlap=0;
@@ -1014,7 +1014,7 @@ DRAMMemory::calculateLatency(PacketPtr pkt)
corrected_overlap = (int) (overlap/cpu_ratio);
}
- /*fprintf(stderr,"%10.0f %10.0f %6d %6d %2d %2d ",(double)busy_until, (double)curTick, overlap, corrected_overlap,precharge,adjacent);debugging*/
+ /*fprintf(stderr,"%10.0f %10.0f %6d %6d %2d %2d ",(double)busy_until, (double)curTick(), overlap, corrected_overlap,precharge,adjacent);debugging*/
if (cmdIsRead == lastCmdIsRead)/*same command*/
{
@@ -2013,19 +2013,19 @@ DRAMMemory::calculateLatency(PacketPtr pkt)
{
if (memctrlpipe_enable == true)
{
- busy_until[current_bank] =curTick+lat;
+ busy_until[current_bank] =curTick()+lat;
}
else
{
- if (busy_until[current_bank] >= curTick)
+ if (busy_until[current_bank] >= curTick())
{
busy_until[current_bank] +=lat;
- lat=busy_until[current_bank] - curTick;
+ lat=busy_until[current_bank] - curTick();
}
- else busy_until[current_bank] = curTick+lat;
+ else busy_until[current_bank] = curTick()+lat;
}
}
- else/*the memory request will be satisfied temp cycles after curTick*/
+ else/*the memory request will be satisfied temp cycles after curTick()*/
{
busy_until[current_bank] +=lat;
command_overlapping++;
@@ -2073,7 +2073,7 @@ DRAMMemory::calculateLatency(PacketPtr pkt)
}
total_access++;
- overlap=(int)(busy_until[current_bank] - curTick);
+ overlap=(int)(busy_until[current_bank] - curTick());
if (current_bank == last_bank)/*same bank*/
{
@@ -2206,9 +2206,9 @@ DRAMMemory::calculateLatency(PacketPtr pkt)
if (overlap <= 0) /*memory interface is not busy*/
{
- busy_until[current_bank] = curTick+lat;
+ busy_until[current_bank] = curTick()+lat;
}
- else /*the memory request will be satisfied temp cycles after curTick*/
+ else /*the memory request will be satisfied temp cycles after curTick()*/
{
busy_until[current_bank] +=lat;
command_overlapping++;
@@ -2223,7 +2223,7 @@ DRAMMemory::calculateLatency(PacketPtr pkt)
- /*fprintf(stderr,"%10.0f %10.0f %4d %4d \n",(double)busy_until, (double)curTick, overlap, lat);debug*/
+ /*fprintf(stderr,"%10.0f %10.0f %4d %4d \n",(double)busy_until, (double)curTick(), overlap, lat);debug*/
// if((_cpu_num < num_cpus) && (_cpu_num >= 0))
// cout <<"cpu id = " << _cpu_num << "current_bank = " << current_bank << endl;
// bank_access_profile[_cpu_num][current_bank]++;
@@ -2269,7 +2269,7 @@ DRAMMemory::calculateLatency(PacketPtr pkt)
}
total_access++;
- overlap=(int)(busy_until[current_bank] - curTick);
+ overlap=(int)(busy_until[current_bank] - curTick());
if (cpu_ratio < 1.0)
{
@@ -2432,16 +2432,16 @@ DRAMMemory::calculateLatency(PacketPtr pkt)
if (overlap <= 0) /*memory interface is not busy*/
{
- busy_until[current_bank] = curTick+lat;
+ busy_until[current_bank] = curTick()+lat;
}
- else/*the memory request will be satisfied temp cycles after curTick*/
+ else/*the memory request will be satisfied temp cycles after curTick()*/
{
busy_until[current_bank] +=lat;
command_overlapping++;
lat+=overlap;
}
- /*fprintf(stderr,"%10.0f %10.0f %4d %4d \n",(double)busy_until, (double)curTick, overlap, lat);*/
+ /*fprintf(stderr,"%10.0f %10.0f %4d %4d \n",(double)busy_until, (double)curTick(), overlap, lat);*/
if (cmdIsRead)
{
@@ -2494,7 +2494,7 @@ DRAMMemory::calculateLatency(PacketPtr pkt)
total_access++;
lat += chunks;
- overlap=(int)(busy_until[current_bank] - curTick);
+ overlap=(int)(busy_until[current_bank] - curTick());
lastCmdIsRead=cmdIsRead;
if (cpu_ratio < 1.0)
@@ -2509,9 +2509,9 @@ DRAMMemory::calculateLatency(PacketPtr pkt)
if (overlap <= 0) /*memory interface is not busy*/
{
- busy_until[current_bank] = curTick+lat;
+ busy_until[current_bank] = curTick()+lat;
}
- else/*the memory request will be satisfied temp cycles after curTick*/
+ else/*the memory request will be satisfied temp cycles after curTick()*/
{
busy_until[current_bank] +=lat;
command_overlapping++;
@@ -2543,7 +2543,7 @@ DRAMMemory::calculateLatency(PacketPtr pkt)
lat = DR_T_RCD + DR_T_CWD + DR_T_PACKET; /* DR_T_RP + */
}
total_access++;
- overlap=(int)(busy_until[current_bank] - curTick);
+ overlap=(int)(busy_until[current_bank] - curTick());
lat += chunks * DR_T_PACKET; /*every 128 bit need DR_NUM_CYCLES*/
if (cpu_ratio < 1.0)
@@ -2560,9 +2560,9 @@ DRAMMemory::calculateLatency(PacketPtr pkt)
if (overlap <= 0) /*memory interface is not busy*/
{
- busy_until[current_bank] = curTick+lat;
+ busy_until[current_bank] = curTick()+lat;
}
- else/*the memory request will be satisfied temp cycles after curTick*/
+ else/*the memory request will be satisfied temp cycles after curTick()*/
{
busy_until[current_bank] += lat;
command_overlapping++;
diff --git a/src/mem/mport.cc b/src/mem/mport.cc
index 564c560c6..80393c81e 100644
--- a/src/mem/mport.cc
+++ b/src/mem/mport.cc
@@ -50,7 +50,7 @@ MessagePort::recvAtomic(PacketPtr pkt)
void
MessagePort::sendMessageTiming(PacketPtr pkt, Tick latency)
{
- schedSendTiming(pkt, curTick + latency);
+ schedSendTiming(pkt, curTick() + latency);
}
Tick
diff --git a/src/mem/packet.hh b/src/mem/packet.hh
index 41edef8a7..19fff7e3a 100644
--- a/src/mem/packet.hh
+++ b/src/mem/packet.hh
@@ -496,7 +496,7 @@ class Packet : public FastAlloc, public Printable
*/
Packet(Request *_req, MemCmd _cmd, NodeID _dest)
: flags(VALID_DST), cmd(_cmd), req(_req), data(NULL),
- dest(_dest), time(curTick), senderState(NULL)
+ dest(_dest), time(curTick()), senderState(NULL)
{
if (req->hasPaddr()) {
addr = req->getPaddr();
@@ -515,7 +515,7 @@ class Packet : public FastAlloc, public Printable
*/
Packet(Request *_req, MemCmd _cmd, NodeID _dest, int _blkSize)
: flags(VALID_DST), cmd(_cmd), req(_req), data(NULL),
- dest(_dest), time(curTick), senderState(NULL)
+ dest(_dest), time(curTick()), senderState(NULL)
{
if (req->hasPaddr()) {
addr = req->getPaddr() & ~(_blkSize - 1);
@@ -536,7 +536,7 @@ class Packet : public FastAlloc, public Printable
: cmd(pkt->cmd), req(pkt->req),
data(pkt->flags.isSet(STATIC_DATA) ? pkt->data : NULL),
addr(pkt->addr), size(pkt->size), src(pkt->src), dest(pkt->dest),
- time(curTick), senderState(pkt->senderState)
+ time(curTick()), senderState(pkt->senderState)
{
if (!clearFlags)
flags.set(pkt->flags & COPY_FLAGS);
diff --git a/src/mem/request.hh b/src/mem/request.hh
index 38daea266..ec1b8ba29 100644
--- a/src/mem/request.hh
+++ b/src/mem/request.hh
@@ -145,7 +145,7 @@ class Request : public FastAlloc
/**
* The time this request was started. Used to calculate
- * latencies. This field is set to curTick any time paddr or vaddr
+ * latencies. This field is set to curTick() any time paddr or vaddr
* is written.
*/
Tick _time;
@@ -179,7 +179,7 @@ class Request : public FastAlloc
/**
* Constructor for physical (e.g. device) requests. Initializes
- * just physical address, size, flags, and timestamp (to curTick).
+ * just physical address, size, flags, and timestamp (to curTick()).
* These fields are adequate to perform a request.
*/
Request(Addr paddr, int size, Flags flags)
@@ -240,7 +240,7 @@ class Request : public FastAlloc
void
setPhys(Addr paddr, int size, Flags flags)
{
- setPhys(paddr, size, flags, curTick);
+ setPhys(paddr, size, flags, curTick());
}
/**
@@ -255,7 +255,7 @@ class Request : public FastAlloc
_vaddr = vaddr;
_size = size;
_pc = pc;
- _time = curTick;
+ _time = curTick();
_flags.clear(~STICKY_FLAGS);
_flags.set(flags);
diff --git a/src/mem/ruby/eventqueue/RubyEventQueue.hh b/src/mem/ruby/eventqueue/RubyEventQueue.hh
index 6fa8b0ac3..3e2bc3f89 100644
--- a/src/mem/ruby/eventqueue/RubyEventQueue.hh
+++ b/src/mem/ruby/eventqueue/RubyEventQueue.hh
@@ -71,7 +71,7 @@ class RubyEventQueue : public EventManager
RubyEventQueue(EventQueue* eventq, Tick _clock);
~RubyEventQueue();
- Time getTime() const { return curTick/m_clock; }
+ Time getTime() const { return curTick()/m_clock; }
Tick getClock() const { return m_clock; }
void scheduleEvent(Consumer* consumer, Time timeDelta);
void scheduleEventAbsolute(Consumer* consumer, Time timeAbs);
diff --git a/src/mem/ruby/system/RubyPort.cc b/src/mem/ruby/system/RubyPort.cc
index f707af36f..ea1ff04f0 100644
--- a/src/mem/ruby/system/RubyPort.cc
+++ b/src/mem/ruby/system/RubyPort.cc
@@ -318,7 +318,7 @@ bool
RubyPort::M5Port::sendTiming(PacketPtr pkt)
{
//minimum latency, must be > 0
- schedSendTiming(pkt, curTick + (1 * g_eventQueue_ptr->getClock()));
+ schedSendTiming(pkt, curTick() + (1 * g_eventQueue_ptr->getClock()));
return true;
}
@@ -326,7 +326,7 @@ bool
RubyPort::PioPort::sendTiming(PacketPtr pkt)
{
//minimum latency, must be > 0
- schedSendTiming(pkt, curTick + (1 * g_eventQueue_ptr->getClock()));
+ schedSendTiming(pkt, curTick() + (1 * g_eventQueue_ptr->getClock()));
return true;
}
diff --git a/src/mem/ruby/system/Sequencer.cc b/src/mem/ruby/system/Sequencer.cc
index 1a0f8a66a..6357980f2 100644
--- a/src/mem/ruby/system/Sequencer.cc
+++ b/src/mem/ruby/system/Sequencer.cc
@@ -132,7 +132,7 @@ Sequencer::wakeup()
// If there are still outstanding requests, keep checking
schedule(deadlockCheckEvent,
m_deadlock_threshold * g_eventQueue_ptr->getClock() +
- curTick);
+ curTick());
}
}
@@ -223,7 +223,7 @@ Sequencer::insertRequest(SequencerRequest* request)
// See if we should schedule a deadlock check
if (deadlockCheckEvent.scheduled() == false) {
- schedule(deadlockCheckEvent, m_deadlock_threshold + curTick);
+ schedule(deadlockCheckEvent, m_deadlock_threshold + curTick());
}
Address line_addr(request->ruby_request.paddr);
diff --git a/src/mem/ruby/system/System.cc b/src/mem/ruby/system/System.cc
index a6d0d87d6..a704724ac 100644
--- a/src/mem/ruby/system/System.cc
+++ b/src/mem/ruby/system/System.cc
@@ -159,7 +159,7 @@ RubySystem::unserialize(Checkpoint *cp, const string &section)
//
// The main purpose for clearing stats in the unserialize process is so
// that the profiler can correctly set its start time to the unserialized
- // value of curTick
+ // value of curTick()
//
clearStats();
}
diff --git a/src/mem/tport.cc b/src/mem/tport.cc
index 4e89544e3..61f9e143c 100644
--- a/src/mem/tport.cc
+++ b/src/mem/tport.cc
@@ -95,7 +95,7 @@ SimpleTimingPort::recvTiming(PacketPtr pkt)
// recvAtomic() should already have turned packet into
// atomic response
assert(pkt->isResponse());
- schedSendTiming(pkt, curTick + latency);
+ schedSendTiming(pkt, curTick() + latency);
} else {
delete pkt;
}
@@ -107,8 +107,8 @@ SimpleTimingPort::recvTiming(PacketPtr pkt)
void
SimpleTimingPort::schedSendTiming(PacketPtr pkt, Tick when)
{
- assert(when > curTick);
- assert(when < curTick + SimClock::Int::ms);
+ assert(when > curTick());
+ assert(when < curTick() + SimClock::Int::ms);
// Nothing is on the list: add it and schedule an event
if (transmitList.empty() || when < transmitList.front().tick) {
@@ -152,7 +152,7 @@ SimpleTimingPort::sendDeferredPacket()
if (success) {
if (!transmitList.empty() && !sendEvent->scheduled()) {
Tick time = transmitList.front().tick;
- schedule(sendEvent, time <= curTick ? curTick+1 : time);
+ schedule(sendEvent, time <= curTick() ? curTick()+1 : time);
}
if (transmitList.empty() && drainEvent && !sendEvent->scheduled()) {
diff --git a/src/mem/tport.hh b/src/mem/tport.hh
index 7dfe60b72..91a8ab9a5 100644
--- a/src/mem/tport.hh
+++ b/src/mem/tport.hh
@@ -100,7 +100,7 @@ class SimpleTimingPort : public Port
/** Check whether we have a packet ready to go on the transmit list. */
bool deferredPacketReady()
- { return !transmitList.empty() && transmitList.front().tick <= curTick; }
+ { return !transmitList.empty() && transmitList.front().tick <= curTick(); }
Tick deferredPacketReadyTime()
{ return transmitList.empty() ? MaxTick : transmitList.front().tick; }
@@ -129,7 +129,7 @@ class SimpleTimingPort : public Port
/** Attempt to send the packet at the head of the deferred packet
* list. Caller must guarantee that the deferred packet list is
- * non-empty and that the head packet is scheduled for curTick (or
+ * non-empty and that the head packet is scheduled for curTick() (or
* earlier).
*/
void sendDeferredPacket();