author    Andreas Hansson <andreas.hansson@arm.com>  2012-02-24 11:52:49 -0500
committer Andreas Hansson <andreas.hansson@arm.com>  2012-02-24 11:52:49 -0500
commit    0cd0a8fdd3dc1e329673e2c034e67c2694a6908e (patch)
tree      3c7031ad4313e3b982c7d2294aad72538908f2f2 /src/mem/cache
parent    77878d0a87ee18709ca4d6459b8ae436cc101fa7 (diff)
download  gem5-0cd0a8fdd3dc1e329673e2c034e67c2694a6908e.tar.xz
MEM: Simplify cache ports preparing for master/slave split
This patch splits the two cache ports into a master (memory-side) and a slave (cpu-side) subclass of port with slightly different functionality. For example, only the CPU-side port blocks incoming requests, and only the memory-side port schedules send events outside of what the transmit list dictates. This patch simplifies the two classes by relying further on SimpleTimingPort, and also generalises the latter to better accommodate the changes (introducing trySendTiming and scheduleSend). The memory-side cache port overrides sendDeferredPacket so that it can not only send responses from the transmit list, but also send requests based on the MSHRs. A follow-on patch further simplifies the SimpleTimingPort and the cache ports.
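As a rough illustration of the slave-port behaviour described above, the following minimal standalone C++ sketch models the block/retry logic that moves into the new CacheSlavePort. This is not gem5 code: ToySlavePort, the callback queue and the printed messages are invented for this example, while setBlocked/clearBlocked/mustSendRetry mirror the logic in the patch below (which schedules the retry one tick later via an event).

    // Standalone sketch, not gem5 code: models the blocking/retry behaviour
    // of the new CacheSlavePort. Event scheduling is replaced by a plain
    // callback queue for illustration.
    #include <cassert>
    #include <functional>
    #include <iostream>
    #include <queue>
    #include <string>

    class ToySlavePort {
      public:
        // Stop accepting new requests (the cache is blocked).
        void setBlocked() {
            assert(!blocked);
            blocked = true;
        }

        // Resume normal operation; if a sender was refused while blocked,
        // schedule a single retry callback.
        void clearBlocked(std::queue<std::function<void()>> &eventQueue) {
            assert(blocked);
            blocked = false;
            if (mustSendRetry) {
                mustSendRetry = false;
                eventQueue.push([this] {
                    std::cout << name << " sending retry\n";
                });
            }
        }

        // Incoming timing request: refused (and a retry promised) while blocked.
        bool recvTiming() {
            if (blocked) {
                mustSendRetry = true;
                return false;
            }
            std::cout << name << " accepting request\n";
            return true;
        }

        std::string name = "cache.cpu_side";

      private:
        bool blocked = false;
        bool mustSendRetry = false;
    };

    int main() {
        std::queue<std::function<void()>> events;
        ToySlavePort port;

        port.recvTiming();          // accepted
        port.setBlocked();
        port.recvTiming();          // refused, retry flagged
        port.clearBlocked(events);  // queues the retry
        while (!events.empty()) { events.front()(); events.pop(); }
        return 0;
    }

The master (memory-side) port, by contrast, never blocks; its distinguishing feature in the patch is that sendDeferredPacket considers both the response transmit list and the next ready MSHR when deciding what to send.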
Diffstat (limited to 'src/mem/cache')
-rw-r--r--  src/mem/cache/base.cc        |  76
-rw-r--r--  src/mem/cache/base.hh        |  96
-rw-r--r--  src/mem/cache/cache.hh       |  70
-rw-r--r--  src/mem/cache/cache_impl.hh  | 139
4 files changed, 187 insertions(+), 194 deletions(-)
diff --git a/src/mem/cache/base.cc b/src/mem/cache/base.cc
index 27ff6961b..c7c213cc6 100644
--- a/src/mem/cache/base.cc
+++ b/src/mem/cache/base.cc
@@ -1,4 +1,16 @@
/*
+ * Copyright (c) 2012 ARM Limited
+ * All rights reserved.
+ *
+ * The license below extends only to copyright in the software and shall
+ * not be construed as granting a license to any other intellectual
+ * property including but not limited to intellectual property relating
+ * to a hardware implementation of the functionality of the software
+ * licensed hereunder. You may use the software subject to the license
+ * terms below provided that you ensure that this notice is replicated
+ * unmodified and in its entirety in all distributions of the software,
+ * modified or unmodified, in source code or in binary form.
+ *
* Copyright (c) 2003-2005 The Regents of The University of Michigan
* All rights reserved.
*
@@ -42,13 +54,20 @@
using namespace std;
-BaseCache::CachePort::CachePort(const std::string &_name, BaseCache *_cache,
- const std::string &_label)
- : SimpleTimingPort(_name, _cache), cache(_cache),
- label(_label), blocked(false), mustSendRetry(false)
+BaseCache::CacheMasterPort::CacheMasterPort(const std::string &_name,
+ BaseCache *_cache,
+ const std::string &_label)
+ : SimpleTimingPort(_name, _cache, _label)
{
}
+BaseCache::CacheSlavePort::CacheSlavePort(const std::string &_name,
+ BaseCache *_cache,
+ const std::string &_label)
+ : SimpleTimingPort(_name, _cache, _label), blocked(false),
+ mustSendRetry(false), sendRetryEvent(this)
+{
+}
BaseCache::BaseCache(const Params *p)
: MemObject(p),
@@ -69,56 +88,25 @@ BaseCache::BaseCache(const Params *p)
{
}
-
-bool
-BaseCache::CachePort::checkFunctional(PacketPtr pkt)
-{
- pkt->pushLabel(label);
- bool done = SimpleTimingPort::checkFunctional(pkt);
- pkt->popLabel();
- return done;
-}
-
-
-unsigned
-BaseCache::CachePort::deviceBlockSize() const
-{
- return cache->getBlockSize();
-}
-
-
-bool
-BaseCache::CachePort::recvRetryCommon()
-{
- assert(waitingOnRetry);
- waitingOnRetry = false;
- return false;
-}
-
-
void
-BaseCache::CachePort::setBlocked()
+BaseCache::CacheSlavePort::setBlocked()
{
assert(!blocked);
- DPRINTF(Cache, "Cache Blocking\n");
+ DPRINTF(CachePort, "Cache port %s blocking new requests\n", name());
blocked = true;
- //Clear the retry flag
- mustSendRetry = false;
}
void
-BaseCache::CachePort::clearBlocked()
+BaseCache::CacheSlavePort::clearBlocked()
{
assert(blocked);
- DPRINTF(Cache, "Cache Unblocking\n");
+ DPRINTF(CachePort, "Cache port %s accepting new requests\n", name());
blocked = false;
- if (mustSendRetry)
- {
- DPRINTF(Cache, "Cache Sending Retry\n");
+ if (mustSendRetry) {
+ DPRINTF(CachePort, "Cache port %s sending retry\n", name());
mustSendRetry = false;
- SendRetryEvent *ev = new SendRetryEvent(this, true);
// @TODO: need to find a better time (next bus cycle?)
- cache->schedule(ev, curTick() + 1);
+ owner->schedule(sendRetryEvent, curTick() + 1);
}
}
@@ -126,8 +114,8 @@ BaseCache::CachePort::clearBlocked()
void
BaseCache::init()
{
- if (!cpuSidePort || !memSidePort)
- panic("Cache not hooked up on both sides\n");
+ if (!cpuSidePort->isConnected() || !memSidePort->isConnected())
+ panic("Cache %s not hooked up on both sides\n", name());
cpuSidePort->sendRangeChange();
}
diff --git a/src/mem/cache/base.hh b/src/mem/cache/base.hh
index cff8813cd..e522bc0c9 100644
--- a/src/mem/cache/base.hh
+++ b/src/mem/cache/base.hh
@@ -1,4 +1,16 @@
/*
+ * Copyright (c) 2012 ARM Limited
+ * All rights reserved.
+ *
+ * The license below extends only to copyright in the software and shall
+ * not be construed as granting a license to any other intellectual
+ * property including but not limited to intellectual property relating
+ * to a hardware implementation of the functionality of the software
+ * licensed hereunder. You may use the software subject to the license
+ * terms below provided that you ensure that this notice is replicated
+ * unmodified and in its entirety in all distributions of the software,
+ * modified or unmodified, in source code or in binary form.
+ *
* Copyright (c) 2003-2005 The Regents of The University of Michigan
* All rights reserved.
*
@@ -97,50 +109,88 @@ class BaseCache : public MemObject
protected:
- class CachePort : public SimpleTimingPort
+ /**
+ * A cache master port is used for the memory-side port of the
+ * cache, and in addition to the basic timing port that only sends
+ * response packets through a transmit list, it also offers the
+ * ability to schedule and send request packets (requests &
+ * writebacks). The send event is scheduled through requestBus,
+ * and the sendDeferredPacket of the timing port is modified to
+ * consider both the transmit list and the requests from the MSHR.
+ */
+ class CacheMasterPort : public SimpleTimingPort
{
+
public:
- BaseCache *cache;
- protected:
- CachePort(const std::string &_name, BaseCache *_cache,
- const std::string &_label);
+ /**
+ * Schedule a send of a request packet (from the MSHR). Note
+ * that we could already have a retry or a transmit list of
+ * responses outstanding.
+ */
+ void requestBus(RequestCause cause, Tick time)
+ {
+ DPRINTF(CachePort, "Asserting bus request for cause %d\n", cause);
+ schedSendEvent(time);
+ }
- virtual unsigned deviceBlockSize() const;
+ void respond(PacketPtr pkt, Tick time) {
+ schedSendTiming(pkt, time);
+ }
- bool recvRetryCommon();
+ protected:
- typedef EventWrapper<Port, &Port::sendRetry>
- SendRetryEvent;
+ CacheMasterPort(const std::string &_name, BaseCache *_cache,
+ const std::string &_label);
- const std::string label;
+ /**
+ * Memory-side port always snoops.
+ *
+ * return always true
+ */
+ virtual bool isSnooping() { return true; }
+ };
+
+ /**
+ * A cache slave port is used for the CPU-side port of the cache,
+ * and it is basically a simple timing port that uses a transmit
+ * list for responses to the CPU (or connected master). In
+ * addition, it has the functionality to block the port for
+ * incoming requests. If blocked, the port will issue a retry once
+ * unblocked.
+ */
+ class CacheSlavePort : public SimpleTimingPort
+ {
public:
+
+ /** Do not accept any new requests. */
void setBlocked();
+ /** Return to normal operation and accept new requests. */
void clearBlocked();
- bool checkFunctional(PacketPtr pkt);
+ void respond(PacketPtr pkt, Tick time) {
+ schedSendTiming(pkt, time);
+ }
+
+ protected:
+
+ CacheSlavePort(const std::string &_name, BaseCache *_cache,
+ const std::string &_label);
bool blocked;
bool mustSendRetry;
- void requestBus(RequestCause cause, Tick time)
- {
- DPRINTF(CachePort, "Asserting bus request for cause %d\n", cause);
- if (!waitingOnRetry) {
- schedSendEvent(time);
- }
- }
+ private:
+
+ EventWrapper<Port, &Port::sendRetry> sendRetryEvent;
- void respond(PacketPtr pkt, Tick time) {
- schedSendTiming(pkt, time);
- }
};
- CachePort *cpuSidePort;
- CachePort *memSidePort;
+ CacheSlavePort *cpuSidePort;
+ CacheMasterPort *memSidePort;
protected:
diff --git a/src/mem/cache/cache.hh b/src/mem/cache/cache.hh
index b2569648e..288395584 100644
--- a/src/mem/cache/cache.hh
+++ b/src/mem/cache/cache.hh
@@ -41,6 +41,7 @@
* Dave Greene
* Steve Reinhardt
* Ron Dreslinski
+ * Andreas Hansson
*/
/**
@@ -76,59 +77,68 @@ class Cache : public BaseCache
protected:
- class CpuSidePort : public CachePort
+ /**
+ * The CPU-side port extends the base cache slave port with access
+ * functions for functional, atomic and timing requests.
+ */
+ class CpuSidePort : public CacheSlavePort
{
- public:
- CpuSidePort(const std::string &_name,
- Cache<TagStore> *_cache,
- const std::string &_label);
+ private:
- // BaseCache::CachePort just has a BaseCache *; this function
- // lets us get back the type info we lost when we stored the
- // cache pointer there.
- Cache<TagStore> *myCache() {
- return static_cast<Cache<TagStore> *>(cache);
- }
+ // a pointer to our specific cache implementation
+ Cache<TagStore> *cache;
- virtual AddrRangeList getAddrRanges();
+ protected:
virtual bool recvTiming(PacketPtr pkt);
virtual Tick recvAtomic(PacketPtr pkt);
virtual void recvFunctional(PacketPtr pkt);
- };
- class MemSidePort : public CachePort
- {
+ virtual unsigned deviceBlockSize() const
+ { return cache->getBlockSize(); }
+
+ virtual AddrRangeList getAddrRanges();
+
public:
- MemSidePort(const std::string &_name,
- Cache<TagStore> *_cache,
+
+ CpuSidePort(const std::string &_name, Cache<TagStore> *_cache,
const std::string &_label);
- // BaseCache::CachePort just has a BaseCache *; this function
- // lets us get back the type info we lost when we stored the
- // cache pointer there.
- Cache<TagStore> *myCache() {
- return static_cast<Cache<TagStore> *>(cache);
- }
+ };
- void sendPacket();
+ /**
+ * The memory-side port extends the base cache master port with
+ * access functions for functional, atomic and timing snoops.
+ */
+ class MemSidePort : public CacheMasterPort
+ {
+ private:
- void processSendEvent();
+ // a pointer to our specific cache implementation
+ Cache<TagStore> *cache;
- virtual bool isSnooping();
+ protected:
virtual bool recvTiming(PacketPtr pkt);
- virtual void recvRetry();
-
virtual Tick recvAtomic(PacketPtr pkt);
virtual void recvFunctional(PacketPtr pkt);
- typedef EventWrapper<MemSidePort, &MemSidePort::processSendEvent>
- SendEvent;
+ virtual unsigned deviceBlockSize() const
+ { return cache->getBlockSize(); }
+
+ public:
+
+ MemSidePort(const std::string &_name, Cache<TagStore> *_cache,
+ const std::string &_label);
+
+ /**
+ * Overload sendDeferredPacket of SimpleTimingPort.
+ */
+ virtual void sendDeferredPacket();
};
/** Tag and data Storage */
diff --git a/src/mem/cache/cache_impl.hh b/src/mem/cache/cache_impl.hh
index 87b688617..40359d31e 100644
--- a/src/mem/cache/cache_impl.hh
+++ b/src/mem/cache/cache_impl.hh
@@ -95,7 +95,7 @@ template<class TagStore>
Port *
Cache<TagStore>::getPort(const std::string &if_name, int idx)
{
- if (if_name == "" || if_name == "cpu_side") {
+ if (if_name == "cpu_side") {
return cpuSidePort;
} else if (if_name == "mem_side") {
return memSidePort;
@@ -1553,17 +1553,13 @@ Cache<TagStore>::nextMSHRReadyTime()
template<class TagStore>
AddrRangeList
-Cache<TagStore>::CpuSidePort::
-getAddrRanges()
+Cache<TagStore>::CpuSidePort::getAddrRanges()
{
- // CPU side port doesn't snoop; it's a target only. It can
- // potentially respond to any address.
AddrRangeList ranges;
- ranges.push_back(myCache()->getAddrRange());
+ ranges.push_back(cache->getAddrRange());
return ranges;
}
-
template<class TagStore>
bool
Cache<TagStore>::CpuSidePort::recvTiming(PacketPtr pkt)
@@ -1575,32 +1571,33 @@ Cache<TagStore>::CpuSidePort::recvTiming(PacketPtr pkt)
return false;
}
- myCache()->timingAccess(pkt);
+ cache->timingAccess(pkt);
return true;
}
-
template<class TagStore>
Tick
Cache<TagStore>::CpuSidePort::recvAtomic(PacketPtr pkt)
{
- return myCache()->atomicAccess(pkt);
+ assert(pkt->isRequest());
+ // atomic request
+ return cache->atomicAccess(pkt);
}
-
template<class TagStore>
void
Cache<TagStore>::CpuSidePort::recvFunctional(PacketPtr pkt)
{
- myCache()->functionalAccess(pkt, true);
+ assert(pkt->isRequest());
+ // functional request
+ cache->functionalAccess(pkt, true);
}
-
template<class TagStore>
Cache<TagStore>::
CpuSidePort::CpuSidePort(const std::string &_name, Cache<TagStore> *_cache,
const std::string &_label)
- : BaseCache::CachePort(_name, _cache, _label)
+ : BaseCache::CacheSlavePort(_name, _cache, _label), cache(_cache)
{
}
@@ -1612,17 +1609,6 @@ CpuSidePort::CpuSidePort(const std::string &_name, Cache<TagStore> *_cache,
template<class TagStore>
bool
-Cache<TagStore>::MemSidePort::isSnooping()
-{
- // Memory-side port always snoops, but never passes requests
- // through to targets on the cpu side (so we don't add anything to
- // the address range list).
- return true;
-}
-
-
-template<class TagStore>
-bool
Cache<TagStore>::MemSidePort::recvTiming(PacketPtr pkt)
{
// this needs to be fixed so that the cache updates the mshr and sends the
@@ -1631,60 +1617,45 @@ Cache<TagStore>::MemSidePort::recvTiming(PacketPtr pkt)
if (pkt->wasNacked())
panic("Need to implement cache resending nacked packets!\n");
- if (pkt->isRequest() && blocked) {
- DPRINTF(Cache,"Scheduling a retry while blocked\n");
- mustSendRetry = true;
- return false;
- }
-
if (pkt->isResponse()) {
- myCache()->handleResponse(pkt);
+ cache->handleResponse(pkt);
} else {
- myCache()->snoopTiming(pkt);
+ cache->snoopTiming(pkt);
}
return true;
}
-
template<class TagStore>
Tick
Cache<TagStore>::MemSidePort::recvAtomic(PacketPtr pkt)
{
- // in atomic mode, responses go back to the sender via the
- // function return from sendAtomic(), not via a separate
- // sendAtomic() from the responder. Thus we should never see a
- // response packet in recvAtomic() (anywhere, not just here).
- assert(!pkt->isResponse());
- return myCache()->snoopAtomic(pkt);
+ assert(pkt->isRequest());
+ // atomic snoop
+ return cache->snoopAtomic(pkt);
}
-
template<class TagStore>
void
Cache<TagStore>::MemSidePort::recvFunctional(PacketPtr pkt)
{
- myCache()->functionalAccess(pkt, false);
+ assert(pkt->isRequest());
+ // functional snoop (note that in contrast to atomic we don't have
+ // a specific functionalSnoop method, as they have the same
+ // behaviour regardless)
+ cache->functionalAccess(pkt, false);
}
-
-
template<class TagStore>
void
-Cache<TagStore>::MemSidePort::sendPacket()
+Cache<TagStore>::MemSidePort::sendDeferredPacket()
{
- // if we have responses that are ready, they take precedence
+ // if we have a response packet waiting we have to start with that
if (deferredPacketReady()) {
- bool success = sendTiming(transmitList.front().pkt);
-
- if (success) {
- //send successful, remove packet
- transmitList.pop_front();
- }
-
- waitingOnRetry = !success;
+ // use the normal approach from the timing port
+ trySendTiming();
} else {
- // check for non-response packets (requests & writebacks)
- PacketPtr pkt = myCache()->getTimingPacket();
+ // check for request packets (requests & writebacks)
+ PacketPtr pkt = cache->getTimingPacket();
if (pkt == NULL) {
// can happen if e.g. we attempt a writeback and fail, but
// before the retry, the writeback is eliminated because
@@ -1693,65 +1664,39 @@ Cache<TagStore>::MemSidePort::sendPacket()
} else {
MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
- bool success = sendTiming(pkt);
+ waitingOnRetry = !sendTiming(pkt);
- waitingOnRetry = !success;
if (waitingOnRetry) {
DPRINTF(CachePort, "now waiting on a retry\n");
if (!mshr->isForwardNoResponse()) {
+ // we are awaiting a retry, but we
+ // delete the packet and will be creating a new packet
+ // when we get the opportunity
delete pkt;
}
+ // note that we have now masked any requestBus and
+ // schedSendEvent (we will wait for a retry before
+ // doing anything), and this is so even if we do not
+ // care about this packet and might override it before
+ // it gets retried
} else {
- myCache()->markInService(mshr, pkt);
+ cache->markInService(mshr, pkt);
}
}
}
-
- // tried to send packet... if it was successful (no retry), see if
- // we need to rerequest bus or not
+ // if we succeeded and are not waiting for a retry, schedule the
+ // next send, not only looking at the response transmit list, but
+ // also considering when the next MSHR is ready
if (!waitingOnRetry) {
- Tick nextReady = std::min(deferredPacketReadyTime(),
- myCache()->nextMSHRReadyTime());
- // @TODO: need to facotr in prefetch requests here somehow
- if (nextReady != MaxTick) {
- DPRINTF(CachePort, "more packets to send @ %d\n", nextReady);
- cache->schedule(sendEvent, std::max(nextReady, curTick() + 1));
- } else {
- // no more to send right now: if we're draining, we may be done
- if (drainEvent && !sendEvent->scheduled()) {
- drainEvent->process();
- drainEvent = NULL;
- }
- }
+ scheduleSend(cache->nextMSHRReadyTime());
}
}
template<class TagStore>
-void
-Cache<TagStore>::MemSidePort::recvRetry()
-{
- assert(waitingOnRetry);
- sendPacket();
-}
-
-
-template<class TagStore>
-void
-Cache<TagStore>::MemSidePort::processSendEvent()
-{
- assert(!waitingOnRetry);
- sendPacket();
-}
-
-
-template<class TagStore>
Cache<TagStore>::
MemSidePort::MemSidePort(const std::string &_name, Cache<TagStore> *_cache,
const std::string &_label)
- : BaseCache::CachePort(_name, _cache, _label)
+ : BaseCache::CacheMasterPort(_name, _cache, _label), cache(_cache)
{
- // override default send event from SimpleTimingPort
- delete sendEvent;
- sendEvent = new SendEvent(this);
}