summaryrefslogtreecommitdiff
path: root/src/mem/cache/base.hh
diff options
context:
space:
mode:
author: Steve Reinhardt <Steve.Reinhardt@amd.com> 2008-07-16 11:10:33 -0700
committer: Steve Reinhardt <Steve.Reinhardt@amd.com> 2008-07-16 11:10:33 -0700
commit6629d9b2bc58a885bfebce1517fd12483497b6e4 (patch)
tree22e2bc30405ba483ac571951f49cc77d7e713477 /src/mem/cache/base.hh
parent05d8c9acb8a5a985956998fc13551288496e5cdc (diff)
downloadgem5-6629d9b2bc58a885bfebce1517fd12483497b6e4.tar.xz
mem: use single BadAddr responder per system.
Previously there was one per bus, which caused some coherence problems when more than one decided to respond. Now there is just one on the main memory bus. The default bus responder on all other buses is now the downstream cache's cpu_side port. Caches no longer need to do address range filtering; instead, we just have a simple flag to prevent snoops from propagating to the I/O bus.
Diffstat (limited to 'src/mem/cache/base.hh')
-rw-r--r-- src/mem/cache/base.hh | 20
1 file changed, 13 insertions(+), 7 deletions(-)
diff --git a/src/mem/cache/base.hh b/src/mem/cache/base.hh
index 4319717e5..d33c655d7 100644
--- a/src/mem/cache/base.hh
+++ b/src/mem/cache/base.hh
@@ -100,8 +100,7 @@ class BaseCache : public MemObject
protected:
CachePort(const std::string &_name, BaseCache *_cache,
- const std::string &_label,
- std::vector<Range<Addr> > filter_ranges);
+ const std::string &_label);
virtual void recvStatusChange(Status status);
@@ -129,9 +128,6 @@ class BaseCache : public MemObject
bool mustSendRetry;
- /** filter ranges */
- std::vector<Range<Addr> > filterRanges;
-
void requestBus(RequestCause cause, Tick time)
{
DPRINTF(CachePort, "Asserting bus request for cause %d\n", cause);
@@ -194,8 +190,8 @@ class BaseCache : public MemObject
/** The number of targets for each MSHR. */
const int numTarget;
- /** Increasing order number assigned to each incoming request. */
- uint64_t order;
+ /** Do we forward snoops from mem side port through to cpu side port? */
+ bool forwardSnoops;
/**
* Bit vector of the blocking reasons for the access path.
@@ -203,6 +199,9 @@ class BaseCache : public MemObject
*/
uint8_t blocked;
+ /** Increasing order number assigned to each incoming request. */
+ uint64_t order;
+
/** Stores time the cache blocked for statistics. */
Tick blockedCycle;
@@ -215,6 +214,11 @@ class BaseCache : public MemObject
/** The drain event. */
Event *drainEvent;
+ /**
+ * The address range to which the cache responds on the CPU side.
+ * Normally this is all possible memory addresses. */
+ Range<Addr> addrRange;
+
public:
// Statistics
/**
@@ -377,6 +381,8 @@ class BaseCache : public MemObject
Addr blockAlign(Addr addr) const { return (addr & ~(blkSize - 1)); }
+ const Range<Addr> &getAddrRange() const { return addrRange; }
+
MSHR *allocateMissBuffer(PacketPtr pkt, Tick time, bool requestBus)
{
assert(!pkt->req->isUncacheable());