author    Andreas Hansson <andreas.hansson@arm.com>  2015-11-06 03:26:41 -0500
committer Andreas Hansson <andreas.hansson@arm.com>  2015-11-06 03:26:41 -0500
commit    654266f39cd67055d6176d22a46c7d678f6340c4 (patch)
tree      250cf876eca7a4370ecc3a3e3fa6d9ba695f2830 /src/mem/cache/mshr.cc
parent    f02a9338c1efaf7680f598a57ff6607e9b11120e (diff)
download  gem5-654266f39cd67055d6176d22a46c7d678f6340c4.tar.xz
mem: Add cache clusivity
This patch adds a parameter to control the cache clusivity, that is, whether the cache is mostly inclusive or mostly exclusive. At the moment there is no intention to support strict policies, and thus the options are: 1) mostly inclusive, or 2) mostly exclusive.

The choice of policy guides the behaviour on a cache fill, and a new helper function, allocOnFill, is created to encapsulate the decision-making process. For the timing mode, the decision is annotated on the MSHR on sending out the downstream packet, and in atomic mode we directly pass the decision to handleFill. We (ab)use the tempBlock in cases where we are not allocating on fill, leaving the rest of the cache unaffected. Simple and effective.

This patch also makes it more explicit that multiple caches are allowed to consider a block writable (this was the case also before this patch). That is, for a mostly inclusive cache, multiple caches upstream may also consider the block exclusive. The caches considering the block writable/exclusive all appear along the same path to memory, and from a coherency protocol point of view it works because we always snoop upwards in zero time before querying any downstream cache.

Note that this patch does not introduce clean writebacks. Thus, for clean lines we are essentially removing a cache level if it is made mostly exclusive. For example, lines from the read-only L1 instruction cache or table-walker cache are always clean, and simply get dropped rather than being passed to the L2. If the L2 is mostly exclusive and does not allocate on fill, it will thus never hold the line. A follow-on patch adds the clean writebacks.

The patch changes the L2 of the O3_ARM_v7a CPU configuration to be mostly exclusive (and stats are affected accordingly).
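As a rough illustration of the policy described above, the following standalone C++ sketch (illustrative names only: Clusivity, CacheSketch, MSHRSketch and the boolean arguments are not the gem5 API) shows the two pieces the commit message mentions: a mostly exclusive cache skipping allocation on a fill, and the MSHR remembering, via an OR-merge of its targets, whether any target asked for allocation.

#include <iostream>

enum class Clusivity { MostlyInclusive, MostlyExclusive };

struct MSHRSketch {
    bool allocOnFill = false;

    // Mirror of the OR-merge in MSHR::allocateTarget below: once any
    // target wants the block allocated on fill, the decision sticks.
    void addTarget(bool alloc_on_fill) {
        allocOnFill = allocOnFill || alloc_on_fill;
    }
};

struct CacheSketch {
    Clusivity clusivity;

    // Hypothetical counterpart of the allocOnFill helper: a mostly
    // inclusive cache always allocates on fill; a mostly exclusive
    // cache only does so when the triggering request asks for it
    // (e.g. a writeback arriving from an upstream cache).
    bool allocOnFill(bool fill_requests_allocation) const {
        return clusivity == Clusivity::MostlyInclusive ||
               fill_requests_allocation;
    }
};

int main() {
    CacheSketch l2{Clusivity::MostlyExclusive};
    MSHRSketch mshr;

    // A demand read miss in a mostly exclusive L2: no allocation, so the
    // fill would use the temporary block and the line is not kept.
    mshr.addTarget(l2.allocOnFill(false));
    std::cout << "after read miss:    allocOnFill = " << mshr.allocOnFill << '\n';

    // A later target that does want allocation flips the decision.
    mshr.addTarget(l2.allocOnFill(true));
    std::cout << "after alloc target: allocOnFill = " << mshr.allocOnFill << '\n';
}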
Diffstat (limited to 'src/mem/cache/mshr.cc')
-rw-r--r--  src/mem/cache/mshr.cc  14
1 file changed, 11 insertions, 3 deletions
diff --git a/src/mem/cache/mshr.cc b/src/mem/cache/mshr.cc
index f71ff6524..b58c256cd 100644
--- a/src/mem/cache/mshr.cc
+++ b/src/mem/cache/mshr.cc
@@ -66,7 +66,8 @@ MSHR::MSHR() : readyTime(0), _isUncacheable(false), downstreamPending(false),
postInvalidate(false), postDowngrade(false),
queue(NULL), order(0), blkAddr(0),
blkSize(0), isSecure(false), inService(false),
- isForward(false), threadNum(InvalidThreadID), data(NULL)
+ isForward(false), allocOnFill(false),
+ threadNum(InvalidThreadID), data(NULL)
{
}
@@ -202,7 +203,7 @@ MSHR::TargetList::print(std::ostream &os, int verbosity,
void
MSHR::allocate(Addr blk_addr, unsigned blk_size, PacketPtr target,
- Tick when_ready, Counter _order)
+ Tick when_ready, Counter _order, bool alloc_on_fill)
{
blkAddr = blk_addr;
blkSize = blk_size;
@@ -211,6 +212,7 @@ MSHR::allocate(Addr blk_addr, unsigned blk_size, PacketPtr target,
order = _order;
assert(target);
isForward = false;
+ allocOnFill = alloc_on_fill;
_isUncacheable = target->req->isUncacheable();
inService = false;
downstreamPending = false;
@@ -274,7 +276,8 @@ MSHR::deallocate()
* Adds a target to an MSHR
*/
void
-MSHR::allocateTarget(PacketPtr pkt, Tick whenReady, Counter _order)
+MSHR::allocateTarget(PacketPtr pkt, Tick whenReady, Counter _order,
+ bool alloc_on_fill)
{
// assume we'd never issue a prefetch when we've got an
// outstanding miss
@@ -285,6 +288,10 @@ MSHR::allocateTarget(PacketPtr pkt, Tick whenReady, Counter _order)
// have targets added if originally allocated uncacheable
assert(!_isUncacheable);
+ // potentially re-evaluate whether we should allocate on a fill or
+ // not
+ allocOnFill = allocOnFill || alloc_on_fill;
+
// if there's a request already in service for this MSHR, we will
// have to defer the new target until after the response if any of
// the following are true:
@@ -478,6 +485,7 @@ MSHR::print(std::ostream &os, int verbosity, const std::string &prefix) const
prefix, blkAddr, blkAddr + blkSize - 1,
isSecure ? "s" : "ns",
isForward ? "Forward" : "",
+ allocOnFill ? "AllocOnFill" : "",
isForwardNoResponse() ? "ForwNoResp" : "",
needsExclusive() ? "Excl" : "",
_isUncacheable ? "Unc" : "",