author      Nikos Nikoleris <nikos.nikoleris@arm.com>   2016-12-05 16:48:19 -0500
committer   Nikos Nikoleris <nikos.nikoleris@arm.com>   2016-12-05 16:48:19 -0500
commit      0bd9dfb8dee9afae4f87b89435c11fa581a71983 (patch)
tree        6339641038d51b40b7410e9c1ee862aef63bf11d /src/mem/cache/mshr.cc
parent      d28c2906f4125ce8704ce9cefa471f1a5050eeae (diff)
download    gem5-0bd9dfb8dee9afae4f87b89435c11fa581a71983.tar.xz
mem: Service only the 1st FromCPU MSHR target on ReadRespWithInv
A response to a ReadReq can either be a ReadResp or a ReadRespWithInvalidate. As we add targets to an MSHR for a ReadReq we assume that the response will be a ReadResp. When the response is invalidating (ReadRespWithInvalidate), servicing more than one target can potentially violate the memory ordering. This change fixes the way we handle a ReadRespWithInvalidate: when a cache receives a ReadRespWithInvalidate we service only the first FromCPU target and all the FromSnoop targets from the MSHR target list. The rest of the FromCPU targets are deferred and serviced by a new request.

Change-Id: I75c30c268851987ee5f8644acb46f440b4eeeec2
Reviewed-by: Andreas Hansson <andreas.hansson@arm.com>
Reviewed-by: Stephan Diestelhorst <stephan.diestelhorst@arm.com>
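To make the ordering rule concrete, the following is a minimal standalone sketch of the partitioning the commit describes. It is an illustration only: Source, Tgt, and extractServiceable are simplified stand-ins introduced for this example, not the actual gem5 classes (those live in src/mem/cache/mshr.hh).

    #include <cassert>
    #include <iostream>
    #include <list>

    // Simplified stand-ins for the MSHR target bookkeeping.
    enum class Source { FromCPU, FromSnoop };
    struct Tgt { Source source; int id; };

    // Partition a target list the way the commit describes for a
    // ReadRespWithInvalidate: the first FromCPU target and every
    // FromSnoop target are serviced now; the remaining FromCPU targets
    // stay behind so a new request can fetch a fresh copy of the block.
    std::list<Tgt> extractServiceable(std::list<Tgt> &targets)
    {
        std::list<Tgt> ready;
        auto it = targets.begin();
        assert(it != targets.end() && it->source == Source::FromCPU);
        ready.push_back(*it);
        it = targets.erase(it);
        while (it != targets.end()) {
            if (it->source == Source::FromCPU) {
                ++it;                     // deferred: stays in targets
            } else {
                ready.push_back(*it);     // snoop targets are always serviced
                it = targets.erase(it);
            }
        }
        return ready;
    }

    int main()
    {
        std::list<Tgt> targets = {{Source::FromCPU, 0}, {Source::FromSnoop, 1},
                                  {Source::FromCPU, 2}, {Source::FromCPU, 3}};
        std::list<Tgt> ready = extractServiceable(targets);
        std::cout << "serviced now:";
        for (const Tgt &t : ready) std::cout << ' ' << t.id;    // 0 1
        std::cout << "\ndeferred to a new request:";
        for (const Tgt &t : targets) std::cout << ' ' << t.id;  // 2 3
        std::cout << '\n';
        return 0;
    }

Running it prints "serviced now: 0 1" and "deferred to a new request: 2 3", mirroring the split that extractServiceableTargets() performs in the diff below.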
Diffstat (limited to 'src/mem/cache/mshr.cc')
-rw-r--r--  src/mem/cache/mshr.cc  50
1 file changed, 44 insertions(+), 6 deletions(-)
diff --git a/src/mem/cache/mshr.cc b/src/mem/cache/mshr.cc
index 86e77b186..e3ee44cc6 100644
--- a/src/mem/cache/mshr.cc
+++ b/src/mem/cache/mshr.cc
@@ -190,6 +190,7 @@ MSHR::TargetList::clearDownstreamPending()
             if (mshr != nullptr) {
                 mshr->clearDownstreamPending();
             }
+            t.markedPending = false;
         }
     }
 }
@@ -455,17 +456,54 @@ MSHR::handleSnoop(PacketPtr pkt, Counter _order)
     return true;
 }
 
+MSHR::TargetList
+MSHR::extractServiceableTargets(PacketPtr pkt)
+{
+    TargetList ready_targets;
+    // If the downstream MSHR got an invalidation request then we only
+    // service the first of the FromCPU targets and any other
+    // non-FromCPU target. This way the remaining FromCPU targets
+    // issue a new request and get a fresh copy of the block and we
+    // avoid memory consistency violations.
+    if (pkt->cmd == MemCmd::ReadRespWithInvalidate) {
+        auto it = targets.begin();
+        assert(it->source == Target::FromCPU);
+        ready_targets.push_back(*it);
+        it = targets.erase(it);
+        while (it != targets.end()) {
+            if (it->source == Target::FromCPU) {
+                it++;
+            } else {
+                assert(it->source == Target::FromSnoop);
+                ready_targets.push_back(*it);
+                it = targets.erase(it);
+            }
+        }
+        ready_targets.populateFlags();
+    } else {
+        std::swap(ready_targets, targets);
+    }
+    targets.populateFlags();
+
+    return ready_targets;
+}
 
 bool
 MSHR::promoteDeferredTargets()
 {
-    assert(targets.empty());
-    if (deferredTargets.empty()) {
-        return false;
-    }
+    if (targets.empty()) {
+        if (deferredTargets.empty()) {
+            return false;
+        }
 
-    // swap targets & deferredTargets lists
-    std::swap(targets, deferredTargets);
+        std::swap(targets, deferredTargets);
+    } else {
+        // If the targets list is not empty then we move the targets
+        // from the deferredTargets list to the back of the targets
+        // list. A new request will then service the combined list.
+        targets.splice(targets.end(), deferredTargets);
+        targets.populateFlags();
+    }
 
     // clear deferredTargets flags
     deferredTargets.resetFlags();
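Because a ReadRespWithInvalidate can now leave unserviced FromCPU targets in the MSHR, promoteDeferredTargets() above no longer asserts that the targets list is empty; instead it splices the deferred targets onto the tail so their relative order is kept and one new request can service the combined list. Below is a minimal self-contained sketch of that splice behavior; plain ints stand in for the MSHR targets, so this is an illustration rather than gem5 code.

    #include <iostream>
    #include <list>

    int main()
    {
        // Targets left behind after a ReadRespWithInvalidate, followed by
        // previously deferred targets; splice() appends the latter in
        // order and leaves the source list empty, as in the diff above.
        std::list<int> targets  = {2, 3};   // still waiting in the MSHR
        std::list<int> deferred = {4, 5};   // deferredTargets list

        targets.splice(targets.end(), deferred);

        std::cout << "targets now:";
        for (int t : targets) std::cout << ' ' << t;           // 2 3 4 5
        std::cout << "\ndeferred empty: " << std::boolalpha
                  << deferred.empty() << '\n';                 // true
        return 0;
    }

After the splice the deferred list is empty and the combined list keeps both groups in their original order, which is what allows a single fresh request to service everything still outstanding.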