author     Nikos Nikoleris <nikos.nikoleris@arm.com>   2018-05-04 15:57:44 +0100
committer  Nikos Nikoleris <nikos.nikoleris@arm.com>   2018-05-31 15:10:42 +0000
commit     4976ff51d711ac999afffafe0fd701a2d8997999 (patch)
tree       fa028e699a451a7023e47e3505c062a591836d11 /src
parent     18312bc263d7b3568044d0b361ce376077f432c9 (diff)
download   gem5-4976ff51d711ac999afffafe0fd701a2d8997999.tar.xz
mem-cache: Refactor the recvAtomic function
The recvAtomic function in the cache handles atomic requests. Over time,
recvAtomic has grown in complexity and code size. This change factors out
some of its functionality into a separate function. The new function
handles atomic requests that miss.

Change-Id: If77d2de1e3e802e1da37f889f68910e700c59209
Reviewed-on: https://gem5-review.googlesource.com/10425
Reviewed-by: Jason Lowe-Power <jason@lowepower.com>
Reviewed-by: Daniel Carvalho <odanrc@yahoo.com.br>
Maintainer: Nikos Nikoleris <nikos.nikoleris@arm.com>
Diffstat (limited to 'src')
-rw-r--r--  src/mem/cache/cache.cc  168
-rw-r--r--  src/mem/cache/cache.hh   16
2 files changed, 108 insertions(+), 76 deletions(-)
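
Before the diff itself, a minimal sketch of the refactoring pattern the patch applies: recvAtomic keeps the common lookup path and hands the miss path to a helper that returns the extra cycles it spent. This is a self-contained toy model, not gem5 code; ToyCache, Packet, and the fixed latencies are invented for illustration, while the function names mirror the patch.

#include <cstdint>
#include <iostream>
#include <list>
#include <string>

using Cycles = std::uint64_t;
using Tick = std::uint64_t;

struct Packet { std::string cmd; bool miss; };
using PacketList = std::list<Packet *>;

class ToyCache
{
    const Tick clockPeriod = 500;   // ticks per cycle (made-up value)
    const Cycles lookupLatency = 1; // cost of the tag/data lookup

    // Factored-out miss path: build and "send" a downstream request and
    // report how many extra cycles that took (fixed in this toy model).
    Cycles handleAtomicReqMiss(Packet *pkt, PacketList &writebacks)
    {
        std::cout << "miss: forwarding " << pkt->cmd << " downstream\n";
        (void)writebacks;           // a real cache may append writebacks
        return Cycles(20);
    }

  public:
    Tick recvAtomic(Packet *pkt)
    {
        Cycles lat = lookupLatency; // common path: the lookup always pays
        PacketList writebacks;

        if (pkt->miss)
            lat += handleAtomicReqMiss(pkt, writebacks);

        return lat * clockPeriod;   // convert cycles to ticks
    }
};

int main()
{
    ToyCache cache;
    Packet hit{"ReadReq", false};
    Packet miss{"ReadReq", true};
    std::cout << "hit:  " << cache.recvAtomic(&hit) << " ticks\n";
    std::cout << "miss: " << cache.recvAtomic(&miss) << " ticks\n";
    return 0;
}

The real patch performs the same split with gem5's Cycles and Tick types and the full miss-response handling, as the cache.cc hunks below show.
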
diff --git a/src/mem/cache/cache.cc b/src/mem/cache/cache.cc
index 9b26675fb..5d7fcca84 100644
--- a/src/mem/cache/cache.cc
+++ b/src/mem/cache/cache.cc
@@ -1067,6 +1067,96 @@ Cache::createMissPacket(PacketPtr cpu_pkt, CacheBlk *blk,
}
+Cycles
+Cache::handleAtomicReqMiss(PacketPtr pkt, CacheBlk *blk,
+ PacketList &writebacks)
+{
+ // deal with the packets that go through the write path of
+ // the cache, i.e. any evictions and writes
+ if (pkt->isEviction() || pkt->cmd == MemCmd::WriteClean ||
+ (pkt->req->isUncacheable() && pkt->isWrite())) {
+ Cycles latency = ticksToCycles(memSidePort->sendAtomic(pkt));
+
+ // at this point, if the request was an uncacheable write
+ // request, it has been satisfied by a memory below and the
+ // packet carries the response back
+ assert(!(pkt->req->isUncacheable() && pkt->isWrite()) ||
+ pkt->isResponse());
+
+ return latency;
+ }
+
+ // only misses left
+
+ PacketPtr bus_pkt = createMissPacket(pkt, blk, pkt->needsWritable());
+
+ bool is_forward = (bus_pkt == nullptr);
+
+ if (is_forward) {
+ // just forwarding the same request to the next level
+ // no local cache operation involved
+ bus_pkt = pkt;
+ }
+
+ DPRINTF(Cache, "%s: Sending an atomic %s\n", __func__,
+ bus_pkt->print());
+
+#if TRACING_ON
+ CacheBlk::State old_state = blk ? blk->status : 0;
+#endif
+
+ Cycles latency = ticksToCycles(memSidePort->sendAtomic(bus_pkt));
+
+ bool is_invalidate = bus_pkt->isInvalidate();
+
+ // We are now dealing with the response handling
+ DPRINTF(Cache, "%s: Receive response: %s in state %i\n", __func__,
+ bus_pkt->print(), old_state);
+
+ // If packet was a forward, the response (if any) is already
+ // in place in the bus_pkt == pkt structure, so we don't need
+ // to do anything. Otherwise, use the separate bus_pkt to
+ // generate response to pkt and then delete it.
+ if (!is_forward) {
+ if (pkt->needsResponse()) {
+ assert(bus_pkt->isResponse());
+ if (bus_pkt->isError()) {
+ pkt->makeAtomicResponse();
+ pkt->copyError(bus_pkt);
+ } else if (pkt->cmd == MemCmd::WriteLineReq) {
+ // note the use of pkt, not bus_pkt here.
+
+ // write-line request to the cache that promoted
+ // the write to a whole line
+ blk = handleFill(pkt, blk, writebacks,
+ allocOnFill(pkt->cmd));
+ assert(blk != NULL);
+ is_invalidate = false;
+ satisfyRequest(pkt, blk);
+ } else if (bus_pkt->isRead() ||
+ bus_pkt->cmd == MemCmd::UpgradeResp) {
+ // we're updating cache state to allow us to
+ // satisfy the upstream request from the cache
+ blk = handleFill(bus_pkt, blk, writebacks,
+ allocOnFill(pkt->cmd));
+ satisfyRequest(pkt, blk);
+ maintainClusivity(pkt->fromCache(), blk);
+ } else {
+ // we're satisfying the upstream request without
+ // modifying cache state, e.g., a write-through
+ pkt->makeAtomicResponse();
+ }
+ }
+ delete bus_pkt;
+ }
+
+ if (is_invalidate && blk && blk->isValid()) {
+ invalidateBlock(blk);
+ }
+
+ return latency;
+}
+
Tick
Cache::recvAtomic(PacketPtr pkt)
{
@@ -1118,84 +1208,10 @@ Cache::recvAtomic(PacketPtr pkt)
// handle writebacks resulting from the access here to ensure they
// logically proceed anything happening below
doWritebacksAtomic(writebacks);
+ assert(writebacks.empty());
if (!satisfied) {
- // MISS
-
- // deal with the packets that go through the write path of
- // the cache, i.e. any evictions and writes
- if (pkt->isEviction() || pkt->cmd == MemCmd::WriteClean ||
- (pkt->req->isUncacheable() && pkt->isWrite())) {
- lat += ticksToCycles(memSidePort->sendAtomic(pkt));
- return lat * clockPeriod();
- }
- // only misses left
-
- PacketPtr bus_pkt = createMissPacket(pkt, blk, pkt->needsWritable());
-
- bool is_forward = (bus_pkt == nullptr);
-
- if (is_forward) {
- // just forwarding the same request to the next level
- // no local cache operation involved
- bus_pkt = pkt;
- }
-
- DPRINTF(Cache, "%s: Sending an atomic %s\n", __func__,
- bus_pkt->print());
-
-#if TRACING_ON
- CacheBlk::State old_state = blk ? blk->status : 0;
-#endif
-
- lat += ticksToCycles(memSidePort->sendAtomic(bus_pkt));
-
- bool is_invalidate = bus_pkt->isInvalidate();
-
- // We are now dealing with the response handling
- DPRINTF(Cache, "%s: Receive response: %s in state %i\n", __func__,
- bus_pkt->print(), old_state);
-
- // If packet was a forward, the response (if any) is already
- // in place in the bus_pkt == pkt structure, so we don't need
- // to do anything. Otherwise, use the separate bus_pkt to
- // generate response to pkt and then delete it.
- if (!is_forward) {
- if (pkt->needsResponse()) {
- assert(bus_pkt->isResponse());
- if (bus_pkt->isError()) {
- pkt->makeAtomicResponse();
- pkt->copyError(bus_pkt);
- } else if (pkt->cmd == MemCmd::WriteLineReq) {
- // note the use of pkt, not bus_pkt here.
-
- // write-line request to the cache that promoted
- // the write to a whole line
- blk = handleFill(pkt, blk, writebacks,
- allocOnFill(pkt->cmd));
- assert(blk != NULL);
- is_invalidate = false;
- satisfyRequest(pkt, blk);
- } else if (bus_pkt->isRead() ||
- bus_pkt->cmd == MemCmd::UpgradeResp) {
- // we're updating cache state to allow us to
- // satisfy the upstream request from the cache
- blk = handleFill(bus_pkt, blk, writebacks,
- allocOnFill(pkt->cmd));
- satisfyRequest(pkt, blk);
- maintainClusivity(pkt->fromCache(), blk);
- } else {
- // we're satisfying the upstream request without
- // modifying cache state, e.g., a write-through
- pkt->makeAtomicResponse();
- }
- }
- delete bus_pkt;
- }
-
- if (is_invalidate && blk && blk->isValid()) {
- invalidateBlock(blk);
- }
+ lat += handleAtomicReqMiss(pkt, blk, writebacks);
}
// Note that we don't invoke the prefetcher at all in atomic mode.
diff --git a/src/mem/cache/cache.hh b/src/mem/cache/cache.hh
index 0b04aadea..4a56f14f1 100644
--- a/src/mem/cache/cache.hh
+++ b/src/mem/cache/cache.hh
@@ -422,6 +422,22 @@ class Cache : public BaseCache
*/
void recvTimingSnoopResp(PacketPtr pkt);
+
+ /**
+ * Handle a request in atomic mode that missed in this cache
+ *
+ * Creates a downstream request, sends it to the memory below and
+ * handles the response. As we are in atomic mode all operations
+ * are performed immediately.
+ *
+ * @param pkt The packet with the request
+ * @param blk The referenced block
+ * @param writebacks A list with packets for any performed writebacks
+ * @return Cycles for handling the request
+ */
+ Cycles handleAtomicReqMiss(PacketPtr pkt, CacheBlk *blk,
+ PacketList &writebacks);
+
/**
* Performs the access specified by the request.
* @param pkt The request to perform.
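
As a closing illustration of the contract documented for handleAtomicReqMiss above, here is a hedged sketch of how the caller consumes the helper, condensed from the recvAtomic hunk in cache.cc; the real function also handles snoops and whole-line-write promotion, which are omitted here, so treat this as an outline rather than the exact gem5 source.

Tick
Cache::recvAtomic(PacketPtr pkt)
{
    Cycles lat = lookupLatency;         // base lookup cost
    CacheBlk *blk = nullptr;
    PacketList writebacks;

    // common path: try to satisfy the request from this cache
    bool satisfied = access(pkt, blk, lat, writebacks);
    doWritebacksAtomic(writebacks);

    if (!satisfied) {
        // miss path: the helper talks to the memory below and returns
        // the extra cycles spent there
        lat += handleAtomicReqMiss(pkt, blk, writebacks);
    }

    // convert the accumulated cycles into ticks for the atomic latency
    return lat * clockPeriod();
}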