Diffstat (limited to 'src')
-rw-r--r--  src/mem/cache/base.hh               60
-rw-r--r--  src/mem/cache/cache_impl.hh         50
-rw-r--r--  src/mem/cache/prefetch/queued.cc     3
3 files changed, 37 insertions(+), 76 deletions(-)
diff --git a/src/mem/cache/base.hh b/src/mem/cache/base.hh
index 041b1f6a5..c3bf6fe87 100644
--- a/src/mem/cache/base.hh
+++ b/src/mem/cache/base.hh
@@ -97,16 +97,6 @@ class BaseCache : public MemObject
NUM_BLOCKED_CAUSES
};
- /**
- * Reasons for cache to request a bus.
- */
- enum RequestCause {
- Request_MSHR = MSHRQueue_MSHRs,
- Request_WB = MSHRQueue_WriteBuffer,
- Request_PF,
- NUM_REQUEST_CAUSES
- };
-
protected:
/**
@@ -114,7 +104,7 @@ class BaseCache : public MemObject
* cache, and in addition to the basic timing port that only sends
* response packets through a transmit list, it also offers the
* ability to schedule and send request packets (requests &
- * writebacks). The send event is scheduled through requestBus,
+ * writebacks). The send event is scheduled through schedSendEvent,
* and the sendDeferredPacket of the timing port is modified to
* consider both the transmit list and the requests from the MSHR.
*/
@@ -127,10 +117,9 @@ class BaseCache : public MemObject
* Schedule a send of a request packet (from the MSHR). Note
* that we could already have a retry outstanding.
*/
- void requestBus(RequestCause cause, Tick time)
+ void schedSendEvent(Tick time)
{
- DPRINTF(CachePort, "Scheduling request at %llu due to %d\n",
- time, cause);
+ DPRINTF(CachePort, "Scheduling send event at %llu\n", time);
reqQueue.schedSendEvent(time);
}
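
To make the interface change concrete: the old requestBus(cause, time) carried a RequestCause that the send machinery no longer uses, while the new schedSendEvent(time) only has to remember the earliest tick at which a send attempt is wanted. A minimal, self-contained sketch of that idea, using simplified stand-in types rather than the real gem5 queued-port classes:

    #include <cstdint>
    #include <cstdio>
    #include <limits>

    using Tick = std::uint64_t;
    static const Tick MaxTick = std::numeric_limits<Tick>::max();

    // Stand-in for the port's request queue: all it needs to remember is
    // the earliest tick at which a send attempt has been requested.
    struct ReqQueue {
        Tick nextSend = MaxTick;

        void schedSendEvent(Tick when)
        {
            // An earlier request simply pulls the single send event
            // forward; there is no per-cause bookkeeping any more.
            if (when < nextSend)
                nextSend = when;
        }
    };

    // Stand-in for CacheMasterPort: the cause argument of the old
    // requestBus(cause, time) is gone, only the time remains.
    struct CacheMasterPort {
        ReqQueue reqQueue;

        void schedSendEvent(Tick time)
        {
            std::printf("Scheduling send event at %llu\n",
                        (unsigned long long)time);
            reqQueue.schedSendEvent(time);
        }
    };

    int main()
    {
        CacheMasterPort port;
        port.schedSendEvent(1000); // e.g. triggered by an MSHR allocation
        port.schedSendEvent(500);  // e.g. a writeback that is ready earlier
        std::printf("next send attempt at tick %llu\n",
                    (unsigned long long)port.reqQueue.nextSend);
        return 0;
    }
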
@@ -213,7 +202,8 @@ class BaseCache : public MemObject
* - MSHR allocateMissBuffer (miss in MSHR queue);
*/
MSHR *allocateBufferInternal(MSHRQueue *mq, Addr addr, int size,
- PacketPtr pkt, Tick time, bool requestBus)
+ PacketPtr pkt, Tick time,
+ bool sched_send)
{
// check that the address is block aligned since we rely on
// this in a number of places when checking for matches and
@@ -226,9 +216,9 @@ class BaseCache : public MemObject
setBlocked((BlockedCause)mq->index);
}
- if (requestBus) {
- requestMemSideBus((RequestCause)mq->index, time);
- }
+ if (sched_send)
+ // schedule the send
+ schedMemSideSendEvent(time);
return mshr;
}
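
allocateBufferInternal is the single place where the sched_send flag is acted on. A rough sketch of its control flow, with hypothetical stand-ins for MSHRQueue and the blocking machinery (the real code tracks far more state):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <list>

    using Tick = std::uint64_t;
    using Addr = std::uint64_t;

    struct MSHR { Addr addr; int size; };

    // Stand-in queue with a fixed capacity; the real MSHRQueue also tracks
    // readiness, ordering and in-service state.
    struct MSHRQueue {
        int index;                  // doubles as the BlockedCause here
        std::size_t capacity;
        std::list<MSHR> entries;

        MSHR *allocate(Addr addr, int size)
        {
            entries.push_back(MSHR{addr, size});
            return &entries.back();
        }
        bool isFull() const { return entries.size() >= capacity; }
    };

    struct SketchCache {
        static const unsigned blkSize = 64;
        unsigned blocked = 0;       // bitmask of BlockedCause values
        Tick memSideSendAt = 0;

        void setBlocked(int cause) { blocked |= 1u << cause; }
        void schedMemSideSendEvent(Tick time) { memSideSendAt = time; }

        MSHR *allocateBufferInternal(MSHRQueue *mq, Addr addr, int size,
                                     Tick time, bool sched_send)
        {
            // Addresses handed to the buffers must be block aligned.
            assert((addr & (blkSize - 1)) == 0);

            MSHR *mshr = mq->allocate(addr, size);

            // Stop accepting new requests of this kind once the queue fills.
            if (mq->isFull())
                setBlocked(mq->index);

            // Schedule the memory-side send only when asked to; a caller
            // that sends the packet itself passes sched_send = false.
            if (sched_send)
                schedMemSideSendEvent(time);

            return mshr;
        }
    };

    int main()
    {
        MSHRQueue mshrQueue{0 /* index */, 4 /* capacity */, {}};
        SketchCache cache;
        cache.allocateBufferInternal(&mshrQueue, 0x1000, 64, 1000, true);
        return cache.memSideSendAt == 1000 ? 0 : 1;
    }
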
@@ -510,21 +500,21 @@ class BaseCache : public MemObject
const AddrRangeList &getAddrRanges() const { return addrRanges; }
- MSHR *allocateMissBuffer(PacketPtr pkt, Tick time, bool requestBus)
+ MSHR *allocateMissBuffer(PacketPtr pkt, Tick time, bool sched_send = true)
{
return allocateBufferInternal(&mshrQueue,
blockAlign(pkt->getAddr()), blkSize,
- pkt, time, requestBus);
+ pkt, time, sched_send);
}
- MSHR *allocateWriteBuffer(PacketPtr pkt, Tick time, bool requestBus)
+ MSHR *allocateWriteBuffer(PacketPtr pkt, Tick time)
{
// should only see clean evictions in a read-only cache
assert(!isReadOnly || pkt->cmd == MemCmd::CleanEvict);
assert(pkt->isWrite() && !pkt->isRead());
return allocateBufferInternal(&writeBuffer,
blockAlign(pkt->getAddr()), blkSize,
- pkt, time, requestBus);
+ pkt, time, true);
}
/**
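
At the call sites the effect is mostly cosmetic: allocateMissBuffer keeps an opt-out flag that now defaults to true, while allocateWriteBuffer always schedules the send. A small sketch of just the signatures (stand-in types, bodies elided):

    #include <cstdint>

    using Tick = std::uint64_t;

    struct Packet { };
    using PacketPtr = Packet *;
    struct MSHR { };

    struct SketchCache {
        bool lastSchedSend = false;

        MSHR *allocateBufferInternal(PacketPtr /* pkt */, Tick /* time */,
                                     bool sched_send)
        {
            // allocation, blocking and event scheduling elided; only record
            // whether a send event would have been scheduled
            lastSchedSend = sched_send;
            return nullptr;
        }

        // Misses keep an opt-out flag, defaulting to "schedule the send".
        MSHR *allocateMissBuffer(PacketPtr pkt, Tick time,
                                 bool sched_send = true)
        {
            return allocateBufferInternal(pkt, time, sched_send);
        }

        // Writebacks always schedule the send.
        MSHR *allocateWriteBuffer(PacketPtr pkt, Tick time)
        {
            return allocateBufferInternal(pkt, time, true);
        }
    };

    int main()
    {
        SketchCache cache;
        Packet pkt;
        cache.allocateMissBuffer(&pkt, 1000);        // schedules the send
        cache.allocateMissBuffer(&pkt, 1000, false); // caller sends itself
        cache.allocateWriteBuffer(&pkt, 1000);       // always schedules
        return cache.lastSchedSend ? 0 : 1;
    }
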
@@ -571,26 +561,16 @@ class BaseCache : public MemObject
}
/**
- * Request the master bus for the given cause and time.
- * @param cause The reason for the request.
- * @param time The time to make the request.
- */
- void requestMemSideBus(RequestCause cause, Tick time)
- {
- memSidePort->requestBus(cause, time);
- }
-
- /**
- * Clear the master bus request for the given cause.
- * @param cause The request reason to clear.
+ * Schedule a send event for the memory-side port. If already
+ * scheduled, this may reschedule the event at an earlier
+ * time. When the specified time is reached, the port is free to
+ * send either a response, a request, or a prefetch request.
+ *
+ * @param time The time when to attempt sending a packet.
*/
- void deassertMemSideBusRequest(RequestCause cause)
+ void schedMemSideSendEvent(Tick time)
{
- // Obsolete... we no longer signal bus requests explicitly so
- // we can't deassert them. Leaving this in as a no-op since
- // the prefetcher calls it to indicate that it no longer wants
- // to request a prefetch, and someday that might be
- // interesting again.
+ memSidePort->schedSendEvent(time);
}
virtual bool inCache(Addr addr, bool is_secure) const = 0;
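
deassertMemSideBusRequest could be dropped because, as the new comment puts it, once the send event fires the port is free to send a response, a request, or a prefetch request; if nothing is ready it simply sends nothing, so there is no per-cause request left to deassert. A rough sketch of that selection step (stand-in types, and a fixed pick order used purely for illustration; in gem5 this logic lives in the queued port and the MSHR machinery):

    #include <cstdio>
    #include <deque>

    struct Packet { const char *what; };
    using PacketPtr = Packet *;

    // Stand-ins for the three possible sources of a memory-side packet.
    struct TransmitList { std::deque<PacketPtr> pkts; };   // queued responses
    struct MSHRQueue    { std::deque<PacketPtr> ready; };  // misses/writebacks
    struct Prefetcher   { PacketPtr next = nullptr; };     // optional prefetch

    // What happens when the single send event fires: pick whatever is
    // ready; if nothing is ready, send nothing -- there is no per-cause
    // request to deassert.
    PacketPtr pickDeferredPacket(TransmitList &tl, MSHRQueue &mq,
                                 Prefetcher *pf)
    {
        if (!tl.pkts.empty()) {
            PacketPtr p = tl.pkts.front();
            tl.pkts.pop_front();
            return p;
        }
        if (!mq.ready.empty()) {
            PacketPtr p = mq.ready.front();
            mq.ready.pop_front();
            return p;
        }
        if (pf && pf->next) {
            PacketPtr p = pf->next;
            pf->next = nullptr;
            return p;
        }
        return nullptr;   // nothing to do at this tick
    }

    int main()
    {
        TransmitList tl;
        MSHRQueue mq;
        Packet miss{"mshr request"};
        mq.ready.push_back(&miss);

        PacketPtr p = pickDeferredPacket(tl, mq, nullptr);
        std::printf("sending: %s\n", p ? p->what : "(nothing)");
        return 0;
    }
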
diff --git a/src/mem/cache/cache_impl.hh b/src/mem/cache/cache_impl.hh
index 4c602478f..8089d122c 100644
--- a/src/mem/cache/cache_impl.hh
+++ b/src/mem/cache/cache_impl.hh
@@ -263,16 +263,6 @@ void
Cache::markInService(MSHR *mshr, bool pending_dirty_resp)
{
markInServiceInternal(mshr, pending_dirty_resp);
-#if 0
- if (mshr->originalCmd == MemCmd::HardPFReq) {
- DPRINTF(HWPrefetch, "Marking a HW_PF in service\n");
- //Also clear pending if need be
- if (!prefetcher->havePending())
- {
- deassertMemSideBusRequest(Request_PF);
- }
- }
-#endif
}
@@ -476,14 +466,14 @@ Cache::doWritebacks(PacketList& writebacks, Tick forward_time)
// the Writeback does not reset the bit corresponding to this
// address in the snoop filter below.
wbPkt->setBlockCached();
- allocateWriteBuffer(wbPkt, forward_time, true);
+ allocateWriteBuffer(wbPkt, forward_time);
}
} else {
// If the block is not cached above, send packet below. Both
// CleanEvict and Writeback with BLOCK_CACHED flag cleared will
// reset the bit corresponding to this address in the snoop filter
// below.
- allocateWriteBuffer(wbPkt, forward_time, true);
+ allocateWriteBuffer(wbPkt, forward_time);
}
writebacks.pop_front();
}
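
The surrounding snoop-filter reasoning is unchanged: a writeback that still has a copy above carries BLOCK_CACHED so the filter below keeps its bit, whereas a Writeback or CleanEvict with the flag clear is allowed to reset it. A toy illustration of that rule (hypothetical one-bit-per-address filter, not gem5's SnoopFilter):

    #include <cstdint>
    #include <unordered_map>

    using Addr = std::uint64_t;

    struct Packet {
        Addr addr;
        bool blockCached;                   // the BLOCK_CACHED flag
        void setBlockCached() { blockCached = true; }
        bool isBlockCached() const { return blockCached; }
    };

    // Toy snoop filter below the cache: one "cached above" bit per address.
    struct ToySnoopFilter {
        std::unordered_map<Addr, bool> cachedAbove;

        void observeWriteback(const Packet &pkt)
        {
            // A Writeback/CleanEvict with BLOCK_CACHED cleared means no
            // upper cache holds the block, so the bit is reset; with the
            // flag set, some upper cache still has it and the bit stays.
            if (!pkt.isBlockCached())
                cachedAbove[pkt.addr] = false;
        }
    };

    int main()
    {
        ToySnoopFilter sf;
        sf.cachedAbove[0x1000] = true;

        Packet wb{0x1000, false};
        wb.setBlockCached();                // block is still cached above
        sf.observeWriteback(wb);            // bit is left alone

        return sf.cachedAbove[0x1000] ? 0 : 1;
    }
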
@@ -808,11 +798,8 @@ Cache::recvTimingReq(PacketPtr pkt)
if (pkt->evictingBlock() ||
(pkt->req->isUncacheable() && pkt->isWrite())) {
// We use forward_time here because there is an
- // uncached memory write, forwarded to WriteBuffer. It
- // specifies the latency to allocate an internal buffer and to
- // schedule an event to the queued port and also takes into
- // account the additional delay of the xbar.
- allocateWriteBuffer(pkt, forward_time, true);
+ // uncached memory write, forwarded to WriteBuffer.
+ allocateWriteBuffer(pkt, forward_time);
} else {
if (blk && blk->isValid()) {
// should have flushed and have no valid block
@@ -839,12 +826,8 @@ Cache::recvTimingReq(PacketPtr pkt)
}
// Here we are using forward_time, modelling the latency of
// a miss (outbound) just as forwardLatency, neglecting the
- // lookupLatency component. In this case this latency value
- // specifies the latency to allocate an internal buffer and to
- // schedule an event to the queued port, when a cacheable miss
- // is forwarded to MSHR queue.
- // We take also into account the additional delay of the xbar.
- allocateMissBuffer(pkt, forward_time, true);
+ // lookupLatency component.
+ allocateMissBuffer(pkt, forward_time);
}
if (prefetcher) {
@@ -854,10 +837,9 @@ Cache::recvTimingReq(PacketPtr pkt)
}
}
}
- // Here we condiser just forward_time.
+
if (next_pf_time != MaxTick)
- requestMemSideBus(Request_PF, std::max(clockEdge(forwardLatency),
- next_pf_time));
+ schedMemSideSendEvent(next_pf_time);
return true;
}
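
The prefetcher path now feeds the prefetcher's own ready time straight into schedMemSideSendEvent instead of going through a Request_PF bus request. A minimal sketch, assuming a stand-in prefetcher whose notify() returns the next ready tick (MaxTick when it has nothing to offer):

    #include <cstdint>
    #include <limits>

    using Tick = std::uint64_t;
    static const Tick MaxTick = std::numeric_limits<Tick>::max();

    struct Packet { };
    using PacketPtr = Packet *;

    // Stand-in prefetcher: notify() digests the demand access and reports
    // when the next prefetch candidate will be ready (MaxTick if none).
    struct SketchPrefetcher {
        Tick readyAt = MaxTick;
        Tick notify(PacketPtr) { return readyAt; }
    };

    struct SketchCache {
        SketchPrefetcher *prefetcher = nullptr;
        Tick memSideSendAt = MaxTick;

        void schedMemSideSendEvent(Tick time) { memSideSendAt = time; }

        void recvTimingReq(PacketPtr pkt)
        {
            // ... hit/miss handling elided ...
            Tick next_pf_time = MaxTick;
            if (prefetcher)
                next_pf_time = prefetcher->notify(pkt);

            // The prefetcher's own ready time is used directly; there is
            // no separate "prefetch" bus-request cause any more.
            if (next_pf_time != MaxTick)
                schedMemSideSendEvent(next_pf_time);
        }
    };

    int main()
    {
        SketchPrefetcher pf;
        pf.readyAt = 2000;

        SketchCache cache;
        cache.prefetcher = &pf;

        Packet pkt;
        cache.recvTimingReq(&pkt);
        return cache.memSideSendAt == 2000 ? 0 : 1;
    }
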
@@ -1405,8 +1387,7 @@ Cache::recvTimingResp(PacketPtr pkt)
}
mq = mshr->queue;
mq->markPending(mshr);
- requestMemSideBus((RequestCause)mq->index, clockEdge() +
- pkt->payloadDelay);
+ schedMemSideSendEvent(clockEdge() + pkt->payloadDelay);
} else {
mq->deallocate(mshr);
if (wasFull && !mq->isFull()) {
@@ -1417,9 +1398,9 @@ Cache::recvTimingResp(PacketPtr pkt)
// MSHRs for a prefetch to take place
if (prefetcher && mq == &mshrQueue && mshrQueue.canPrefetch()) {
Tick next_pf_time = std::max(prefetcher->nextPrefetchReadyTime(),
- curTick());
+ clockEdge());
if (next_pf_time != MaxTick)
- requestMemSideBus(Request_PF, next_pf_time);
+ schedMemSideSendEvent(next_pf_time);
}
}
// reset the xbar additional timing as it is now accounted for
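
The curTick()-to-clockEdge() change only clamps the prefetch send time to the cache's clock, so a prefetch that became ready mid-cycle (or in the past) is attempted at the next edge. A tiny sketch with a hypothetical clockEdge() helper (gem5's Clocked interface provides the real one):

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    using Tick = std::uint64_t;

    // Hypothetical helper: round the current tick up to the clock edge at
    // or after it, for a clock with the given period.
    Tick clockEdge(Tick curTick, Tick period)
    {
        return ((curTick + period - 1) / period) * period;
    }

    int main()
    {
        const Tick period   = 500;   // 500-tick clock in this sketch
        const Tick now      = 1200;  // current tick, mid-cycle
        const Tick pf_ready = 900;   // prefetcher reports a past ready time

        // The prefetch cannot be sent before the next clock edge, even if
        // the prefetcher reported an earlier ready time.
        Tick next_pf_time = std::max(pf_ready, clockEdge(now, period));

        std::printf("next prefetch send at tick %llu\n",
                    (unsigned long long)next_pf_time);   // 1500, not 900
        return 0;
    }
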
@@ -1436,7 +1417,7 @@ Cache::recvTimingResp(PacketPtr pkt)
// queued port.
if (blk->isDirty()) {
PacketPtr wbPkt = writebackBlk(blk);
- allocateWriteBuffer(wbPkt, forward_time, true);
+ allocateWriteBuffer(wbPkt, forward_time);
// Set BLOCK_CACHED flag if cached above.
if (isCachedAbove(wbPkt))
wbPkt->setBlockCached();
@@ -1447,7 +1428,7 @@ Cache::recvTimingResp(PacketPtr pkt)
if (isCachedAbove(wcPkt))
delete wcPkt;
else
- allocateWriteBuffer(wcPkt, forward_time, true);
+ allocateWriteBuffer(wcPkt, forward_time);
}
blk->invalidate();
}
@@ -2151,7 +2132,10 @@ Cache::getNextMSHR()
// (hwpf_mshr_misses)
assert(pkt->req->masterId() < system->maxMasters());
mshr_misses[pkt->cmdToIndex()][pkt->req->masterId()]++;
- // Don't request bus, since we already have it
+
+ // allocate an MSHR and return it, note
+ // that we send the packet straight away, so do not
+ // schedule the send
return allocateMissBuffer(pkt, curTick(), false);
} else {
// free the request and packet
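
getNextMSHR() is the one caller that passes sched_send = false: it is already in the middle of handing a packet to the port, so allocating the prefetch MSHR must not schedule another send. A compressed sketch of that pattern; takePrefetch is a made-up name standing in for the prefetch branch of getNextMSHR():

    #include <cstdint>

    using Tick = std::uint64_t;

    struct Packet { };
    using PacketPtr = Packet *;
    struct MSHR { PacketPtr pkt; };

    // Stand-in for the piece of getNextMSHR() that turns a ready prefetch
    // into an MSHR: the caller is already in the middle of a send attempt,
    // so the allocation must not schedule another send event.
    struct SketchCache {
        bool sendScheduled = false;

        MSHR *allocateMissBuffer(PacketPtr pkt, Tick /* time */,
                                 bool sched_send = true)
        {
            if (sched_send)
                sendScheduled = true;     // would wake the port later
            return new MSHR{pkt};
        }

        MSHR *takePrefetch(PacketPtr pf_pkt, Tick now)
        {
            // The prefetch packet is about to be sent by the caller, so
            // pass sched_send = false: no extra send event is needed.
            return allocateMissBuffer(pf_pkt, now, false);
        }
    };

    int main()
    {
        SketchCache cache;
        Packet pf;
        MSHR *mshr = cache.takePrefetch(&pf, 1000);
        bool ok = (mshr != nullptr) && !cache.sendScheduled;
        delete mshr;
        return ok ? 0 : 1;
    }
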
diff --git a/src/mem/cache/prefetch/queued.cc b/src/mem/cache/prefetch/queued.cc
index 58b33a4ae..03ca3188f 100644
--- a/src/mem/cache/prefetch/queued.cc
+++ b/src/mem/cache/prefetch/queued.cc
@@ -79,9 +79,6 @@ QueuedPrefetcher::notify(const PacketPtr &pkt)
++itr;
}
}
-
- if (pfq.empty())
- cache->deassertMemSideBusRequest(BaseCache::Request_PF);
}
// Calculate prefetches given this access