summaryrefslogtreecommitdiff
path: root/src/mem/cache
diff options
context:
space:
mode:
authorRon Dreslinski <rdreslin@umich.edu>2006-10-12 13:59:03 -0400
committerRon Dreslinski <rdreslin@umich.edu>2006-10-12 13:59:03 -0400
commitf89b56b61ad9cb4605d0c3297b5d563672812ce9 (patch)
tree8e2d0bfc47f669f292af91710a41d738bc8e6fc9 /src/mem/cache
parent6ffdc7b4d7be6d27be3a1ae83262787565063b36 (diff)
downloadgem5-f89b56b61ad9cb4605d0c3297b5d563672812ce9.tar.xz
Check the response queue on functional accesses.
The response queue is not tying up an MSHR — should we change that, or assume infinite storage for responses? src/mem/cache/base_cache.cc, src/mem/tport.cc: add a functional check of retry-queued packets. --HG-- extra : convert_revision : 0cb40b3a96d37a5e9eec95312d660ec6a9ce526a
Diffstat (limited to 'src/mem/cache')
-rw-r--r--src/mem/cache/base_cache.cc36
1 file changed, 36 insertions, 0 deletions
diff --git a/src/mem/cache/base_cache.cc b/src/mem/cache/base_cache.cc
index 71ea58416..4a4a81f73 100644
--- a/src/mem/cache/base_cache.cc
+++ b/src/mem/cache/base_cache.cc
@@ -107,6 +107,42 @@ BaseCache::CachePort::recvAtomic(Packet *pkt)
 void
 BaseCache::CachePort::recvFunctional(Packet *pkt)
 {
+    //Check storage here first
+    list<Packet *>::iterator i = drainList.begin();
+    list<Packet *>::iterator end = drainList.end();
+    for (; i != end; ++i) {
+        Packet * target = *i;
+        // If the target contains data, and it overlaps the
+        // probed request, need to update data
+        if (target->intersect(pkt)) {
+            uint8_t* pkt_data;
+            uint8_t* write_data;
+            int data_size;
+            if (target->getAddr() < pkt->getAddr()) {
+                int offset = pkt->getAddr() - target->getAddr();
+                pkt_data = pkt->getPtr<uint8_t>();
+                write_data = target->getPtr<uint8_t>() + offset;
+                data_size = target->getSize() - offset;
+                assert(data_size > 0);
+                if (data_size > pkt->getSize())
+                    data_size = pkt->getSize();
+            } else {
+                int offset = target->getAddr() - pkt->getAddr();
+                pkt_data = pkt->getPtr<uint8_t>() + offset;
+                write_data = target->getPtr<uint8_t>();
+                data_size = pkt->getSize() - offset;
+                assert(data_size > 0); // was data_size > pkt->getSize(): impossible, since data_size = pkt->getSize() - offset
+                if (data_size > target->getSize())
+                    data_size = target->getSize();
+            }
+
+            if (pkt->isWrite()) {
+                memcpy(write_data, pkt_data, data_size); // write probe: push probe data into the queued packet
+            } else {
+                memcpy(pkt_data, write_data, data_size); // read probe: pull queued data into the probe
+            }
+        }
+    }
     cache->doFunctionalAccess(pkt, isCpuSide);
 }