author     Ron Dreslinski <rdreslin@umich.edu>   2006-10-12 13:59:03 -0400
committer  Ron Dreslinski <rdreslin@umich.edu>   2006-10-12 13:59:03 -0400
commit     f89b56b61ad9cb4605d0c3297b5d563672812ce9 (patch)
tree       8e2d0bfc47f669f292af91710a41d738bc8e6fc9 /src/mem/cache
parent     6ffdc7b4d7be6d27be3a1ae83262787565063b36 (diff)
Check the response queue on functional accesses.
The response queue is not tying up an MSHR; should we change that, or assume infinite storage for responses?
src/mem/cache/base_cache.cc:
src/mem/tport.cc:
Add in functional check of retry queued packets.
--HG--
extra : convert_revision : 0cb40b3a96d37a5e9eec95312d660ec6a9ce526a
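
To illustrate the behaviour described above outside of gem5, the sketch below is a minimal, self-contained approximation: the Pkt struct and checkQueued function are hypothetical stand-ins rather than gem5's Packet API, and the copy directions (a functional write pushes its bytes into the queued packet, a functional read pulls the queued packet's bytes) are the assumed semantics of the check.

// Minimal sketch, not gem5 code: Pkt and checkQueued are hypothetical
// stand-ins for Packet and the drain-list check added by this patch.
#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <iostream>
#include <list>
#include <vector>

struct Pkt {
    uint64_t addr;
    std::vector<uint8_t> data;   // payload; data.size() is the request size
    bool write;
    uint64_t getAddr() const { return addr; }
    size_t getSize() const { return data.size(); }
    uint8_t *getPtr() { return data.data(); }
};

// Reconcile a functional probe with every queued, not-yet-drained packet:
// a write pushes the probe's bytes into the queued packet, a read pulls
// the queued packet's bytes into the probe.
void checkQueued(Pkt &probe, std::list<Pkt> &drainList)
{
    for (Pkt &target : drainList) {
        uint64_t begin = std::max(probe.getAddr(), target.getAddr());
        uint64_t end   = std::min(probe.getAddr() + probe.getSize(),
                                  target.getAddr() + target.getSize());
        if (begin >= end)
            continue;                                  // no overlap
        size_t size = end - begin;
        uint8_t *probePtr  = probe.getPtr()  + (begin - probe.getAddr());
        uint8_t *targetPtr = target.getPtr() + (begin - target.getAddr());
        if (probe.write)
            std::memcpy(targetPtr, probePtr, size);    // update queued data
        else
            std::memcpy(probePtr, targetPtr, size);    // return queued data
    }
}

int main()
{
    // One queued response covering bytes [0x100, 0x108).
    std::list<Pkt> drainList{{0x100, {1, 2, 3, 4, 5, 6, 7, 8}, false}};
    // A functional read of [0x104, 0x108) should observe bytes 5..8.
    Pkt probe{0x104, std::vector<uint8_t>(4, 0), false};
    checkQueued(probe, drainList);
    for (unsigned b : probe.data)
        std::cout << b << ' ';                         // prints: 5 6 7 8
    std::cout << '\n';
    return 0;
}
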
Diffstat (limited to 'src/mem/cache')
-rw-r--r--   src/mem/cache/base_cache.cc   36
1 file changed, 36 insertions(+), 0 deletions(-)
diff --git a/src/mem/cache/base_cache.cc b/src/mem/cache/base_cache.cc
index 71ea58416..4a4a81f73 100644
--- a/src/mem/cache/base_cache.cc
+++ b/src/mem/cache/base_cache.cc
@@ -107,6 +107,42 @@ BaseCache::CachePort::recvAtomic(Packet *pkt)
 void
 BaseCache::CachePort::recvFunctional(Packet *pkt)
 {
+    //Check storage here first
+    list<Packet *>::iterator i = drainList.begin();
+    list<Packet *>::iterator end = drainList.end();
+    for (; i != end; ++i) {
+        Packet * target = *i;
+        // If the target contains data, and it overlaps the
+        // probed request, need to update data
+        if (target->intersect(pkt)) {
+            uint8_t* pkt_data;
+            uint8_t* write_data;
+            int data_size;
+            if (target->getAddr() < pkt->getAddr()) {
+                int offset = pkt->getAddr() - target->getAddr();
+                pkt_data = pkt->getPtr<uint8_t>();
+                write_data = target->getPtr<uint8_t>() + offset;
+                data_size = target->getSize() - offset;
+                assert(data_size > 0);
+                if (data_size > pkt->getSize())
+                    data_size = pkt->getSize();
+            } else {
+                int offset = target->getAddr() - pkt->getAddr();
+                pkt_data = pkt->getPtr<uint8_t>() + offset;
+                write_data = target->getPtr<uint8_t>();
+                data_size = pkt->getSize() - offset;
+                assert(data_size > 0);
+                if (data_size > target->getSize())
+                    data_size = target->getSize();
+            }
+
+            if (pkt->isWrite()) {
+                memcpy(write_data, pkt_data, data_size); // write: update queued data
+            } else {
+                memcpy(pkt_data, write_data, data_size); // read: return queued data
+            }
+        }
+    }
     cache->doFunctionalAccess(pkt, isCpuSide);
 }
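
As a cross-check on the clamping arithmetic in the hunk above, the following standalone restatement (plain integer ranges instead of gem5 Packets; overlap is a hypothetical helper that assumes the two ranges already intersect, as intersect() guarantees in the patch) reproduces the two branches: the queued packet starting below the probe, and the probe starting at or below the queued packet.

// Standalone restatement of the hunk's offset/size clamping; not gem5 code.
#include <cassert>
#include <cstdint>

// Returns the number of overlapping bytes and the offsets into each range
// at which the overlap begins, mirroring the two patched branches.
// Precondition: the ranges intersect.
int overlap(uint64_t pktAddr, int pktSize,
            uint64_t tgtAddr, int tgtSize,
            int &pktOff, int &tgtOff)
{
    int size;
    if (tgtAddr < pktAddr) {
        // Queued packet starts first: skip its leading bytes.
        tgtOff = pktAddr - tgtAddr;
        pktOff = 0;
        size = tgtSize - tgtOff;
        if (size > pktSize)
            size = pktSize;
    } else {
        // Probe starts first (or both start together): skip its leading bytes.
        pktOff = tgtAddr - pktAddr;
        tgtOff = 0;
        size = pktSize - pktOff;
        if (size > tgtSize)
            size = tgtSize;
    }
    return size;
}

int main()
{
    int pktOff, tgtOff;
    // Probe [0x104,0x10c) vs. queued [0x100,0x108): 4 bytes overlap.
    assert(overlap(0x104, 8, 0x100, 8, pktOff, tgtOff) == 4
           && pktOff == 0 && tgtOff == 4);
    // Probe [0x100,0x104) vs. queued [0x102,0x10a): 2 bytes overlap.
    assert(overlap(0x100, 4, 0x102, 8, pktOff, tgtOff) == 2
           && pktOff == 2 && tgtOff == 0);
    return 0;
}
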