diff options
author:    Andreas Hansson <andreas.hansson@arm.com>    2016-03-17 09:51:22 -0400
committer: Andreas Hansson <andreas.hansson@arm.com>    2016-03-17 09:51:22 -0400
commit:    abcbc4e51e21c95fa241d19ed13978ea25b26982 (patch)
tree:      40dc2f9b3fa227212c2dde335451122fbf4e8411 /src/mem
parent:    7a40e7864a99140f18049a6f97163eebca2c891e (diff)
download:  gem5-abcbc4e51e21c95fa241d19ed13978ea25b26982.tar.xz
mem: Adjust cache queue reserve to more conservative values
The cache queue reserve is there as an overflow to give us enough
headroom based on when we block the cache, and how many transactions
we may already have accepted before actually blocking. The previous
values were probably chosen to be "big enough", whereas we actually
know that we check the MSHRs after every single allocation, and for
the write buffers we know that we implicitly may need one entry for
every outstanding MSHR.
* * *
mem: Adjust cache queue reserve to more conservative values
The cache queue reserve is there as an overflow to give us enough
headroom based on when we block the cache, and how many transactions
we may already have accepted before actually blocking. The previous
values were probably chosen to be "big enough", whereas we actually
know that we check the MSHRs after every single allocation, and for
the write buffers we know that we implicitly may need one entry for
every outstanding MSHR.
Diffstat (limited to 'src/mem')
-rw-r--r--  src/mem/cache/base.cc       | 10
-rw-r--r--  src/mem/cache/mshr_queue.hh |  4
-rw-r--r--  src/mem/cache/queue.hh      |  8
3 files changed, 15 insertions, 7 deletions
diff --git a/src/mem/cache/base.cc b/src/mem/cache/base.cc
index 1cbfe713b..ecbd3526e 100644
--- a/src/mem/cache/base.cc
+++ b/src/mem/cache/base.cc
@@ -68,8 +68,8 @@ BaseCache::CacheSlavePort::CacheSlavePort(const std::string &_name,
 BaseCache::BaseCache(const BaseCacheParams *p, unsigned blk_size)
     : MemObject(p),
       cpuSidePort(nullptr), memSidePort(nullptr),
-      mshrQueue("MSHRs", p->mshrs, 4, p->demand_mshr_reserve),
-      writeBuffer("write buffer", p->write_buffers, p->mshrs+1000),
+      mshrQueue("MSHRs", p->mshrs, 0, p->demand_mshr_reserve), // see below
+      writeBuffer("write buffer", p->write_buffers, p->mshrs), // see below
       blkSize(blk_size),
       lookupLatency(p->hit_latency),
       forwardLatency(p->hit_latency),
@@ -85,6 +85,12 @@ BaseCache::BaseCache(const BaseCacheParams *p, unsigned blk_size)
       addrRanges(p->addr_ranges.begin(), p->addr_ranges.end()),
       system(p->system)
 {
+    // the MSHR queue has no reserve entries as we check the MSHR
+    // queue on every single allocation, whereas the write queue has
+    // as many reserve entries as we have MSHRs, since every MSHR may
+    // eventually require a writeback, and we do not check the write
+    // buffer before committing to an MSHR
+
     // forward snoops is overridden in init() once we can query
     // whether the connected master is actually snooping or not
 }
diff --git a/src/mem/cache/mshr_queue.hh b/src/mem/cache/mshr_queue.hh
index ff78cbebb..f0b5c2ab0 100644
--- a/src/mem/cache/mshr_queue.hh
+++ b/src/mem/cache/mshr_queue.hh
@@ -141,7 +141,9 @@ class MSHRQueue : public Queue<MSHR>
      */
     bool canPrefetch() const
     {
-        return (allocated < numEntries - (numReserve + demandReserve));
+        // @todo we may want to revisit the +1, currently added to
+        // keep regressions unchanged
+        return (allocated < numEntries - (numReserve + 1 + demandReserve));
     }
 };
diff --git a/src/mem/cache/queue.hh b/src/mem/cache/queue.hh
index 11d456e11..fb3e73608 100644
--- a/src/mem/cache/queue.hh
+++ b/src/mem/cache/queue.hh
@@ -69,7 +69,7 @@ class Queue : public Drainable
     /**
      * The total number of entries in this queue. This number is set
-     * as the number of entries requested plus (numReserve - 1). This
+     * as the number of entries requested plus any reserve. This
      * allows for the same number of effective entries while still
      * maintaining an overflow reserve.
      */
@@ -120,10 +120,10 @@ class Queue : public Drainable
      * Create a queue with a given number of entries.
      *
      * @param num_entries The number of entries in this queue.
-     * @param num_overflow The extra overflow entries needed.
+     * @param reserve The extra overflow entries needed.
      */
     Queue(const std::string &_label, int num_entries, int reserve) :
-        label(_label), numEntries(num_entries + reserve - 1),
+        label(_label), numEntries(num_entries + reserve),
         numReserve(reserve), entries(numEntries), _numInService(0),
         allocated(0)
     {
@@ -139,7 +139,7 @@ class Queue : public Drainable
     bool isFull() const
     {
-        return (allocated > numEntries - numReserve);
+        return (allocated >= numEntries - numReserve);
     }

     int numInService() const