Diffstat (limited to 'src')
-rw-r--r--  src/mem/ruby/structures/Cache.py        1
-rw-r--r--  src/mem/ruby/structures/CacheMemory.cc  1
-rw-r--r--  src/mem/ruby/structures/CacheMemory.hh  3
-rw-r--r--  src/mem/ruby/system/Sequencer.cc       17
-rw-r--r--  src/mem/ruby/system/Sequencer.hh        7
-rw-r--r--  src/mem/ruby/system/Sequencer.py        6
6 files changed, 26 insertions, 9 deletions
diff --git a/src/mem/ruby/structures/Cache.py b/src/mem/ruby/structures/Cache.py
index 7f26e659f..4eb87ac74 100644
--- a/src/mem/ruby/structures/Cache.py
+++ b/src/mem/ruby/structures/Cache.py
@@ -37,7 +37,6 @@ class RubyCache(SimObject):
cxx_class = 'CacheMemory'
cxx_header = "mem/ruby/structures/CacheMemory.hh"
size = Param.MemorySize("capacity in bytes");
- latency = Param.Cycles("");
assoc = Param.Int("");
replacement_policy = Param.ReplacementPolicy(PseudoLRUReplacementPolicy(),
"")
diff --git a/src/mem/ruby/structures/CacheMemory.cc b/src/mem/ruby/structures/CacheMemory.cc
index e444ae09c..64a8e9e8a 100644
--- a/src/mem/ruby/structures/CacheMemory.cc
+++ b/src/mem/ruby/structures/CacheMemory.cc
@@ -60,7 +60,6 @@ CacheMemory::CacheMemory(const Params *p)
p->start_index_bit, p->ruby_system)
{
m_cache_size = p->size;
- m_latency = p->latency;
m_cache_assoc = p->assoc;
m_replacementPolicy_ptr = p->replacement_policy;
m_replacementPolicy_ptr->setCache(this);
diff --git a/src/mem/ruby/structures/CacheMemory.hh b/src/mem/ruby/structures/CacheMemory.hh
index 57f2885b6..792d8fd93 100644
--- a/src/mem/ruby/structures/CacheMemory.hh
+++ b/src/mem/ruby/structures/CacheMemory.hh
@@ -96,7 +96,6 @@ class CacheMemory : public SimObject
AbstractCacheEntry* lookup(const Address& address);
const AbstractCacheEntry* lookup(const Address& address) const;
- Cycles getLatency() const { return m_latency; }
Cycles getTagLatency() const { return tagArray.getLatency(); }
Cycles getDataLatency() const { return dataArray.getLatency(); }
@@ -159,8 +158,6 @@ class CacheMemory : public SimObject
CacheMemory& operator=(const CacheMemory& obj);
private:
- Cycles m_latency;
-
// Data Members (m_prefix)
bool m_is_instruction_only_cache;
diff --git a/src/mem/ruby/system/Sequencer.cc b/src/mem/ruby/system/Sequencer.cc
index 01b868017..36bd9cd62 100644
--- a/src/mem/ruby/system/Sequencer.cc
+++ b/src/mem/ruby/system/Sequencer.cc
@@ -58,6 +58,8 @@ Sequencer::Sequencer(const Params *p)
m_instCache_ptr = p->icache;
m_dataCache_ptr = p->dcache;
+ m_data_cache_hit_latency = p->dcache_hit_latency;
+ m_inst_cache_hit_latency = p->icache_hit_latency;
m_max_outstanding_requests = p->max_outstanding_requests;
m_deadlock_threshold = p->deadlock_threshold;
@@ -65,6 +67,8 @@ Sequencer::Sequencer(const Params *p)
assert(m_deadlock_threshold > 0);
assert(m_instCache_ptr != NULL);
assert(m_dataCache_ptr != NULL);
+ assert(m_data_cache_hit_latency > 0);
+ assert(m_inst_cache_hit_latency > 0);
m_usingNetworkTester = p->using_network_tester;
}
@@ -691,12 +695,17 @@ Sequencer::issueRequest(PacketPtr pkt, RubyRequestType secondary_type)
msg->getPhysicalAddress(),
RubyRequestType_to_string(secondary_type));
- Cycles latency(0); // initialzed to an null value
-
+ // The Sequencer currently assesses instruction and data cache hit latency
+ // for the top-level caches at the beginning of a memory access.
+ // TODO: Eventually, this latency should be moved to represent the actual
+ // cache access latency portion of the memory access. This will require
+ // changing cache controller protocol files to assess the latency on the
+ // access response path.
+ Cycles latency(0); // Initialize to zero to catch misconfigured latency
if (secondary_type == RubyRequestType_IFETCH)
- latency = m_instCache_ptr->getLatency();
+ latency = m_inst_cache_hit_latency;
else
- latency = m_dataCache_ptr->getLatency();
+ latency = m_data_cache_hit_latency;
// Send the message to the cache controller
assert(latency > 0);
diff --git a/src/mem/ruby/system/Sequencer.hh b/src/mem/ruby/system/Sequencer.hh
index d5cd17f5f..505b3f3bc 100644
--- a/src/mem/ruby/system/Sequencer.hh
+++ b/src/mem/ruby/system/Sequencer.hh
@@ -180,6 +180,13 @@ class Sequencer : public RubyPort
CacheMemory* m_dataCache_ptr;
CacheMemory* m_instCache_ptr;
+ // The cache access latency for top-level caches (L0/L1). These are
+ // currently assessed at the beginning of each memory access through the
+ // sequencer.
+ // TODO: Migrate these latencies into top-level cache controllers.
+ Cycles m_data_cache_hit_latency;
+ Cycles m_inst_cache_hit_latency;
+
typedef m5::hash_map<Address, SequencerRequest*> RequestTable;
RequestTable m_writeRequestTable;
RequestTable m_readRequestTable;
diff --git a/src/mem/ruby/system/Sequencer.py b/src/mem/ruby/system/Sequencer.py
index e545000cf..7494986e9 100644
--- a/src/mem/ruby/system/Sequencer.py
+++ b/src/mem/ruby/system/Sequencer.py
@@ -61,6 +61,12 @@ class RubySequencer(RubyPort):
icache = Param.RubyCache("")
dcache = Param.RubyCache("")
+ # Cache latencies currently assessed at the beginning of each access
+ # NOTE: Setting these values to a value greater than one will result in
+ # O3 CPU pipeline bubbles and negatively impact performance
+ # TODO: Latencies should be migrated into each top-level cache controller
+ icache_hit_latency = Param.Cycles(1, "Inst cache hit latency")
+ dcache_hit_latency = Param.Cycles(1, "Data cache hit latency")
max_outstanding_requests = Param.Int(16,
"max requests (incl. prefetches) outstanding")
deadlock_threshold = Param.Cycles(500000,
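
For context (not part of the diff): a minimal sketch of how the relocated hit latencies would be set on the sequencer in a config script. The object names and cache sizes are illustrative, and a full configuration would still wire the sequencer into its RubySystem and cache controller. Keeping the defaults of 1 cycle avoids the O3 pipeline bubbles called out in the NOTE above; larger values model slower top-level caches at the cost of extra front-end stalls.

from m5.objects import RubyCache, RubySequencer

# Illustrative top-level caches (no 'latency' parameter after this change).
l1i_cache = RubyCache(size = '32kB', assoc = 4)
l1d_cache = RubyCache(size = '64kB', assoc = 8)

# Hit latencies now live on the sequencer rather than on each RubyCache.
# Values greater than 1 cycle trigger the pipeline-bubble caveat above.
l1_sequencer = RubySequencer(icache = l1i_cache,
                             dcache = l1d_cache,
                             icache_hit_latency = 1,
                             dcache_hit_latency = 1)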