Diffstat (limited to 'src/mem/ruby/system')
-rw-r--r--  src/mem/ruby/system/GPUCoalescer.cc  10
-rw-r--r--  src/mem/ruby/system/GPUCoalescer.hh   5
-rw-r--r--  src/mem/ruby/system/GPUCoalescer.py   1
-rw-r--r--  src/mem/ruby/system/Sequencer.cc     21
-rw-r--r--  src/mem/ruby/system/Sequencer.py      7
5 files changed, 8 insertions, 36 deletions
diff --git a/src/mem/ruby/system/GPUCoalescer.cc b/src/mem/ruby/system/GPUCoalescer.cc
index 8b8c9566f..5f8725249 100644
--- a/src/mem/ruby/system/GPUCoalescer.cc
+++ b/src/mem/ruby/system/GPUCoalescer.cc
@@ -143,8 +143,6 @@ GPUCoalescer::GPUCoalescer(const Params *p)
assert(m_instCache_ptr);
assert(m_dataCache_ptr);
- m_data_cache_hit_latency = p->dcache_hit_latency;
-
m_runningGarnetStandalone = p->garnet_standalone;
assumingRfOCoherence = p->assume_rfo;
}
@@ -950,12 +948,12 @@ GPUCoalescer::issueRequest(PacketPtr pkt, RubyRequestType secondary_type)
fatal_if(secondary_type == RubyRequestType_IFETCH,
"there should not be any I-Fetch requests in the GPU Coalescer");
- // Send the message to the cache controller
- fatal_if(m_data_cache_hit_latency == 0,
- "should not have a latency of zero");
+ Tick latency = cyclesToTicks(
+ m_controller->mandatoryQueueLatency(secondary_type));
+ assert(latency > 0);
assert(m_mandatory_q_ptr);
- m_mandatory_q_ptr->enqueue(msg, clockEdge(), m_data_cache_hit_latency);
+ m_mandatory_q_ptr->enqueue(msg, clockEdge(), latency);
}
template <class KEY, class VALUE>
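Both issueRequest() call sites now defer to the cache controller for the mandatory-queue enqueue latency. For context, a minimal sketch of the hook this diff relies on, roughly as it might be declared on gem5's AbstractController; the exact signature and the one-cycle default shown here are assumptions inferred from the diff, not verbatim source:

    // Assumed shape of the controller hook queried above; the Cycles(1)
    // default is an assumption, not part of this diff.
    virtual Cycles mandatoryQueueLatency(const RubyRequestType &param_type)
    {
        // Protocols that need per-request-type latencies can override
        // this in their generated controller.
        return Cycles(1);
    }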
diff --git a/src/mem/ruby/system/GPUCoalescer.hh b/src/mem/ruby/system/GPUCoalescer.hh
index 6576ecb36..6e40238c1 100644
--- a/src/mem/ruby/system/GPUCoalescer.hh
+++ b/src/mem/ruby/system/GPUCoalescer.hh
@@ -266,11 +266,6 @@ class GPUCoalescer : public RubyPort
CacheMemory* m_dataCache_ptr;
CacheMemory* m_instCache_ptr;
- // The cache access latency for this GPU data cache. This is assessed at the
- // beginning of each access. This should be very similar to the
- // implementation in Sequencer() as this is very much like a Sequencer
- Cycles m_data_cache_hit_latency;
-
// We need to track both the primary and secondary request types.
// The secondary request type comprises a subset of RubyRequestTypes that
// are understood by the L1 Controller. A primary request type can be any
diff --git a/src/mem/ruby/system/GPUCoalescer.py b/src/mem/ruby/system/GPUCoalescer.py
index ec6429342..eeb05c42a 100644
--- a/src/mem/ruby/system/GPUCoalescer.py
+++ b/src/mem/ruby/system/GPUCoalescer.py
@@ -54,4 +54,3 @@ class RubyGPUCoalescer(RubyPort):
"max outstanding cycles for a request before " \
"deadlock/livelock declared")
garnet_standalone = Param.Bool(False, "")
- dcache_hit_latency = Param.Cycles(1, "Data cache hit latency")
diff --git a/src/mem/ruby/system/Sequencer.cc b/src/mem/ruby/system/Sequencer.cc
index 41ec6ea6c..a282995da 100644
--- a/src/mem/ruby/system/Sequencer.cc
+++ b/src/mem/ruby/system/Sequencer.cc
@@ -60,8 +60,6 @@ Sequencer::Sequencer(const Params *p)
m_instCache_ptr = p->icache;
m_dataCache_ptr = p->dcache;
- m_data_cache_hit_latency = p->dcache_hit_latency;
- m_inst_cache_hit_latency = p->icache_hit_latency;
m_max_outstanding_requests = p->max_outstanding_requests;
m_deadlock_threshold = p->deadlock_threshold;
@@ -70,8 +68,6 @@ Sequencer::Sequencer(const Params *p)
assert(m_deadlock_threshold > 0);
assert(m_instCache_ptr != NULL);
assert(m_dataCache_ptr != NULL);
- assert(m_data_cache_hit_latency > 0);
- assert(m_inst_cache_hit_latency > 0);
m_runningGarnetStandalone = p->garnet_standalone;
}
@@ -650,23 +646,12 @@ Sequencer::issueRequest(PacketPtr pkt, RubyRequestType secondary_type)
printAddress(msg->getPhysicalAddress()),
RubyRequestType_to_string(secondary_type));
- // The Sequencer currently assesses instruction and data cache hit latency
- // for the top-level caches at the beginning of a memory access.
- // TODO: Eventually, this latency should be moved to represent the actual
- // cache access latency portion of the memory access. This will require
- // changing cache controller protocol files to assess the latency on the
- // access response path.
- Cycles latency(0); // Initialize to zero to catch misconfigured latency
- if (secondary_type == RubyRequestType_IFETCH)
- latency = m_inst_cache_hit_latency;
- else
- latency = m_data_cache_hit_latency;
-
- // Send the message to the cache controller
+ Tick latency = cyclesToTicks(
+ m_controller->mandatoryQueueLatency(secondary_type));
assert(latency > 0);
assert(m_mandatory_q_ptr != NULL);
- m_mandatory_q_ptr->enqueue(msg, clockEdge(), cyclesToTicks(latency));
+ m_mandatory_q_ptr->enqueue(msg, clockEdge(), latency);
}
template <class KEY, class VALUE>
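The deleted Sequencer logic chose a latency by request type; under the new scheme a controller override can reproduce that split. A hypothetical sketch, where MyL1Controller and both latency members are illustrative names rather than anything in this diff:

    // Hypothetical controller-side override restoring the old per-type
    // split; class and member names are illustrative only.
    Cycles
    MyL1Controller::mandatoryQueueLatency(const RubyRequestType &param_type)
    {
        if (param_type == RubyRequestType_IFETCH)
            return m_inst_latency; // plays the old icache_hit_latency role
        return m_data_latency;     // plays the old dcache_hit_latency role
    }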
diff --git a/src/mem/ruby/system/Sequencer.py b/src/mem/ruby/system/Sequencer.py
index 2aede349d..47f51462b 100644
--- a/src/mem/ruby/system/Sequencer.py
+++ b/src/mem/ruby/system/Sequencer.py
@@ -63,12 +63,7 @@ class RubySequencer(RubyPort):
icache = Param.RubyCache("")
dcache = Param.RubyCache("")
- # Cache latencies currently assessed at the beginning of each access
- # NOTE: Setting these values to a value greater than one will result in
- # O3 CPU pipeline bubbles and negatively impact performance
- # TODO: Latencies should be migrated into each top-level cache controller
- icache_hit_latency = Param.Cycles(1, "Inst cache hit latency")
- dcache_hit_latency = Param.Cycles(1, "Data cache hit latency")
+
max_outstanding_requests = Param.Int(16,
"max requests (incl. prefetches) outstanding")
deadlock_threshold = Param.Cycles(500000,