Diffstat (limited to 'src/mem/ruby')
-rw-r--r--  src/mem/ruby/recorder/CacheRecorder.cc  78
-rw-r--r--  src/mem/ruby/recorder/CacheRecorder.hh    4
-rw-r--r--  src/mem/ruby/system/System.cc            17
3 files changed, 64 insertions(+), 35 deletions(-)
diff --git a/src/mem/ruby/recorder/CacheRecorder.cc b/src/mem/ruby/recorder/CacheRecorder.cc
index 1e029b400..a63dbd48e 100644
--- a/src/mem/ruby/recorder/CacheRecorder.cc
+++ b/src/mem/ruby/recorder/CacheRecorder.cc
@@ -44,18 +44,29 @@ TraceRecord::print(ostream& out) const
CacheRecorder::CacheRecorder()
: m_uncompressed_trace(NULL),
- m_uncompressed_trace_size(0)
+ m_uncompressed_trace_size(0),
+ m_block_size_bytes(RubySystem::getBlockSizeBytes())
{
}
CacheRecorder::CacheRecorder(uint8_t* uncompressed_trace,
uint64_t uncompressed_trace_size,
- std::vector<Sequencer*>& seq_map)
+ std::vector<Sequencer*>& seq_map,
+ uint64_t block_size_bytes)
: m_uncompressed_trace(uncompressed_trace),
m_uncompressed_trace_size(uncompressed_trace_size),
m_seq_map(seq_map), m_bytes_read(0), m_records_read(0),
- m_records_flushed(0)
+ m_records_flushed(0), m_block_size_bytes(block_size_bytes)
{
+ if (m_uncompressed_trace != NULL) {
+ if (m_block_size_bytes < RubySystem::getBlockSizeBytes()) {
+ // Block sizes larger than when the trace was recorded are not
+ // supported, as we cannot reliably turn accesses to smaller blocks
+ // into larger ones.
+ panic("Recorded cache block size (%d) < current block size (%d) !!",
+ m_block_size_bytes, RubySystem::getBlockSizeBytes());
+ }
+ }
}
CacheRecorder::~CacheRecorder()
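
The added constructor check encodes a one-way compatibility rule: a trace recorded with a larger block size can be replayed by splitting each record, but a trace recorded with a smaller block size than the restoring system's cannot be merged back into larger blocks. A minimal, self-contained sketch of that rule (traceIsReplayable and the example sizes are hypothetical, not part of the patch):

#include <cstdint>
#include <cstdio>

// Sketch of the rule behind the new constructor check: replay splits large
// recorded blocks into current-size requests, so the recorded size must be
// at least the current size (in practice a power-of-two multiple of it).
static bool traceIsReplayable(uint64_t recorded_block, uint64_t current_block)
{
    // Mirrors: panic if (m_block_size_bytes < RubySystem::getBlockSizeBytes())
    return recorded_block >= current_block;
}

int main()
{
    std::printf("recorded 128, current 64: %s\n",
                traceIsReplayable(128, 64) ? "ok, split each record" : "panic");
    std::printf("recorded 64, current 128: %s\n",
                traceIsReplayable(64, 128) ? "ok" : "panic");
    return 0;
}
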
@@ -74,7 +85,7 @@ CacheRecorder::enqueueNextFlushRequest()
TraceRecord* rec = m_records[m_records_flushed];
m_records_flushed++;
Request* req = new Request(rec->m_data_address,
- RubySystem::getBlockSizeBytes(),0,
+ m_block_size_bytes, 0,
Request::funcMasterId);
MemCmd::Command requestType = MemCmd::FlushReq;
Packet *pkt = new Packet(req, requestType);
@@ -95,33 +106,36 @@ CacheRecorder::enqueueNextFetchRequest()
m_bytes_read);
DPRINTF(RubyCacheTrace, "Issuing %s\n", *traceRecord);
- Request* req = new Request();
- MemCmd::Command requestType;
-
- if (traceRecord->m_type == RubyRequestType_LD) {
- requestType = MemCmd::ReadReq;
- req->setPhys(traceRecord->m_data_address,
- RubySystem::getBlockSizeBytes(),0, Request::funcMasterId);
- } else if (traceRecord->m_type == RubyRequestType_IFETCH) {
- requestType = MemCmd::ReadReq;
- req->setPhys(traceRecord->m_data_address,
- RubySystem::getBlockSizeBytes(),
- Request::INST_FETCH, Request::funcMasterId);
- } else {
- requestType = MemCmd::WriteReq;
- req->setPhys(traceRecord->m_data_address,
- RubySystem::getBlockSizeBytes(),0, Request::funcMasterId);
- }
- Packet *pkt = new Packet(req, requestType);
- pkt->dataStatic(traceRecord->m_data);
+ for (int rec_bytes_read = 0; rec_bytes_read < m_block_size_bytes;
+ rec_bytes_read += RubySystem::getBlockSizeBytes()) {
+ Request* req = new Request();
+ MemCmd::Command requestType;
+
+ if (traceRecord->m_type == RubyRequestType_LD) {
+ requestType = MemCmd::ReadReq;
+ req->setPhys(traceRecord->m_data_address + rec_bytes_read,
+ RubySystem::getBlockSizeBytes(), 0, Request::funcMasterId);
+ } else if (traceRecord->m_type == RubyRequestType_IFETCH) {
+ requestType = MemCmd::ReadReq;
+ req->setPhys(traceRecord->m_data_address + rec_bytes_read,
+ RubySystem::getBlockSizeBytes(),
+ Request::INST_FETCH, Request::funcMasterId);
+ } else {
+ requestType = MemCmd::WriteReq;
+ req->setPhys(traceRecord->m_data_address + rec_bytes_read,
+ RubySystem::getBlockSizeBytes(), 0, Request::funcMasterId);
+ }
- Sequencer* m_sequencer_ptr = m_seq_map[traceRecord->m_cntrl_id];
- assert(m_sequencer_ptr != NULL);
- m_sequencer_ptr->makeRequest(pkt);
+ Packet *pkt = new Packet(req, requestType);
+ pkt->dataStatic(traceRecord->m_data + rec_bytes_read);
+
+ Sequencer* m_sequencer_ptr = m_seq_map[traceRecord->m_cntrl_id];
+ assert(m_sequencer_ptr != NULL);
+ m_sequencer_ptr->makeRequest(pkt);
+ }
- m_bytes_read += (sizeof(TraceRecord) +
- RubySystem::getBlockSizeBytes());
+ m_bytes_read += (sizeof(TraceRecord) + m_block_size_bytes);
m_records_read++;
}
}
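
A standalone sketch of the splitting loop added to enqueueNextFetchRequest(), assuming a trace recorded with 128-byte blocks is replayed on a system with 64-byte blocks. issueFetch and both sizes are hypothetical stand-ins for the Request/Packet/Sequencer plumbing; the loop shape matches the diff above:

#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for building a Request/Packet and handing it to the
// Sequencer; it only reports what would be issued.
static void issueFetch(uint64_t addr, uint64_t size, const uint8_t* data)
{
    std::printf("fetch addr=0x%llx size=%llu first_byte=%u\n",
                (unsigned long long)addr, (unsigned long long)size,
                (unsigned)data[0]);
}

int main()
{
    const uint64_t recorded_block_size = 128; // block size when trace was taken
    const uint64_t current_block_size = 64;   // block size of restoring system
    const uint64_t record_addr = 0x1000;

    uint8_t record_data[128];
    for (int i = 0; i < 128; ++i)
        record_data[i] = static_cast<uint8_t>(i);

    // Same shape as the loop added to enqueueNextFetchRequest(): one
    // current-size request per chunk of the recorded block.
    for (uint64_t off = 0; off < recorded_block_size; off += current_block_size)
        issueFetch(record_addr + off, current_block_size, record_data + off);

    return 0;
}
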
@@ -132,14 +146,14 @@ CacheRecorder::addRecord(int cntrl, const physical_address_t data_addr,
RubyRequestType type, Time time, DataBlock& data)
{
TraceRecord* rec = (TraceRecord*)malloc(sizeof(TraceRecord) +
- RubySystem::getBlockSizeBytes());
+ m_block_size_bytes);
rec->m_cntrl_id = cntrl;
rec->m_time = time;
rec->m_data_address = data_addr;
rec->m_pc_address = pc_addr;
rec->m_type = type;
- memcpy(rec->m_data, data.getData(0, RubySystem::getBlockSizeBytes()),
- RubySystem::getBlockSizeBytes());
+ memcpy(rec->m_data, data.getData(0, m_block_size_bytes),
+ m_block_size_bytes);
m_records.push_back(rec);
}
@@ -151,7 +165,7 @@ CacheRecorder::aggregateRecords(uint8_t** buf, uint64 total_size)
int size = m_records.size();
uint64 current_size = 0;
- int record_size = sizeof(TraceRecord) + RubySystem::getBlockSizeBytes();
+ int record_size = sizeof(TraceRecord) + m_block_size_bytes;
for (int i = 0; i < size; ++i) {
// Determine if we need to expand the buffer size
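
The recorded block size also fixes the stride of the flat trace buffer: each record is a fixed header followed by block_size_bytes of data, and the reader advances by sizeof(TraceRecord) + m_block_size_bytes per record, as in the hunks above. A minimal sketch of that layout under assumed sizes (Rec and the 64-byte payload are hypothetical stand-ins for TraceRecord):

#include <cstdint>
#include <cstdio>
#include <vector>

// Hypothetical stand-in for TraceRecord's fixed header; in the real trace the
// data block follows the header immediately in the flat buffer.
struct Rec {
    uint64_t addr;
};

int main()
{
    const uint64_t block_size_bytes = 64;  // value stored in the checkpoint
    const size_t record_size = sizeof(Rec) + block_size_bytes;

    // Build a buffer holding two back-to-back records (headers + payloads).
    std::vector<uint8_t> buf(2 * record_size, 0);
    for (int i = 0; i < 2; ++i) {
        Rec* r = reinterpret_cast<Rec*>(buf.data() + i * record_size);
        r->addr = 0x1000 + i * block_size_bytes;
    }

    // Walk the buffer the way enqueueNextFetchRequest() does: one record per
    // iteration, advancing by header size plus recorded block size.
    for (size_t bytes_read = 0; bytes_read < buf.size();
         bytes_read += record_size) {
        const Rec* r = reinterpret_cast<const Rec*>(buf.data() + bytes_read);
        std::printf("record at offset %zu, addr=0x%llx\n",
                    bytes_read, (unsigned long long)r->addr);
    }
    return 0;
}
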
diff --git a/src/mem/ruby/recorder/CacheRecorder.hh b/src/mem/ruby/recorder/CacheRecorder.hh
index 839c4f6b1..2156b0689 100644
--- a/src/mem/ruby/recorder/CacheRecorder.hh
+++ b/src/mem/ruby/recorder/CacheRecorder.hh
@@ -71,7 +71,8 @@ class CacheRecorder
CacheRecorder(uint8_t* uncompressed_trace,
uint64_t uncompressed_trace_size,
- std::vector<Sequencer*>& SequencerMap);
+ std::vector<Sequencer*>& SequencerMap,
+ uint64_t block_size_bytes);
void addRecord(int cntrl, const physical_address_t data_addr,
const physical_address_t pc_addr, RubyRequestType type,
Time time, DataBlock& data);
@@ -109,6 +110,7 @@ class CacheRecorder
uint64_t m_bytes_read;
uint64_t m_records_read;
uint64_t m_records_flushed;
+ uint64_t m_block_size_bytes;
};
inline bool
diff --git a/src/mem/ruby/system/System.cc b/src/mem/ruby/system/System.cc
index b2f439178..dad5b8aa6 100644
--- a/src/mem/ruby/system/System.cc
+++ b/src/mem/ruby/system/System.cc
@@ -182,9 +182,16 @@ RubySystem::serialize(std::ostream &os)
}
}
+ // Store the cache-block size, so we are able to restore on systems with a
+ // different cache-block size. CacheRecorder depends on the correct
+ // cache-block size upon unserializing.
+ uint64 block_size_bytes = getBlockSizeBytes();
+ SERIALIZE_SCALAR(block_size_bytes);
+
DPRINTF(RubyCacheTrace, "Recording Cache Trace\n");
// Create the CacheRecorder and record the cache trace
- m_cache_recorder = new CacheRecorder(NULL, 0, sequencer_map);
+ m_cache_recorder = new CacheRecorder(NULL, 0, sequencer_map,
+ block_size_bytes);
for (int cntrl = 0; cntrl < m_abs_cntrl_vec.size(); cntrl++) {
m_abs_cntrl_vec[cntrl]->recordCacheTrace(cntrl, m_cache_recorder);
@@ -277,6 +284,12 @@ RubySystem::unserialize(Checkpoint *cp, const string &section)
{
uint8_t *uncompressed_trace = NULL;
+ // This value should be set to the checkpoint-system's block-size.
+ // Optional, as checkpoints without it can be run if the
+ // checkpoint-system's block-size == current block-size.
+ uint64 block_size_bytes = getBlockSizeBytes();
+ UNSERIALIZE_OPT_SCALAR(block_size_bytes);
+
if (m_mem_vec != NULL) {
string memory_trace_file;
uint64 memory_trace_size = 0;
@@ -320,7 +333,7 @@ RubySystem::unserialize(Checkpoint *cp, const string &section)
}
m_cache_recorder = new CacheRecorder(uncompressed_trace, cache_trace_size,
- sequencer_map);
+ sequencer_map, block_size_bytes);
}
void
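
On the System.cc side, the behaviour that matters is the optional unserialize: new checkpoints carry block_size_bytes, while old checkpoints without the key fall back to the restoring system's own block size, which is only safe when the two sizes match. A self-contained sketch of that round trip, using a plain std::map in place of gem5's Checkpoint and the SERIALIZE_SCALAR / UNSERIALIZE_OPT_SCALAR macros (the helper names here are hypothetical):

#include <cstdint>
#include <cstdio>
#include <map>
#include <string>

using Checkpoint = std::map<std::string, uint64_t>;

static void serializeScalar(Checkpoint& cp, const std::string& key, uint64_t v)
{
    cp[key] = v;
}

// Overwrites v only if the key exists, mirroring the optional unserialize:
// an absent key means the caller's default (the current block size) is kept.
static bool unserializeOptScalar(const Checkpoint& cp, const std::string& key,
                                 uint64_t& v)
{
    auto it = cp.find(key);
    if (it == cp.end())
        return false;
    v = it->second;
    return true;
}

int main()
{
    // Taking the checkpoint: store the checkpointing system's block size.
    Checkpoint cp;
    serializeScalar(cp, "block_size_bytes", 128);

    // Restoring on a system whose own block size is 64: the stored value wins.
    uint64_t block_size_bytes = 64;
    unserializeOptScalar(cp, "block_size_bytes", block_size_bytes);
    std::printf("restored block size: %llu\n",
                (unsigned long long)block_size_bytes);

    // Restoring an old checkpoint without the key: fall back to the current
    // block size, which only works if both systems used the same size.
    Checkpoint old_cp;
    uint64_t fallback = 64;
    unserializeOptScalar(old_cp, "block_size_bytes", fallback);
    std::printf("fallback block size: %llu\n", (unsigned long long)fallback);
    return 0;
}
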