| author | Marco Elver <marco.elver@ed.ac.uk> | 2014-04-19 09:00:30 -0500 |
|---|---|---|
| committer | Marco Elver <marco.elver@ed.ac.uk> | 2014-04-19 09:00:30 -0500 |
| commit | d9fa950396e8f331bbfb1023348c8c680967b1be (patch) | |
| tree | de8a7a5902da5952c70f88d43fd8fc89c496377a | |
| parent | 097aadc2cddafdd6433aa8f57b141f0e01222e45 (diff) | |
| download | gem5-d9fa950396e8f331bbfb1023348c8c680967b1be.tar.xz | |
ruby: recorder: Fix (de-)serializing with different cache block-sizes
Upon aggregating records, serialize the system's cache-block size, as the
cache-block size can differ when restoring from a checkpoint. This way, we can
correctly read all records when restoring from a checkpoint, even if the
cache-block size is different.
Note that it is only possible to restore from a checkpoint if the
desired cache-block size is smaller than or equal to the cache-block size
when the checkpoint was taken; we can split one larger request into
multiple smaller ones, but it is not reliable to do the opposite.
Committed by: Nilay Vaish <nilay@cs.wisc.edu>
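To make the splitting constraint concrete, here is a minimal standalone C++ sketch, not the gem5 code itself; the `replayRecord` and `issueRequest` helpers and the block sizes used in `main` are illustrative assumptions only:

```cpp
// Standalone sketch of the replay idea: one record taken with a larger block
// size is re-issued as several requests of the current, smaller block size.
#include <cassert>
#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for handing one request to a sequencer.
static void issueRequest(uint64_t addr, uint64_t size)
{
    std::printf("request: addr=%#lx size=%lu\n",
                static_cast<unsigned long>(addr),
                static_cast<unsigned long>(size));
}

// Replay one recorded block by splitting it into current-block-size pieces.
static void replayRecord(uint64_t data_addr,
                         uint64_t recorded_block_size,
                         uint64_t current_block_size)
{
    // Restoring onto a larger block size is not supported: smaller recorded
    // accesses cannot reliably be merged into larger ones.
    assert(recorded_block_size >= current_block_size);

    for (uint64_t offset = 0; offset < recorded_block_size;
         offset += current_block_size) {
        issueRequest(data_addr + offset, current_block_size);
    }
}

int main()
{
    // Example: a record taken with 128-byte blocks, replayed on a system
    // configured with 64-byte blocks, becomes two 64-byte requests.
    replayRecord(0x1000, 128, 64);
    return 0;
}
```

Under these assumptions a 128-byte record replays as two 64-byte requests, which mirrors the loop added to CacheRecorder::enqueueNextFetchRequest() in the diff below.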
-rw-r--r-- | src/mem/ruby/recorder/CacheRecorder.cc | 78 |
-rw-r--r-- | src/mem/ruby/recorder/CacheRecorder.hh |  4 |
-rw-r--r-- | src/mem/ruby/system/System.cc          | 17 |
-rw-r--r-- | src/sim/serialize.hh                   |  2 |
-rwxr-xr-x | util/cpt_upgrader.py                   |  8 |
5 files changed, 73 insertions, 36 deletions
diff --git a/src/mem/ruby/recorder/CacheRecorder.cc b/src/mem/ruby/recorder/CacheRecorder.cc
index 1e029b400..a63dbd48e 100644
--- a/src/mem/ruby/recorder/CacheRecorder.cc
+++ b/src/mem/ruby/recorder/CacheRecorder.cc
@@ -44,18 +44,29 @@ TraceRecord::print(ostream& out) const
 CacheRecorder::CacheRecorder()
     : m_uncompressed_trace(NULL),
-      m_uncompressed_trace_size(0)
+      m_uncompressed_trace_size(0),
+      m_block_size_bytes(RubySystem::getBlockSizeBytes())
 {
 }
 
 CacheRecorder::CacheRecorder(uint8_t* uncompressed_trace,
                              uint64_t uncompressed_trace_size,
-                             std::vector<Sequencer*>& seq_map)
+                             std::vector<Sequencer*>& seq_map,
+                             uint64_t block_size_bytes)
     : m_uncompressed_trace(uncompressed_trace),
       m_uncompressed_trace_size(uncompressed_trace_size), m_seq_map(seq_map),
       m_bytes_read(0), m_records_read(0),
-      m_records_flushed(0)
+      m_records_flushed(0), m_block_size_bytes(block_size_bytes)
 {
+    if (m_uncompressed_trace != NULL) {
+        if (m_block_size_bytes < RubySystem::getBlockSizeBytes()) {
+            // Block sizes larger than when the trace was recorded are not
+            // supported, as we cannot reliably turn accesses to smaller blocks
+            // into larger ones.
+            panic("Recorded cache block size (%d) < current block size (%d) !!",
+                  m_block_size_bytes, RubySystem::getBlockSizeBytes());
+        }
+    }
 }
 
 CacheRecorder::~CacheRecorder()
@@ -74,7 +85,7 @@ CacheRecorder::enqueueNextFlushRequest()
         TraceRecord* rec = m_records[m_records_flushed];
         m_records_flushed++;
         Request* req = new Request(rec->m_data_address,
-                                   RubySystem::getBlockSizeBytes(),0,
+                                   m_block_size_bytes, 0,
                                    Request::funcMasterId);
         MemCmd::Command requestType = MemCmd::FlushReq;
         Packet *pkt = new Packet(req, requestType);
@@ -95,33 +106,36 @@ CacheRecorder::enqueueNextFetchRequest()
             m_bytes_read);
         DPRINTF(RubyCacheTrace, "Issuing %s\n", *traceRecord);
 
-        Request* req = new Request();
-        MemCmd::Command requestType;
-
-        if (traceRecord->m_type == RubyRequestType_LD) {
-            requestType = MemCmd::ReadReq;
-            req->setPhys(traceRecord->m_data_address,
-                         RubySystem::getBlockSizeBytes(),0, Request::funcMasterId);
-        } else if (traceRecord->m_type == RubyRequestType_IFETCH) {
-            requestType = MemCmd::ReadReq;
-            req->setPhys(traceRecord->m_data_address,
-                         RubySystem::getBlockSizeBytes(),
-                         Request::INST_FETCH, Request::funcMasterId);
-        } else {
-            requestType = MemCmd::WriteReq;
-            req->setPhys(traceRecord->m_data_address,
-                         RubySystem::getBlockSizeBytes(),0, Request::funcMasterId);
-        }
-
-        Packet *pkt = new Packet(req, requestType);
-        pkt->dataStatic(traceRecord->m_data);
+        for (int rec_bytes_read = 0; rec_bytes_read < m_block_size_bytes;
+                rec_bytes_read += RubySystem::getBlockSizeBytes()) {
+            Request* req = new Request();
+            MemCmd::Command requestType;
+
+            if (traceRecord->m_type == RubyRequestType_LD) {
+                requestType = MemCmd::ReadReq;
+                req->setPhys(traceRecord->m_data_address + rec_bytes_read,
+                    RubySystem::getBlockSizeBytes(), 0, Request::funcMasterId);
+            } else if (traceRecord->m_type == RubyRequestType_IFETCH) {
+                requestType = MemCmd::ReadReq;
+                req->setPhys(traceRecord->m_data_address + rec_bytes_read,
+                    RubySystem::getBlockSizeBytes(),
+                    Request::INST_FETCH, Request::funcMasterId);
+            } else {
+                requestType = MemCmd::WriteReq;
+                req->setPhys(traceRecord->m_data_address + rec_bytes_read,
+                    RubySystem::getBlockSizeBytes(), 0, Request::funcMasterId);
+            }
 
-        Sequencer* m_sequencer_ptr = m_seq_map[traceRecord->m_cntrl_id];
-        assert(m_sequencer_ptr != NULL);
-        m_sequencer_ptr->makeRequest(pkt);
+            Packet *pkt = new Packet(req, requestType);
+            pkt->dataStatic(traceRecord->m_data + rec_bytes_read);
+
+            Sequencer* m_sequencer_ptr = m_seq_map[traceRecord->m_cntrl_id];
+            assert(m_sequencer_ptr != NULL);
+            m_sequencer_ptr->makeRequest(pkt);
+        }
 
-        m_bytes_read += (sizeof(TraceRecord) +
-                         RubySystem::getBlockSizeBytes());
+        m_bytes_read += (sizeof(TraceRecord) + m_block_size_bytes);
         m_records_read++;
     }
 }
@@ -132,14 +146,14 @@ CacheRecorder::addRecord(int cntrl, const physical_address_t data_addr,
                          RubyRequestType type, Time time, DataBlock& data)
 {
     TraceRecord* rec = (TraceRecord*)malloc(sizeof(TraceRecord) +
-                                            RubySystem::getBlockSizeBytes());
+                                            m_block_size_bytes);
     rec->m_cntrl_id = cntrl;
     rec->m_time = time;
     rec->m_data_address = data_addr;
     rec->m_pc_address = pc_addr;
     rec->m_type = type;
-    memcpy(rec->m_data, data.getData(0, RubySystem::getBlockSizeBytes()),
-           RubySystem::getBlockSizeBytes());
+    memcpy(rec->m_data, data.getData(0, m_block_size_bytes),
+           m_block_size_bytes);
     m_records.push_back(rec);
 }
 
@@ -151,7 +165,7 @@ CacheRecorder::aggregateRecords(uint8_t** buf, uint64 total_size)
     int size = m_records.size();
     uint64 current_size = 0;
-    int record_size = sizeof(TraceRecord) + RubySystem::getBlockSizeBytes();
+    int record_size = sizeof(TraceRecord) + m_block_size_bytes;
 
     for (int i = 0; i < size; ++i) {
         // Determine if we need to expand the buffer size
diff --git a/src/mem/ruby/recorder/CacheRecorder.hh b/src/mem/ruby/recorder/CacheRecorder.hh
index 839c4f6b1..2156b0689 100644
--- a/src/mem/ruby/recorder/CacheRecorder.hh
+++ b/src/mem/ruby/recorder/CacheRecorder.hh
@@ -71,7 +71,8 @@ class CacheRecorder
     CacheRecorder(uint8_t* uncompressed_trace,
                   uint64_t uncompressed_trace_size,
-                  std::vector<Sequencer*>& SequencerMap);
+                  std::vector<Sequencer*>& SequencerMap,
+                  uint64_t block_size_bytes);
     void addRecord(int cntrl, const physical_address_t data_addr,
                    const physical_address_t pc_addr, RubyRequestType type,
                    Time time, DataBlock& data);
@@ -109,6 +110,7 @@ class CacheRecorder
     uint64_t m_bytes_read;
     uint64_t m_records_read;
     uint64_t m_records_flushed;
+    uint64_t m_block_size_bytes;
 };
 
 inline bool
diff --git a/src/mem/ruby/system/System.cc b/src/mem/ruby/system/System.cc
index b2f439178..dad5b8aa6 100644
--- a/src/mem/ruby/system/System.cc
+++ b/src/mem/ruby/system/System.cc
@@ -182,9 +182,16 @@ RubySystem::serialize(std::ostream &os)
         }
     }
 
+    // Store the cache-block size, so we are able to restore on systems with a
+    // different cache-block size. CacheRecorder depends on the correct
+    // cache-block size upon unserializing.
+    uint64 block_size_bytes = getBlockSizeBytes();
+    SERIALIZE_SCALAR(block_size_bytes);
+
     DPRINTF(RubyCacheTrace, "Recording Cache Trace\n");
     // Create the CacheRecorder and record the cache trace
-    m_cache_recorder = new CacheRecorder(NULL, 0, sequencer_map);
+    m_cache_recorder = new CacheRecorder(NULL, 0, sequencer_map,
+                                         block_size_bytes);
 
     for (int cntrl = 0; cntrl < m_abs_cntrl_vec.size(); cntrl++) {
         m_abs_cntrl_vec[cntrl]->recordCacheTrace(cntrl, m_cache_recorder);
@@ -277,6 +284,12 @@ RubySystem::unserialize(Checkpoint *cp, const string &section)
 {
     uint8_t *uncompressed_trace = NULL;
 
+    // This value should be set to the checkpoint-system's block-size.
+    // Optional, as checkpoints without it can be run if the
+    // checkpoint-system's block-size == current block-size.
+    uint64 block_size_bytes = getBlockSizeBytes();
+    UNSERIALIZE_OPT_SCALAR(block_size_bytes);
+
     if (m_mem_vec != NULL) {
         string memory_trace_file;
         uint64 memory_trace_size = 0;
@@ -320,7 +333,7 @@ RubySystem::unserialize(Checkpoint *cp, const string &section)
     }
 
     m_cache_recorder = new CacheRecorder(uncompressed_trace, cache_trace_size,
-                                         sequencer_map);
+                                         sequencer_map, block_size_bytes);
 }
 
 void
diff --git a/src/sim/serialize.hh b/src/sim/serialize.hh
index bbf759cf6..5fc9d7b55 100644
--- a/src/sim/serialize.hh
+++ b/src/sim/serialize.hh
@@ -58,7 +58,7 @@ class EventQueue;
  * SimObject shouldn't cause the version number to increase, only changes to
  * existing objects such as serializing/unserializing more state, changing sizes
  * of serialized arrays, etc. */
-static const uint64_t gem5CheckpointVersion = 0x0000000000000009;
+static const uint64_t gem5CheckpointVersion = 0x000000000000000a;
 
 template <class T>
 void paramOut(std::ostream &os, const std::string &name, const T &param);
diff --git a/util/cpt_upgrader.py b/util/cpt_upgrader.py
index fac9e07a7..e14274806 100755
--- a/util/cpt_upgrader.py
+++ b/util/cpt_upgrader.py
@@ -545,6 +545,13 @@ def from_8(cpt):
             cpt.set(new_sec, 'bootUncacheability', 'false')
             cpt.set(new_sec, 'num_entries', '0')
 
+# Version 10 adds block_size_bytes to system.ruby
+def from_9(cpt):
+    for sec in cpt.sections():
+        if sec == 'system.ruby':
+            # Use Gem5's default of 64; this should be changed if the to be
+            # upgraded checkpoints were not taken with block-size 64!
+            cpt.set(sec, 'block_size_bytes', '64')
+
 migrations = []
 migrations.append(from_0)
@@ -556,6 +563,7 @@ migrations.append(from_5)
 migrations.append(from_6)
 migrations.append(from_7)
 migrations.append(from_8)
+migrations.append(from_9)
 
 verbose_print = False