| author    | Nilay Vaish <nilay@cs.wisc.edu>                       | 2015-08-29 10:19:23 -0500 |
|-----------|-------------------------------------------------------|---------------------------|
| committer | Nilay Vaish <nilay@cs.wisc.edu>                       | 2015-08-29 10:19:23 -0500 |
| commit    | 4727fc26f885d09f07f18a10fabe6c75dffe165f (patch)      |                           |
| tree      | f66861617159e8b49b56347f849cb628ac76d517              |                           |
| parent    | e9d6bf5e35b732df925f65a7b7adaa746f6a7f3b (diff)       |                           |
| download  | gem5-4727fc26f885d09f07f18a10fabe6c75dffe165f.tar.xz  |                           |
ruby: eliminate type uint64 and int64
These types are being replaced with uint64_t and int64_t.
32 files changed, 116 insertions, 122 deletions
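The change itself is mechanical: the project-local 64-bit aliases `uint64` and `int64`, previously defined in src/mem/ruby/common/TypeDefines.hh, are deleted, and Ruby code now uses the standard fixed-width types from `<cstdint>`. A minimal sketch of the substitution follows (illustrative only; the variable names below are placeholders, not gem5 code):

```cpp
#include <cstdint>   // standard fixed-width integer types
#include <iostream>

// Previously Ruby carried its own aliases in TypeDefines.hh (deleted by this patch):
//     typedef unsigned long long uint64;
//     typedef long long int64;
//
// Call sites now spell the standard types directly.
int main()
{
    uint64_t requests_completed = 0;  // placeholder counter, cf. m_requests_completed
    int64_t cache_set = -1;           // placeholder set index, cf. addressToCacheSet()

    ++requests_completed;
    std::cout << requests_completed << " " << cache_set << "\n";
    return 0;
}
```

The standard types guarantee an exact 64-bit width on every platform, whereas `unsigned long long` is only required to be at least 64 bits; that portability guarantee is presumably the reason for dropping the hand-rolled typedefs.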
diff --git a/src/cpu/testers/directedtest/RubyDirectedTester.hh b/src/cpu/testers/directedtest/RubyDirectedTester.hh
index 2a1e7fc1f..74a891178 100644
--- a/src/cpu/testers/directedtest/RubyDirectedTester.hh
+++ b/src/cpu/testers/directedtest/RubyDirectedTester.hh
@@ -109,9 +109,9 @@ class RubyDirectedTester : public MemObject
     RubyDirectedTester(const RubyDirectedTester& obj);
     RubyDirectedTester& operator=(const RubyDirectedTester& obj);
 
-    uint64 m_requests_completed;
+    uint64_t m_requests_completed;
     std::vector<MasterPort*> ports;
-    uint64 m_requests_to_complete;
+    uint64_t m_requests_to_complete;
 
     DirectedGenerator* generator;
 };
diff --git a/src/cpu/testers/rubytest/RubyTester.hh b/src/cpu/testers/rubytest/RubyTester.hh
index c9f0b8dfc..94a982e32 100644
--- a/src/cpu/testers/rubytest/RubyTester.hh
+++ b/src/cpu/testers/rubytest/RubyTester.hh
@@ -143,10 +143,10 @@ class RubyTester : public MemObject
     std::vector<Cycles> m_last_progress_vector;
 
     int m_num_cpus;
-    uint64 m_checks_completed;
+    uint64_t m_checks_completed;
     std::vector<MasterPort*> writePorts;
     std::vector<MasterPort*> readPorts;
-    uint64 m_checks_to_complete;
+    uint64_t m_checks_to_complete;
     int m_deadlock_threshold;
     int m_num_writers;
     int m_num_readers;
diff --git a/src/mem/ruby/common/Histogram.cc b/src/mem/ruby/common/Histogram.cc
index e377bc253..31de160cf 100644
--- a/src/mem/ruby/common/Histogram.cc
+++ b/src/mem/ruby/common/Histogram.cc
@@ -84,7 +84,7 @@ Histogram::doubleBinSize()
 }
 
 void
-Histogram::add(int64 value)
+Histogram::add(int64_t value)
 {
     assert(value >= 0);
     m_max = max(m_max, value);
diff --git a/src/mem/ruby/common/Histogram.hh b/src/mem/ruby/common/Histogram.hh
index c34e39af1..f02c4bedd 100644
--- a/src/mem/ruby/common/Histogram.hh
+++ b/src/mem/ruby/common/Histogram.hh
@@ -40,7 +40,7 @@ class Histogram
     Histogram(int binsize = 1, uint32_t bins = 50);
     ~Histogram();
 
-    void add(int64 value);
+    void add(int64_t value);
     void add(Histogram& hist);
     void doubleBinSize();
 
@@ -51,10 +51,10 @@ class Histogram
     uint64_t size() const { return m_count; }
     uint32_t getBins() const { return m_data.size(); }
     int getBinSize() const { return m_binsize; }
-    int64 getTotal() const { return m_sumSamples; }
+    int64_t getTotal() const { return m_sumSamples; }
    uint64_t getSquaredTotal() const { return m_sumSquaredSamples; }
     uint64_t getData(int index) const { return m_data[index]; }
-    int64 getMax() const { return m_max; }
+    int64_t getMax() const { return m_max; }
 
     void printWithMultiplier(std::ostream& out, double multiplier) const;
     void printPercent(std::ostream& out) const;
@@ -62,12 +62,12 @@ class Histogram
 
   private:
     std::vector<uint64_t> m_data;
-    int64 m_max;            // the maximum value seen so far
+    int64_t m_max;          // the maximum value seen so far
     uint64_t m_count;       // the number of elements added
     int m_binsize;          // the size of each bucket
     uint32_t m_largest_bin; // the largest bin used
 
-    int64 m_sumSamples;     // the sum of all samples
+    int64_t m_sumSamples;   // the sum of all samples
     uint64_t m_sumSquaredSamples;  // the sum of the square of all samples
 
     double getStandardDeviation() const;
diff --git a/src/mem/ruby/common/TypeDefines.hh b/src/mem/ruby/common/TypeDefines.hh
index 203b63779..f29efe8b5 100644
--- a/src/mem/ruby/common/TypeDefines.hh
+++ b/src/mem/ruby/common/TypeDefines.hh
@@ -30,9 +30,6 @@
 #ifndef TYPEDEFINES_H
 #define TYPEDEFINES_H
 
-typedef unsigned long long uint64;
-typedef long long int64;
-
 typedef unsigned int LinkID;
 typedef unsigned int NodeID;
 typedef unsigned int SwitchID;
diff --git a/src/mem/ruby/filters/H3BloomFilter.cc b/src/mem/ruby/filters/H3BloomFilter.cc
index 21b9152be..b0d277782 100644
--- a/src/mem/ruby/filters/H3BloomFilter.cc
+++ b/src/mem/ruby/filters/H3BloomFilter.cc
@@ -507,8 +507,8 @@ H3BloomFilter::print(ostream& out) const
 int
 H3BloomFilter::get_index(Addr addr, int i)
 {
-    uint64 x = makeLineAddress(addr);
-    // uint64 y = (x*mults_list[i] + adds_list[i]) % primes_list[i];
+    uint64_t x = makeLineAddress(addr);
+    // uint64_t y = (x*mults_list[i] + adds_list[i]) % primes_list[i];
     int y = hash_H3(x,i);
 
     if (isParallel) {
@@ -519,10 +519,10 @@ H3BloomFilter::get_index(Addr addr, int i)
 }
 
 int
-H3BloomFilter::hash_H3(uint64 value, int index)
+H3BloomFilter::hash_H3(uint64_t value, int index)
 {
-    uint64 mask = 1;
-    uint64 val = value;
+    uint64_t mask = 1;
+    uint64_t val = value;
     int result = 0;
 
     for (int i = 0; i < 64; i++) {
diff --git a/src/mem/ruby/filters/H3BloomFilter.hh b/src/mem/ruby/filters/H3BloomFilter.hh
index 8596d6acb..b6628f5e1 100644
--- a/src/mem/ruby/filters/H3BloomFilter.hh
+++ b/src/mem/ruby/filters/H3BloomFilter.hh
@@ -68,7 +68,7 @@ class H3BloomFilter : public AbstractBloomFilter
 
   private:
     int get_index(Addr addr, int hashNumber);
-    int hash_H3(uint64 value, int index);
+    int hash_H3(uint64_t value, int index);
 
     std::vector<int> m_filter;
     int m_filter_size;
diff --git a/src/mem/ruby/filters/MultiBitSelBloomFilter.cc b/src/mem/ruby/filters/MultiBitSelBloomFilter.cc
index 3cdca7e3b..f326030e9 100644
--- a/src/mem/ruby/filters/MultiBitSelBloomFilter.cc
+++ b/src/mem/ruby/filters/MultiBitSelBloomFilter.cc
@@ -171,7 +171,7 @@ MultiBitSelBloomFilter::get_index(Addr addr, int i)
     // m_skip_bits is used to perform BitSelect after skipping some
     // bits. Used to simulate BitSel hashing on larger than cache-line
     // granularities
-    uint64 x = (makeLineAddress(addr) >> m_skip_bits);
+    uint64_t x = (makeLineAddress(addr) >> m_skip_bits);
     int y = hash_bitsel(x, i, m_num_hashes, 30, m_filter_size_bits);
     //36-bit addresses, 6-bit cache lines
 
@@ -183,10 +183,10 @@ MultiBitSelBloomFilter::get_index(Addr addr, int i)
 }
 
 int
-MultiBitSelBloomFilter::hash_bitsel(uint64 value, int index, int jump,
+MultiBitSelBloomFilter::hash_bitsel(uint64_t value, int index, int jump,
                                     int maxBits, int numBits)
 {
-    uint64 mask = 1;
+    uint64_t mask = 1;
     int result = 0;
     int bit, i;
diff --git a/src/mem/ruby/filters/MultiBitSelBloomFilter.hh b/src/mem/ruby/filters/MultiBitSelBloomFilter.hh
index e43dcd6f1..b4fac0671 100644
--- a/src/mem/ruby/filters/MultiBitSelBloomFilter.hh
+++ b/src/mem/ruby/filters/MultiBitSelBloomFilter.hh
@@ -68,7 +68,7 @@ class MultiBitSelBloomFilter : public AbstractBloomFilter
 
  private:
     int get_index(Addr addr, int hashNumber);
-    int hash_bitsel(uint64 value, int index, int jump, int maxBits,
+    int hash_bitsel(uint64_t value, int index, int jump, int maxBits,
                     int numBits);
 
     std::vector<int> m_filter;
diff --git a/src/mem/ruby/network/MessageBuffer.hh b/src/mem/ruby/network/MessageBuffer.hh
index 732b7ec6c..4209aea0f 100644
--- a/src/mem/ruby/network/MessageBuffer.hh
+++ b/src/mem/ruby/network/MessageBuffer.hh
@@ -184,7 +184,7 @@ class MessageBuffer : public SimObject
     int m_not_avail_count;  // count the # of times I didn't have N
                             // slots available
 
-    uint64 m_msg_counter;
+    uint64_t m_msg_counter;
     int m_priority_rank;
     const bool m_strict_fifo;
     const bool m_randomization;
diff --git a/src/mem/ruby/profiler/AccessTraceForAddress.hh b/src/mem/ruby/profiler/AccessTraceForAddress.hh
index af42489bc..3e9d54499 100644
--- a/src/mem/ruby/profiler/AccessTraceForAddress.hh
+++ b/src/mem/ruby/profiler/AccessTraceForAddress.hh
@@ -67,12 +67,12 @@ class AccessTraceForAddress
 
   private:
     Addr m_addr;
-    uint64 m_loads;
-    uint64 m_stores;
-    uint64 m_atomics;
-    uint64 m_total;
-    uint64 m_user;
-    uint64 m_sharing;
+    uint64_t m_loads;
+    uint64_t m_stores;
+    uint64_t m_atomics;
+    uint64_t m_total;
+    uint64_t m_user;
+    uint64_t m_sharing;
     Set m_touched_by;
     Histogram* m_histogram_ptr;
 };
diff --git a/src/mem/ruby/profiler/AddressProfiler.cc b/src/mem/ruby/profiler/AddressProfiler.cc
index 0e7ea7e36..52c693330 100644
--- a/src/mem/ruby/profiler/AddressProfiler.cc
+++ b/src/mem/ruby/profiler/AddressProfiler.cc
@@ -67,7 +67,7 @@ printSorted(ostream& out, int num_of_sequencers, const AddressMap &record_map,
 {
     const int records_printed = 100;
 
-    uint64 misses = 0;
+    uint64_t misses = 0;
     std::vector<const AccessTraceForAddress *> sorted;
 
     AddressMap::const_iterator i = record_map.begin();
@@ -95,8 +95,8 @@ printSorted(ostream& out, int num_of_sequencers, const AddressMap &record_map,
     Histogram all_records_log(-1);
 
     // Allows us to track how many lines where touched by n processors
-    std::vector<int64> m_touched_vec;
-    std::vector<int64> m_touched_weighted_vec;
+    std::vector<int64_t> m_touched_vec;
+    std::vector<int64_t> m_touched_weighted_vec;
     m_touched_vec.resize(num_of_sequencers+1);
     m_touched_weighted_vec.resize(num_of_sequencers+1);
     for (int j = 0; j < m_touched_vec.size(); j++) {
diff --git a/src/mem/ruby/profiler/AddressProfiler.hh b/src/mem/ruby/profiler/AddressProfiler.hh
index 39544c0a2..ebd44080b 100644
--- a/src/mem/ruby/profiler/AddressProfiler.hh
+++ b/src/mem/ruby/profiler/AddressProfiler.hh
@@ -75,7 +75,7 @@ class AddressProfiler
     AddressProfiler(const AddressProfiler& obj);
     AddressProfiler& operator=(const AddressProfiler& obj);
 
-    int64 m_sharing_miss_counter;
+    int64_t m_sharing_miss_counter;
 
     AddressMap m_dataAccessTrace;
     AddressMap m_macroBlockAccessTrace;
diff --git a/src/mem/ruby/profiler/StoreTrace.cc b/src/mem/ruby/profiler/StoreTrace.cc
index 40bf2e7b6..c3c1f8a19 100644
--- a/src/mem/ruby/profiler/StoreTrace.cc
+++ b/src/mem/ruby/profiler/StoreTrace.cc
@@ -33,7 +33,7 @@ using namespace std;
 
 bool StoreTrace::s_init = false; // Total number of store lifetimes of
                                  // all lines
-int64 StoreTrace::s_total_samples = 0; // Total number of store
+int64_t StoreTrace::s_total_samples = 0; // Total number of store
                                         // lifetimes of all lines
 Histogram* StoreTrace::s_store_count_ptr = NULL;
 Histogram* StoreTrace::s_store_first_to_stolen_ptr = NULL;
diff --git a/src/mem/ruby/profiler/StoreTrace.hh b/src/mem/ruby/profiler/StoreTrace.hh
index 9c1b83cd6..a686594f8 100644
--- a/src/mem/ruby/profiler/StoreTrace.hh
+++ b/src/mem/ruby/profiler/StoreTrace.hh
@@ -53,7 +53,7 @@ class StoreTrace
 
   private:
     static bool s_init;
-    static int64 s_total_samples; // Total number of store lifetimes
+    static int64_t s_total_samples; // Total number of store lifetimes
                                     // of all lines
     static Histogram* s_store_count_ptr;
     static Histogram* s_store_first_to_stolen_ptr;
@@ -66,7 +66,7 @@ class StoreTrace
     Tick m_last_store;
     int m_stores_this_interval;
 
-    int64 m_total_samples; // Total number of store lifetimes of this line
+    int64_t m_total_samples; // Total number of store lifetimes of this line
     Histogram m_store_count;
     Histogram m_store_first_to_stolen;
     Histogram m_store_last_to_stolen;
diff --git a/src/mem/ruby/structures/AbstractReplacementPolicy.cc b/src/mem/ruby/structures/AbstractReplacementPolicy.cc
index fbcce6e2d..d802ecd31 100644
--- a/src/mem/ruby/structures/AbstractReplacementPolicy.cc
+++ b/src/mem/ruby/structures/AbstractReplacementPolicy.cc
@@ -66,7 +66,7 @@ AbstractReplacementPolicy::~AbstractReplacementPolicy()
 }
 
 Tick
-AbstractReplacementPolicy::getLastAccess(int64 set, int64 way)
+AbstractReplacementPolicy::getLastAccess(int64_t set, int64_t way)
 {
     return m_last_ref_ptr[set][way];
 }
diff --git a/src/mem/ruby/structures/AbstractReplacementPolicy.hh b/src/mem/ruby/structures/AbstractReplacementPolicy.hh
index 03ef0d2fd..c118f3c11 100644
--- a/src/mem/ruby/structures/AbstractReplacementPolicy.hh
+++ b/src/mem/ruby/structures/AbstractReplacementPolicy.hh
@@ -44,13 +44,13 @@ class AbstractReplacementPolicy : public SimObject
     virtual ~AbstractReplacementPolicy();
 
     /* touch a block. a.k.a. update timestamp */
-    virtual void touch(int64 set, int64 way, Tick time) = 0;
+    virtual void touch(int64_t set, int64_t way, Tick time) = 0;
 
     /* returns the way to replace */
-    virtual int64 getVictim(int64 set) const = 0;
+    virtual int64_t getVictim(int64_t set) const = 0;
 
     /* get the time of the last access */
-    Tick getLastAccess(int64 set, int64 way);
+    Tick getLastAccess(int64_t set, int64_t way);
 
     virtual bool useOccupancy() const { return false; }
diff --git a/src/mem/ruby/structures/BankedArray.cc b/src/mem/ruby/structures/BankedArray.cc
index 8bc3cf584..b25962df6 100644
--- a/src/mem/ruby/structures/BankedArray.cc
+++ b/src/mem/ruby/structures/BankedArray.cc
@@ -49,7 +49,7 @@ BankedArray::BankedArray(unsigned int banks, Cycles accessLatency,
 }
 
 bool
-BankedArray::tryAccess(int64 idx)
+BankedArray::tryAccess(int64_t idx)
 {
     if (accessLatency == 0)
         return true;
@@ -65,7 +65,7 @@ BankedArray::tryAccess(int64 idx)
 }
 
 void
-BankedArray::reserve(int64 idx)
+BankedArray::reserve(int64_t idx)
 {
     if (accessLatency == 0)
         return;
@@ -91,7 +91,7 @@ BankedArray::reserve(int64 idx)
 }
 
 unsigned int
-BankedArray::mapIndexToBank(int64 idx)
+BankedArray::mapIndexToBank(int64_t idx)
 {
     if (banks == 1) {
         return 0;
diff --git a/src/mem/ruby/structures/BankedArray.hh b/src/mem/ruby/structures/BankedArray.hh
index 438186944..179676f19 100644
--- a/src/mem/ruby/structures/BankedArray.hh
+++ b/src/mem/ruby/structures/BankedArray.hh
@@ -51,7 +51,7 @@ class BankedArray
     {
       public:
         AccessRecord() : idx(0), startAccess(0), endAccess(0) {}
-        int64 idx;
+        int64_t idx;
         Tick startAccess;
         Tick endAccess;
     };
@@ -60,7 +60,7 @@ class BankedArray
     // otherwise, schedule the event and wait for it to complete
     std::vector<AccessRecord> busyBanks;
 
-    unsigned int mapIndexToBank(int64 idx);
+    unsigned int mapIndexToBank(int64_t idx);
 
   public:
     BankedArray(unsigned int banks, Cycles accessLatency,
@@ -68,9 +68,9 @@ class BankedArray
 
     // Note: We try the access based on the cache index, not the address
     // This is so we don't get aliasing on blocks being replaced
-    bool tryAccess(int64 idx);
+    bool tryAccess(int64_t idx);
 
-    void reserve(int64 idx);
+    void reserve(int64_t idx);
 
     Cycles getLatency() const { return accessLatency; }
 };
diff --git a/src/mem/ruby/structures/CacheMemory.cc b/src/mem/ruby/structures/CacheMemory.cc
index bb26ff03c..17c13502d 100644
--- a/src/mem/ruby/structures/CacheMemory.cc
+++ b/src/mem/ruby/structures/CacheMemory.cc
@@ -98,7 +98,7 @@ CacheMemory::~CacheMemory()
 }
 
 // convert a Address to its location in the cache
-int64
+int64_t
 CacheMemory::addressToCacheSet(Addr address) const
 {
     assert(address == makeLineAddress(address));
@@ -109,7 +109,7 @@ CacheMemory::addressToCacheSet(Addr address) const
 // Given a cache index: returns the index of the tag in a set.
 // returns -1 if the tag is not found.
 int
-CacheMemory::findTagInSet(int64 cacheSet, Addr tag) const
+CacheMemory::findTagInSet(int64_t cacheSet, Addr tag) const
 {
     assert(tag == makeLineAddress(tag));
     // search the set for the tags
@@ -124,7 +124,7 @@ CacheMemory::findTagInSet(int64 cacheSet, Addr tag) const
 // Given a cache index: returns the index of the tag in a set.
 // returns -1 if the tag is not found.
 int
-CacheMemory::findTagInSetIgnorePermissions(int64 cacheSet,
+CacheMemory::findTagInSetIgnorePermissions(int64_t cacheSet,
                                            Addr tag) const
 {
     assert(tag == makeLineAddress(tag));
@@ -164,7 +164,7 @@ CacheMemory::tryCacheAccess(Addr address, RubyRequestType type,
 {
     assert(address == makeLineAddress(address));
     DPRINTF(RubyCache, "address: %s\n", address);
-    int64 cacheSet = addressToCacheSet(address);
+    int64_t cacheSet = addressToCacheSet(address);
     int loc = findTagInSet(cacheSet, address);
     if (loc != -1) {
         // Do we even have a tag match?
@@ -191,7 +191,7 @@ CacheMemory::testCacheAccess(Addr address, RubyRequestType type,
 {
     assert(address == makeLineAddress(address));
     DPRINTF(RubyCache, "address: %s\n", address);
-    int64 cacheSet = addressToCacheSet(address);
+    int64_t cacheSet = addressToCacheSet(address);
     int loc = findTagInSet(cacheSet, address);
 
     if (loc != -1) {
@@ -213,7 +213,7 @@ bool
 CacheMemory::isTagPresent(Addr address) const
 {
     assert(address == makeLineAddress(address));
-    int64 cacheSet = addressToCacheSet(address);
+    int64_t cacheSet = addressToCacheSet(address);
     int loc = findTagInSet(cacheSet, address);
 
     if (loc == -1) {
@@ -233,7 +233,7 @@ CacheMemory::cacheAvail(Addr address) const
 {
     assert(address == makeLineAddress(address));
 
-    int64 cacheSet = addressToCacheSet(address);
+    int64_t cacheSet = addressToCacheSet(address);
 
     for (int i = 0; i < m_cache_assoc; i++) {
         AbstractCacheEntry* entry = m_cache[cacheSet][i];
@@ -259,7 +259,7 @@ CacheMemory::allocate(Addr address, AbstractCacheEntry* entry, bool touch)
     DPRINTF(RubyCache, "address: %s\n", address);
 
     // Find the first open slot
-    int64 cacheSet = addressToCacheSet(address);
+    int64_t cacheSet = addressToCacheSet(address);
     std::vector<AbstractCacheEntry*> &set = m_cache[cacheSet];
     for (int i = 0; i < m_cache_assoc; i++) {
         if (!set[i] || set[i]->m_Permission == AccessPermission_NotPresent) {
@@ -287,7 +287,7 @@ CacheMemory::deallocate(Addr address)
     assert(address == makeLineAddress(address));
     assert(isTagPresent(address));
     DPRINTF(RubyCache, "address: %s\n", address);
-    int64 cacheSet = addressToCacheSet(address);
+    int64_t cacheSet = addressToCacheSet(address);
     int loc = findTagInSet(cacheSet, address);
     if (loc != -1) {
         delete m_cache[cacheSet][loc];
@@ -303,7 +303,7 @@ CacheMemory::cacheProbe(Addr address) const
     assert(address == makeLineAddress(address));
     assert(!cacheAvail(address));
 
-    int64 cacheSet = addressToCacheSet(address);
+    int64_t cacheSet = addressToCacheSet(address);
     return m_cache[cacheSet][m_replacementPolicy_ptr->getVictim(cacheSet)]->
         m_Address;
 }
@@ -313,7 +313,7 @@ AbstractCacheEntry*
 CacheMemory::lookup(Addr address)
 {
     assert(address == makeLineAddress(address));
-    int64 cacheSet = addressToCacheSet(address);
+    int64_t cacheSet = addressToCacheSet(address);
     int loc = findTagInSet(cacheSet, address);
     if(loc == -1) return NULL;
     return m_cache[cacheSet][loc];
@@ -324,7 +324,7 @@ const AbstractCacheEntry*
 CacheMemory::lookup(Addr address) const
 {
     assert(address == makeLineAddress(address));
-    int64 cacheSet = addressToCacheSet(address);
+    int64_t cacheSet = addressToCacheSet(address);
     int loc = findTagInSet(cacheSet, address);
     if(loc == -1) return NULL;
     return m_cache[cacheSet][loc];
@@ -334,7 +334,7 @@ CacheMemory::lookup(Addr address) const
 void
 CacheMemory::setMRU(Addr address)
 {
-    int64 cacheSet = addressToCacheSet(address);
+    int64_t cacheSet = addressToCacheSet(address);
     int loc = findTagInSet(cacheSet, address);
 
     if(loc != -1)
@@ -344,9 +344,9 @@ CacheMemory::setMRU(Addr address)
 void
 CacheMemory::recordCacheContents(int cntrl, CacheRecorder* tr) const
 {
-    uint64 warmedUpBlocks = 0;
-    uint64 totalBlocks M5_VAR_USED = (uint64)m_cache_num_sets
-        * (uint64)m_cache_assoc;
+    uint64_t warmedUpBlocks = 0;
+    uint64_t totalBlocks M5_VAR_USED = (uint64_t)m_cache_num_sets *
+        (uint64_t)m_cache_assoc;
 
     for (int i = 0; i < m_cache_num_sets; i++) {
         for (int j = 0; j < m_cache_assoc; j++) {
@@ -376,8 +376,7 @@ CacheMemory::recordCacheContents(int cntrl, CacheRecorder* tr) const
     DPRINTF(RubyCacheTrace, "%s: %lli blocks of %lli total blocks"
             "recorded %.2f%% \n", name().c_str(), warmedUpBlocks,
-            (uint64)m_cache_num_sets * (uint64)m_cache_assoc,
-            (float(warmedUpBlocks)/float(totalBlocks))*100.0);
+            totalBlocks, (float(warmedUpBlocks) / float(totalBlocks)) * 100.0);
 }
 
 void
@@ -410,7 +409,7 @@ CacheMemory::setLocked(Addr address, int context)
 {
     DPRINTF(RubyCache, "Setting Lock for addr: %x to %d\n", address, context);
     assert(address == makeLineAddress(address));
-    int64 cacheSet = addressToCacheSet(address);
+    int64_t cacheSet = addressToCacheSet(address);
     int loc = findTagInSet(cacheSet, address);
     assert(loc != -1);
     m_cache[cacheSet][loc]->setLocked(context);
@@ -421,7 +420,7 @@ CacheMemory::clearLocked(Addr address)
 {
     DPRINTF(RubyCache, "Clear Lock for addr: %x\n", address);
     assert(address == makeLineAddress(address));
-    int64 cacheSet = addressToCacheSet(address);
+    int64_t cacheSet = addressToCacheSet(address);
     int loc = findTagInSet(cacheSet, address);
     assert(loc != -1);
     m_cache[cacheSet][loc]->clearLocked();
@@ -431,7 +430,7 @@ bool
 CacheMemory::isLocked(Addr address, int context)
 {
     assert(address == makeLineAddress(address));
-    int64 cacheSet = addressToCacheSet(address);
+    int64_t cacheSet = addressToCacheSet(address);
     int loc = findTagInSet(cacheSet, address);
     assert(loc != -1);
     DPRINTF(RubyCache, "Testing Lock for addr: %llx cur %d con %d\n",
@@ -594,13 +593,13 @@ CacheMemory::checkResourceAvailable(CacheResourceType res, Addr addr)
 }
 
 bool
-CacheMemory::isBlockInvalid(int64 cache_set, int64 loc)
+CacheMemory::isBlockInvalid(int64_t cache_set, int64_t loc)
 {
     return (m_cache[cache_set][loc]->m_Permission == AccessPermission_Invalid);
 }
 
 bool
-CacheMemory::isBlockNotBusy(int64 cache_set, int64 loc)
+CacheMemory::isBlockNotBusy(int64_t cache_set, int64_t loc)
 {
     return (m_cache[cache_set][loc]->m_Permission != AccessPermission_Busy);
 }
diff --git a/src/mem/ruby/structures/CacheMemory.hh b/src/mem/ruby/structures/CacheMemory.hh
index 6c719cb4f..1af446950 100644
--- a/src/mem/ruby/structures/CacheMemory.hh
+++ b/src/mem/ruby/structures/CacheMemory.hh
@@ -98,8 +98,8 @@ class CacheMemory : public SimObject
     Cycles getTagLatency() const { return tagArray.getLatency(); }
     Cycles getDataLatency() const { return dataArray.getLatency(); }
 
-    bool isBlockInvalid(int64 cache_set, int64 loc);
-    bool isBlockNotBusy(int64 cache_set, int64 loc);
+    bool isBlockInvalid(int64_t cache_set, int64_t loc);
+    bool isBlockNotBusy(int64_t cache_set, int64_t loc);
 
     // Hook for checkpointing the contents of the cache
     void recordCacheContents(int cntrl, CacheRecorder* tr) const;
@@ -149,12 +149,12 @@ class CacheMemory : public SimObject
 
   private:
     // convert a Address to its location in the cache
-    int64 addressToCacheSet(Addr address) const;
+    int64_t addressToCacheSet(Addr address) const;
 
     // Given a cache tag: returns the index of the tag in a set.
     // returns -1 if the tag is not found.
-    int findTagInSet(int64 line, Addr tag) const;
-    int findTagInSetIgnorePermissions(int64 cacheSet, Addr tag) const;
+    int findTagInSet(int64_t line, Addr tag) const;
+    int findTagInSetIgnorePermissions(int64_t cacheSet, Addr tag) const;
 
     // Private copy constructor and assignment operator
     CacheMemory(const CacheMemory& obj);
diff --git a/src/mem/ruby/structures/LRUPolicy.cc b/src/mem/ruby/structures/LRUPolicy.cc
index a1e3b277e..1c4990291 100644
--- a/src/mem/ruby/structures/LRUPolicy.cc
+++ b/src/mem/ruby/structures/LRUPolicy.cc
@@ -50,7 +50,7 @@ LRUReplacementPolicyParams::create()
 
 
 void
-LRUPolicy::touch(int64 set, int64 index, Tick time)
+LRUPolicy::touch(int64_t set, int64_t index, Tick time)
 {
     assert(index >= 0 && index < m_assoc);
     assert(set >= 0 && set < m_num_sets);
@@ -58,13 +58,11 @@ LRUPolicy::touch(int64 set, int64 index, Tick time)
     m_last_ref_ptr[set][index] = time;
 }
 
-int64
-LRUPolicy::getVictim(int64 set) const
+int64_t
+LRUPolicy::getVictim(int64_t set) const
 {
     Tick time, smallest_time;
-    int64 smallest_index;
-
-    smallest_index = 0;
+    int64_t smallest_index = 0;
     smallest_time = m_last_ref_ptr[set][0];
 
     for (unsigned i = 0; i < m_assoc; i++) {
diff --git a/src/mem/ruby/structures/LRUPolicy.hh b/src/mem/ruby/structures/LRUPolicy.hh
index 9a9c9e3eb..388718319 100644
--- a/src/mem/ruby/structures/LRUPolicy.hh
+++ b/src/mem/ruby/structures/LRUPolicy.hh
@@ -41,8 +41,8 @@ class LRUPolicy : public AbstractReplacementPolicy
     LRUPolicy(const Params * p);
     ~LRUPolicy();
 
-    void touch(int64 set, int64 way, Tick time);
-    int64 getVictim(int64 set) const;
+    void touch(int64_t set, int64_t way, Tick time);
+    int64_t getVictim(int64_t set) const;
 };
 
 #endif // __MEM_RUBY_STRUCTURES_LRUPOLICY_HH__
diff --git a/src/mem/ruby/structures/PseudoLRUPolicy.cc b/src/mem/ruby/structures/PseudoLRUPolicy.cc
index 8eee0821b..a2b21a625 100644
--- a/src/mem/ruby/structures/PseudoLRUPolicy.cc
+++ b/src/mem/ruby/structures/PseudoLRUPolicy.cc
@@ -38,7 +38,7 @@ PseudoLRUPolicy::PseudoLRUPolicy(const Params * p)
 
     // associativity cannot exceed capacity of tree representation
     assert(m_num_sets > 0 && m_assoc > 1 &&
-           m_assoc <= (int64) sizeof(uint64)*4);
+           m_assoc <= (int64_t) sizeof(uint64_t)*4);
 
     m_trees = NULL;
     m_num_levels = 0;
@@ -55,7 +55,7 @@ PseudoLRUPolicy::PseudoLRUPolicy(const Params * p)
         m_num_levels++;
     }
     assert(m_num_levels < sizeof(unsigned int)*4);
-    m_trees = new uint64[m_num_sets];
+    m_trees = new uint64_t[m_num_sets];
     for (unsigned i = 0; i < m_num_sets; i++) {
         m_trees[i] = 0;
     }
@@ -75,7 +75,7 @@ PseudoLRUPolicy::~PseudoLRUPolicy()
 }
 
 void
-PseudoLRUPolicy::touch(int64 set, int64 index, Tick time)
+PseudoLRUPolicy::touch(int64_t set, int64_t index, Tick time)
 {
     assert(index >= 0 && index < m_assoc);
     assert(set >= 0 && set < m_num_sets);
@@ -93,10 +93,10 @@ PseudoLRUPolicy::touch(int64 set, int64 index, Tick time)
     m_last_ref_ptr[set][index] = time;
 }
 
-int64
-PseudoLRUPolicy::getVictim(int64 set) const
+int64_t
+PseudoLRUPolicy::getVictim(int64_t set) const
 {
-    int64 index = 0;
+    int64_t index = 0;
 
     int tree_index = 0;
     int node_val;
diff --git a/src/mem/ruby/structures/PseudoLRUPolicy.hh b/src/mem/ruby/structures/PseudoLRUPolicy.hh
index fc5add8b1..a96c802b2 100644
--- a/src/mem/ruby/structures/PseudoLRUPolicy.hh
+++ b/src/mem/ruby/structures/PseudoLRUPolicy.hh
@@ -53,13 +53,13 @@ class PseudoLRUPolicy : public AbstractReplacementPolicy
     PseudoLRUPolicy(const Params * p);
     ~PseudoLRUPolicy();
 
-    void touch(int64 set, int64 way, Tick time);
-    int64 getVictim(int64 set) const;
+    void touch(int64_t set, int64_t way, Tick time);
+    int64_t getVictim(int64_t set) const;
 
   private:
     unsigned int m_effective_assoc;    /** nearest (to ceiling) power of 2 */
     unsigned int m_num_levels;         /** number of levels in the tree */
-    uint64* m_trees;                   /** bit representation of the
+    uint64_t* m_trees;                 /** bit representation of the
                                         * trees, one for each set */
 };
diff --git a/src/mem/ruby/structures/RubyMemoryControl.cc b/src/mem/ruby/structures/RubyMemoryControl.cc
index 0521aac06..413850627 100644
--- a/src/mem/ruby/structures/RubyMemoryControl.cc
+++ b/src/mem/ruby/structures/RubyMemoryControl.cc
@@ -176,7 +176,7 @@ void
 RubyMemoryControl::init()
 {
     m_msg_counter = 0;
 
-    assert(m_tFaw <= 62); // must fit in a uint64 shift register
+    assert(m_tFaw <= 62); // must fit in a uint64_t shift register
     m_total_banks = m_banks_per_rank * m_ranks_per_dimm * m_dimms_per_channel;
     m_total_ranks = m_ranks_per_dimm * m_dimms_per_channel;
@@ -213,7 +213,7 @@ RubyMemoryControl::init()
     // m_tfaw_count keeps track of how many 1 bits are set
     // in each shift register. When m_tfaw_count is >= 4,
     // new activates are not allowed.
-    m_tfaw_shift = new uint64[m_total_ranks];
+    m_tfaw_shift = new uint64_t[m_total_ranks];
     m_tfaw_count = new int[m_total_ranks];
     for (int i = 0; i < m_total_ranks; i++) {
         m_tfaw_shift[i] = 0;
@@ -236,7 +236,7 @@ RubyMemoryControl::reset()
 {
     m_msg_counter = 0;
 
-    assert(m_tFaw <= 62); // must fit in a uint64 shift register
+    assert(m_tFaw <= 62); // must fit in a uint64_t shift register
     m_total_banks = m_banks_per_rank * m_ranks_per_dimm * m_dimms_per_channel;
     m_total_ranks = m_ranks_per_dimm * m_dimms_per_channel;
diff --git a/src/mem/ruby/structures/RubyMemoryControl.hh b/src/mem/ruby/structures/RubyMemoryControl.hh
index c68a2da6c..376ce4d75 100644
--- a/src/mem/ruby/structures/RubyMemoryControl.hh
+++ b/src/mem/ruby/structures/RubyMemoryControl.hh
@@ -162,11 +162,11 @@ class RubyMemoryControl : public AbstractMemory, public Consumer
 
     // Each entry indicates number of address-bus cycles until bank
     // is reschedulable:
-    int* m_bankBusyCounter;
-    int* m_oldRequest;
+    int *m_bankBusyCounter;
+    int *m_oldRequest;
 
-    uint64* m_tfaw_shift;
-    int* m_tfaw_count;
+    uint64_t *m_tfaw_shift;
+    int *m_tfaw_count;
 
     // Each of these indicates number of address-bus cycles until
     // we can issue a new request of the corresponding type:
@@ -182,12 +182,12 @@ class RubyMemoryControl : public AbstractMemory, public Consumer
     int m_ageCounter;  // age of old requests; to detect starvation
     int m_idleCount;   // watchdog timer for shutting down
 
-    MemCntrlProfiler* m_profiler_ptr;
+    MemCntrlProfiler *m_profiler_ptr;
 
     class MemCntrlEvent : public Event
     {
      public:
-        MemCntrlEvent(RubyMemoryControl* _mem_cntrl)
+        MemCntrlEvent(RubyMemoryControl *_mem_cntrl)
         {
             mem_cntrl = _mem_cntrl;
         }
diff --git a/src/mem/ruby/system/CacheRecorder.cc b/src/mem/ruby/system/CacheRecorder.cc
index a2ac6bdf8..339cf1b4f 100644
--- a/src/mem/ruby/system/CacheRecorder.cc
+++ b/src/mem/ruby/system/CacheRecorder.cc
@@ -161,13 +161,13 @@ CacheRecorder::addRecord(int cntrl, Addr data_addr, Addr pc_addr,
     m_records.push_back(rec);
 }
 
-uint64
-CacheRecorder::aggregateRecords(uint8_t** buf, uint64 total_size)
+uint64_t
+CacheRecorder::aggregateRecords(uint8_t **buf, uint64_t total_size)
 {
     std::sort(m_records.begin(), m_records.end(), compareTraceRecords);
     int size = m_records.size();
-    uint64 current_size = 0;
+    uint64_t current_size = 0;
     int record_size = sizeof(TraceRecord) + m_block_size_bytes;
 
     for (int i = 0; i < size; ++i) {
diff --git a/src/mem/ruby/system/CacheRecorder.hh b/src/mem/ruby/system/CacheRecorder.hh
index a4a7261f4..44110cf9f 100644
--- a/src/mem/ruby/system/CacheRecorder.hh
+++ b/src/mem/ruby/system/CacheRecorder.hh
@@ -77,7 +77,7 @@ class CacheRecorder
     void addRecord(int cntrl, Addr data_addr, Addr pc_addr,
                    RubyRequestType type, Tick time, DataBlock& data);
 
-    uint64 aggregateRecords(uint8_t** data, uint64 size);
+    uint64_t aggregateRecords(uint8_t **data, uint64_t size);
 
     /*!
      * Function for flushing the memory contents of the caches to the
diff --git a/src/mem/ruby/system/System.cc b/src/mem/ruby/system/System.cc
index fd712a47a..6ebdd3ad6 100644
--- a/src/mem/ruby/system/System.cc
+++ b/src/mem/ruby/system/System.cc
@@ -102,8 +102,8 @@ RubySystem::~RubySystem()
 
 void
 RubySystem::makeCacheRecorder(uint8_t *uncompressed_trace,
-                              uint64 cache_trace_size,
-                              uint64 block_size_bytes)
+                              uint64_t cache_trace_size,
+                              uint64_t block_size_bytes)
 {
     vector<Sequencer*> sequencer_map;
     Sequencer* sequencer_ptr = NULL;
@@ -207,7 +207,7 @@ RubySystem::memWriteback()
 
 void
 RubySystem::writeCompressedTrace(uint8_t *raw_data, string filename,
-                                 uint64 uncompressed_trace_size)
+                                 uint64_t uncompressed_trace_size)
 {
     // Create the checkpoint file for the memory
     string thefile = CheckpointIn::dir() + "/" + filename.c_str();
@@ -240,7 +240,7 @@ RubySystem::serialize(CheckpointOut &cp) const
     // Store the cache-block size, so we are able to restore on systems with a
     // different cache-block size. CacheRecorder depends on the correct
     // cache-block size upon unserializing.
-    uint64 block_size_bytes = getBlockSizeBytes();
+    uint64_t block_size_bytes = getBlockSizeBytes();
     SERIALIZE_SCALAR(block_size_bytes);
 
     // Check that there's a valid trace to use. If not, then memory won't be
@@ -252,7 +252,7 @@ RubySystem::serialize(CheckpointOut &cp) const
 
     // Aggregate the trace entries together into a single array
     uint8_t *raw_data = new uint8_t[4096];
-    uint64 cache_trace_size = m_cache_recorder->aggregateRecords(&raw_data,
+    uint64_t cache_trace_size = m_cache_recorder->aggregateRecords(&raw_data,
                                                                  4096);
     string cache_trace_file = name() + ".cache.gz";
     writeCompressedTrace(raw_data, cache_trace_file, cache_trace_size);
@@ -274,7 +274,7 @@ RubySystem::drainResume()
 
 void
 RubySystem::readCompressedTrace(string filename, uint8_t *&raw_data,
-                                uint64& uncompressed_trace_size)
+                                uint64_t &uncompressed_trace_size)
 {
     // Read the trace file
     gzFile compressedTrace;
@@ -311,11 +311,11 @@ RubySystem::unserialize(CheckpointIn &cp)
 
     // This value should be set to the checkpoint-system's block-size.
     // Optional, as checkpoints without it can be run if the
     // checkpoint-system's block-size == current block-size.
-    uint64 block_size_bytes = getBlockSizeBytes();
+    uint64_t block_size_bytes = getBlockSizeBytes();
     UNSERIALIZE_OPT_SCALAR(block_size_bytes);
 
     string cache_trace_file;
-    uint64 cache_trace_size = 0;
+    uint64_t cache_trace_size = 0;
     UNSERIALIZE_SCALAR(cache_trace_file);
     UNSERIALIZE_SCALAR(cache_trace_size);
diff --git a/src/mem/ruby/system/System.hh b/src/mem/ruby/system/System.hh
index a7afac45c..210a633e8 100644
--- a/src/mem/ruby/system/System.hh
+++ b/src/mem/ruby/system/System.hh
@@ -119,14 +119,14 @@ class RubySystem : public ClockedObject
     RubySystem& operator=(const RubySystem& obj);
 
     void makeCacheRecorder(uint8_t *uncompressed_trace,
-                           uint64 cache_trace_size,
-                           uint64 block_size_bytes);
+                           uint64_t cache_trace_size,
+                           uint64_t block_size_bytes);
 
     static void readCompressedTrace(std::string filename,
                                     uint8_t *&raw_data,
-                                    uint64& uncompressed_trace_size);
+                                    uint64_t &uncompressed_trace_size);
     static void writeCompressedTrace(uint8_t *raw_data, std::string file,
-                                     uint64 uncompressed_trace_size);
+                                     uint64_t uncompressed_trace_size);
 
   private:
     // configuration parameters
diff --git a/src/mem/slicc/symbols/StateMachine.py b/src/mem/slicc/symbols/StateMachine.py
index 03c78c8bf..e22c53fe8 100644
--- a/src/mem/slicc/symbols/StateMachine.py
+++ b/src/mem/slicc/symbols/StateMachine.py
@@ -320,9 +320,9 @@ class $c_ident : public AbstractController
     void countTransition(${ident}_State state, ${ident}_Event event);
     void possibleTransition(${ident}_State state, ${ident}_Event event);
-    uint64 getEventCount(${ident}_Event event);
+    uint64_t getEventCount(${ident}_Event event);
     bool isPossible(${ident}_State state, ${ident}_Event event);
-    uint64 getTransitionCount(${ident}_State state, ${ident}_Event event);
+    uint64_t getTransitionCount(${ident}_State state, ${ident}_Event event);
 
 private:
 ''')
@@ -802,7 +802,7 @@ $c_ident::possibleTransition(${ident}_State state,
     m_possible[state][event] = true;
 }
 
-uint64
+uint64_t
 $c_ident::getEventCount(${ident}_Event event)
 {
     return m_event_counters[event];
@@ -814,7 +814,7 @@ $c_ident::isPossible(${ident}_State state, ${ident}_Event event)
     return m_possible[state][event];
 }
 
-uint64
+uint64_t
 $c_ident::getTransitionCount(${ident}_State state,
                              ${ident}_Event event)
 {