author | Nathan Binkert <nate@binkert.org> | 2010-06-10 23:17:07 -0700
committer | Nathan Binkert <nate@binkert.org> | 2010-06-10 23:17:07 -0700
commit | 3df84fd8a0ce3959c0deb4c206d910fc0d050f47 (patch)
tree | e9532570ee56986f92c40511f1fc83991d6691c9 /src/mem/ruby/system
parent | 006818aeea6176c4500c5f7414e9f2a822c77062 (diff)
download | gem5-3df84fd8a0ce3959c0deb4c206d910fc0d050f47.tar.xz
ruby: get rid of the Map class
Diffstat (limited to 'src/mem/ruby/system')
-rw-r--r-- | src/mem/ruby/system/MemoryControl.cc | 1
-rw-r--r-- | src/mem/ruby/system/MemoryControl.hh | 1
-rw-r--r-- | src/mem/ruby/system/PerfectCacheMemory.hh | 16
-rw-r--r-- | src/mem/ruby/system/PersistentTable.cc | 104
-rw-r--r-- | src/mem/ruby/system/PersistentTable.hh | 5
-rw-r--r-- | src/mem/ruby/system/Sequencer.cc | 158
-rw-r--r-- | src/mem/ruby/system/Sequencer.hh | 8
-rw-r--r-- | src/mem/ruby/system/TBETable.hh | 20
-rw-r--r-- | src/mem/ruby/system/TimerTable.cc | 39
-rw-r--r-- | src/mem/ruby/system/TimerTable.hh | 7
10 files changed, 203 insertions, 156 deletions
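The change replaces the GEMS-era Map wrapper with the m5::hash_map type from base/hashmap.hh throughout these files. A minimal sketch of how the old Map calls line up with the unordered-map style interface, using std::unordered_map as a stand-in for m5::hash_map (the stand-in and the key/value types are illustrative only, not gem5 code):

// Rough correspondence between the old GEMS Map interface and the
// unordered-map style interface used after this patch.  std::unordered_map
// stands in for m5::hash_map; the real typedef lives in base/hashmap.hh and
// may differ in detail.
#include <cassert>
#include <string>
#include <unordered_map>

int main()
{
    std::unordered_map<std::string, int> m;

    m["a"] = 1;                       // old: m.add("a", 1)
    assert(m.count("a") > 0);         // old: m.exist("a")
    assert(m.find("a")->second == 1); // old: m.lookup("a")
    m.erase("a");                     // old: m.erase("a") / m.remove("a")
    assert(m.empty());                // old: m.size() == 0

    return 0;
}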
diff --git a/src/mem/ruby/system/MemoryControl.cc b/src/mem/ruby/system/MemoryControl.cc
index 5c455049e..91260a907 100644
--- a/src/mem/ruby/system/MemoryControl.cc
+++ b/src/mem/ruby/system/MemoryControl.cc
@@ -105,7 +105,6 @@
  */
 
 #include "base/cprintf.hh"
-#include "mem/gems_common/Map.hh"
 #include "mem/ruby/common/Address.hh"
 #include "mem/ruby/common/Consumer.hh"
 #include "mem/ruby/common/Global.hh"
diff --git a/src/mem/ruby/system/MemoryControl.hh b/src/mem/ruby/system/MemoryControl.hh
index 839fd007c..0d5e2c38e 100644
--- a/src/mem/ruby/system/MemoryControl.hh
+++ b/src/mem/ruby/system/MemoryControl.hh
@@ -33,7 +33,6 @@
 #include <list>
 #include <string>
 
-#include "mem/gems_common/Map.hh"
 #include "mem/protocol/MemoryMsg.hh"
 #include "mem/ruby/common/Address.hh"
 #include "mem/ruby/common/Consumer.hh"
diff --git a/src/mem/ruby/system/PerfectCacheMemory.hh b/src/mem/ruby/system/PerfectCacheMemory.hh
index e1d816923..823dd7071 100644
--- a/src/mem/ruby/system/PerfectCacheMemory.hh
+++ b/src/mem/ruby/system/PerfectCacheMemory.hh
@@ -29,7 +29,7 @@
 #ifndef __MEM_RUBY_SYSTEM_PERFECTCACHEMEMORY_HH__
 #define __MEM_RUBY_SYSTEM_PERFECTCACHEMEMORY_HH__
 
-#include "mem/gems_common/Map.hh"
+#include "base/hashmap.hh"
 #include "mem/protocol/AccessPermission.hh"
 #include "mem/ruby/common/Address.hh"
 #include "mem/ruby/common/Global.hh"
@@ -94,7 +94,7 @@ class PerfectCacheMemory
     PerfectCacheMemory& operator=(const PerfectCacheMemory& obj);
 
     // Data Members (m_prefix)
-    Map<Address, PerfectCacheLineState<ENTRY> > m_map;
+    m5::hash_map<Address, PerfectCacheLineState<ENTRY> > m_map;
 };
 
 template<class ENTRY>
@@ -131,7 +131,7 @@ template<class ENTRY>
 inline bool
 PerfectCacheMemory<ENTRY>::isTagPresent(const Address& address) const
 {
-    return m_map.exist(line_address(address));
+    return m_map.count(line_address(address)) > 0;
 }
 
 template<class ENTRY>
@@ -150,7 +150,7 @@ PerfectCacheMemory<ENTRY>::allocate(const Address& address)
     PerfectCacheLineState<ENTRY> line_state;
     line_state.m_permission = AccessPermission_Busy;
     line_state.m_entry = ENTRY();
-    m_map.add(line_address(address), line_state);
+    m_map[line_address(address)] = line_state;
 }
 
 // deallocate entry
@@ -174,7 +174,7 @@ template<class ENTRY>
 inline ENTRY&
 PerfectCacheMemory<ENTRY>::lookup(const Address& address)
 {
-    return m_map.lookup(line_address(address)).m_entry;
+    return m_map[line_address(address)].m_entry;
 }
 
 // looks an address up in the cache
@@ -182,14 +182,14 @@ template<class ENTRY>
 inline const ENTRY&
 PerfectCacheMemory<ENTRY>::lookup(const Address& address) const
 {
-    return m_map.lookup(line_address(address)).m_entry;
+    return m_map[line_address(address)].m_entry;
 }
 
 template<class ENTRY>
 inline AccessPermission
 PerfectCacheMemory<ENTRY>::getPermission(const Address& address) const
 {
-    return m_map.lookup(line_address(address)).m_permission;
+    return m_map[line_address(address)].m_permission;
 }
 
 template<class ENTRY>
@@ -199,7 +199,7 @@ PerfectCacheMemory<ENTRY>::changePermission(const Address& address,
 {
     Address line_address = address;
     line_address.makeLineAddress();
-    PerfectCacheLineState<ENTRY>& line_state = m_map.lookup(line_address);
+    PerfectCacheLineState<ENTRY>& line_state = m_map[line_address];
     AccessPermission old_perm = line_state.m_permission;
     line_state.m_permission = new_perm;
 }
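PerfectCacheMemory now leans on two properties of the new container: count() for presence tests and operator[], which default-constructs and inserts a value the first time a key is touched. A small sketch of that behavior, with std::unordered_map standing in for m5::hash_map and a simplified line-state type instead of the Ruby classes:

// Sketch of the operator[] semantics allocate() relies on: indexing an
// absent key default-constructs the mapped value and inserts it, so
// "m_map[addr] = line_state" both allocates and assigns.
#include <cassert>
#include <unordered_map>

struct LineState { int permission = 0; int entry = 0; };

int main()
{
    std::unordered_map<long, LineState> map;

    assert(map.count(0x40) == 0);  // isTagPresent() == false
    map[0x40].permission = 1;      // inserts a default LineState, then writes
    assert(map.count(0x40) == 1);  // isTagPresent() == true
    map.erase(0x40);               // deallocate
    return 0;
}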
diff --git a/src/mem/ruby/system/PersistentTable.cc b/src/mem/ruby/system/PersistentTable.cc
index a8e6b0323..c60d39b8a 100644
--- a/src/mem/ruby/system/PersistentTable.cc
+++ b/src/mem/ruby/system/PersistentTable.cc
@@ -40,13 +40,10 @@ int persistent_randomize[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
 
 PersistentTable::PersistentTable()
 {
-    m_map_ptr = new Map<Address, PersistentTableEntry>;
 }
 
 PersistentTable::~PersistentTable()
 {
-    delete m_map_ptr;
-    m_map_ptr = NULL;
 }
 
 void
@@ -63,28 +60,25 @@ PersistentTable::persistentRequestLock(const Address& address,
 #endif
 
     assert(address == line_address(address));
 
-    if (!m_map_ptr->exist(address)) {
-        // Allocate if not present
-        PersistentTableEntry entry;
-        entry.m_starving.add(locker);
-        if (type == AccessType_Write) {
-            entry.m_request_to_write.add(locker);
-        }
-        m_map_ptr->add(address, entry);
-    } else {
-        PersistentTableEntry& entry = m_map_ptr->lookup(address);
-        //
+    static const PersistentTableEntry dflt;
+    pair<AddressMap::iterator, bool> r =
+        m_map.insert(AddressMap::value_type(address, dflt));
+    bool present = !r.second;
+    AddressMap::iterator i = r.first;
+    PersistentTableEntry &entry = i->second;
+
+    if (present) {
         // Make sure we're not already in the locked set
-        //
         assert(!(entry.m_starving.isElement(locker)));
+    }
 
-        entry.m_starving.add(locker);
-        if (type == AccessType_Write) {
-            entry.m_request_to_write.add(locker);
-        }
+    entry.m_starving.add(locker);
+    if (type == AccessType_Write)
+        entry.m_request_to_write.add(locker);
+
+    if (present)
         assert(entry.m_marked.isSubset(entry.m_starving));
-    }
 }
 
 void
@@ -100,8 +94,8 @@ PersistentTable::persistentRequestUnlock(const Address& address,
 #endif
 
     assert(address == line_address(address));
-    assert(m_map_ptr->exist(address));
-    PersistentTableEntry& entry = m_map_ptr->lookup(address);
+    assert(m_map.count(address));
+    PersistentTableEntry& entry = m_map[address];
 
     //
     // Make sure we're in the locked set
@@ -116,7 +110,7 @@ PersistentTable::persistentRequestUnlock(const Address& address,
     // Deallocate if empty
     if (entry.m_starving.isEmpty()) {
        assert(entry.m_marked.isEmpty());
-        m_map_ptr->erase(address);
+        m_map.erase(address);
     }
 }
 
@@ -125,24 +119,31 @@ PersistentTable::okToIssueStarving(const Address& address,
     MachineID machId) const
 {
     assert(address == line_address(address));
-    if (!m_map_ptr->exist(address)) {
+
+    AddressMap::const_iterator i = m_map.find(address);
+    if (i == m_map.end()) {
         // No entry present
         return true;
-    } else if (m_map_ptr->lookup(address).m_starving.isElement(machId)) {
+    }
+
+    const PersistentTableEntry &entry = i->second;
+
+    if (entry.m_starving.isElement(machId)) {
         // We can't issue another lockdown until are previous unlock
         // has occurred
         return false;
-    } else {
-        return m_map_ptr->lookup(address).m_marked.isEmpty();
     }
+
+    return entry.m_marked.isEmpty();
 }
 
 MachineID
 PersistentTable::findSmallest(const Address& address) const
 {
     assert(address == line_address(address));
-    assert(m_map_ptr->exist(address));
-    const PersistentTableEntry& entry = m_map_ptr->lookup(address);
+    AddressMap::const_iterator i = m_map.find(address);
+    assert(i != m_map.end());
+    const PersistentTableEntry& entry = i->second;
     return entry.m_starving.smallestElement();
 }
 
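The rewritten persistentRequestLock() uses insert() returning a pair<iterator, bool> so that a single probe either finds the existing entry or creates a default one, with the bool telling the two cases apart. A sketch of the idiom under simplified types (the Table, lock() and locker names below are illustrative, not Ruby code):

// insert() returns {iterator, inserted?}: one hash probe both finds an
// existing entry and creates a default one when absent.
#include <cassert>
#include <set>
#include <unordered_map>
#include <utility>

typedef std::unordered_map<long, std::set<int> > Table;

void lock(Table &table, long addr, int locker)
{
    std::pair<Table::iterator, bool> r =
        table.insert(Table::value_type(addr, std::set<int>()));
    bool present = !r.second;        // false on the very first lock request
    std::set<int> &starving = r.first->second;

    if (present)
        assert(starving.count(locker) == 0); // not already in the locked set
    starving.insert(locker);
}

int main()
{
    Table t;
    lock(t, 0x100, 3);
    lock(t, 0x100, 7);               // second locker joins the same entry
    assert(t[0x100].size() == 2);
    return 0;
}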
@@ -150,8 +151,9 @@ AccessType
 PersistentTable::typeOfSmallest(const Address& address) const
 {
     assert(address == line_address(address));
-    assert(m_map_ptr->exist(address));
-    const PersistentTableEntry& entry = m_map_ptr->lookup(address);
+    AddressMap::const_iterator i = m_map.find(address);
+    assert(i != m_map.end());
+    const PersistentTableEntry& entry = i->second;
     if (entry.m_request_to_write.
         isElement(entry.m_starving.smallestElement())) {
         return AccessType_Write;
@@ -164,15 +166,17 @@ void
 PersistentTable::markEntries(const Address& address)
 {
     assert(address == line_address(address));
-    if (m_map_ptr->exist(address)) {
-        PersistentTableEntry& entry = m_map_ptr->lookup(address);
+    AddressMap::iterator i = m_map.find(address);
+    if (i == m_map.end())
+        return;
 
-        // None should be marked
-        assert(entry.m_marked.isEmpty());
+    PersistentTableEntry& entry = i->second;
 
-        // Mark all the nodes currently in the table
-        entry.m_marked = entry.m_starving;
-    }
+    // None should be marked
+    assert(entry.m_marked.isEmpty());
+
+    // Mark all the nodes currently in the table
+    entry.m_marked = entry.m_starving;
 }
 
 bool
@@ -181,29 +185,31 @@ PersistentTable::isLocked(const Address& address) const
     assert(address == line_address(address));
 
     // If an entry is present, it must be locked
-    return m_map_ptr->exist(address);
+    return m_map.count(address) > 0;
 }
 
 int
 PersistentTable::countStarvingForAddress(const Address& address) const
 {
-    if (m_map_ptr->exist(address)) {
-        PersistentTableEntry& entry = m_map_ptr->lookup(address);
-        return (entry.m_starving.count());
-    } else {
+    assert(address == line_address(address));
+    AddressMap::const_iterator i = m_map.find(address);
+    if (i == m_map.end())
         return 0;
-    }
+
+    const PersistentTableEntry& entry = i->second;
+    return entry.m_starving.count();
 }
 
 int
 PersistentTable::countReadStarvingForAddress(const Address& address) const
 {
-    if (m_map_ptr->exist(address)) {
-        PersistentTableEntry& entry = m_map_ptr->lookup(address);
-        return (entry.m_starving.count() - entry.m_request_to_write.count());
-    } else {
+    assert(address == line_address(address));
+    AddressMap::const_iterator i = m_map.find(address);
+    if (i == m_map.end())
         return 0;
-    }
+
+    const PersistentTableEntry& entry = i->second;
+    return entry.m_starving.count() - entry.m_request_to_write.count();
 }
 
 void
diff --git a/src/mem/ruby/system/PersistentTable.hh b/src/mem/ruby/system/PersistentTable.hh
index 667d68dcb..356406cbd 100644
--- a/src/mem/ruby/system/PersistentTable.hh
+++ b/src/mem/ruby/system/PersistentTable.hh
@@ -31,7 +31,7 @@
 
 #include <iostream>
 
-#include "mem/gems_common/Map.hh"
+#include "base/hashmap.hh"
 #include "mem/protocol/AccessType.hh"
 #include "mem/ruby/common/Address.hh"
 #include "mem/ruby/common/Global.hh"
@@ -79,7 +79,8 @@ class PersistentTable
     PersistentTable& operator=(const PersistentTable& obj);
 
     // Data Members (m_prefix)
-    Map<Address, PersistentTableEntry>* m_map_ptr;
+    typedef m5::hash_map<Address, PersistentTableEntry> AddressMap;
+    AddressMap m_map;
 };
 
 inline std::ostream&
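The counting helpers are const, so they cannot use operator[] (which would insert); they call find() and fall back to a default when the address has no entry. A sketch with a simplified value type standing in for PersistentTableEntry:

// find()-based read path for const accessors: absent key -> default result.
#include <cassert>
#include <unordered_map>

typedef std::unordered_map<long, int> Table;   // address -> starving count

int countStarving(const Table &table, long addr)
{
    Table::const_iterator i = table.find(addr);
    if (i == table.end())
        return 0;                    // no entry: nothing is starving
    return i->second;
}

int main()
{
    Table t;
    t[0x80] = 2;
    assert(countStarving(t, 0x80) == 2);
    assert(countStarving(t, 0xc0) == 0);
    return 0;
}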
diff --git a/src/mem/ruby/system/Sequencer.cc b/src/mem/ruby/system/Sequencer.cc
index 9ba150f11..87cf59a44 100644
--- a/src/mem/ruby/system/Sequencer.cc
+++ b/src/mem/ruby/system/Sequencer.cc
@@ -28,7 +28,6 @@
 
 #include "base/str.hh"
 #include "cpu/rubytest/RubyTester.hh"
-#include "mem/gems_common/Map.hh"
 #include "mem/protocol/CacheMsg.hh"
 #include "mem/protocol/Protocol.hh"
 #include "mem/protocol/Protocol.hh"
@@ -92,35 +91,39 @@ Sequencer::wakeup()
     // Check across all outstanding requests
     int total_outstanding = 0;
 
-    std::vector<Address> keys = m_readRequestTable.keys();
-    for (int i = 0; i < keys.size(); i++) {
-        SequencerRequest* request = m_readRequestTable.lookup(keys[i]);
-        if (current_time - request->issue_time >= m_deadlock_threshold) {
-            WARN_MSG("Possible Deadlock detected");
-            WARN_EXPR(request);
-            WARN_EXPR(m_version);
-            WARN_EXPR(request->ruby_request.paddr);
-            WARN_EXPR(keys.size());
-            WARN_EXPR(current_time);
-            WARN_EXPR(request->issue_time);
-            WARN_EXPR(current_time - request->issue_time);
-            ERROR_MSG("Aborting");
-        }
+    RequestTable::iterator read = m_readRequestTable.begin();
+    RequestTable::iterator read_end = m_readRequestTable.end();
+    for (; read != read_end; ++read) {
+        SequencerRequest* request = read->second;
+        if (current_time - request->issue_time < m_deadlock_threshold)
+            continue;
+
+        WARN_MSG("Possible Deadlock detected");
+        WARN_EXPR(request);
+        WARN_EXPR(m_version);
+        WARN_EXPR(request->ruby_request.paddr);
+        WARN_EXPR(m_readRequestTable.size());
+        WARN_EXPR(current_time);
+        WARN_EXPR(request->issue_time);
+        WARN_EXPR(current_time - request->issue_time);
+        ERROR_MSG("Aborting");
     }
 
-    keys = m_writeRequestTable.keys();
-    for (int i = 0; i < keys.size(); i++) {
-        SequencerRequest* request = m_writeRequestTable.lookup(keys[i]);
-        if (current_time - request->issue_time >= m_deadlock_threshold) {
-            WARN_MSG("Possible Deadlock detected");
-            WARN_EXPR(request);
-            WARN_EXPR(m_version);
-            WARN_EXPR(current_time);
-            WARN_EXPR(request->issue_time);
-            WARN_EXPR(current_time - request->issue_time);
-            WARN_EXPR(keys.size());
-            ERROR_MSG("Aborting");
-        }
+    RequestTable::iterator write = m_writeRequestTable.begin();
+    RequestTable::iterator write_end = m_writeRequestTable.end();
+    for (; write != write_end; ++write) {
+        SequencerRequest* request = write->second;
+        if (current_time - request->issue_time < m_deadlock_threshold)
+            continue;
+
+        WARN_MSG("Possible Deadlock detected");
+        WARN_EXPR(request);
+        WARN_EXPR(m_version);
+        WARN_EXPR(current_time);
+        WARN_EXPR(request->issue_time);
+        WARN_EXPR(current_time - request->issue_time);
+        WARN_EXPR(m_writeRequestTable.size());
+        ERROR_MSG("Aborting");
     }
 
     total_outstanding += m_writeRequestTable.size();
@@ -160,13 +163,14 @@ Sequencer::printProgress(ostream& out) const
     out << "---------------" << endl;
 
     out << "outstanding requests" << endl;
-    std::vector<Address> rkeys = m_readRequestTable.keys();
-    int read_size = rkeys.size();
-    out << "proc " << m_version << " Read Requests = " << read_size << endl;
+    out << "proc " << m_Read
+        << " version Requests = " << m_readRequestTable.size() << endl;
 
     // print the request table
-    for (int i = 0; i < read_size; ++i) {
-        SequencerRequest *request = m_readRequestTable.lookup(rkeys[i]);
+    RequestTable::iterator read = m_readRequestTable.begin();
+    RequestTable::iterator read_end = m_readRequestTable.end();
+    for (; read != read_end; ++read) {
+        SequencerRequest* request = read->second;
         out << "\tRequest[ " << i << " ] = " << request->type
             << " Address " << rkeys[i]
             << " Posted " << request->issue_time
@@ -174,13 +178,14 @@ Sequencer::printProgress(ostream& out) const
         total_demand++;
     }
 
-    std::vector<Address> wkeys = m_writeRequestTable.keys();
-    int write_size = wkeys.size();
-    out << "proc " << m_version << " Write Requests = " << write_size << endl;
+    out << "proc " << m_version
+        << " Write Requests = " << m_writeRequestTable.size << endl;
 
     // print the request table
-    for (int i = 0; i < write_size; ++i){
-        CacheMsg &request = m_writeRequestTable.lookup(wkeys[i]);
+    RequestTable::iterator write = m_writeRequestTable.begin();
+    RequestTable::iterator write_end = m_writeRequestTable.end();
+    for (; write != write_end; ++write) {
+        SequencerRequest* request = write->second;
         out << "\tRequest[ " << i << " ] = " << request.getType()
             << " Address " << wkeys[i]
             << " Posted " << request.getTime()
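The wakeup() rewrite walks each request table directly with iterators instead of copying all keys into a std::vector and re-looking each one up. A sketch of that scan under simplified types (Request, checkDeadlock() and the threshold below are placeholders, not the Ruby classes):

// Single-pass iterator scan over an unordered map, flagging stale entries.
#include <iostream>
#include <unordered_map>

struct Request { long issue_time; };
typedef std::unordered_map<long, Request> RequestTable;

void checkDeadlock(const RequestTable &table, long now, long threshold)
{
    RequestTable::const_iterator i = table.begin();
    RequestTable::const_iterator end = table.end();
    for (; i != end; ++i) {
        if (now - i->second.issue_time < threshold)
            continue;                // still young enough, keep scanning
        std::cout << "possible deadlock at address 0x" << std::hex
                  << i->first << std::dec << "\n";
    }
}

int main()
{
    RequestTable t;
    t[0x200].issue_time = 0;
    checkDeadlock(t, 1000, 500);     // reports the stale request
    return 0;
}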
@@ -231,26 +236,32 @@ Sequencer::insertRequest(SequencerRequest* request)
         (request->ruby_request.type == RubyRequestType_RMW_Write) ||
         (request->ruby_request.type == RubyRequestType_Locked_Read) ||
         (request->ruby_request.type == RubyRequestType_Locked_Write)) {
-        if (m_writeRequestTable.exist(line_addr)) {
-            m_writeRequestTable.lookup(line_addr) = request;
+        pair<RequestTable::iterator, bool> r =
+            m_writeRequestTable.insert(RequestTable::value_type(line_addr, 0));
+        bool success = r.second;
+        RequestTable::iterator i = r.first;
+        if (!success) {
+            i->second = request;
             // return true;
             // drh5: isn't this an error? do you lose the initial request?
             assert(0);
         }
-        m_writeRequestTable.allocate(line_addr);
-        m_writeRequestTable.lookup(line_addr) = request;
+        i->second = request;
 
         m_outstanding_count++;
     } else {
-        if (m_readRequestTable.exist(line_addr)) {
-            m_readRequestTable.lookup(line_addr) = request;
+        pair<RequestTable::iterator, bool> r =
+            m_readRequestTable.insert(RequestTable::value_type(line_addr, 0));
+        bool success = r.second;
+        RequestTable::iterator i = r.first;
+        if (!success) {
+            i->second = request;
             // return true;
             // drh5: isn't this an error? do you lose the initial request?
             assert(0);
         }
-        m_readRequestTable.allocate(line_addr);
-        m_readRequestTable.lookup(line_addr) = request;
+        i->second = request;
 
         m_outstanding_count++;
     }
 
@@ -263,6 +274,14 @@ Sequencer::insertRequest(SequencerRequest* request)
 }
 
 void
+Sequencer::markRemoved()
+{
+    m_outstanding_count--;
+    assert(m_outstanding_count ==
+           m_writeRequestTable.size() + m_readRequestTable.size());
+}
+
+void
 Sequencer::removeRequest(SequencerRequest* srequest)
 {
     assert(m_outstanding_count ==
@@ -276,24 +295,26 @@ Sequencer::removeRequest(SequencerRequest* srequest)
         (ruby_request.type == RubyRequestType_RMW_Write) ||
         (ruby_request.type == RubyRequestType_Locked_Read) ||
         (ruby_request.type == RubyRequestType_Locked_Write)) {
-        m_writeRequestTable.deallocate(line_addr);
+        m_writeRequestTable.erase(line_addr);
     } else {
-        m_readRequestTable.deallocate(line_addr);
+        m_readRequestTable.erase(line_addr);
     }
 
-    m_outstanding_count--;
-    assert(m_outstanding_count == m_writeRequestTable.size() + m_readRequestTable.size());
+    markRemoved();
 }
 
 void
 Sequencer::writeCallback(const Address& address, DataBlock& data)
 {
     assert(address == line_address(address));
-    assert(m_writeRequestTable.exist(line_address(address)));
+    assert(m_writeRequestTable.count(line_address(address)));
 
-    SequencerRequest* request = m_writeRequestTable.lookup(address);
+    RequestTable::iterator i = m_writeRequestTable.find(address);
+    assert(i != m_writeRequestTable.end());
+    SequencerRequest* request = i->second;
 
-    removeRequest(request);
+    m_writeRequestTable.erase(i);
+    markRemoved();
 
     assert((request->ruby_request.type == RubyRequestType_ST) ||
            (request->ruby_request.type == RubyRequestType_RMW_Read) ||
@@ -316,10 +337,14 @@ void
 Sequencer::readCallback(const Address& address, DataBlock& data)
 {
     assert(address == line_address(address));
-    assert(m_readRequestTable.exist(line_address(address)));
+    assert(m_readRequestTable.count(line_address(address)));
 
-    SequencerRequest* request = m_readRequestTable.lookup(address);
-    removeRequest(request);
+    RequestTable::iterator i = m_readRequestTable.find(address);
+    assert(i != m_readRequestTable.end());
+    SequencerRequest* request = i->second;
+
+    m_readRequestTable.erase(i);
+    markRemoved();
 
     assert((request->ruby_request.type == RubyRequestType_LD) ||
            (request->ruby_request.type == RubyRequestType_RMW_Read) ||
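In the callbacks above, find() returns an iterator that serves both to read the request and to erase it, so the table is hashed only once, and the shared bookkeeping moves into markRemoved(). A sketch of the pattern with simplified types (the Sequencer struct below is a toy, not the Ruby class):

// erase(iterator) after find() avoids a second hash lookup; the counter
// maintenance lives in one helper.
#include <cassert>
#include <unordered_map>

typedef std::unordered_map<long, int> RequestTable;   // address -> request id

struct Sequencer
{
    RequestTable reads;
    int outstanding;

    void markRemoved()
    {
        --outstanding;
        assert(outstanding == (int)reads.size());
    }

    int readCallback(long addr)
    {
        RequestTable::iterator i = reads.find(addr);
        assert(i != reads.end());
        int request = i->second;
        reads.erase(i);              // erase by iterator: no second lookup
        markRemoved();
        return request;
    }
};

int main()
{
    Sequencer s;
    s.reads[0x40] = 7;
    s.outstanding = 1;
    assert(s.readCallback(0x40) == 7);
    return 0;
}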
@@ -409,9 +434,9 @@ RequestStatus
 Sequencer::getRequestStatus(const RubyRequest& request)
 {
     bool is_outstanding_store =
-        m_writeRequestTable.exist(line_address(Address(request.paddr)));
+        !!m_writeRequestTable.count(line_address(Address(request.paddr)));
     bool is_outstanding_load =
-        m_readRequestTable.exist(line_address(Address(request.paddr)));
+        !!m_readRequestTable.count(line_address(Address(request.paddr)));
     if (is_outstanding_store) {
         if ((request.type == RubyRequestType_LD) ||
             (request.type == RubyRequestType_IFETCH) ||
@@ -441,7 +466,7 @@ Sequencer::getRequestStatus(const RubyRequest& request)
 bool
 Sequencer::empty() const
 {
-    return m_writeRequestTable.size() == 0 && m_readRequestTable.size() == 0;
+    return m_writeRequestTable.empty() && m_readRequestTable.empty();
 }
 
 RequestStatus
@@ -580,6 +605,21 @@ Sequencer::tryCacheAccess(const Address& addr, CacheRequestType type,
 }
 #endif
 
+template <class KEY, class VALUE>
+std::ostream &
+operator<<(ostream &out, const m5::hash_map<KEY, VALUE> &map)
+{
+    typename m5::hash_map<KEY, VALUE>::const_iterator i = map.begin();
+    typename m5::hash_map<KEY, VALUE>::const_iterator end = map.end();
+
+    out << "[";
+    for (; i != end; ++i)
+        out << " " << i->first << "=" << i->second;
+    out << " ]";
+
+    return out;
+}
+
 void
 Sequencer::print(ostream& out) const
 {
diff --git a/src/mem/ruby/system/Sequencer.hh b/src/mem/ruby/system/Sequencer.hh
index a5b2dd544..a336751fd 100644
--- a/src/mem/ruby/system/Sequencer.hh
+++ b/src/mem/ruby/system/Sequencer.hh
@@ -31,7 +31,7 @@
 
 #include <iostream>
 
-#include "mem/gems_common/Map.hh"
+#include "base/hashmap.hh"
 #include "mem/protocol/AccessModeType.hh"
 #include "mem/protocol/CacheRequestType.hh"
 #include "mem/protocol/GenericMachineType.hh"
@@ -85,6 +85,7 @@ class Sequencer : public RubyPort, public Consumer
     void printStats(std::ostream& out) const;
 
     void checkCoherence(const Address& address);
+    void markRemoved();
     void removeRequest(SequencerRequest* request);
 
   private:
@@ -108,8 +109,9 @@ class Sequencer : public RubyPort, public Consumer
     CacheMemory* m_dataCache_ptr;
     CacheMemory* m_instCache_ptr;
 
-    Map<Address, SequencerRequest*> m_writeRequestTable;
-    Map<Address, SequencerRequest*> m_readRequestTable;
+    typedef m5::hash_map<Address, SequencerRequest*> RequestTable;
+    RequestTable m_writeRequestTable;
+    RequestTable m_readRequestTable;
     // Global outstanding request count, across all request tables
     int m_outstanding_count;
     bool m_deadlock_check_scheduled;
diff --git a/src/mem/ruby/system/TBETable.hh b/src/mem/ruby/system/TBETable.hh
index fa1e6c8dd..da33cc9d2 100644
--- a/src/mem/ruby/system/TBETable.hh
+++ b/src/mem/ruby/system/TBETable.hh
@@ -31,7 +31,7 @@
 
 #include <iostream>
 
-#include "mem/gems_common/Map.hh"
+#include "base/hashmap.hh"
 #include "mem/ruby/common/Address.hh"
 #include "mem/ruby/common/Global.hh"
 #include "mem/ruby/profiler/Profiler.hh"
@@ -73,7 +73,7 @@ class TBETable
     TBETable& operator=(const TBETable& obj);
 
     // Data Members (m_prefix)
-    Map<Address, ENTRY> m_map;
+    m5::hash_map<Address, ENTRY> m_map;
 
   private:
     int m_number_of_TBEs;
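The operator<< template added to Sequencer.cc above prints any hash_map whose key and value are themselves printable. A sketch of how such a printer can be exercised, with std::unordered_map standing in for m5::hash_map (element order is unspecified for an unordered container):

// Generic printer for an unordered map, mirroring the template above.
#include <iostream>
#include <string>
#include <unordered_map>

template <class KEY, class VALUE>
std::ostream &
operator<<(std::ostream &out, const std::unordered_map<KEY, VALUE> &map)
{
    typename std::unordered_map<KEY, VALUE>::const_iterator i = map.begin();
    typename std::unordered_map<KEY, VALUE>::const_iterator end = map.end();

    out << "[";
    for (; i != end; ++i)
        out << " " << i->first << "=" << i->second;
    out << " ]";
    return out;
}

int main()
{
    std::unordered_map<std::string, int> outstanding;
    outstanding["0x40"] = 1;
    outstanding["0x80"] = 2;
    std::cout << outstanding << "\n";   // e.g. "[ 0x80=2 0x40=1 ]"
    return 0;
}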
@@ -94,23 +94,23 @@ TBETable<ENTRY>::isPresent(const Address& address) const
 {
     assert(address == line_address(address));
     assert(m_map.size() <= m_number_of_TBEs);
-    return m_map.exist(address);
+    return !!m_map.count(address);
 }
 
 template<class ENTRY>
 inline void
 TBETable<ENTRY>::allocate(const Address& address)
 {
-    assert(isPresent(address) == false);
+    assert(!isPresent(address));
     assert(m_map.size() < m_number_of_TBEs);
-    m_map.add(address, ENTRY());
+    m_map[address] = ENTRY();
 }
 
 template<class ENTRY>
 inline void
 TBETable<ENTRY>::deallocate(const Address& address)
 {
-    assert(isPresent(address) == true);
+    assert(isPresent(address));
     assert(m_map.size() > 0);
     m_map.erase(address);
 }
@@ -120,8 +120,8 @@ template<class ENTRY>
 inline ENTRY&
 TBETable<ENTRY>::lookup(const Address& address)
 {
-    assert(isPresent(address) == true);
-    return m_map.lookup(address);
+    assert(isPresent(address));
+    return m_map.find(address)->second;
 }
 
 // looks an address up in the cache
@@ -129,8 +129,8 @@ template<class ENTRY>
 inline const ENTRY&
 TBETable<ENTRY>::lookup(const Address& address) const
 {
-    assert(isPresent(address) == true);
-    return m_map.lookup(address);
+    assert(isPresent(address));
+    return m_map.find(address)->second;
 }
 
 template<class ENTRY>
diff --git a/src/mem/ruby/system/TimerTable.cc b/src/mem/ruby/system/TimerTable.cc
index 1b5545c96..d5df8fe18 100644
--- a/src/mem/ruby/system/TimerTable.cc
+++ b/src/mem/ruby/system/TimerTable.cc
@@ -41,9 +41,8 @@ TimerTable::TimerTable()
 bool
 TimerTable::isReady() const
 {
-    if (m_map.size() == 0) {
+    if (m_map.empty())
         return false;
-    }
 
     if (!m_next_valid) {
         updateNext();
@@ -69,9 +68,9 @@ TimerTable::set(const Address& address, Time relative_latency)
 {
     assert(address == line_address(address));
     assert(relative_latency > 0);
-    assert(m_map.exist(address) == false);
+    assert(!m_map.count(address));
     Time ready_time = g_eventQueue_ptr->getTime() + relative_latency;
-    m_map.add(address, ready_time);
+    m_map[address] = ready_time;
     assert(m_consumer_ptr != NULL);
     g_eventQueue_ptr->scheduleEventAbsolute(m_consumer_ptr, ready_time);
     m_next_valid = false;
@@ -86,8 +85,8 @@ void
 TimerTable::unset(const Address& address)
 {
     assert(address == line_address(address));
-    assert(m_map.exist(address) == true);
-    m_map.remove(address);
+    assert(m_map.count(address));
+    m_map.erase(address);
 
     // Don't always recalculate the next ready address
     if (address == m_next_address) {
@@ -103,24 +102,24 @@ TimerTable::print(std::ostream& out) const
 void
 TimerTable::updateNext() const
 {
-    if (m_map.size() == 0) {
-        assert(m_next_valid == false);
+    if (m_map.empty()) {
+        assert(!m_next_valid);
         return;
     }
 
-    std::vector<Address> addresses = m_map.keys();
-    m_next_address = addresses[0];
-    m_next_time = m_map.lookup(m_next_address);
-
-    // Search for the minimum time
-    int size = addresses.size();
-    for (int i=1; i<size; i++) {
-        Address maybe_next_address = addresses[i];
-        Time maybe_next_time = m_map.lookup(maybe_next_address);
-        if (maybe_next_time < m_next_time) {
-            m_next_time = maybe_next_time;
-            m_next_address= maybe_next_address;
+    AddressMap::const_iterator i = m_map.begin();
+    AddressMap::const_iterator end = m_map.end();
+
+    m_next_address = i->first;
+    m_next_time = i->second;
+    ++i;
+
+    for (; i != end; ++i) {
+        if (i->second < m_next_time) {
+            m_next_address = i->first;
+            m_next_time = i->second;
         }
     }
+
     m_next_valid = true;
 }
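Because a hash map keeps no ordering, TimerTable::updateNext() finds the next expiring timer with a linear scan that remembers the smallest time seen so far; the earlier Map-based code paid the same O(n) cost, only through a keys() vector. A sketch of the scan under simplified types (nextAddress() and the integer key/time types are illustrative):

// Linear minimum search over an unordered map: start at the first element
// and keep the smallest time seen so far.
#include <cassert>
#include <unordered_map>

typedef std::unordered_map<long, unsigned long long> AddressMap; // addr -> time

long nextAddress(const AddressMap &map)
{
    assert(!map.empty());

    AddressMap::const_iterator i = map.begin();
    AddressMap::const_iterator end = map.end();
    long next_address = i->first;
    unsigned long long next_time = i->second;
    ++i;

    for (; i != end; ++i) {
        if (i->second < next_time) {  // earlier deadline wins
            next_address = i->first;
            next_time = i->second;
        }
    }
    return next_address;
}

int main()
{
    AddressMap timers;
    timers[0x100] = 50;
    timers[0x140] = 20;
    timers[0x180] = 90;
    assert(nextAddress(timers) == 0x140);
    return 0;
}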
diff --git a/src/mem/ruby/system/TimerTable.hh b/src/mem/ruby/system/TimerTable.hh
index 4c1d2afa3..f78d93956 100644
--- a/src/mem/ruby/system/TimerTable.hh
+++ b/src/mem/ruby/system/TimerTable.hh
@@ -33,7 +33,7 @@
 
 #include <iostream>
 #include <string>
 
-#include "mem/gems_common/Map.hh"
+#include "base/hashmap.hh"
 #include "mem/ruby/common/Address.hh"
 #include "mem/ruby/common/Global.hh"
@@ -61,7 +61,7 @@ class TimerTable
     bool isReady() const;
     const Address& readyAddress() const;
-    bool isSet(const Address& address) const { return m_map.exist(address); }
+    bool isSet(const Address& address) const { return !!m_map.count(address); }
     void set(const Address& address, Time relative_latency);
     void unset(const Address& address);
     void print(std::ostream& out) const;
 
@@ -74,7 +74,8 @@ class TimerTable
     TimerTable& operator=(const TimerTable& obj);
 
     // Data Members (m_prefix)
-    Map<Address, Time> m_map;
+    typedef m5::hash_map<Address, Time> AddressMap;
+    AddressMap m_map;
     mutable bool m_next_valid;
     mutable Time m_next_time; // Only valid if m_next_valid is true
     mutable Address m_next_address; // Only valid if m_next_valid is true
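All of these tables are keyed by Address, and an unordered container needs an equality operator and a hash for its key type; how base/hashmap.hh and the Address class arrange that is not shown in this patch. The sketch below only illustrates the general shape with a made-up PacketAddr key and std::unordered_map:

// A custom key type used in an unordered map needs operator== and a hash
// functor (or a std::hash specialization).
#include <cassert>
#include <cstddef>
#include <functional>
#include <unordered_map>

struct PacketAddr
{
    unsigned long long addr;
    bool operator==(const PacketAddr &other) const
    { return addr == other.addr; }
};

struct PacketAddrHash
{
    std::size_t operator()(const PacketAddr &a) const
    { return std::hash<unsigned long long>()(a.addr); }
};

int main()
{
    std::unordered_map<PacketAddr, int, PacketAddrHash> table;
    PacketAddr a = { 0x7c0 };
    table[a] = 42;
    assert(table.count(a) == 1);
    return 0;
}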