Diffstat (limited to 'src/mem/ruby/system')
-rw-r--r--  src/mem/ruby/system/AbstractMemOrCache.hh  42
-rw-r--r--  src/mem/ruby/system/AbstractReplacementPolicy.hh  62
-rw-r--r--  src/mem/ruby/system/CacheMemory.hh  566
-rw-r--r--  src/mem/ruby/system/DirectoryMemory.cc  176
-rw-r--r--  src/mem/ruby/system/DirectoryMemory.hh  94
-rw-r--r--  src/mem/ruby/system/LRUPolicy.hh  65
-rw-r--r--  src/mem/ruby/system/MachineID.hh  89
-rw-r--r--  src/mem/ruby/system/MemoryControl.cc  631
-rw-r--r--  src/mem/ruby/system/MemoryControl.hh  176
-rw-r--r--  src/mem/ruby/system/MemoryNode.cc  37
-rw-r--r--  src/mem/ruby/system/MemoryNode.hh  90
-rw-r--r--  src/mem/ruby/system/NodeID.hh  50
-rw-r--r--  src/mem/ruby/system/NodePersistentTable.cc  193
-rw-r--r--  src/mem/ruby/system/NodePersistentTable.hh  99
-rw-r--r--  src/mem/ruby/system/PerfectCacheMemory.hh  238
-rw-r--r--  src/mem/ruby/system/PersistentArbiter.cc  165
-rw-r--r--  src/mem/ruby/system/PersistentArbiter.hh  107
-rw-r--r--  src/mem/ruby/system/PersistentTable.cc  194
-rw-r--r--  src/mem/ruby/system/PersistentTable.hh  99
-rw-r--r--  src/mem/ruby/system/PseudoLRUPolicy.hh  110
-rw-r--r--  src/mem/ruby/system/SConscript  45
-rw-r--r--  src/mem/ruby/system/Sequencer.cc  960
-rw-r--r--  src/mem/ruby/system/Sequencer.hh  169
-rw-r--r--  src/mem/ruby/system/StoreBuffer.cc  302
-rw-r--r--  src/mem/ruby/system/StoreBuffer.hh  121
-rw-r--r--  src/mem/ruby/system/StoreCache.cc  178
-rw-r--r--  src/mem/ruby/system/StoreCache.hh  85
-rw-r--r--  src/mem/ruby/system/System.cc  270
-rw-r--r--  src/mem/ruby/system/System.hh  138
-rw-r--r--  src/mem/ruby/system/TBETable.hh  165
-rw-r--r--  src/mem/ruby/system/TimerTable.cc  129
-rw-r--r--  src/mem/ruby/system/TimerTable.hh  98
32 files changed, 5943 insertions, 0 deletions
diff --git a/src/mem/ruby/system/AbstractMemOrCache.hh b/src/mem/ruby/system/AbstractMemOrCache.hh
new file mode 100644
index 000000000..8e214c74b
--- /dev/null
+++ b/src/mem/ruby/system/AbstractMemOrCache.hh
@@ -0,0 +1,42 @@
+
+/*
+ * AbstractMemOrCache.hh
+ *
+ * Description: Abstract interface implemented by modules (such as the
+ * memory controller) that accept memory requests from a directory and
+ * hand completed requests back to it.
+ *
+ */
+
+#ifndef ABSTRACT_MEM_OR_CACHE_H
+#define ABSTRACT_MEM_OR_CACHE_H
+
+#include "mem/ruby/common/Global.hh"
+#include "mem/ruby/slicc_interface/AbstractChip.hh"
+#include "mem/ruby/config/RubyConfig.hh"
+#include "mem/ruby/common/Address.hh"
+
+class AbstractMemOrCache {
+public:
+
+ virtual ~AbstractMemOrCache() {};
+ virtual void setConsumer(Consumer* consumer_ptr) = 0;
+ virtual Consumer* getConsumer() = 0;
+
+ virtual void enqueue (const MsgPtr& message, int latency ) = 0;
+ virtual void enqueueMemRef (MemoryNode& memRef) = 0;
+ virtual void dequeue () = 0;
+ virtual const Message* peek () = 0;
+ virtual bool isReady () = 0;
+ virtual MemoryNode peekNode () = 0;
+ virtual bool areNSlotsAvailable (int n) = 0;
+ virtual void printConfig (ostream& out) = 0;
+ virtual void print (ostream& out) const = 0;
+ virtual void setDebug (int debugFlag) = 0;
+
+private:
+
+};
+
+
+#endif
+
diff --git a/src/mem/ruby/system/AbstractReplacementPolicy.hh b/src/mem/ruby/system/AbstractReplacementPolicy.hh
new file mode 100644
index 000000000..e8b504b8a
--- /dev/null
+++ b/src/mem/ruby/system/AbstractReplacementPolicy.hh
@@ -0,0 +1,62 @@
+
+#ifndef ABSTRACTREPLACEMENTPOLICY_H
+#define ABSTRACTREPLACEMENTPOLICY_H
+
+#include "mem/ruby/common/Global.hh"
+
+class AbstractReplacementPolicy {
+
+public:
+
+ AbstractReplacementPolicy(Index num_sets, Index assoc);
+ virtual ~AbstractReplacementPolicy();
+
+ /* touch a block. a.k.a. update timestamp */
+ virtual void touch(Index set, Index way, Time time) = 0;
+
+ /* returns the way to replace */
+ virtual Index getVictim(Index set) const = 0;
+
+ /* get the time of the last access */
+ Time getLastAccess(Index set, Index way);
+
+ protected:
+  unsigned int m_num_sets;   /**< total number of sets */
+  unsigned int m_assoc;      /**< set associativity */
+  Time **m_last_ref_ptr;     /**< timestamp of last reference */
+};
+
+inline
+AbstractReplacementPolicy::AbstractReplacementPolicy(Index num_sets, Index assoc)
+{
+ m_num_sets = num_sets;
+ m_assoc = assoc;
+ m_last_ref_ptr = new Time*[m_num_sets];
+ for(unsigned int i = 0; i < m_num_sets; i++){
+ m_last_ref_ptr[i] = new Time[m_assoc];
+ for(unsigned int j = 0; j < m_assoc; j++){
+ m_last_ref_ptr[i][j] = 0;
+ }
+ }
+}
+
+inline
+AbstractReplacementPolicy::~AbstractReplacementPolicy()
+{
+ if(m_last_ref_ptr != NULL){
+ for(unsigned int i = 0; i < m_num_sets; i++){
+ if(m_last_ref_ptr[i] != NULL){
+ delete[] m_last_ref_ptr[i];
+ }
+ }
+ delete[] m_last_ref_ptr;
+ }
+}
+
+inline
+Time AbstractReplacementPolicy::getLastAccess(Index set, Index way)
+{
+ return m_last_ref_ptr[set][way];
+}
+
+#endif // ABSTRACTREPLACEMENTPOLICY_H
diff --git a/src/mem/ruby/system/CacheMemory.hh b/src/mem/ruby/system/CacheMemory.hh
new file mode 100644
index 000000000..a8306c06f
--- /dev/null
+++ b/src/mem/ruby/system/CacheMemory.hh
@@ -0,0 +1,566 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * CacheMemory.hh
+ *
+ * Description: A templated set-associative cache array with a pluggable
+ * replacement policy (LRU or pseudo-LRU).
+ *
+ * $Id: CacheMemory.h,v 3.7 2004/06/18 20:15:15 beckmann Exp $
+ *
+ */
+
+#ifndef CACHEMEMORY_H
+#define CACHEMEMORY_H
+
+#include "mem/ruby/slicc_interface/AbstractChip.hh"
+#include "mem/ruby/common/Global.hh"
+#include "mem/protocol/AccessPermission.hh"
+#include "mem/ruby/common/Address.hh"
+
+//dsm: PRUNED
+//#include "mem/ruby/recorder/CacheRecorder.hh"
+#include "mem/protocol/CacheRequestType.hh"
+#include "mem/gems_common/Vector.hh"
+#include "mem/ruby/common/DataBlock.hh"
+#include "mem/protocol/MachineType.hh"
+#include "mem/ruby/slicc_interface/RubySlicc_ComponentMapping.hh"
+#include "mem/ruby/system/PseudoLRUPolicy.hh"
+#include "mem/ruby/system/LRUPolicy.hh"
+#include <vector>
+
+template<class ENTRY>
+class CacheMemory {
+public:
+
+ // Constructors
+ CacheMemory(AbstractChip* chip_ptr, int numSetBits, int cacheAssoc, const MachineType machType, const string& description);
+
+ // Destructor
+ ~CacheMemory();
+
+ // Public Methods
+ void printConfig(ostream& out);
+
+ // perform a cache access and see if we hit or not. Return true on a hit.
+ bool tryCacheAccess(const Address& address, CacheRequestType type, DataBlock*& data_ptr);
+
+ // similar to above, but doesn't require full access check
+ bool testCacheAccess(const Address& address, CacheRequestType type, DataBlock*& data_ptr);
+
+ // tests to see if an address is present in the cache
+ bool isTagPresent(const Address& address) const;
+
+  // Returns true if there is either:
+  //   a) a tag match on this address, or
+  //   b) an unused line in the same cache set
+ bool cacheAvail(const Address& address) const;
+
+  // finds an unused entry and sets its tag to the given address
+ void allocate(const Address& address);
+
+ // Explicitly free up this address
+ void deallocate(const Address& address);
+
+ // Returns with the physical address of the conflicting cache line
+ Address cacheProbe(const Address& address) const;
+
+ // looks an address up in the cache
+ ENTRY& lookup(const Address& address);
+ const ENTRY& lookup(const Address& address) const;
+
+ // Get/Set permission of cache block
+ AccessPermission getPermission(const Address& address) const;
+ void changePermission(const Address& address, AccessPermission new_perm);
+
+ // Hook for checkpointing the contents of the cache
+ void recordCacheContents(CacheRecorder& tr) const;
+ void setAsInstructionCache(bool is_icache) { m_is_instruction_cache = is_icache; }
+
+ // Set this address to most recently used
+ void setMRU(const Address& address);
+
+ void getMemoryValue(const Address& addr, char* value,
+ unsigned int size_in_bytes );
+ void setMemoryValue(const Address& addr, char* value,
+ unsigned int size_in_bytes );
+
+ // Print cache contents
+ void print(ostream& out) const;
+ void printData(ostream& out) const;
+
+private:
+ // Private Methods
+
+  // converts an Address to its set index in the cache
+ Index addressToCacheSet(const Address& address) const;
+
+  // Given a cache set: returns the way index of the tag in that set,
+  // or -1 if the tag is not found.
+  int findTagInSet(Index cacheSet, const Address& tag) const;
+ int findTagInSetIgnorePermissions(Index cacheSet, const Address& tag) const;
+
+ // Private copy constructor and assignment operator
+ CacheMemory(const CacheMemory& obj);
+ CacheMemory& operator=(const CacheMemory& obj);
+
+ // Data Members (m_prefix)
+ AbstractChip* m_chip_ptr;
+ MachineType m_machType;
+ string m_description;
+ bool m_is_instruction_cache;
+
+  // The first dimension is the number of sets.
+  // The second dimension is the associativity (ways per set).
+ Vector<Vector<ENTRY> > m_cache;
+
+ AbstractReplacementPolicy *m_replacementPolicy_ptr;
+
+ int m_cache_num_sets;
+ int m_cache_num_set_bits;
+ int m_cache_assoc;
+
+ bool is_locked; // for LL/SC
+};
+
+// Output operator declaration
+//ostream& operator<<(ostream& out, const CacheMemory<ENTRY>& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+template<class ENTRY>
+inline
+ostream& operator<<(ostream& out, const CacheMemory<ENTRY>& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+
+// ****************************************************************
+
+template<class ENTRY>
+inline
+CacheMemory<ENTRY>::CacheMemory(AbstractChip* chip_ptr, int numSetBits,
+ int cacheAssoc, const MachineType machType, const string& description)
+
+{
+ //cout << "CacheMemory constructor numThreads = " << numThreads << endl;
+ m_chip_ptr = chip_ptr;
+ m_machType = machType;
+ m_description = MachineType_to_string(m_machType)+"_"+description;
+ m_cache_num_set_bits = numSetBits;
+ m_cache_num_sets = 1 << numSetBits;
+ m_cache_assoc = cacheAssoc;
+ m_is_instruction_cache = false;
+
+ m_cache.setSize(m_cache_num_sets);
+ if(strcmp(g_REPLACEMENT_POLICY, "PSEDUO_LRU") == 0)
+ m_replacementPolicy_ptr = new PseudoLRUPolicy(m_cache_num_sets, m_cache_assoc);
+ else if(strcmp(g_REPLACEMENT_POLICY, "LRU") == 0)
+ m_replacementPolicy_ptr = new LRUPolicy(m_cache_num_sets, m_cache_assoc);
+ else
+ assert(false);
+ for (int i = 0; i < m_cache_num_sets; i++) {
+ m_cache[i].setSize(m_cache_assoc);
+ for (int j = 0; j < m_cache_assoc; j++) {
+ m_cache[i][j].m_Address.setAddress(0);
+ m_cache[i][j].m_Permission = AccessPermission_NotPresent;
+ }
+ }
+
+
+ // cout << "Before setting trans address list size" << endl;
+ //create a trans address for each SMT thread
+// m_trans_address_list.setSize(numThreads);
+// for(int i=0; i < numThreads; ++i){
+// cout << "Setting list size for list " << i << endl;
+// m_trans_address_list[i].setSize(30);
+// }
+ //cout << "CacheMemory constructor finished" << endl;
+}
+
+template<class ENTRY>
+inline
+CacheMemory<ENTRY>::~CacheMemory()
+{
+ if(m_replacementPolicy_ptr != NULL)
+ delete m_replacementPolicy_ptr;
+}
+
+template<class ENTRY>
+inline
+void CacheMemory<ENTRY>::printConfig(ostream& out)
+{
+ out << "Cache config: " << m_description << endl;
+ out << " cache_associativity: " << m_cache_assoc << endl;
+ out << " num_cache_sets_bits: " << m_cache_num_set_bits << endl;
+ const int cache_num_sets = 1 << m_cache_num_set_bits;
+ out << " num_cache_sets: " << cache_num_sets << endl;
+ out << " cache_set_size_bytes: " << cache_num_sets * RubyConfig::dataBlockBytes() << endl;
+ out << " cache_set_size_Kbytes: "
+ << double(cache_num_sets * RubyConfig::dataBlockBytes()) / (1<<10) << endl;
+ out << " cache_set_size_Mbytes: "
+ << double(cache_num_sets * RubyConfig::dataBlockBytes()) / (1<<20) << endl;
+ out << " cache_size_bytes: "
+ << cache_num_sets * RubyConfig::dataBlockBytes() * m_cache_assoc << endl;
+ out << " cache_size_Kbytes: "
+ << double(cache_num_sets * RubyConfig::dataBlockBytes() * m_cache_assoc) / (1<<10) << endl;
+ out << " cache_size_Mbytes: "
+ << double(cache_num_sets * RubyConfig::dataBlockBytes() * m_cache_assoc) / (1<<20) << endl;
+}
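+
+// Worked example (hypothetical configuration): with num_cache_sets_bits = 12
+// (4096 sets), 4-way associativity, and 64-byte data blocks, printConfig
+// reports cache_size_bytes = 4096 * 64 * 4 = 1048576, i.e. a 1 MB cache.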
+
+// PRIVATE METHODS
+
+// converts an Address to its set index in the cache
+template<class ENTRY>
+inline
+Index CacheMemory<ENTRY>::addressToCacheSet(const Address& address) const
+{
+ assert(address == line_address(address));
+ Index temp = -1;
+ switch (m_machType) {
+ case MACHINETYPE_L1CACHE_ENUM:
+ temp = map_address_to_L1CacheSet(address, m_cache_num_set_bits);
+ break;
+ case MACHINETYPE_L2CACHE_ENUM:
+ temp = map_address_to_L2CacheSet(address, m_cache_num_set_bits);
+ break;
+ default:
+ ERROR_MSG("Don't recognize m_machType");
+ }
+ assert(temp < m_cache_num_sets);
+ assert(temp >= 0);
+ return temp;
+}
+
+// Given a cache set: returns the way index of the tag in that set.
+// returns -1 if the tag is not found.
+template<class ENTRY>
+inline
+int CacheMemory<ENTRY>::findTagInSet(Index cacheSet, const Address& tag) const
+{
+ assert(tag == line_address(tag));
+ // search the set for the tags
+ for (int i=0; i < m_cache_assoc; i++) {
+ if ((m_cache[cacheSet][i].m_Address == tag) &&
+ (m_cache[cacheSet][i].m_Permission != AccessPermission_NotPresent)) {
+ return i;
+ }
+ }
+ return -1; // Not found
+}
+
+// Given a cache set: returns the way index of the tag in that set.
+// returns -1 if the tag is not found.
+template<class ENTRY>
+inline
+int CacheMemory<ENTRY>::findTagInSetIgnorePermissions(Index cacheSet, const Address& tag) const
+{
+ assert(tag == line_address(tag));
+ // search the set for the tags
+ for (int i=0; i < m_cache_assoc; i++) {
+ if (m_cache[cacheSet][i].m_Address == tag)
+ return i;
+ }
+ return -1; // Not found
+}
+
+// PUBLIC METHODS
+template<class ENTRY>
+inline
+bool CacheMemory<ENTRY>::tryCacheAccess(const Address& address,
+ CacheRequestType type,
+ DataBlock*& data_ptr)
+{
+ assert(address == line_address(address));
+ DEBUG_EXPR(CACHE_COMP, HighPrio, address);
+ Index cacheSet = addressToCacheSet(address);
+ int loc = findTagInSet(cacheSet, address);
+ if(loc != -1){ // Do we even have a tag match?
+ ENTRY& entry = m_cache[cacheSet][loc];
+ m_replacementPolicy_ptr->touch(cacheSet, loc, g_eventQueue_ptr->getTime());
+ data_ptr = &(entry.getDataBlk());
+
+ if(entry.m_Permission == AccessPermission_Read_Write) {
+ return true;
+ }
+ if ((entry.m_Permission == AccessPermission_Read_Only) &&
+ (type == CacheRequestType_LD || type == CacheRequestType_IFETCH)) {
+ return true;
+ }
+ // The line must not be accessible
+ }
+ data_ptr = NULL;
+ return false;
+}
+
+template<class ENTRY>
+inline
+bool CacheMemory<ENTRY>::testCacheAccess(const Address& address,
+ CacheRequestType type,
+ DataBlock*& data_ptr)
+{
+ assert(address == line_address(address));
+ DEBUG_EXPR(CACHE_COMP, HighPrio, address);
+ Index cacheSet = addressToCacheSet(address);
+ int loc = findTagInSet(cacheSet, address);
+ if(loc != -1){ // Do we even have a tag match?
+ ENTRY& entry = m_cache[cacheSet][loc];
+ m_replacementPolicy_ptr->touch(cacheSet, loc, g_eventQueue_ptr->getTime());
+ data_ptr = &(entry.getDataBlk());
+
+ return (m_cache[cacheSet][loc].m_Permission != AccessPermission_NotPresent);
+ }
+ data_ptr = NULL;
+ return false;
+}
+
+// tests to see if an address is present in the cache
+template<class ENTRY>
+inline
+bool CacheMemory<ENTRY>::isTagPresent(const Address& address) const
+{
+ assert(address == line_address(address));
+ Index cacheSet = addressToCacheSet(address);
+ int location = findTagInSet(cacheSet, address);
+
+ if (location == -1) {
+ // We didn't find the tag
+ DEBUG_EXPR(CACHE_COMP, LowPrio, address);
+ DEBUG_MSG(CACHE_COMP, LowPrio, "No tag match");
+ return false;
+ }
+ DEBUG_EXPR(CACHE_COMP, LowPrio, address);
+ DEBUG_MSG(CACHE_COMP, LowPrio, "found");
+ return true;
+}
+
+// Returns true if there is either:
+//   a) a tag match on this address, or
+//   b) an unused line in the same cache set
+template<class ENTRY>
+inline
+bool CacheMemory<ENTRY>::cacheAvail(const Address& address) const
+{
+ assert(address == line_address(address));
+
+ Index cacheSet = addressToCacheSet(address);
+
+ for (int i=0; i < m_cache_assoc; i++) {
+ if (m_cache[cacheSet][i].m_Address == address) {
+ // Already in the cache
+ return true;
+ }
+
+ if (m_cache[cacheSet][i].m_Permission == AccessPermission_NotPresent) {
+ // We found an empty entry
+ return true;
+ }
+ }
+ return false;
+}
+
+template<class ENTRY>
+inline
+void CacheMemory<ENTRY>::allocate(const Address& address)
+{
+ assert(address == line_address(address));
+ assert(!isTagPresent(address));
+ assert(cacheAvail(address));
+ DEBUG_EXPR(CACHE_COMP, HighPrio, address);
+
+ // Find the first open slot
+ Index cacheSet = addressToCacheSet(address);
+ for (int i=0; i < m_cache_assoc; i++) {
+ if (m_cache[cacheSet][i].m_Permission == AccessPermission_NotPresent) {
+ m_cache[cacheSet][i] = ENTRY(); // Init entry
+ m_cache[cacheSet][i].m_Address = address;
+ m_cache[cacheSet][i].m_Permission = AccessPermission_Invalid;
+
+ m_replacementPolicy_ptr->touch(cacheSet, i, g_eventQueue_ptr->getTime());
+
+ return;
+ }
+ }
+ ERROR_MSG("Allocate didn't find an available entry");
+}
+
+template<class ENTRY>
+inline
+void CacheMemory<ENTRY>::deallocate(const Address& address)
+{
+ assert(address == line_address(address));
+ assert(isTagPresent(address));
+ DEBUG_EXPR(CACHE_COMP, HighPrio, address);
+ lookup(address).m_Permission = AccessPermission_NotPresent;
+}
+
+// Returns with the physical address of the conflicting cache line
+template<class ENTRY>
+inline
+Address CacheMemory<ENTRY>::cacheProbe(const Address& address) const
+{
+ assert(address == line_address(address));
+ assert(!cacheAvail(address));
+
+ Index cacheSet = addressToCacheSet(address);
+ return m_cache[cacheSet][m_replacementPolicy_ptr->getVictim(cacheSet)].m_Address;
+}
+
+// looks an address up in the cache
+template<class ENTRY>
+inline
+ENTRY& CacheMemory<ENTRY>::lookup(const Address& address)
+{
+ assert(address == line_address(address));
+ Index cacheSet = addressToCacheSet(address);
+ int loc = findTagInSet(cacheSet, address);
+ assert(loc != -1);
+ return m_cache[cacheSet][loc];
+}
+
+// looks an address up in the cache
+template<class ENTRY>
+inline
+const ENTRY& CacheMemory<ENTRY>::lookup(const Address& address) const
+{
+ assert(address == line_address(address));
+ Index cacheSet = addressToCacheSet(address);
+ int loc = findTagInSet(cacheSet, address);
+ assert(loc != -1);
+ return m_cache[cacheSet][loc];
+}
+
+template<class ENTRY>
+inline
+AccessPermission CacheMemory<ENTRY>::getPermission(const Address& address) const
+{
+ assert(address == line_address(address));
+ return lookup(address).m_Permission;
+}
+
+template<class ENTRY>
+inline
+void CacheMemory<ENTRY>::changePermission(const Address& address, AccessPermission new_perm)
+{
+ assert(address == line_address(address));
+ lookup(address).m_Permission = new_perm;
+ assert(getPermission(address) == new_perm);
+}
+
+// Sets the most recently used bit for a cache block
+template<class ENTRY>
+inline
+void CacheMemory<ENTRY>::setMRU(const Address& address)
+{
+ Index cacheSet;
+
+ cacheSet = addressToCacheSet(address);
+ m_replacementPolicy_ptr->touch(cacheSet,
+ findTagInSet(cacheSet, address),
+ g_eventQueue_ptr->getTime());
+}
+
+template<class ENTRY>
+inline
+void CacheMemory<ENTRY>::recordCacheContents(CacheRecorder& tr) const
+{
+//dsm: Uses CacheRecorder, PRUNED
+assert(false);
+
+/* for (int i = 0; i < m_cache_num_sets; i++) {
+ for (int j = 0; j < m_cache_assoc; j++) {
+ AccessPermission perm = m_cache[i][j].m_Permission;
+ CacheRequestType request_type = CacheRequestType_NULL;
+ if (perm == AccessPermission_Read_Only) {
+ if (m_is_instruction_cache) {
+ request_type = CacheRequestType_IFETCH;
+ } else {
+ request_type = CacheRequestType_LD;
+ }
+ } else if (perm == AccessPermission_Read_Write) {
+ request_type = CacheRequestType_ST;
+ }
+
+ if (request_type != CacheRequestType_NULL) {
+ tr.addRecord(m_chip_ptr->getID(), m_cache[i][j].m_Address,
+ Address(0), request_type, m_replacementPolicy_ptr->getLastAccess(i, j));
+ }
+ }
+ }*/
+}
+
+template<class ENTRY>
+inline
+void CacheMemory<ENTRY>::print(ostream& out) const
+{
+ out << "Cache dump: " << m_description << endl;
+ for (int i = 0; i < m_cache_num_sets; i++) {
+ for (int j = 0; j < m_cache_assoc; j++) {
+ out << " Index: " << i
+ << " way: " << j
+ << " entry: " << m_cache[i][j] << endl;
+ }
+ }
+}
+
+template<class ENTRY>
+inline
+void CacheMemory<ENTRY>::printData(ostream& out) const
+{
+ out << "printData() not supported" << endl;
+}
+
+template<class ENTRY>
+void CacheMemory<ENTRY>::getMemoryValue(const Address& addr, char* value,
+ unsigned int size_in_bytes ){
+  ENTRY& entry = lookup(line_address(addr)); // reference: avoid copying the entry
+ unsigned int startByte = addr.getAddress() - line_address(addr).getAddress();
+ for(unsigned int i=0; i<size_in_bytes; ++i){
+ value[i] = entry.m_DataBlk.getByte(i + startByte);
+ }
+}
+
+template<class ENTRY>
+void CacheMemory<ENTRY>::setMemoryValue(const Address& addr, char* value,
+ unsigned int size_in_bytes ){
+ ENTRY& entry = lookup(line_address(addr));
+ unsigned int startByte = addr.getAddress() - line_address(addr).getAddress();
+ assert(size_in_bytes > 0);
+ for(unsigned int i=0; i<size_in_bytes; ++i){
+ entry.m_DataBlk.setByte(i + startByte, value[i]);
+ }
+}
+
+#endif //CACHEMEMORY_H
+
diff --git a/src/mem/ruby/system/DirectoryMemory.cc b/src/mem/ruby/system/DirectoryMemory.cc
new file mode 100644
index 000000000..a5f1bcddc
--- /dev/null
+++ b/src/mem/ruby/system/DirectoryMemory.cc
@@ -0,0 +1,176 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * DirectoryMemory.C
+ *
+ * Description: See DirectoryMemory.h
+ *
+ * $Id$
+ *
+ */
+
+#include "mem/ruby/system/System.hh"
+#include "mem/ruby/common/Driver.hh"
+#include "mem/ruby/system/DirectoryMemory.hh"
+#include "mem/ruby/slicc_interface/RubySlicc_Util.hh"
+#include "mem/ruby/config/RubyConfig.hh"
+#include "mem/protocol/Chip.hh"
+
+DirectoryMemory::DirectoryMemory(Chip* chip_ptr, int version)
+{
+ m_chip_ptr = chip_ptr;
+ m_version = version;
+ // THIS DOESN'T SEEM TO WORK -- MRM
+ // m_size = RubyConfig::memoryModuleBlocks()/RubyConfig::numberOfDirectory();
+ m_size = RubyConfig::memoryModuleBlocks();
+ assert(m_size > 0);
+ /*********************************************************************
+ // allocates an array of directory entry pointers & sets them to NULL
+ m_entries = new Directory_Entry*[m_size];
+ if (m_entries == NULL) {
+ ERROR_MSG("Directory Memory: unable to allocate memory.");
+ }
+
+ for (int i=0; i < m_size; i++) {
+ m_entries[i] = NULL;
+ }
+ */////////////////////////////////////////////////////////////////////
+}
+
+DirectoryMemory::~DirectoryMemory()
+{
+ /*********************************************************************
+ // free up all the directory entries
+ for (int i=0; i < m_size; i++) {
+ if (m_entries[i] != NULL) {
+ delete m_entries[i];
+ m_entries[i] = NULL;
+ }
+ }
+
+ // free up the array of directory entries
+ delete[] m_entries;
+ *//////////////////////////////////////////////////////////////////////
+ m_entries.clear();
+}
+
+// Static method
+void DirectoryMemory::printConfig(ostream& out)
+{
+ out << "Memory config:" << endl;
+ out << " memory_bits: " << RubyConfig::memorySizeBits() << endl;
+ out << " memory_size_bytes: " << RubyConfig::memorySizeBytes() << endl;
+ out << " memory_size_Kbytes: " << double(RubyConfig::memorySizeBytes()) / (1<<10) << endl;
+ out << " memory_size_Mbytes: " << double(RubyConfig::memorySizeBytes()) / (1<<20) << endl;
+ out << " memory_size_Gbytes: " << double(RubyConfig::memorySizeBytes()) / (1<<30) << endl;
+
+ out << " module_bits: " << RubyConfig::memoryModuleBits() << endl;
+ out << " module_size_lines: " << RubyConfig::memoryModuleBlocks() << endl;
+ out << " module_size_bytes: " << RubyConfig::memoryModuleBlocks() * RubyConfig::dataBlockBytes() << endl;
+ out << " module_size_Kbytes: " << double(RubyConfig::memoryModuleBlocks() * RubyConfig::dataBlockBytes()) / (1<<10) << endl;
+ out << " module_size_Mbytes: " << double(RubyConfig::memoryModuleBlocks() * RubyConfig::dataBlockBytes()) / (1<<20) << endl;
+}
+
+// Public method
+bool DirectoryMemory::isPresent(PhysAddress address)
+{
+ return (map_Address_to_DirectoryNode(address) == m_chip_ptr->getID()*RubyConfig::numberOfDirectoryPerChip()+m_version);
+}
+
+void DirectoryMemory::readPhysMem(uint64 address, int size, void * data)
+{
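+  // dummy: this model does not read from a backing physical memory store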
+}
+
+Directory_Entry& DirectoryMemory::lookup(PhysAddress address)
+{
+ assert(isPresent(address));
+ Index index = address.memoryModuleIndex();
+
+  if (index < 0 || index >= m_size) { // index == m_size is also out of range
+ WARN_EXPR(m_chip_ptr->getID());
+ WARN_EXPR(address.getAddress());
+ WARN_EXPR(index);
+ WARN_EXPR(m_size);
+ ERROR_MSG("Directory Memory Assertion: accessing memory out of range.");
+ }
+
+  map<Index, Directory_Entry*>::iterator iter = m_entries.find(index);
+  // do not dereference the iterator before checking it against end()
+  Directory_Entry* entry = (iter == m_entries.end()) ? NULL : iter->second;
+
+ // allocate the directory entry on demand.
+ if (iter == m_entries.end()) {
+ entry = new Directory_Entry;
+
+ // entry->getProcOwner() = m_chip_ptr->getID(); // FIXME - This should not be hard coded
+ // entry->getDirOwner() = true; // FIXME - This should not be hard-coded
+
+    // load the data from physical memory when first initializing
+    physical_address_t physAddr = address.getAddress();
+    // zero-initialize: readPhysMem() is currently a dummy and writes nothing
+    int8 * dataArray = (int8 *) calloc(RubyConfig::dataBlockBytes(), sizeof(int8));
+    readPhysMem(physAddr, RubyConfig::dataBlockBytes(), dataArray);
+
+    for(int j=0; j < RubyConfig::dataBlockBytes(); j++) {
+      entry->getDataBlk().setByte(j, dataArray[j]);
+    }
+    free(dataArray); // dataArray was only a staging buffer
+    DEBUG_EXPR(NODE_COMP, MedPrio, entry->getDataBlk());
+ // store entry to the table
+ m_entries.insert(make_pair(index, entry));
+ }
+ return (*entry);
+}
+
+/*
+void DirectoryMemory::invalidateBlock(PhysAddress address)
+{
+ assert(isPresent(address));
+
+ Index index = address.memoryModuleIndex();
+
+ if (index < 0 || index > m_size) {
+ ERROR_MSG("Directory Memory Assertion: accessing memory out of range.");
+ }
+
+ if(m_entries[index] != NULL){
+ delete m_entries[index];
+ m_entries[index] = NULL;
+ }
+
+}
+*/
+
+void DirectoryMemory::print(ostream& out) const
+{
+ out << "Directory dump: " << endl;
+ for(map<Index, Directory_Entry*>::const_iterator it = m_entries.begin(); it != m_entries.end(); ++it) {
+ out << it->first << ": ";
+ out << *(it->second) << endl;
+ }
+}
+
diff --git a/src/mem/ruby/system/DirectoryMemory.hh b/src/mem/ruby/system/DirectoryMemory.hh
new file mode 100644
index 000000000..3307e77a7
--- /dev/null
+++ b/src/mem/ruby/system/DirectoryMemory.hh
@@ -0,0 +1,94 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * DirectoryMemory.hh
+ *
+ * Description: Holds the directory entries for the memory blocks owned by
+ * this module; entries are allocated on demand and kept in a map.
+ *
+ * $Id$
+ *
+ */
+
+#ifndef DIRECTORYMEMORY_H
+#define DIRECTORYMEMORY_H
+
+#include "mem/ruby/common/Global.hh"
+#include "mem/ruby/common/Address.hh"
+#include "mem/protocol/Directory_Entry.hh"
+#include <map>
+
+class Chip;
+
+class DirectoryMemory {
+public:
+ // Constructors
+ DirectoryMemory(Chip* chip_ptr, int version);
+
+ // Destructor
+ ~DirectoryMemory();
+
+ // Public Methods
+ static void printConfig(ostream& out);
+ bool isPresent(PhysAddress address);
+ // dummy function
+ void readPhysMem(uint64 address, int size, void * data);
+ Directory_Entry& lookup(PhysAddress address);
+
+ void print(ostream& out) const;
+
+private:
+ // Private Methods
+
+ // Private copy constructor and assignment operator
+ DirectoryMemory(const DirectoryMemory& obj);
+ DirectoryMemory& operator=(const DirectoryMemory& obj);
+
+ // Data Members (m_ prefix)
+ map<Index, Directory_Entry*> m_entries;
+ Chip* m_chip_ptr;
+ int m_size; // # of memory module blocks for this directory
+ int m_version;
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const DirectoryMemory& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const DirectoryMemory& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //DIRECTORYMEMORY_H
diff --git a/src/mem/ruby/system/LRUPolicy.hh b/src/mem/ruby/system/LRUPolicy.hh
new file mode 100644
index 000000000..184eb876d
--- /dev/null
+++ b/src/mem/ruby/system/LRUPolicy.hh
@@ -0,0 +1,65 @@
+
+#ifndef LRUPOLICY_H
+#define LRUPOLICY_H
+
+#include "mem/ruby/system/AbstractReplacementPolicy.hh"
+
+/* Simple true LRU replacement policy */
+
+class LRUPolicy : public AbstractReplacementPolicy {
+ public:
+
+ LRUPolicy(Index num_sets, Index assoc);
+ ~LRUPolicy();
+
+ void touch(Index set, Index way, Time time);
+ Index getVictim(Index set) const;
+};
+
+inline
+LRUPolicy::LRUPolicy(Index num_sets, Index assoc)
+ : AbstractReplacementPolicy(num_sets, assoc)
+{
+}
+
+inline
+LRUPolicy::~LRUPolicy()
+{
+}
+
+inline
+void LRUPolicy::touch(Index set, Index way, Time time){
+  assert(way >= 0 && way < m_assoc);
+  assert(set >= 0 && set < m_num_sets);
+
+  m_last_ref_ptr[set][way] = time;
+}
+
+inline
+Index LRUPolicy::getVictim(Index set) const {
+ // assert(m_assoc != 0);
+ Time time, smallest_time;
+ Index smallest_index;
+
+ smallest_index = 0;
+ smallest_time = m_last_ref_ptr[set][0];
+
+ for (unsigned int i=0; i < m_assoc; i++) {
+ time = m_last_ref_ptr[set][i];
+ //assert(m_cache[cacheSet][i].m_Permission != AccessPermission_NotPresent);
+
+ if (time < smallest_time){
+ smallest_index = i;
+ smallest_time = time;
+ }
+ }
+
+ // DEBUG_EXPR(CACHE_COMP, MedPrio, cacheSet);
+ // DEBUG_EXPR(CACHE_COMP, MedPrio, smallest_index);
+ // DEBUG_EXPR(CACHE_COMP, MedPrio, m_cache[cacheSet][smallest_index]);
+ // DEBUG_EXPR(CACHE_COMP, MedPrio, *this);
+
+ return smallest_index;
+}
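+
+// Example: with per-way timestamps {5, 2, 9, 7} in a 4-way set,
+// getVictim returns way 1 (timestamp 2, the least recently touched way).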
+
+#endif // LRUPOLICY_H
diff --git a/src/mem/ruby/system/MachineID.hh b/src/mem/ruby/system/MachineID.hh
new file mode 100644
index 000000000..4515fe7fa
--- /dev/null
+++ b/src/mem/ruby/system/MachineID.hh
@@ -0,0 +1,89 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * MachineID.hh
+ *
+ * Description: Identifies a machine (component type plus instance number)
+ * in the system.
+ *
+ * $Id$
+ *
+ */
+
+#ifndef MACHINEID_H
+#define MACHINEID_H
+
+#include "mem/ruby/common/Global.hh"
+#include "mem/gems_common/util.hh"
+#include "mem/protocol/MachineType.hh"
+
+struct MachineID {
+ MachineType type;
+ int num; // range: 0 ... number of this machine's components in the system - 1
+};
+
+extern inline
+string MachineIDToString (MachineID machine) {
+ return MachineType_to_string(machine.type)+"_"+int_to_string(machine.num);
+}
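+
+// Example (assuming the generated protocol defines MachineType_L1Cache):
+// MachineID id = {MachineType_L1Cache, 3}; MachineIDToString(id) yields
+// "L1Cache_3", while operator<< below prints "L1Cache-3".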
+
+extern inline
+bool operator==(const MachineID & obj1, const MachineID & obj2)
+{
+ return (obj1.type == obj2.type && obj1.num == obj2.num);
+}
+
+extern inline
+bool operator!=(const MachineID & obj1, const MachineID & obj2)
+{
+ return (obj1.type != obj2.type || obj1.num != obj2.num);
+}
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const MachineID& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const MachineID& obj)
+{
+ if ((obj.type < MachineType_NUM) && (obj.type >= MachineType_FIRST)) {
+ out << MachineType_to_string(obj.type);
+ } else {
+ out << "NULL";
+ }
+ out << "-";
+ out << obj.num;
+ out << flush;
+ return out;
+}
+
+
+#endif //MACHINEID_H
diff --git a/src/mem/ruby/system/MemoryControl.cc b/src/mem/ruby/system/MemoryControl.cc
new file mode 100644
index 000000000..2f93d98d5
--- /dev/null
+++ b/src/mem/ruby/system/MemoryControl.cc
@@ -0,0 +1,631 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * MemoryControl.C
+ *
+ * Description: This module simulates a basic DDR-style memory controller
+ * (and can easily be extended to do FB-DIMM as well).
+ *
+ * This module models a single channel, connected to any number of
+ * DIMMs with any number of ranks of DRAMs each. If you want multiple
+ * address/data channels, you need to instantiate multiple copies of
+ * this module.
+ *
+ * Each memory request is placed in a queue associated with a specific
+ * memory bank. This queue is of finite size; if the queue is full
+ * the request will back up in an (infinite) common queue and will
+ * effectively throttle the whole system. This sort of behavior is
+ * intended to be closer to real system behavior than if we had an
+ * infinite queue on each bank. If you want the latter, just make
+ * the bank queues unreasonably large.
+ *
+ * The head item on a bank queue is issued when all of the
+ * following are true:
+ * the bank is available
+ * the address path to the DIMM is available
+ * the data path to or from the DIMM is available
+ *
+ * Note that we are not concerned about fixed offsets in time. The bank
+ * will not be used at the same moment as the address path, but since
+ * there is no queue in the DIMM or the DRAM it will be used at a constant
+ * number of cycles later, so it is treated as if it is used at the same
+ * time.
+ *
+ * We are assuming closed bank policy; that is, we automatically close
+ * each bank after a single read or write. Adding an option for open
+ * bank policy is for future work.
+ *
+ * We are assuming "posted CAS"; that is, we send the READ or WRITE
+ * immediately after the ACTIVATE. This makes scheduling the address
+ * bus trivial; we always schedule a fixed set of cycles. For DDR-400,
+ * this is a set of two cycles; for some configurations such as
+ * DDR-800 the parameter tRRD forces this to be set to three cycles.
+ *
+ * We assume a four-bit-time transfer on the data wires. This is
+ * the minimum burst length for DDR-2. This would correspond
+ * to (for example) a memory where each DIMM is 72 bits wide
+ * and DIMMs are ganged in pairs to deliver 64 bytes at a shot.
+ * This gives us the same occupancy on the data wires as on the
+ * address wires (for the two-address-cycle case).
+ *
+ * The only non-trivial scheduling problem is the data wires.
+ * A write will use the wires earlier in the operation than a read
+ * will; typically one cycle earlier as seen at the DRAM, but earlier
+ * by a worst-case round-trip wire delay when seen at the memory controller.
+ * So, while reads from one rank can be scheduled back-to-back
+ * every two cycles, and writes (to any rank) scheduled every two cycles,
+ * when a read is followed by a write we need to insert a bubble.
+ * Furthermore, consecutive reads from two different ranks may need
+ * to insert a bubble due to skew between when one DRAM stops driving the
+ * wires and when the other one starts. (These bubbles are parameters.)
+ *
+ * This means that when some number of reads and writes are at the
+ * heads of their queues, reads could starve writes, and/or reads
+ * to the same rank could starve out other requests, since the others
+ * would never see the data bus ready.
+ * For this reason, we have implemented an anti-starvation feature.
+ * A group of requests is marked "old", and a counter is incremented
+ * each cycle as long as any request from that batch has not issued.
+ * If the counter reaches twice the bank busy time, we hold off any
+ * newer requests until all of the "old" requests have issued.
+ *
+ * We also model tFAW. This is an obscure DRAM parameter that says
+ * that no more than four activate requests can happen within a window
+ * of a certain size. For most configurations this does not come into play,
+ * or has very little effect, but it could be used to throttle the power
+ * consumption of the DRAM. In this implementation (unlike in a DRAM
+ * data sheet) tFAW is measured in memory bus cycles; i.e. if tFAW = 16
+ * then no more than four activates may happen within any 16 cycle window.
+ * Refreshes are included in the activates.
+ *
+ *
+ * $Id: $
+ *
+ */
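+
+// Illustrative timeline of the data-bus rules above (a sketch, assuming
+// m_basic_bus_busy_time = 2):
+//   R(rank0), R(rank0)                    -> back-to-back, one every 2 cycles
+//   R(rank0), <rank_rank_delay>, R(rank1) -> bubble between different ranks
+//   R(any),   <read_write_delay>, W(any)  -> bubble between a read and a write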
+
+#include "mem/ruby/common/Global.hh"
+#include "mem/gems_common/Map.hh"
+#include "mem/ruby/common/Address.hh"
+#include "mem/ruby/profiler/Profiler.hh"
+#include "mem/ruby/slicc_interface/AbstractChip.hh"
+#include "mem/ruby/system/System.hh"
+#include "mem/ruby/slicc_interface/RubySlicc_ComponentMapping.hh"
+#include "mem/ruby/slicc_interface/NetworkMessage.hh"
+#include "mem/ruby/network/Network.hh"
+
+#include "mem/ruby/common/Consumer.hh"
+
+#include "mem/ruby/system/MemoryControl.hh"
+
+#include <list>
+
+class Consumer;
+
+// Value to reset watchdog timer to.
+// If we're idle for this many memory control cycles,
+// shut down our clock (our rescheduling of ourselves).
+// Refresh shuts down as well.
+// When we restart, we'll be in a different phase
+// with respect to ruby cycles, so this introduces
+// a slight inaccuracy. But it is necessary or the
+// ruby tester never terminates because the event
+// queue is never empty.
+#define IDLECOUNT_MAX_VALUE 1000
+
+// Output operator definition
+
+ostream& operator<<(ostream& out, const MemoryControl& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+
+// ****************************************************************
+
+// CONSTRUCTOR
+
+MemoryControl::MemoryControl (AbstractChip* chip_ptr, int version) {
+ m_chip_ptr = chip_ptr;
+ m_version = version;
+ m_msg_counter = 0;
+
+ m_debug = 0;
+ //if (m_version == 0) m_debug = 1;
+
+ m_mem_bus_cycle_multiplier = RubyConfig::memBusCycleMultiplier();
+ m_banks_per_rank = RubyConfig::banksPerRank();
+ m_ranks_per_dimm = RubyConfig::ranksPerDimm();
+ m_dimms_per_channel = RubyConfig::dimmsPerChannel();
+ m_bank_bit_0 = RubyConfig::bankBit0();
+ m_rank_bit_0 = RubyConfig::rankBit0();
+ m_dimm_bit_0 = RubyConfig::dimmBit0();
+ m_bank_queue_size = RubyConfig::bankQueueSize();
+ m_bank_busy_time = RubyConfig::bankBusyTime();
+ m_rank_rank_delay = RubyConfig::rankRankDelay();
+ m_read_write_delay = RubyConfig::readWriteDelay();
+ m_basic_bus_busy_time = RubyConfig::basicBusBusyTime();
+ m_mem_ctl_latency = RubyConfig::memCtlLatency();
+ m_refresh_period = RubyConfig::refreshPeriod();
+ m_memRandomArbitrate = RubyConfig::memRandomArbitrate();
+ m_tFaw = RubyConfig::tFaw();
+ m_memFixedDelay = RubyConfig::memFixedDelay();
+
+ assert(m_tFaw <= 62); // must fit in a uint64 shift register
+
+ m_total_banks = m_banks_per_rank * m_ranks_per_dimm * m_dimms_per_channel;
+ m_total_ranks = m_ranks_per_dimm * m_dimms_per_channel;
+ m_refresh_period_system = m_refresh_period / m_total_banks;
+
+ m_bankQueues = new list<MemoryNode> [m_total_banks];
+ assert(m_bankQueues);
+
+ m_bankBusyCounter = new int [m_total_banks];
+ assert(m_bankBusyCounter);
+
+ m_oldRequest = new int [m_total_banks];
+ assert(m_oldRequest);
+
+ for (int i=0; i<m_total_banks; i++) {
+ m_bankBusyCounter[i] = 0;
+ m_oldRequest[i] = 0;
+ }
+
+ m_busBusyCounter_Basic = 0;
+ m_busBusyCounter_Write = 0;
+ m_busBusyCounter_ReadNewRank = 0;
+ m_busBusy_WhichRank = 0;
+
+ m_roundRobin = 0;
+ m_refresh_count = 1;
+ m_need_refresh = 0;
+ m_refresh_bank = 0;
+ m_awakened = 0;
+ m_idleCount = 0;
+ m_ageCounter = 0;
+
+ // Each tfaw shift register keeps a moving bit pattern
+ // which shows when recent activates have occurred.
+ // m_tfaw_count keeps track of how many 1 bits are set
+ // in each shift register. When m_tfaw_count is >= 4,
+ // new activates are not allowed.
+ m_tfaw_shift = new uint64 [m_total_ranks];
+ m_tfaw_count = new int [m_total_ranks];
+ for (int i=0; i<m_total_ranks; i++) {
+ m_tfaw_shift[i] = 0;
+ m_tfaw_count[i] = 0;
+ }
+}
+
+
+// DESTRUCTOR
+
+MemoryControl::~MemoryControl () {
+ delete [] m_bankQueues;
+ delete [] m_bankBusyCounter;
+ delete [] m_oldRequest;
+}
+
+
+// PUBLIC METHODS
+
+// enqueue new request from directory
+
+void MemoryControl::enqueue (const MsgPtr& message, int latency) {
+ Time current_time = g_eventQueue_ptr->getTime();
+ Time arrival_time = current_time + latency;
+  const MemoryMsg* memMess = dynamic_cast<const MemoryMsg*>(message.ref());
+  assert(memMess != NULL); // only MemoryMsgs are expected from the directory
+ physical_address_t addr = memMess->getAddress().getAddress();
+ MemoryRequestType type = memMess->getType();
+ bool is_mem_read = (type == MemoryRequestType_MEMORY_READ);
+ MemoryNode thisReq(arrival_time, message, addr, is_mem_read, !is_mem_read);
+ enqueueMemRef(thisReq);
+}
+
+// Alternate entry point used when we already have a MemoryNode structure built.
+
+void MemoryControl::enqueueMemRef (MemoryNode& memRef) {
+ m_msg_counter++;
+ memRef.m_msg_counter = m_msg_counter;
+ Time arrival_time = memRef.m_time;
+ uint64 at = arrival_time;
+ bool is_mem_read = memRef.m_is_mem_read;
+ physical_address_t addr = memRef.m_addr;
+ int bank = getBank(addr);
+ if (m_debug) {
+ printf("New memory request%7d: 0x%08llx %c arrived at %10lld ", m_msg_counter, addr, is_mem_read? 'R':'W', at);
+ printf("bank =%3x\n", bank);
+ }
+ g_system_ptr->getProfiler()->profileMemReq(bank);
+ m_input_queue.push_back(memRef);
+ if (!m_awakened) {
+ g_eventQueue_ptr->scheduleEvent(this, 1);
+ m_awakened = 1;
+ }
+}
+
+
+
+// dequeue, peek, and isReady are used to transfer completed requests
+// back to the directory
+
+void MemoryControl::dequeue () {
+ assert(isReady());
+ m_response_queue.pop_front();
+}
+
+
+const Message* MemoryControl::peek () {
+ MemoryNode node = peekNode();
+ Message* msg_ptr = node.m_msgptr.ref();
+ assert(msg_ptr != NULL);
+ return msg_ptr;
+}
+
+
+MemoryNode MemoryControl::peekNode () {
+ assert(isReady());
+ MemoryNode req = m_response_queue.front();
+ uint64 returnTime = req.m_time;
+ if (m_debug) {
+ printf("Old memory request%7d: 0x%08llx %c peeked at %10lld\n",
+ req.m_msg_counter, req.m_addr, req.m_is_mem_read? 'R':'W', returnTime);
+ }
+ return req;
+}
+
+
+bool MemoryControl::isReady () {
+ return ((!m_response_queue.empty()) &&
+ (m_response_queue.front().m_time <= g_eventQueue_ptr->getTime()));
+}
+
+void MemoryControl::setConsumer (Consumer* consumer_ptr) {
+ m_consumer_ptr = consumer_ptr;
+}
+
+void MemoryControl::print (ostream& out) const {
+}
+
+
+void MemoryControl::printConfig (ostream& out) {
+ out << "Memory Control " << m_version << ":" << endl;
+ out << " Ruby cycles per memory cycle: " << m_mem_bus_cycle_multiplier << endl;
+ out << " Basic read latency: " << m_mem_ctl_latency << endl;
+ if (m_memFixedDelay) {
+ out << " Fixed Latency mode: Added cycles = " << m_memFixedDelay << endl;
+ } else {
+ out << " Bank busy time: " << BANK_BUSY_TIME << " memory cycles" << endl;
+ out << " Memory channel busy time: " << m_basic_bus_busy_time << endl;
+ out << " Dead cycles between reads to different ranks: " << m_rank_rank_delay << endl;
+ out << " Dead cycle between a read and a write: " << m_read_write_delay << endl;
+ out << " tFaw (four-activate) window: " << m_tFaw << endl;
+ }
+ out << " Banks per rank: " << m_banks_per_rank << endl;
+ out << " Ranks per DIMM: " << m_ranks_per_dimm << endl;
+ out << " DIMMs per channel: " << m_dimms_per_channel << endl;
+ out << " LSB of bank field in address: " << m_bank_bit_0 << endl;
+ out << " LSB of rank field in address: " << m_rank_bit_0 << endl;
+ out << " LSB of DIMM field in address: " << m_dimm_bit_0 << endl;
+ out << " Max size of each bank queue: " << m_bank_queue_size << endl;
+ out << " Refresh period (within one bank): " << m_refresh_period << endl;
+ out << " Arbitration randomness: " << m_memRandomArbitrate << endl;
+}
+
+
+void MemoryControl::setDebug (int debugFlag) {
+ m_debug = debugFlag;
+}
+
+
+// ****************************************************************
+
+// PRIVATE METHODS
+
+// Queue up a completed request to send back to directory
+
+void MemoryControl::enqueueToDirectory (MemoryNode req, int latency) {
+ Time arrival_time = g_eventQueue_ptr->getTime()
+ + (latency * m_mem_bus_cycle_multiplier);
+ req.m_time = arrival_time;
+ m_response_queue.push_back(req);
+
+ // schedule the wake up
+ g_eventQueue_ptr->scheduleEventAbsolute(m_consumer_ptr, arrival_time);
+}
+
+
+
+// getBank returns an integer that is unique for each
+// bank across this memory controller.
+
+int MemoryControl::getBank (physical_address_t addr) {
+ int dimm = (addr >> m_dimm_bit_0) & (m_dimms_per_channel - 1);
+ int rank = (addr >> m_rank_bit_0) & (m_ranks_per_dimm - 1);
+ int bank = (addr >> m_bank_bit_0) & (m_banks_per_rank - 1);
+ return (dimm * m_ranks_per_dimm * m_banks_per_rank)
+ + (rank * m_banks_per_rank)
+ + bank;
+}
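+
+// Worked example (hypothetical field layout): with m_bank_bit_0 = 6,
+// m_banks_per_rank = 8, m_rank_bit_0 = 9, m_ranks_per_dimm = 2,
+// m_dimm_bit_0 = 10, and m_dimms_per_channel = 2, address 0x2C0 decodes
+// to bank 3, rank 1, dimm 0: unique bank = (0 * 2 * 8) + (1 * 8) + 3 = 11.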
+
+// getRank returns an integer that is unique for each rank
+// and independent of individual bank.
+
+int MemoryControl::getRank (int bank) {
+ int rank = (bank / m_banks_per_rank);
+ assert (rank < (m_ranks_per_dimm * m_dimms_per_channel));
+ return rank;
+}
+
+
+// queueReady determines if the head item in a bank queue
+// can be issued this cycle
+
+bool MemoryControl::queueReady (int bank) {
+ if ((m_bankBusyCounter[bank] > 0) && !m_memFixedDelay) {
+ g_system_ptr->getProfiler()->profileMemBankBusy();
+ //if (m_debug) printf(" bank %x busy %d\n", bank, m_bankBusyCounter[bank]);
+ return false;
+ }
+ if (m_memRandomArbitrate >= 2) {
+ if ((random() % 100) < m_memRandomArbitrate) {
+ g_system_ptr->getProfiler()->profileMemRandBusy();
+ return false;
+ }
+ }
+ if (m_memFixedDelay) return true;
+ if ((m_ageCounter > (2 * m_bank_busy_time)) && !m_oldRequest[bank]) {
+ g_system_ptr->getProfiler()->profileMemNotOld();
+ return false;
+ }
+ if (m_busBusyCounter_Basic == m_basic_bus_busy_time) {
+ // Another bank must have issued this same cycle.
+ // For profiling, we count this as an arb wait rather than
+ // a bus wait. This is a little inaccurate since it MIGHT
+ // have also been blocked waiting for a read-write or a
+ // read-read instead, but it's pretty close.
+ g_system_ptr->getProfiler()->profileMemArbWait(1);
+ return false;
+ }
+ if (m_busBusyCounter_Basic > 0) {
+ g_system_ptr->getProfiler()->profileMemBusBusy();
+ return false;
+ }
+ int rank = getRank(bank);
+ if (m_tfaw_count[rank] >= ACTIVATE_PER_TFAW) {
+ g_system_ptr->getProfiler()->profileMemTfawBusy();
+ return false;
+ }
+ bool write = !m_bankQueues[bank].front().m_is_mem_read;
+ if (write && (m_busBusyCounter_Write > 0)) {
+ g_system_ptr->getProfiler()->profileMemReadWriteBusy();
+ return false;
+ }
+ if (!write && (rank != m_busBusy_WhichRank)
+ && (m_busBusyCounter_ReadNewRank > 0)) {
+ g_system_ptr->getProfiler()->profileMemDataBusBusy();
+ return false;
+ }
+ return true;
+}
+
+
+// issueRefresh checks to see if this bank has a refresh scheduled
+// and, if so, does the refresh and returns true
+
+bool MemoryControl::issueRefresh (int bank) {
+ if (!m_need_refresh || (m_refresh_bank != bank)) return false;
+ if (m_bankBusyCounter[bank] > 0) return false;
+ // Note that m_busBusyCounter will prevent multiple issues during
+ // the same cycle, as well as on different but close cycles:
+ if (m_busBusyCounter_Basic > 0) return false;
+ int rank = getRank(bank);
+ if (m_tfaw_count[rank] >= ACTIVATE_PER_TFAW) return false;
+
+ // Issue it:
+
+ //if (m_debug) {
+ //uint64 current_time = g_eventQueue_ptr->getTime();
+ //printf(" Refresh bank %3x at %lld\n", bank, current_time);
+ //}
+ g_system_ptr->getProfiler()->profileMemRefresh();
+ m_need_refresh--;
+ m_refresh_bank++;
+ if (m_refresh_bank >= m_total_banks) m_refresh_bank = 0;
+ m_bankBusyCounter[bank] = m_bank_busy_time;
+ m_busBusyCounter_Basic = m_basic_bus_busy_time;
+ m_busBusyCounter_Write = m_basic_bus_busy_time;
+ m_busBusyCounter_ReadNewRank = m_basic_bus_busy_time;
+ markTfaw(rank);
+ return true;
+}
+
+
+// Mark the activate in the tFaw shift register
+void MemoryControl::markTfaw (int rank) {
+ if (m_tFaw) {
+    // 64-bit constant: m_tFaw may be as large as 62 (see the assert in the
+    // constructor), so a plain int shift could overflow
+    m_tfaw_shift[rank] |= (((uint64) 1) << (m_tFaw-1));
+ m_tfaw_count[rank]++;
+ }
+}
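+
+// Example: with m_tFaw = 4, an activate sets bit 3 of the rank's shift
+// register. executeCycle() shifts the register right once per memory cycle;
+// on the fourth subsequent cycle the bit reaches position 0, m_tfaw_count
+// is decremented, and the activate ages out of the window.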
+
+
+// Issue a memory request: Activate the bank,
+// reserve the address and data buses, and queue
+// the request for return to the requesting
+// processor after a fixed latency.
+
+void MemoryControl::issueRequest (int bank) {
+ int rank = getRank(bank);
+ MemoryNode req = m_bankQueues[bank].front();
+ m_bankQueues[bank].pop_front();
+ if (m_debug) {
+ uint64 current_time = g_eventQueue_ptr->getTime();
+ printf(" Mem issue request%7d: 0x%08llx %c at %10lld bank =%3x\n",
+ req.m_msg_counter, req.m_addr, req.m_is_mem_read? 'R':'W', current_time, bank);
+ }
+ if (req.m_msgptr.ref() != NULL) { // don't enqueue L3 writebacks
+ enqueueToDirectory(req, m_mem_ctl_latency + m_memFixedDelay);
+ }
+ m_oldRequest[bank] = 0;
+ markTfaw(rank);
+ m_bankBusyCounter[bank] = m_bank_busy_time;
+ m_busBusy_WhichRank = rank;
+ if (req.m_is_mem_read) {
+ g_system_ptr->getProfiler()->profileMemRead();
+ m_busBusyCounter_Basic = m_basic_bus_busy_time;
+ m_busBusyCounter_Write = m_basic_bus_busy_time + m_read_write_delay;
+ m_busBusyCounter_ReadNewRank = m_basic_bus_busy_time + m_rank_rank_delay;
+ } else {
+ g_system_ptr->getProfiler()->profileMemWrite();
+ m_busBusyCounter_Basic = m_basic_bus_busy_time;
+ m_busBusyCounter_Write = m_basic_bus_busy_time;
+ m_busBusyCounter_ReadNewRank = m_basic_bus_busy_time;
+ }
+}
+
+
+// executeCycle: This function is called once per memory clock cycle
+// to simulate all the periodic hardware.
+
+void MemoryControl::executeCycle () {
+ // Keep track of time by counting down the busy counters:
+ for (int bank=0; bank < m_total_banks; bank++) {
+ if (m_bankBusyCounter[bank] > 0) m_bankBusyCounter[bank]--;
+ }
+ if (m_busBusyCounter_Write > 0) m_busBusyCounter_Write--;
+ if (m_busBusyCounter_ReadNewRank > 0) m_busBusyCounter_ReadNewRank--;
+ if (m_busBusyCounter_Basic > 0) m_busBusyCounter_Basic--;
+
+ // Count down the tFAW shift registers:
+ for (int rank=0; rank < m_total_ranks; rank++) {
+ if (m_tfaw_shift[rank] & 1) m_tfaw_count[rank]--;
+ m_tfaw_shift[rank] >>= 1;
+ }
+
+ // After time period expires, latch an indication that we need a refresh.
+ // Disable refresh if in memFixedDelay mode.
+ if (!m_memFixedDelay) m_refresh_count--;
+ if (m_refresh_count == 0) {
+ m_refresh_count = m_refresh_period_system;
+ assert (m_need_refresh < 10); // Are we overrunning our ability to refresh?
+ m_need_refresh++;
+ }
+
+ // If this batch of requests is all done, make a new batch:
+ m_ageCounter++;
+ int anyOld = 0;
+ for (int bank=0; bank < m_total_banks; bank++) {
+ anyOld |= m_oldRequest[bank];
+ }
+ if (!anyOld) {
+ for (int bank=0; bank < m_total_banks; bank++) {
+ if (!m_bankQueues[bank].empty()) m_oldRequest[bank] = 1;
+ }
+ m_ageCounter = 0;
+ }
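+
+ // (m_oldRequest marks the batch: the head request of each non-empty
+ // bank queue when the previous batch drained. A new batch forms only
+ // after every marked request has issued, which presumably bounds
+ // starvation; m_ageCounter counts how long the current batch has lived.)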
+
+ // If randomness desired, re-randomize round-robin position each cycle
+ if (m_memRandomArbitrate) {
+ m_roundRobin = random() % m_total_banks;
+ }
+
+
+ // For each channel, scan round-robin, and pick an old, ready
+ // request and issue it. Treat a refresh request as if it
+ // were at the head of its bank queue. After we issue something,
+ // keep scanning the queues just to gather statistics about
+ // how many are waiting. If in memFixedDelay mode, we can issue
+ // more than one request per cycle.
+
+ int queueHeads = 0;
+ int banksIssued = 0;
+ for (int i = 0; i < m_total_banks; i++) {
+ m_roundRobin++;
+ if (m_roundRobin >= m_total_banks) m_roundRobin = 0;
+ issueRefresh(m_roundRobin);
+ int qs = m_bankQueues[m_roundRobin].size();
+ if (qs > 1) {
+ g_system_ptr->getProfiler()->profileMemBankQ(qs-1);
+ }
+ if (qs > 0) {
+ m_idleCount = IDLECOUNT_MAX_VALUE; // we're not idle if anything is queued
+ queueHeads++;
+ if (queueReady(m_roundRobin)) {
+ issueRequest(m_roundRobin);
+ banksIssued++;
+ if (m_memFixedDelay) {
+ g_system_ptr->getProfiler()->profileMemWaitCycles(m_memFixedDelay);
+ }
+ }
+ }
+ }
+
+ // memWaitCycles is a redundant catch-all for the specific counters in queueReady
+ g_system_ptr->getProfiler()->profileMemWaitCycles(queueHeads - banksIssued);
+
+ // Check input queue and move anything to bank queues if not full.
+ // Since this is done here at the end of the cycle, there will always
+ // be at least one cycle of latency in the bank queue.
+ // We deliberately move at most one request per cycle (to simulate
+ // typical hardware). Note that if one bank queue fills up, other
+ // requests can get stuck behind it here.
+
+ if (!m_input_queue.empty()) {
+ m_idleCount = IDLECOUNT_MAX_VALUE; // we're not idle if anything is pending
+ MemoryNode req = m_input_queue.front();
+ int bank = getBank(req.m_addr);
+ if (m_bankQueues[bank].size() < m_bank_queue_size) {
+ m_input_queue.pop_front();
+ m_bankQueues[bank].push_back(req);
+ }
+ g_system_ptr->getProfiler()->profileMemInputQ(m_input_queue.size());
+ }
+}
+
+
+// wakeup: This function is called once per memory controller clock cycle.
+
+void MemoryControl::wakeup () {
+
+ // execute everything
+ executeCycle();
+
+ m_idleCount--;
+ if (m_idleCount <= 0) {
+ m_awakened = 0;
+ } else {
+ // Reschedule ourselves so that we run every memory cycle:
+ g_eventQueue_ptr->scheduleEvent(this, m_mem_bus_cycle_multiplier);
+ }
+}
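+
+// Note: once m_idleCount reaches zero the controller stops rescheduling
+// itself and clears m_awakened; it presumably gets scheduled again when
+// a new request arrives (see enqueue/enqueueMemRef).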
+
+
diff --git a/src/mem/ruby/system/MemoryControl.hh b/src/mem/ruby/system/MemoryControl.hh
new file mode 100644
index 000000000..a98181b0b
--- /dev/null
+++ b/src/mem/ruby/system/MemoryControl.hh
@@ -0,0 +1,176 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * MemoryControl.hh
+ *
+ * Description: See MemoryControl.cc
+ *
+ * $Id: $
+ *
+ */
+
+#ifndef MEMORY_CONTROL_H
+#define MEMORY_CONTROL_H
+
+#include "mem/ruby/common/Global.hh"
+#include "mem/gems_common/Map.hh"
+#include "mem/ruby/common/Address.hh"
+#include "mem/ruby/profiler/Profiler.hh"
+#include "mem/ruby/slicc_interface/AbstractChip.hh"
+#include "mem/ruby/system/System.hh"
+#include "mem/ruby/slicc_interface/Message.hh"
+#include "mem/gems_common/util.hh"
+#include "mem/ruby/system/MemoryNode.hh"
+// Note that "MemoryMsg" is in the "generated" directory:
+#include "mem/protocol/MemoryMsg.hh"
+#include "mem/ruby/common/Consumer.hh"
+#include "mem/ruby/system/AbstractMemOrCache.hh"
+
+#include <list>
+
+// This constant is part of the definition of tFAW; see
+// the comments in header to MemoryControl.C
+#define ACTIVATE_PER_TFAW 4
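+// (i.e. at most ACTIVATE_PER_TFAW bank activates are counted against any
+// one rank within a rolling tFAW-cycle window; see markTfaw and the
+// m_tfaw_count checks in MemoryControl.cc.)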
+
+//////////////////////////////////////////////////////////////////////////////
+
+class Consumer;
+
+class MemoryControl : public Consumer, public AbstractMemOrCache {
+public:
+
+ // Constructors
+ MemoryControl (AbstractChip* chip_ptr, int version);
+
+ // Destructor
+ ~MemoryControl ();
+
+ // Public Methods
+
+ void wakeup() ;
+
+ void setConsumer (Consumer* consumer_ptr);
+ Consumer* getConsumer () { return m_consumer_ptr; };
+ void setDescription (const string& name) { m_name = name; };
+ string getDescription () { return m_name; };
+
+ // Called from the directory:
+ void enqueue (const MsgPtr& message, int latency );
+ void enqueueMemRef (MemoryNode& memRef);
+ void dequeue ();
+ const Message* peek ();
+ MemoryNode peekNode ();
+ bool isReady();
+ bool areNSlotsAvailable (int n) { return true; }; // infinite queue length
+
+ //// Called from L3 cache:
+ //void writeBack(physical_address_t addr);
+
+ void printConfig (ostream& out);
+ void print (ostream& out) const;
+ void setDebug (int debugFlag);
+
+private:
+
+ void enqueueToDirectory (MemoryNode req, int latency);
+ int getBank (physical_address_t addr);
+ int getRank (int bank);
+ bool queueReady (int bank);
+ void issueRequest (int bank);
+ bool issueRefresh (int bank);
+ void markTfaw (int rank);
+ void executeCycle ();
+
+ // Private copy constructor and assignment operator
+ MemoryControl (const MemoryControl& obj);
+ MemoryControl& operator=(const MemoryControl& obj);
+
+ // data members
+ AbstractChip* m_chip_ptr;
+ Consumer* m_consumer_ptr; // Consumer to signal a wakeup()
+ string m_name;
+ int m_version;
+ int m_msg_counter;
+ int m_awakened;
+
+ int m_mem_bus_cycle_multiplier;
+ int m_banks_per_rank;
+ int m_ranks_per_dimm;
+ int m_dimms_per_channel;
+ int m_bank_bit_0;
+ int m_rank_bit_0;
+ int m_dimm_bit_0;
+ unsigned int m_bank_queue_size;
+ int m_bank_busy_time;
+ int m_rank_rank_delay;
+ int m_read_write_delay;
+ int m_basic_bus_busy_time;
+ int m_mem_ctl_latency;
+ int m_refresh_period;
+ int m_memRandomArbitrate;
+ int m_tFaw;
+ int m_memFixedDelay;
+
+ int m_total_banks;
+ int m_total_ranks;
+ int m_refresh_period_system;
+
+ // queues where memory requests live
+
+ list<MemoryNode> m_response_queue;
+ list<MemoryNode> m_input_queue;
+ list<MemoryNode>* m_bankQueues;
+
+ // Each entry indicates number of address-bus cycles until bank
+ // is reschedulable:
+ int* m_bankBusyCounter;
+ int* m_oldRequest;
+
+ uint64* m_tfaw_shift;
+ int* m_tfaw_count;
+
+ // Each of these indicates number of address-bus cycles until
+ // we can issue a new request of the corresponding type:
+ int m_busBusyCounter_Write;
+ int m_busBusyCounter_ReadNewRank;
+ int m_busBusyCounter_Basic;
+
+ int m_busBusy_WhichRank; // which rank last granted
+ int m_roundRobin; // which bank queue was last granted
+ int m_refresh_count; // cycles until next refresh
+ int m_need_refresh; // set whenever m_refresh_count goes to zero
+ int m_refresh_bank; // which bank to refresh next
+ int m_ageCounter; // age of old requests; to detect starvation
+ int m_idleCount; // watchdog timer for shutting down
+ int m_debug; // turn on printf's
+};
+
+#endif // MEMORY_CONTROL_H
+
diff --git a/src/mem/ruby/system/MemoryNode.cc b/src/mem/ruby/system/MemoryNode.cc
new file mode 100644
index 000000000..be93fae85
--- /dev/null
+++ b/src/mem/ruby/system/MemoryNode.cc
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 1999 by Mark Hill and David Wood for the Wisconsin
+ * Multifacet Project. ALL RIGHTS RESERVED.
+ *
+ * ##HEADER##
+ *
+ * This software is furnished under a license and may be used and
+ * copied only in accordance with the terms of such license and the
+ * inclusion of the above copyright notice. This software or any
+ * other copies thereof or any derivative works may not be provided or
+ * otherwise made available to any other persons. Title to and
+ * ownership of the software is retained by Mark Hill and David Wood.
+ * Any use of this software must include the above copyright notice.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS". THE LICENSOR MAKES NO
+ * WARRANTIES ABOUT ITS CORRECTNESS OR PERFORMANCE.
+ * */
+
+/*
+ * MemoryNode.cc
+ *
+ * Description: See MemoryNode.hh
+ *
+ * $Id: MemoryNode.C 1.3 04/08/04 14:15:38-05:00 beckmann@c2-141.cs.wisc.edu $
+ *
+ */
+
+#include "mem/ruby/system/MemoryNode.hh"
+
+void MemoryNode::print(ostream& out) const
+{
+ out << "[";
+ out << m_time << ", ";
+ out << m_msg_counter << ", ";
+ out << m_msgptr << "; ";
+ out << "]";
+}
diff --git a/src/mem/ruby/system/MemoryNode.hh b/src/mem/ruby/system/MemoryNode.hh
new file mode 100644
index 000000000..95d4227f9
--- /dev/null
+++ b/src/mem/ruby/system/MemoryNode.hh
@@ -0,0 +1,90 @@
+/*
+ * Copyright (c) 1999 by Mark Hill and David Wood for the Wisconsin
+ * Multifacet Project. ALL RIGHTS RESERVED.
+ *
+ * ##HEADER##
+ *
+ * This software is furnished under a license and may be used and
+ * copied only in accordance with the terms of such license and the
+ * inclusion of the above copyright notice. This software or any
+ * other copies thereof or any derivative works may not be provided or
+ * otherwise made available to any other persons. Title to and
+ * ownership of the software is retained by Mark Hill and David Wood.
+ * Any use of this software must include the above copyright notice.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS". THE LICENSOR MAKES NO
+ * WARRANTIES ABOUT ITS CORRECTNESS OR PERFORMANCE.
+ * */
+
+/*
+ * Description:
+ * This structure records everything known about a single
+ * memory request that is queued in the memory controller.
+ * It is created when the memory request first arrives
+ * at a memory controller and is deleted when the underlying
+ * message is enqueued to be sent back to the directory.
+ */
+
+#ifndef MEMORYNODE_H
+#define MEMORYNODE_H
+
+#include "mem/ruby/common/Global.hh"
+#include "mem/ruby/slicc_interface/Message.hh"
+#include "mem/protocol/MemoryRequestType.hh"
+
+class MemoryNode {
+
+public:
+ // Constructors
+
+// Original constructor: the caller supplies the message counter explicitly.
+ MemoryNode(const Time& time, int counter, const MsgPtr& msgptr, const physical_address_t addr, const bool is_mem_read) {
+ m_time = time;
+ m_msg_counter = counter;
+ m_msgptr = msgptr;
+ m_addr = addr;
+ m_is_mem_read = is_mem_read;
+ m_is_dirty_wb = !is_mem_read;
+ }
+
+// Newer constructor: the counter defaults to zero and the caller supplies the dirty-writeback flag.
+ MemoryNode(const Time& time, const MsgPtr& msgptr, const physical_address_t addr, const bool is_mem_read, const bool is_dirty_wb) {
+ m_time = time;
+ m_msg_counter = 0;
+ m_msgptr = msgptr;
+ m_addr = addr;
+ m_is_mem_read = is_mem_read;
+ m_is_dirty_wb = is_dirty_wb;
+ }
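+
+ // Usage sketch (illustrative; msg_ptr, addr and memory_control are
+ // placeholder names):
+ //   MemoryNode n(g_eventQueue_ptr->getTime(), msg_ptr, addr,
+ //                true /* memory read */, false /* not a dirty WB */);
+ //   memory_control->enqueueMemRef(n);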
+
+ // Destructor
+ ~MemoryNode() {};
+
+ // Public Methods
+ void print(ostream& out) const;
+
+ // Data Members (m_ prefix) (all public -- this is really more a struct)
+
+ Time m_time;
+ int m_msg_counter;
+ MsgPtr m_msgptr;
+ physical_address_t m_addr;
+ bool m_is_mem_read;
+ bool m_is_dirty_wb;
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const MemoryNode& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const MemoryNode& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //MEMORYNODE_H
diff --git a/src/mem/ruby/system/NodeID.hh b/src/mem/ruby/system/NodeID.hh
new file mode 100644
index 000000000..04c339acc
--- /dev/null
+++ b/src/mem/ruby/system/NodeID.hh
@@ -0,0 +1,50 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * NodeID.hh
+ *
+ * Description:
+ *
+ * $Id: NodeID.h,v 3.3 2003/12/04 15:01:39 xu Exp $
+ *
+ */
+
+#ifndef NODEID_H
+#define NODEID_H
+
+#include "mem/ruby/common/Global.hh"
+#include "mem/gems_common/util.hh"
+
+typedef int NodeID;
+
+extern inline
+string NodeIDToString (NodeID node) { return int_to_string(node); }
+
+#endif //NODEID_H
diff --git a/src/mem/ruby/system/NodePersistentTable.cc b/src/mem/ruby/system/NodePersistentTable.cc
new file mode 100644
index 000000000..4dd5c670f
--- /dev/null
+++ b/src/mem/ruby/system/NodePersistentTable.cc
@@ -0,0 +1,193 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id: NodePersistentTable.C 1.3 04/08/16 14:12:33-05:00 beckmann@c2-143.cs.wisc.edu $
+ *
+ */
+
+#include "mem/ruby/system/NodePersistentTable.hh"
+#include "mem/ruby/common/Set.hh"
+#include "mem/gems_common/Map.hh"
+#include "mem/ruby/common/Address.hh"
+#include "mem/ruby/slicc_interface/AbstractChip.hh"
+#include "mem/gems_common/util.hh"
+
+// To randomize handoffs so they are not locality-aware, swap in the
+// commented-out permutation below; the identity mapping currently in use
+// leaves randomization disabled.
+// int persistent_randomize[] = {0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15};
+int persistent_randomize[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
+
+
+class NodePersistentTableEntry {
+public:
+ Set m_starving;
+ Set m_marked;
+ Set m_request_to_write;
+};
+
+NodePersistentTable::NodePersistentTable(AbstractChip* chip_ptr, int version)
+{
+ m_chip_ptr = chip_ptr;
+ m_map_ptr = new Map<Address, NodePersistentTableEntry>;
+ m_version = version;
+}
+
+NodePersistentTable::~NodePersistentTable()
+{
+ delete m_map_ptr;
+ m_map_ptr = NULL;
+ m_chip_ptr = NULL;
+}
+
+void NodePersistentTable::persistentRequestLock(const Address& address, NodeID llocker, AccessType type)
+{
+
+ // if (locker == m_chip_ptr->getID() )
+ // cout << "Chip " << m_chip_ptr->getID() << ": " << llocker << " requesting lock for " << address << endl;
+
+ NodeID locker = (NodeID) persistent_randomize[llocker];
+
+ assert(address == line_address(address));
+ if (!m_map_ptr->exist(address)) {
+ // Allocate if not present
+ NodePersistentTableEntry entry;
+ entry.m_starving.add(locker);
+ if (type == AccessType_Write) {
+ entry.m_request_to_write.add(locker);
+ }
+ m_map_ptr->add(address, entry);
+ } else {
+ NodePersistentTableEntry& entry = m_map_ptr->lookup(address);
+ assert(!(entry.m_starving.isElement(locker))); // Make sure we're not already in the locked set
+
+ entry.m_starving.add(locker);
+ if (type == AccessType_Write) {
+ entry.m_request_to_write.add(locker);
+ }
+ assert(entry.m_marked.isSubset(entry.m_starving));
+ }
+}
+
+void NodePersistentTable::persistentRequestUnlock(const Address& address, NodeID uunlocker)
+{
+ // if (unlocker == m_chip_ptr->getID() )
+ // cout << "Chip " << m_chip_ptr->getID() << ": " << uunlocker << " requesting unlock for " << address << endl;
+
+ NodeID unlocker = (NodeID) persistent_randomize[uunlocker];
+
+ assert(address == line_address(address));
+ assert(m_map_ptr->exist(address));
+ NodePersistentTableEntry& entry = m_map_ptr->lookup(address);
+ assert(entry.m_starving.isElement(unlocker)); // Make sure we're in the locked set
+ assert(entry.m_marked.isSubset(entry.m_starving));
+ entry.m_starving.remove(unlocker);
+ entry.m_marked.remove(unlocker);
+ entry.m_request_to_write.remove(unlocker);
+ assert(entry.m_marked.isSubset(entry.m_starving));
+
+ // Deallocate if empty
+ if (entry.m_starving.isEmpty()) {
+ assert(entry.m_marked.isEmpty());
+ m_map_ptr->erase(address);
+ }
+}
+
+bool NodePersistentTable::okToIssueStarving(const Address& address) const
+{
+ assert(address == line_address(address));
+ if (!m_map_ptr->exist(address)) {
+ return true; // No entry present
+ } else if (m_map_ptr->lookup(address).m_starving.isElement(m_chip_ptr->getID())) {
+ return false; // We can't issue another lockdown until our previous unlock has occurred
+ } else {
+ return (m_map_ptr->lookup(address).m_marked.isEmpty());
+ }
+}
+
+NodeID NodePersistentTable::findSmallest(const Address& address) const
+{
+ assert(address == line_address(address));
+ assert(m_map_ptr->exist(address));
+ const NodePersistentTableEntry& entry = m_map_ptr->lookup(address);
+ // cout << "Node " << m_chip_ptr->getID() << " returning " << persistent_randomize[entry.m_starving.smallestElement()] << " for findSmallest(" << address << ")" << endl;
+ return (NodeID) persistent_randomize[entry.m_starving.smallestElement()];
+}
+
+AccessType NodePersistentTable::typeOfSmallest(const Address& address) const
+{
+ assert(address == line_address(address));
+ assert(m_map_ptr->exist(address));
+ const NodePersistentTableEntry& entry = m_map_ptr->lookup(address);
+ if (entry.m_request_to_write.isElement(entry.m_starving.smallestElement())) {
+ return AccessType_Write;
+ } else {
+ return AccessType_Read;
+ }
+}
+
+void NodePersistentTable::markEntries(const Address& address)
+{
+ assert(address == line_address(address));
+ if (m_map_ptr->exist(address)) {
+ NodePersistentTableEntry& entry = m_map_ptr->lookup(address);
+ assert(entry.m_marked.isEmpty()); // None should be marked
+ entry.m_marked = entry.m_starving; // Mark all the nodes currently in the table
+ }
+}
+
+bool NodePersistentTable::isLocked(const Address& address) const
+{
+ assert(address == line_address(address));
+ // If an entry is present, it must be locked
+ return (m_map_ptr->exist(address));
+}
+
+int NodePersistentTable::countStarvingForAddress(const Address& address) const
+{
+ if (m_map_ptr->exist(address)) {
+ NodePersistentTableEntry& entry = m_map_ptr->lookup(address);
+ return (entry.m_starving.count());
+ }
+ else {
+ return 0;
+ }
+}
+
+int NodePersistentTable::countReadStarvingForAddress(const Address& address) const
+{
+ if (m_map_ptr->exist(address)) {
+ NodePersistentTableEntry& entry = m_map_ptr->lookup(address);
+ return (entry.m_starving.count() - entry.m_request_to_write.count());
+ }
+ else {
+ return 0;
+ }
+}
+
+
diff --git a/src/mem/ruby/system/NodePersistentTable.hh b/src/mem/ruby/system/NodePersistentTable.hh
new file mode 100644
index 000000000..32de2613e
--- /dev/null
+++ b/src/mem/ruby/system/NodePersistentTable.hh
@@ -0,0 +1,99 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id: NodePersistentTable.h 1.3 04/08/16 14:12:33-05:00 beckmann@c2-143.cs.wisc.edu $
+ *
+ * Description:
+ *
+ */
+
+#ifndef NodePersistentTable_H
+#define NodePersistentTable_H
+
+#include "mem/ruby/common/Global.hh"
+#include "mem/ruby/system/NodeID.hh"
+#include "mem/protocol/AccessType.hh"
+
+class AbstractChip;
+
+template <class KEY_TYPE, class VALUE_TYPE> class Map;
+class Address;
+class NodePersistentTableEntry;
+
+class NodePersistentTable {
+public:
+ // Constructors
+ NodePersistentTable(AbstractChip* chip_ptr, int version);
+
+ // Destructor
+ ~NodePersistentTable();
+
+ // Public Methods
+ void persistentRequestLock(const Address& address, NodeID locker, AccessType type);
+ void persistentRequestUnlock(const Address& address, NodeID unlocker);
+ bool okToIssueStarving(const Address& address) const;
+ NodeID findSmallest(const Address& address) const;
+ AccessType typeOfSmallest(const Address& address) const;
+ void markEntries(const Address& address);
+ bool isLocked(const Address& addr) const;
+ int countStarvingForAddress(const Address& addr) const;
+ int countReadStarvingForAddress(const Address& addr) const;
+
+ static void printConfig(ostream& out) {}
+
+ void print(ostream& out) const;
+private:
+ // Private Methods
+
+ // Private copy constructor and assignment operator
+ NodePersistentTable(const NodePersistentTable& obj);
+ NodePersistentTable& operator=(const NodePersistentTable& obj);
+
+ // Data Members (m_prefix)
+ Map<Address, NodePersistentTableEntry>* m_map_ptr;
+ AbstractChip* m_chip_ptr;
+ int m_version;
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const NodePersistentTable& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const NodePersistentTable& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //NodePersistentTable_H
diff --git a/src/mem/ruby/system/PerfectCacheMemory.hh b/src/mem/ruby/system/PerfectCacheMemory.hh
new file mode 100644
index 000000000..4578d0a44
--- /dev/null
+++ b/src/mem/ruby/system/PerfectCacheMemory.hh
@@ -0,0 +1,238 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * PerfectCacheMemory.hh
+ *
+ * Description:
+ *
+ * $Id$
+ *
+ */
+
+#ifndef PERFECTCACHEMEMORY_H
+#define PERFECTCACHEMEMORY_H
+
+#include "mem/ruby/common/Global.hh"
+#include "mem/gems_common/Map.hh"
+#include "mem/protocol/AccessPermission.hh"
+#include "mem/ruby/config/RubyConfig.hh"
+#include "mem/ruby/common/Address.hh"
+#include "mem/ruby/slicc_interface/AbstractChip.hh"
+
+template<class ENTRY>
+class PerfectCacheLineState {
+public:
+ PerfectCacheLineState() { m_permission = AccessPermission_NUM; }
+ AccessPermission m_permission;
+ ENTRY m_entry;
+};
+
+template<class ENTRY>
+class PerfectCacheMemory {
+public:
+
+ // Constructors
+ PerfectCacheMemory(AbstractChip* chip_ptr);
+
+ // Destructor
+ //~PerfectCacheMemory();
+
+ // Public Methods
+
+ static void printConfig(ostream& out);
+
+ // perform a cache access and see if we hit or not. Return true on
+ // a hit.
+ bool tryCacheAccess(const CacheMsg& msg, bool& block_stc, ENTRY*& entry);
+
+ // tests to see if an address is present in the cache
+ bool isTagPresent(const Address& address) const;
+
+ // Returns true if there is:
+ // a) a tag match on this address or there is
+ // b) an Invalid line in the same cache "way"
+ bool cacheAvail(const Address& address) const;
+
+ // finds an Invalid entry and sets the tag appropriately for the address
+ void allocate(const Address& address);
+
+ void deallocate(const Address& address);
+
+ // Returns with the physical address of the conflicting cache line
+ Address cacheProbe(const Address& newAddress) const;
+
+ // looks an address up in the cache
+ ENTRY& lookup(const Address& address);
+ const ENTRY& lookup(const Address& address) const;
+
+ // Get/Set permission of cache block
+ AccessPermission getPermission(const Address& address) const;
+ void changePermission(const Address& address, AccessPermission new_perm);
+
+ // Print cache contents
+ void print(ostream& out) const;
+private:
+ // Private Methods
+
+ // Private copy constructor and assignment operator
+ PerfectCacheMemory(const PerfectCacheMemory& obj);
+ PerfectCacheMemory& operator=(const PerfectCacheMemory& obj);
+
+ // Data Members (m_prefix)
+ Map<Address, PerfectCacheLineState<ENTRY> > m_map;
+ AbstractChip* m_chip_ptr;
+};
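+
+// Usage sketch (illustrative; DirectoryEntry, chip_ptr, addr and my_entry
+// are placeholder names). Capacity is unbounded, so cacheAvail() is
+// always true and allocate() never evicts:
+//
+//   PerfectCacheMemory<DirectoryEntry> cache(chip_ptr);
+//   if (!cache.isTagPresent(addr)) cache.allocate(addr);
+//   cache.lookup(addr) = my_entry;   // lookup returns ENTRY&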
+
+// Output operator declaration
+//ostream& operator<<(ostream& out, const PerfectCacheMemory<ENTRY>& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+template<class ENTRY>
+extern inline
+ostream& operator<<(ostream& out, const PerfectCacheMemory<ENTRY>& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+
+// ****************************************************************
+
+template<class ENTRY>
+extern inline
+PerfectCacheMemory<ENTRY>::PerfectCacheMemory(AbstractChip* chip_ptr)
+{
+ m_chip_ptr = chip_ptr;
+}
+
+// STATIC METHODS
+
+template<class ENTRY>
+extern inline
+void PerfectCacheMemory<ENTRY>::printConfig(ostream& out)
+{
+}
+
+// PUBLIC METHODS
+
+template<class ENTRY>
+extern inline
+bool PerfectCacheMemory<ENTRY>::tryCacheAccess(const CacheMsg& msg, bool& block_stc, ENTRY*& entry)
+{
+ ERROR_MSG("not implemented");
+}
+
+// tests to see if an address is present in the cache
+template<class ENTRY>
+extern inline
+bool PerfectCacheMemory<ENTRY>::isTagPresent(const Address& address) const
+{
+ return m_map.exist(line_address(address));
+}
+
+template<class ENTRY>
+extern inline
+bool PerfectCacheMemory<ENTRY>::cacheAvail(const Address& address) const
+{
+ return true;
+}
+
+// finds an Invalid or already allocated entry and sets the tag
+// appropriately for the address
+template<class ENTRY>
+extern inline
+void PerfectCacheMemory<ENTRY>::allocate(const Address& address)
+{
+ PerfectCacheLineState<ENTRY> line_state;
+ line_state.m_permission = AccessPermission_Busy;
+ line_state.m_entry = ENTRY();
+ m_map.add(line_address(address), line_state);
+}
+
+// deallocate entry
+template<class ENTRY>
+extern inline
+void PerfectCacheMemory<ENTRY>::deallocate(const Address& address)
+{
+ m_map.erase(line_address(address));
+}
+
+// Returns with the physical address of the conflicting cache line
+template<class ENTRY>
+extern inline
+Address PerfectCacheMemory<ENTRY>::cacheProbe(const Address& newAddress) const
+{
+ ERROR_MSG("cacheProbe called in perfect cache");
+}
+
+// looks an address up in the cache
+template<class ENTRY>
+extern inline
+ENTRY& PerfectCacheMemory<ENTRY>::lookup(const Address& address)
+{
+ return m_map.lookup(line_address(address)).m_entry;
+}
+
+// looks an address up in the cache
+template<class ENTRY>
+extern inline
+const ENTRY& PerfectCacheMemory<ENTRY>::lookup(const Address& address) const
+{
+ return m_map.lookup(line_address(address)).m_entry;
+}
+
+template<class ENTRY>
+extern inline
+AccessPermission PerfectCacheMemory<ENTRY>::getPermission(const Address& address) const
+{
+ return m_map.lookup(line_address(address)).m_permission;
+}
+
+template<class ENTRY>
+extern inline
+void PerfectCacheMemory<ENTRY>::changePermission(const Address& address, AccessPermission new_perm)
+{
+ Address line_addr = address; // avoid shadowing the global line_address()
+ line_addr.makeLineAddress();
+ PerfectCacheLineState<ENTRY>& line_state = m_map.lookup(line_addr);
+ line_state.m_permission = new_perm;
+}
+
+template<class ENTRY>
+extern inline
+void PerfectCacheMemory<ENTRY>::print(ostream& out) const
+{
+}
+
+#endif //PERFECTCACHEMEMORY_H
diff --git a/src/mem/ruby/system/PersistentArbiter.cc b/src/mem/ruby/system/PersistentArbiter.cc
new file mode 100644
index 000000000..b44393301
--- /dev/null
+++ b/src/mem/ruby/system/PersistentArbiter.cc
@@ -0,0 +1,165 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "mem/ruby/system/PersistentArbiter.hh"
+#include "mem/ruby/common/Address.hh"
+#include "mem/ruby/slicc_interface/AbstractChip.hh"
+#include "mem/gems_common/util.hh"
+
+PersistentArbiter::PersistentArbiter(AbstractChip* chip_ptr)
+{
+ m_chip_ptr = chip_ptr;
+
+ // sized and indexed directly by NodeID; wastes entries, but the table is small
+ m_entries.setSize(RubyConfig::numberOfProcessors());
+
+ for (int i = 0; i < m_entries.size(); i++) {
+ m_entries[i].valid = false;
+ }
+
+ m_busy = false;
+ m_locker = -1;
+
+}
+
+PersistentArbiter::~PersistentArbiter()
+{
+ m_chip_ptr = NULL;
+}
+
+
+void PersistentArbiter::addLocker(NodeID id, Address addr, AccessType type) {
+ //cout << "Arbiter " << getArbiterId() << " adding locker " << id << " " << addr << endl;
+ assert(m_entries[id].valid == false);
+ m_entries[id].valid = true;
+ m_entries[id].address = addr;
+ m_entries[id].type = type;
+ m_entries[id].localId = id;
+
+}
+
+void PersistentArbiter::removeLocker(NodeID id) {
+ //cout << "Arbiter " << getArbiterId() << " removing locker " << id << " " << m_entries[id].address << endl;
+ assert(m_entries[id].valid == true);
+ m_entries[id].valid = false;
+
+ if (!lockersExist()) {
+ m_busy = false;
+ }
+}
+
+bool PersistentArbiter::successorRequestPresent(Address addr, NodeID id) {
+ for (int i = (id + 1); i < m_entries.size(); i++) {
+ if (m_entries[i].address == addr && m_entries[i].valid) {
+ //cout << "m_entries[" << id << ", address " << m_entries[id].address << " is equal to " << addr << endl;
+ return true;
+ }
+ }
+ return false;
+}
+
+bool PersistentArbiter::lockersExist() {
+ for (int i = 0; i < m_entries.size(); i++) {
+ if (m_entries[i].valid == true) {
+ return true;
+ }
+ }
+ //cout << "no lockers found" << endl;
+ return false;
+}
+
+void PersistentArbiter::advanceActiveLock() {
+ assert(lockersExist());
+
+ //cout << "arbiter advancing lock from " << m_locker;
+ m_busy = false;
+
+ if (m_locker < (m_entries.size() - 1)) {
+ for (int i = (m_locker+1); i < m_entries.size(); i++) {
+ if (m_entries[i].valid == true) {
+ m_locker = i;
+ m_busy = true;
+ //cout << " to " << m_locker << endl;
+ return;
+ }
+ }
+ }
+
+ if (!m_busy) {
+ for (int i = 0; i < m_entries.size(); i++) {
+ if (m_entries[i].valid == true) {
+ m_locker = i;
+ m_busy = true;
+ //cout << " to " << m_locker << endl;
+ return;
+ }
+ }
+
+ assert(m_busy); // unreachable: lockersExist() guarantees a valid entry was found above
+ }
+}
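+
+// Note: the two scans above form a circular search -- first from the
+// slot after the current locker to the end of the table, then wrapping
+// to the beginning -- so the lock rotates round-robin among the valid
+// entries.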
+
+Address PersistentArbiter::getActiveLockAddress() {
+ assert(m_entries[m_locker].valid == true);
+ return m_entries[m_locker].address;
+}
+
+
+NodeID PersistentArbiter::getArbiterId() {
+ return m_chip_ptr->getID()*RubyConfig::numberOfProcsPerChip();
+}
+
+bool PersistentArbiter::isBusy() {
+ return m_busy;
+}
+
+NodeID PersistentArbiter::getActiveLocalId() {
+ assert(m_entries[m_locker].valid == true);
+ return m_entries[m_locker].localId;
+}
+
+void PersistentArbiter::setIssuedAddress(Address addr) {
+ m_issued_address = addr;
+}
+
+bool PersistentArbiter::isIssuedAddress(Address addr) {
+ return (m_issued_address == addr);
+}
+
+void PersistentArbiter::print(ostream& out) const {
+
+ out << "[";
+ for (int i = 0; i < m_entries.size(); i++) {
+ if (m_entries[i].valid == true) {
+ out << "( " << m_entries[i].localId << ", " << m_entries[i].address << ") ";
+ }
+ }
+ out << "]" << endl;
+
+}
diff --git a/src/mem/ruby/system/PersistentArbiter.hh b/src/mem/ruby/system/PersistentArbiter.hh
new file mode 100644
index 000000000..705d833f4
--- /dev/null
+++ b/src/mem/ruby/system/PersistentArbiter.hh
@@ -0,0 +1,107 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * PersistentArbiter.hh
+ *
+ * Description:
+ *
+ * Used for hierarchical distributed persistent request scheme
+ *
+ */
+
+#ifndef PERSISTENTARBITER_H
+#define PERSISTENTARBITER_H
+
+#include "mem/ruby/common/Global.hh"
+#include "mem/gems_common/Vector.hh"
+#include "mem/ruby/slicc_interface/AbstractChip.hh"
+#include "mem/protocol/AccessPermission.hh"
+#include "mem/protocol/AccessType.hh"
+#include "mem/ruby/config/RubyConfig.hh"
+#include "mem/ruby/common/Address.hh"
+
+struct ArbiterEntry {
+ bool valid;
+ Address address;
+ AccessType type;
+ NodeID localId;
+};
+
+class PersistentArbiter {
+public:
+
+ // Constructors
+ PersistentArbiter(AbstractChip* chip_ptr);
+
+ // Destructor
+ ~PersistentArbiter();
+
+ // Public Methods
+
+ void addLocker(NodeID id, Address addr, AccessType type);
+ void removeLocker(NodeID id);
+ bool successorRequestPresent(Address addr, NodeID id);
+ bool lockersExist();
+ void advanceActiveLock();
+ Address getActiveLockAddress();
+ NodeID getArbiterId();
+ bool isBusy();
+
+ void setIssuedAddress(Address addr);
+ bool isIssuedAddress(Address addr);
+
+
+ Address getIssuedAddress() { return m_issued_address; }
+
+ static void printConfig(ostream& out) {}
+ void print(ostream& out) const;
+
+ NodeID getActiveLocalId();
+
+private:
+
+ Address m_issued_address;
+ AbstractChip* m_chip_ptr;
+ int m_locker;
+ bool m_busy;
+ Vector<ArbiterEntry> m_entries;
+};
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const PersistentArbiter& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+
+#endif //PERSISTENTARBITER_H
diff --git a/src/mem/ruby/system/PersistentTable.cc b/src/mem/ruby/system/PersistentTable.cc
new file mode 100644
index 000000000..7f07251ce
--- /dev/null
+++ b/src/mem/ruby/system/PersistentTable.cc
@@ -0,0 +1,194 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ */
+
+#include "mem/ruby/system/PersistentTable.hh"
+#include "mem/ruby/common/NetDest.hh"
+#include "mem/gems_common/Map.hh"
+#include "mem/ruby/common/Address.hh"
+#include "mem/ruby/slicc_interface/AbstractChip.hh"
+#include "mem/gems_common/util.hh"
+
+// Handoff randomization (so that handoffs are not locality-aware) is
+// disabled in this table; both permutations are kept below for reference.
+// int persistent_randomize[] = {0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15};
+// int persistent_randomize[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
+
+
+class PersistentTableEntry {
+public:
+ NetDest m_starving;
+ NetDest m_marked;
+ NetDest m_request_to_write;
+};
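+
+// Invariant maintained by the methods below: m_marked is always a
+// subset of m_starving, and an entry exists in the map iff at least
+// one machine is starving for the line (hence isLocked() simply tests
+// for presence).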
+
+PersistentTable::PersistentTable(AbstractChip* chip_ptr, int version)
+{
+ m_chip_ptr = chip_ptr;
+ m_map_ptr = new Map<Address, PersistentTableEntry>;
+ m_version = version;
+}
+
+PersistentTable::~PersistentTable()
+{
+ delete m_map_ptr;
+ m_map_ptr = NULL;
+ m_chip_ptr = NULL;
+}
+
+void PersistentTable::persistentRequestLock(const Address& address, MachineID locker, AccessType type)
+{
+
+ // if (locker == m_chip_ptr->getID() )
+ // cout << "Chip " << m_chip_ptr->getID() << ": " << llocker << " requesting lock for " << address << endl;
+
+ // MachineID locker = (MachineID) persistent_randomize[llocker];
+
+ assert(address == line_address(address));
+ if (!m_map_ptr->exist(address)) {
+ // Allocate if not present
+ PersistentTableEntry entry;
+ entry.m_starving.add(locker);
+ if (type == AccessType_Write) {
+ entry.m_request_to_write.add(locker);
+ }
+ m_map_ptr->add(address, entry);
+ } else {
+ PersistentTableEntry& entry = m_map_ptr->lookup(address);
+ assert(!(entry.m_starving.isElement(locker))); // Make sure we're not already in the locked set
+
+ entry.m_starving.add(locker);
+ if (type == AccessType_Write) {
+ entry.m_request_to_write.add(locker);
+ }
+ assert(entry.m_marked.isSubset(entry.m_starving));
+ }
+}
+
+void PersistentTable::persistentRequestUnlock(const Address& address, MachineID unlocker)
+{
+ // if (unlocker == m_chip_ptr->getID() )
+ // cout << "Chip " << m_chip_ptr->getID() << ": " << uunlocker << " requesting unlock for " << address << endl;
+
+ // MachineID unlocker = (MachineID) persistent_randomize[uunlocker];
+
+ assert(address == line_address(address));
+ assert(m_map_ptr->exist(address));
+ PersistentTableEntry& entry = m_map_ptr->lookup(address);
+ assert(entry.m_starving.isElement(unlocker)); // Make sure we're in the locked set
+ assert(entry.m_marked.isSubset(entry.m_starving));
+ entry.m_starving.remove(unlocker);
+ entry.m_marked.remove(unlocker);
+ entry.m_request_to_write.remove(unlocker);
+ assert(entry.m_marked.isSubset(entry.m_starving));
+
+ // Deallocate if empty
+ if (entry.m_starving.isEmpty()) {
+ assert(entry.m_marked.isEmpty());
+ m_map_ptr->erase(address);
+ }
+}
+
+bool PersistentTable::okToIssueStarving(const Address& address) const
+{
+ assert(address == line_address(address));
+ if (!m_map_ptr->exist(address)) {
+ return true; // No entry present
+ } else if (m_map_ptr->lookup(address).m_starving.isElement( (MachineID) {MachineType_L1Cache, m_version})) {
+ return false; // We can't issue another lockdown until our previous unlock has occurred
+ } else {
+ return (m_map_ptr->lookup(address).m_marked.isEmpty());
+ }
+}
+
+MachineID PersistentTable::findSmallest(const Address& address) const
+{
+ assert(address == line_address(address));
+ assert(m_map_ptr->exist(address));
+ const PersistentTableEntry& entry = m_map_ptr->lookup(address);
+ // cout << "Node " << m_chip_ptr->getID() << " returning " << persistent_randomize[entry.m_starving.smallestElement()] << " for findSmallest(" << address << ")" << endl;
+ // return (MachineID) persistent_randomize[entry.m_starving.smallestElement()];
+ return (MachineID) { MachineType_L1Cache, entry.m_starving.smallestElement() };
+}
+
+AccessType PersistentTable::typeOfSmallest(const Address& address) const
+{
+ assert(address == line_address(address));
+ assert(m_map_ptr->exist(address));
+ const PersistentTableEntry& entry = m_map_ptr->lookup(address);
+ if (entry.m_request_to_write.isElement((MachineID) {MachineType_L1Cache, entry.m_starving.smallestElement()})) {
+ return AccessType_Write;
+ } else {
+ return AccessType_Read;
+ }
+}
+
+void PersistentTable::markEntries(const Address& address)
+{
+ assert(address == line_address(address));
+ if (m_map_ptr->exist(address)) {
+ PersistentTableEntry& entry = m_map_ptr->lookup(address);
+ assert(entry.m_marked.isEmpty()); // None should be marked
+ entry.m_marked = entry.m_starving; // Mark all the nodes currently in the table
+ }
+}
+
+bool PersistentTable::isLocked(const Address& address) const
+{
+ assert(address == line_address(address));
+ // If an entry is present, it must be locked
+ return (m_map_ptr->exist(address));
+}
+
+int PersistentTable::countStarvingForAddress(const Address& address) const
+{
+ if (m_map_ptr->exist(address)) {
+ PersistentTableEntry& entry = m_map_ptr->lookup(address);
+ return (entry.m_starving.count());
+ }
+ else {
+ return 0;
+ }
+}
+
+int PersistentTable::countReadStarvingForAddress(const Address& address) const
+{
+ if (m_map_ptr->exist(address)) {
+ PersistentTableEntry& entry = m_map_ptr->lookup(address);
+ return (entry.m_starving.count() - entry.m_request_to_write.count());
+ }
+ else {
+ return 0;
+ }
+}
+
+
diff --git a/src/mem/ruby/system/PersistentTable.hh b/src/mem/ruby/system/PersistentTable.hh
new file mode 100644
index 000000000..9f2e38fd7
--- /dev/null
+++ b/src/mem/ruby/system/PersistentTable.hh
@@ -0,0 +1,99 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ * Description:
+ *
+ */
+
+#ifndef PersistentTable_H
+#define PersistentTable_H
+
+#include "mem/ruby/common/Global.hh"
+#include "mem/ruby/system/MachineID.hh"
+#include "mem/protocol/AccessType.hh"
+
+class AbstractChip;
+
+template <class KEY_TYPE, class VALUE_TYPE> class Map;
+class Address;
+class PersistentTableEntry;
+
+class PersistentTable {
+public:
+ // Constructors
+ PersistentTable(AbstractChip* chip_ptr, int version);
+
+ // Destructor
+ ~PersistentTable();
+
+ // Public Methods
+ void persistentRequestLock(const Address& address, MachineID locker, AccessType type);
+ void persistentRequestUnlock(const Address& address, MachineID unlocker);
+ bool okToIssueStarving(const Address& address) const;
+ MachineID findSmallest(const Address& address) const;
+ AccessType typeOfSmallest(const Address& address) const;
+ void markEntries(const Address& address);
+ bool isLocked(const Address& addr) const;
+ int countStarvingForAddress(const Address& addr) const;
+ int countReadStarvingForAddress(const Address& addr) const;
+
+ static void printConfig(ostream& out) {}
+
+ void print(ostream& out) const;
+private:
+ // Private Methods
+
+ // Private copy constructor and assignment operator
+ PersistentTable(const PersistentTable& obj);
+ PersistentTable& operator=(const PersistentTable& obj);
+
+ // Data Members (m_prefix)
+ Map<Address, PersistentTableEntry>* m_map_ptr;
+ AbstractChip* m_chip_ptr;
+ int m_version;
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const PersistentTable& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const PersistentTable& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //PersistentTable_H
diff --git a/src/mem/ruby/system/PseudoLRUPolicy.hh b/src/mem/ruby/system/PseudoLRUPolicy.hh
new file mode 100644
index 000000000..57a0b40e9
--- /dev/null
+++ b/src/mem/ruby/system/PseudoLRUPolicy.hh
@@ -0,0 +1,110 @@
+
+#ifndef PSEUDOLRUPOLICY_H
+#define PSEUDOLRUPOLICY_H
+
+#include "mem/ruby/system/AbstractReplacementPolicy.hh"
+
+/**
+ * Implementation of tree-based pseudo-LRU replacement
+ *
+ * Works for associativities from 2 up to the limit asserted in the
+ * constructor (the capacity of the 64-bit tree representation).
+ *
+ * Also implements associativities that are not a power of 2 by
+ * ignoring paths that lead to a larger index (i.e. truncating the
+ * tree). Note that when this occurs, the algorithm becomes less
+ * fair, as it will favor indices in the larger (by index) half of
+ * the associative set. This is most unfair when the nearest power of
+ * 2 is one below the associativity, and most fair when it is one above.
+ */
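+
+/*
+ * Worked example (illustrative): for assoc == 4 the tree has three
+ * node bits. touch() records, at each level, which half the touched
+ * way lies in; getVictim() walks the complemented bits, always
+ * descending away from the most recent touches. Touching ways
+ * 0, 1, 2, 3 in that order leaves getVictim() returning way 0.
+ */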
+
+class PseudoLRUPolicy : public AbstractReplacementPolicy {
+ public:
+
+ PseudoLRUPolicy(Index num_sets, Index assoc);
+ ~PseudoLRUPolicy();
+
+ void touch(Index set, Index way, Time time);
+ Index getVictim(Index set) const;
+
+ private:
+ unsigned int m_effective_assoc; /** nearest (to ceiling) power of 2 */
+ unsigned int m_num_levels; /** number of levels in the tree */
+ uint64* m_trees; /** bit representation of the trees, one for each set */
+};
+
+inline
+PseudoLRUPolicy::PseudoLRUPolicy(Index num_sets, Index assoc)
+ : AbstractReplacementPolicy(num_sets, assoc)
+{
+ int num_tree_nodes;
+
+ // associativity cannot exceed capacity of tree representation
+ assert(num_sets > 0 && assoc > 1 && assoc <= (Index) sizeof(uint64)*4);
+
+ m_trees = NULL;
+ m_num_levels = 0;
+
+ m_effective_assoc = 1;
+ while(m_effective_assoc < assoc){
+ m_effective_assoc <<= 1; // effective associativity is ceiling power of 2
+ }
+ assoc = m_effective_assoc;
+ while(true){
+ assoc /= 2;
+ if(!assoc) break;
+ m_num_levels++;
+ }
+ assert(m_num_levels < sizeof(unsigned int)*4);
+ num_tree_nodes = (1 << m_num_levels) - 1; // nodes in a full tree (currently unused)
+ m_trees = new uint64[m_num_sets];
+ for(unsigned int i=0; i< m_num_sets; i++){
+ m_trees[i] = 0;
+ }
+}
+
+inline
+PseudoLRUPolicy::~PseudoLRUPolicy()
+{
+ if(m_trees != NULL)
+ delete[] m_trees;
+}
+
+inline
+void PseudoLRUPolicy::touch(Index set, Index index, Time time){
+ assert(index >= 0 && index < m_assoc);
+ assert(set >= 0 && set < m_num_sets);
+
+ int tree_index = 0;
+ int node_val;
+ for(int i=m_num_levels -1; i>=0; i--){
+ node_val = (index >> i)&1;
+ if(node_val)
+ m_trees[set] |= node_val << tree_index;
+ else
+ m_trees[set] &= ~(1 << tree_index);
+ tree_index = node_val ? (tree_index*2)+2 : (tree_index*2)+1;
+ }
+ m_last_ref_ptr[set][index] = time;
+}
+
+inline
+Index PseudoLRUPolicy::getVictim(Index set) const {
+ // assert(m_assoc != 0);
+
+ Index index = 0;
+
+ int tree_index = 0;
+ int node_val;
+ for(unsigned int i=0;i<m_num_levels;i++){
+ node_val = (m_trees[set]>>tree_index)&1;
+ index += node_val?0:(m_effective_assoc >> (i+1));
+ tree_index = node_val? (tree_index*2)+1 : (tree_index*2)+2;
+ }
+ assert(index >= 0 && index < m_effective_assoc);
+
+ /* return either the found index or the max possible index */
+ /* NOTE: this is not a fair replacement when assoc is not a power of 2 */
+ return (index > (m_assoc-1)) ? m_assoc-1:index;
+}
+
+#endif // PSEUDOLRUPOLICY_H
diff --git a/src/mem/ruby/system/SConscript b/src/mem/ruby/system/SConscript
new file mode 100644
index 000000000..ee9b359c5
--- /dev/null
+++ b/src/mem/ruby/system/SConscript
@@ -0,0 +1,45 @@
+# -*- mode:python -*-
+
+# Copyright (c) 2009 The Hewlett-Packard Development Company
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met: redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer;
+# redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution;
+# neither the name of the copyright holders nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Authors: Nathan Binkert
+
+Import('*')
+
+if not env['RUBY']:
+ Return()
+
+Source('DirectoryMemory.cc')
+Source('MemoryControl.cc')
+Source('MemoryNode.cc')
+Source('NodePersistentTable.cc')
+Source('PersistentTable.cc')
+Source('Sequencer.cc', Werror=False)
+Source('StoreBuffer.cc')
+Source('StoreCache.cc')
+Source('System.cc')
+Source('TimerTable.cc')
diff --git a/src/mem/ruby/system/Sequencer.cc b/src/mem/ruby/system/Sequencer.cc
new file mode 100644
index 000000000..82eef2901
--- /dev/null
+++ b/src/mem/ruby/system/Sequencer.cc
@@ -0,0 +1,960 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id: Sequencer.C 1.131 2006/11/06 17:41:01-06:00 bobba@gratiano.cs.wisc.edu $
+ *
+ */
+
+#include "mem/ruby/common/Global.hh"
+#include "mem/ruby/system/Sequencer.hh"
+#include "mem/ruby/system/System.hh"
+#include "mem/protocol/Protocol.hh"
+#include "mem/ruby/profiler/Profiler.hh"
+#include "mem/ruby/system/CacheMemory.hh"
+#include "mem/ruby/config/RubyConfig.hh"
+//#include "mem/ruby/recorder/Tracer.hh"
+#include "mem/ruby/slicc_interface/AbstractChip.hh"
+#include "mem/protocol/Chip.hh"
+#include "mem/ruby/tester/Tester.hh"
+#include "mem/ruby/common/SubBlock.hh"
+#include "mem/protocol/Protocol.hh"
+#include "mem/gems_common/Map.hh"
+#include "mem/packet.hh"
+
+Sequencer::Sequencer(AbstractChip* chip_ptr, int version) {
+ m_chip_ptr = chip_ptr;
+ m_version = version;
+
+ m_deadlock_check_scheduled = false;
+ m_outstanding_count = 0;
+
+ int smt_threads = RubyConfig::numberofSMTThreads();
+ m_writeRequestTable_ptr = new Map<Address, CacheMsg>*[smt_threads];
+ m_readRequestTable_ptr = new Map<Address, CacheMsg>*[smt_threads];
+
+ m_packetTable_ptr = new Map<Address, Packet*>;
+
+ for(int p=0; p < smt_threads; ++p){
+ m_writeRequestTable_ptr[p] = new Map<Address, CacheMsg>;
+ m_readRequestTable_ptr[p] = new Map<Address, CacheMsg>;
+ }
+
+}
+
+Sequencer::~Sequencer() {
+ int smt_threads = RubyConfig::numberofSMTThreads();
+ for(int i=0; i < smt_threads; ++i){
+ if(m_writeRequestTable_ptr[i]){
+ delete m_writeRequestTable_ptr[i];
+ }
+ if(m_readRequestTable_ptr[i]){
+ delete m_readRequestTable_ptr[i];
+ }
+ }
+ if(m_writeRequestTable_ptr){
+ delete [] m_writeRequestTable_ptr;
+ }
+ if(m_readRequestTable_ptr){
+ delete [] m_readRequestTable_ptr;
+ }
+}
+
+void Sequencer::wakeup() {
+ // Check for deadlock of any of the requests
+ Time current_time = g_eventQueue_ptr->getTime();
+ bool deadlock = false;
+
+ // Check across all outstanding requests
+ int smt_threads = RubyConfig::numberofSMTThreads();
+ int total_outstanding = 0;
+ for(int p=0; p < smt_threads; ++p){
+ Vector<Address> keys = m_readRequestTable_ptr[p]->keys();
+ for (int i=0; i<keys.size(); i++) {
+ CacheMsg& request = m_readRequestTable_ptr[p]->lookup(keys[i]);
+ if (current_time - request.getTime() >= g_DEADLOCK_THRESHOLD) {
+ WARN_MSG("Possible Deadlock detected");
+ WARN_EXPR(request);
+ WARN_EXPR(m_chip_ptr->getID());
+ WARN_EXPR(m_version);
+ WARN_EXPR(keys.size());
+ WARN_EXPR(current_time);
+ WARN_EXPR(request.getTime());
+ WARN_EXPR(current_time - request.getTime());
+ WARN_EXPR(*m_readRequestTable_ptr[p]);
+ ERROR_MSG("Aborting");
+ deadlock = true;
+ }
+ }
+
+ keys = m_writeRequestTable_ptr[p]->keys();
+ for (int i=0; i<keys.size(); i++) {
+ CacheMsg& request = m_writeRequestTable_ptr[p]->lookup(keys[i]);
+ if (current_time - request.getTime() >= g_DEADLOCK_THRESHOLD) {
+ WARN_MSG("Possible Deadlock detected");
+ WARN_EXPR(request);
+ WARN_EXPR(m_chip_ptr->getID());
+ WARN_EXPR(m_version);
+ WARN_EXPR(current_time);
+ WARN_EXPR(request.getTime());
+ WARN_EXPR(current_time - request.getTime());
+ WARN_EXPR(keys.size());
+ WARN_EXPR(*m_writeRequestTable_ptr[p]);
+ ERROR_MSG("Aborting");
+ deadlock = true;
+ }
+ }
+ total_outstanding += m_writeRequestTable_ptr[p]->size() + m_readRequestTable_ptr[p]->size();
+ } // across all request tables
+ assert(m_outstanding_count == total_outstanding);
+
+ if (m_outstanding_count > 0) { // If there are still outstanding requests, keep checking
+ g_eventQueue_ptr->scheduleEvent(this, g_DEADLOCK_THRESHOLD);
+ } else {
+ m_deadlock_check_scheduled = false;
+ }
+}
+
+// returns the total number of requests (including prefetches)
+int Sequencer::getNumberOutstanding(){
+ return m_outstanding_count;
+}
+
+// returns the total number of demand requests
+int Sequencer::getNumberOutstandingDemand(){
+ int smt_threads = RubyConfig::numberofSMTThreads();
+ int total_demand = 0;
+ for(int p=0; p < smt_threads; ++p){
+ Vector<Address> keys = m_readRequestTable_ptr[p]->keys();
+ for (int i=0; i< keys.size(); i++) {
+ CacheMsg& request = m_readRequestTable_ptr[p]->lookup(keys[i]);
+ if(request.getPrefetch() == PrefetchBit_No){
+ total_demand++;
+ }
+ }
+
+ keys = m_writeRequestTable_ptr[p]->keys();
+ for (int i=0; i< keys.size(); i++) {
+ CacheMsg& request = m_writeRequestTable_ptr[p]->lookup(keys[i]);
+ if(request.getPrefetch() == PrefetchBit_No){
+ total_demand++;
+ }
+ }
+ }
+
+ return total_demand;
+}
+
+int Sequencer::getNumberOutstandingPrefetch(){
+ int smt_threads = RubyConfig::numberofSMTThreads();
+ int total_prefetch = 0;
+ for(int p=0; p < smt_threads; ++p){
+ Vector<Address> keys = m_readRequestTable_ptr[p]->keys();
+ for (int i=0; i< keys.size(); i++) {
+ CacheMsg& request = m_readRequestTable_ptr[p]->lookup(keys[i]);
+ if(request.getPrefetch() == PrefetchBit_Yes){
+ total_prefetch++;
+ }
+ }
+
+ keys = m_writeRequestTable_ptr[p]->keys();
+ for (int i=0; i< keys.size(); i++) {
+ CacheMsg& request = m_writeRequestTable_ptr[p]->lookup(keys[i]);
+ if(request.getPrefetch() == PrefetchBit_Yes){
+ total_prefetch++;
+ }
+ }
+ }
+
+ return total_prefetch;
+}
+
+bool Sequencer::isPrefetchRequest(const Address & lineaddr){
+ int smt_threads = RubyConfig::numberofSMTThreads();
+ for(int p=0; p < smt_threads; ++p){
+ // check load requests
+ Vector<Address> keys = m_readRequestTable_ptr[p]->keys();
+ for (int i=0; i< keys.size(); i++) {
+ CacheMsg& request = m_readRequestTable_ptr[p]->lookup(keys[i]);
+ if(line_address(request.getAddress()) == lineaddr){
+ if(request.getPrefetch() == PrefetchBit_Yes){
+ return true;
+ }
+ else{
+ return false;
+ }
+ }
+ }
+
+ // check store requests
+ keys = m_writeRequestTable_ptr[p]->keys();
+ for (int i=0; i< keys.size(); i++) {
+ CacheMsg& request = m_writeRequestTable_ptr[p]->lookup(keys[i]);
+ if(line_address(request.getAddress()) == lineaddr){
+ if(request.getPrefetch() == PrefetchBit_Yes){
+ return true;
+ }
+ else{
+ return false;
+ }
+ }
+ }
+ }
+ // we should've found a matching request
+ cout << "isRequestPrefetch() ERROR request NOT FOUND : " << lineaddr << endl;
+ printProgress(cout);
+ assert(0);
+}
+
+AccessModeType Sequencer::getAccessModeOfRequest(Address addr, int thread){
+ if(m_readRequestTable_ptr[thread]->exist(line_address(addr))){
+ CacheMsg& request = m_readRequestTable_ptr[thread]->lookup(addr);
+ return request.getAccessMode();
+ } else if(m_writeRequestTable_ptr[thread]->exist(line_address(addr))){
+ CacheMsg& request = m_writeRequestTable_ptr[thread]->lookup(addr);
+ return request.getAccessMode();
+ } else {
+ printProgress(cout);
+ ERROR_MSG("Request not found in RequestTables");
+ }
+}
+
+Address Sequencer::getLogicalAddressOfRequest(Address addr, int thread){
+ assert(thread >= 0);
+ if(m_readRequestTable_ptr[thread]->exist(line_address(addr))){
+ CacheMsg& request = m_readRequestTable_ptr[thread]->lookup(addr);
+ return request.getLogicalAddress();
+ } else if(m_writeRequestTable_ptr[thread]->exist(line_address(addr))){
+ CacheMsg& request = m_writeRequestTable_ptr[thread]->lookup(addr);
+ return request.getLogicalAddress();
+ } else {
+ printProgress(cout);
+ WARN_MSG("Request not found in RequestTables");
+ WARN_MSG(addr);
+ WARN_MSG(thread);
+ ASSERT(0);
+ }
+}
+
+// returns the ThreadID of the request
+int Sequencer::getRequestThreadID(const Address & addr){
+ int smt_threads = RubyConfig::numberofSMTThreads();
+ int thread = -1;
+ int num_found = 0;
+ for(int p=0; p < smt_threads; ++p){
+ if(m_readRequestTable_ptr[p]->exist(addr)){
+ num_found++;
+ thread = p;
+ }
+ if(m_writeRequestTable_ptr[p]->exist(addr)){
+ num_found++;
+ thread = p;
+ }
+ }
+ if(num_found != 1){
+ cout << "getRequestThreadID ERROR too many matching requests addr = " << addr << endl;
+ printProgress(cout);
+ }
+ ASSERT(num_found == 1);
+ ASSERT(thread != -1);
+
+ return thread;
+}
+
+// given a line address, return the request's physical address
+Address Sequencer::getRequestPhysicalAddress(const Address & lineaddr){
+ int smt_threads = RubyConfig::numberofSMTThreads();
+ Address physaddr;
+ int num_found = 0;
+ for(int p=0; p < smt_threads; ++p){
+ if(m_readRequestTable_ptr[p]->exist(lineaddr)){
+ num_found++;
+ physaddr = (m_readRequestTable_ptr[p]->lookup(lineaddr)).getAddress();
+ }
+ if(m_writeRequestTable_ptr[p]->exist(lineaddr)){
+ num_found++;
+ physaddr = (m_writeRequestTable_ptr[p]->lookup(lineaddr)).getAddress();
+ }
+ }
+ if(num_found != 1){
+ cout << "getRequestPhysicalAddress ERROR too many matching requests addr = " << lineaddr << endl;
+ printProgress(cout);
+ }
+ ASSERT(num_found == 1);
+
+ return physaddr;
+}
+
+void Sequencer::printProgress(ostream& out) const{
+
+ int total_demand = 0;
+ out << "Sequencer Stats Version " << m_version << endl;
+ out << "Current time = " << g_eventQueue_ptr->getTime() << endl;
+ out << "---------------" << endl;
+ out << "outstanding requests" << endl;
+
+ int smt_threads = RubyConfig::numberofSMTThreads();
+ for(int p=0; p < smt_threads; ++p){
+ Vector<Address> rkeys = m_readRequestTable_ptr[p]->keys();
+ int read_size = rkeys.size();
+ out << "proc " << m_chip_ptr->getID() << " thread " << p << " Read Requests = " << read_size << endl;
+ // print the request table
+ for(int i=0; i < read_size; ++i){
+ CacheMsg & request = m_readRequestTable_ptr[p]->lookup(rkeys[i]);
+ out << "\tRequest[ " << i << " ] = " << request.getType() << " Address " << rkeys[i] << " Posted " << request.getTime() << " PF " << request.getPrefetch() << endl;
+ if( request.getPrefetch() == PrefetchBit_No ){
+ total_demand++;
+ }
+ }
+
+ Vector<Address> wkeys = m_writeRequestTable_ptr[p]->keys();
+ int write_size = wkeys.size();
+ out << "proc " << m_chip_ptr->getID() << " thread " << p << " Write Requests = " << write_size << endl;
+ // print the request table
+ for(int i=0; i < write_size; ++i){
+ CacheMsg & request = m_writeRequestTable_ptr[p]->lookup(wkeys[i]);
+ out << "\tRequest[ " << i << " ] = " << request.getType() << " Address " << wkeys[i] << " Posted " << request.getTime() << " PF " << request.getPrefetch() << endl;
+ if( request.getPrefetch() == PrefetchBit_No ){
+ total_demand++;
+ }
+ }
+
+ out << endl;
+ }
+ out << "Total Number Outstanding: " << m_outstanding_count << endl;
+ out << "Total Number Demand : " << total_demand << endl;
+ out << "Total Number Prefetches : " << m_outstanding_count - total_demand << endl;
+ out << endl;
+ out << endl;
+
+}
+
+void Sequencer::printConfig(ostream& out) {
+ if (TSO) {
+ out << "sequencer: Sequencer - TSO" << endl;
+ } else {
+ out << "sequencer: Sequencer - SC" << endl;
+ }
+ out << " max_outstanding_requests: " << g_SEQUENCER_OUTSTANDING_REQUESTS << endl;
+}
+
+bool Sequencer::empty() const {
+ return m_outstanding_count == 0;
+}
+
+// Insert the request into the correct request table. Return true if
+// the entry was already present.
+bool Sequencer::insertRequest(const CacheMsg& request) {
+ int thread = request.getThreadID();
+ assert(thread >= 0);
+ int total_outstanding = 0;
+ int smt_threads = RubyConfig::numberofSMTThreads();
+ for(int p=0; p < smt_threads; ++p){
+ total_outstanding += m_writeRequestTable_ptr[p]->size() + m_readRequestTable_ptr[p]->size();
+ }
+ assert(m_outstanding_count == total_outstanding);
+
+ // See if we should schedule a deadlock check
+ if (m_deadlock_check_scheduled == false) {
+ g_eventQueue_ptr->scheduleEvent(this, g_DEADLOCK_THRESHOLD);
+ m_deadlock_check_scheduled = true;
+ }
+
+ if ((request.getType() == CacheRequestType_ST) ||
+ (request.getType() == CacheRequestType_ATOMIC)) {
+ if (m_writeRequestTable_ptr[thread]->exist(line_address(request.getAddress()))) {
+ m_writeRequestTable_ptr[thread]->lookup(line_address(request.getAddress())) = request;
+ return true;
+ }
+ m_writeRequestTable_ptr[thread]->allocate(line_address(request.getAddress()));
+ m_writeRequestTable_ptr[thread]->lookup(line_address(request.getAddress())) = request;
+ m_outstanding_count++;
+ } else {
+ if (m_readRequestTable_ptr[thread]->exist(line_address(request.getAddress()))) {
+ m_readRequestTable_ptr[thread]->lookup(line_address(request.getAddress())) = request;
+ return true;
+ }
+ m_readRequestTable_ptr[thread]->allocate(line_address(request.getAddress()));
+ m_readRequestTable_ptr[thread]->lookup(line_address(request.getAddress())) = request;
+ m_outstanding_count++;
+ }
+
+ g_system_ptr->getProfiler()->sequencerRequests(m_outstanding_count);
+
+ total_outstanding = 0;
+ for(int p=0; p < smt_threads; ++p){
+ total_outstanding += m_writeRequestTable_ptr[p]->size() + m_readRequestTable_ptr[p]->size();
+ }
+
+ assert(m_outstanding_count == total_outstanding);
+ return false;
+}
+
+void Sequencer::removeRequest(const CacheMsg& request) {
+ int thread = request.getThreadID();
+ assert(thread >= 0);
+ int total_outstanding = 0;
+ int smt_threads = RubyConfig::numberofSMTThreads();
+ for(int p=0; p < smt_threads; ++p){
+ total_outstanding += m_writeRequestTable_ptr[p]->size() + m_readRequestTable_ptr[p]->size();
+ }
+ assert(m_outstanding_count == total_outstanding);
+
+ if ((request.getType() == CacheRequestType_ST) ||
+ (request.getType() == CacheRequestType_ATOMIC)) {
+ m_writeRequestTable_ptr[thread]->deallocate(line_address(request.getAddress()));
+ } else {
+ m_readRequestTable_ptr[thread]->deallocate(line_address(request.getAddress()));
+ }
+ m_outstanding_count--;
+
+ total_outstanding = 0;
+ for(int p=0; p < smt_threads; ++p){
+ total_outstanding += m_writeRequestTable_ptr[p]->size() + m_readRequestTable_ptr[p]->size();
+ }
+ assert(m_outstanding_count == total_outstanding);
+}
+
+void Sequencer::writeCallback(const Address& address) {
+ DataBlock data;
+ writeCallback(address, data);
+}
+
+void Sequencer::writeCallback(const Address& address, DataBlock& data) {
+ // process oldest thread first
+ int thread = -1;
+ Time oldest_time = 0;
+ int smt_threads = RubyConfig::numberofSMTThreads();
+ for(int t=0; t < smt_threads; ++t){
+ if(m_writeRequestTable_ptr[t]->exist(address)){
+ CacheMsg & request = m_writeRequestTable_ptr[t]->lookup(address);
+ if(thread == -1 || (request.getTime() < oldest_time) ){
+ thread = t;
+ oldest_time = request.getTime();
+ }
+ }
+ }
+ // make sure we found an oldest thread
+ ASSERT(thread != -1);
+
+ CacheMsg & request = m_writeRequestTable_ptr[thread]->lookup(address);
+
+ writeCallback(address, data, GenericMachineType_NULL, PrefetchBit_No, thread);
+}
+
+void Sequencer::writeCallback(const Address& address, DataBlock& data, GenericMachineType respondingMach, PrefetchBit pf, int thread) {
+
+ assert(address == line_address(address));
+ assert(thread >= 0);
+ assert(m_writeRequestTable_ptr[thread]->exist(line_address(address)));
+
+ writeCallback(address, data, respondingMach, thread);
+
+}
+
+void Sequencer::writeCallback(const Address& address, DataBlock& data, GenericMachineType respondingMach, int thread) {
+ assert(address == line_address(address));
+ assert(m_writeRequestTable_ptr[thread]->exist(line_address(address)));
+ CacheMsg request = m_writeRequestTable_ptr[thread]->lookup(address);
+ assert( request.getThreadID() == thread);
+ removeRequest(request);
+
+ assert((request.getType() == CacheRequestType_ST) ||
+ (request.getType() == CacheRequestType_ATOMIC));
+
+ hitCallback(request, data, respondingMach, thread);
+
+}
+
+void Sequencer::readCallback(const Address& address) {
+ DataBlock data;
+ readCallback(address, data);
+}
+
+void Sequencer::readCallback(const Address& address, DataBlock& data) {
+ // process oldest thread first
+ int thread = -1;
+ Time oldest_time = 0;
+ int smt_threads = RubyConfig::numberofSMTThreads();
+ for(int t=0; t < smt_threads; ++t){
+ if(m_readRequestTable_ptr[t]->exist(address)){
+ CacheMsg & request = m_readRequestTable_ptr[t]->lookup(address);
+ if(thread == -1 || (request.getTime() < oldest_time) ){
+ thread = t;
+ oldest_time = request.getTime();
+ }
+ }
+ }
+ // make sure we found an oldest thread
+ ASSERT(thread != -1);
+
+ CacheMsg & request = m_readRequestTable_ptr[thread]->lookup(address);
+
+ readCallback(address, data, GenericMachineType_NULL, PrefetchBit_No, thread);
+}
+
+void Sequencer::readCallback(const Address& address, DataBlock& data, GenericMachineType respondingMach, PrefetchBit pf, int thread) {
+
+ assert(address == line_address(address));
+ assert(m_readRequestTable_ptr[thread]->exist(line_address(address)));
+
+ readCallback(address, data, respondingMach, thread);
+}
+
+void Sequencer::readCallback(const Address& address, DataBlock& data, GenericMachineType respondingMach, int thread) {
+ assert(address == line_address(address));
+ assert(m_readRequestTable_ptr[thread]->exist(line_address(address)));
+
+ CacheMsg request = m_readRequestTable_ptr[thread]->lookup(address);
+ assert( request.getThreadID() == thread );
+ removeRequest(request);
+
+ assert((request.getType() == CacheRequestType_LD) ||
+ (request.getType() == CacheRequestType_IFETCH)
+ );
+
+ hitCallback(request, data, respondingMach, thread);
+}
+
+void Sequencer::hitCallback(const CacheMsg& request, DataBlock& data, GenericMachineType respondingMach, int thread) {
+ int size = request.getSize();
+ Address request_address = request.getAddress();
+ Address request_logical_address = request.getLogicalAddress();
+ Address request_line_address = line_address(request_address);
+ CacheRequestType type = request.getType();
+ int threadID = request.getThreadID();
+ Time issued_time = request.getTime();
+ int logical_proc_no = ((m_chip_ptr->getID() * RubyConfig::numberOfProcsPerChip()) + m_version) * RubyConfig::numberofSMTThreads() + threadID;
+
+ DEBUG_MSG(SEQUENCER_COMP, MedPrio, size);
+
+ // Set this cache entry to the most recently used
+ if (type == CacheRequestType_IFETCH) {
+ if (Protocol::m_TwoLevelCache) {
+ if (m_chip_ptr->m_L1Cache_L1IcacheMemory_vec[m_version]->isTagPresent(request_line_address)) {
+ m_chip_ptr->m_L1Cache_L1IcacheMemory_vec[m_version]->setMRU(request_line_address);
+ }
+ }
+ else {
+ if (m_chip_ptr->m_L1Cache_cacheMemory_vec[m_version]->isTagPresent(request_line_address)) {
+ m_chip_ptr->m_L1Cache_cacheMemory_vec[m_version]->setMRU(request_line_address);
+ }
+ }
+ } else {
+ if (Protocol::m_TwoLevelCache) {
+ if (m_chip_ptr->m_L1Cache_L1DcacheMemory_vec[m_version]->isTagPresent(request_line_address)) {
+ m_chip_ptr->m_L1Cache_L1DcacheMemory_vec[m_version]->setMRU(request_line_address);
+ }
+ }
+ else {
+ if (m_chip_ptr->m_L1Cache_cacheMemory_vec[m_version]->isTagPresent(request_line_address)) {
+ m_chip_ptr->m_L1Cache_cacheMemory_vec[m_version]->setMRU(request_line_address);
+ }
+ }
+ }
+
+ assert(g_eventQueue_ptr->getTime() >= issued_time);
+ Time miss_latency = g_eventQueue_ptr->getTime() - issued_time;
+
+ if (PROTOCOL_DEBUG_TRACE) {
+ g_system_ptr->getProfiler()->profileTransition("Seq", (m_chip_ptr->getID()*RubyConfig::numberOfProcsPerChip()+m_version), -1, request.getAddress(), "", "Done", "",
+ int_to_string(miss_latency)+" cycles "+GenericMachineType_to_string(respondingMach)+" "+CacheRequestType_to_string(request.getType())+" "+PrefetchBit_to_string(request.getPrefetch()));
+ }
+
+ DEBUG_MSG(SEQUENCER_COMP, MedPrio, request_address);
+ DEBUG_MSG(SEQUENCER_COMP, MedPrio, request.getPrefetch());
+ if (request.getPrefetch() == PrefetchBit_Yes) {
+ DEBUG_MSG(SEQUENCER_COMP, MedPrio, "return");
+ g_system_ptr->getProfiler()->swPrefetchLatency(miss_latency, type, respondingMach);
+ return; // Ignore the software prefetch, don't callback the driver
+ }
+
+ // Profile the miss latency for all non-zero demand misses
+ if (miss_latency != 0) {
+ g_system_ptr->getProfiler()->missLatency(miss_latency, type, respondingMach);
+
+ }
+
+ bool write =
+ (type == CacheRequestType_ST) ||
+ (type == CacheRequestType_ATOMIC);
+
+ if (TSO && write) {
+ m_chip_ptr->m_L1Cache_storeBuffer_vec[m_version]->callBack(line_address(request.getAddress()), data,
+ m_packetTable_ptr->lookup(request.getAddress()));
+ } else {
+
+ // Copy the correct bytes out of the cache line into the subblock
+ SubBlock subblock(request_address, request_logical_address, size);
+ subblock.mergeFrom(data); // copy the correct bytes from DataBlock in the SubBlock
+
+ // Scan the store buffer to see if there are any outstanding stores we need to collect
+ if (TSO) {
+ m_chip_ptr->m_L1Cache_storeBuffer_vec[m_version]->updateSubBlock(subblock);
+ }
+
+ // Call into the Driver and let it read and/or modify the sub-block
+ Packet* pkt = m_packetTable_ptr->lookup(request.getAddress());
+
+ // update data if this is a store/atomic
+
+ /*
+ if (pkt->req->isCondSwap()) {
+ L1Cache_Entry entry = m_L1Cache_vec[m_version]->lookup(Address(pkt->req->physAddr()));
+ DataBlk datablk = entry->getDataBlk();
+ uint8_t *orig_data = datablk.getArray();
+ if ( datablk.equal(pkt->req->getExtraData()) )
+ datablk->setArray(pkt->getData());
+ pkt->setData(orig_data);
+ }
+ */
+
+ g_system_ptr->getDriver()->hitCallback(pkt);
+ m_packetTable_ptr->remove(request.getAddress());
+
+ // If the request was a Store or Atomic, apply the changes in the SubBlock to the DataBlock
+ // (This is only triggered for the non-TSO case)
+ if (write) {
+ assert(!TSO);
+ subblock.mergeTo(data); // copy the correct bytes from SubBlock into the DataBlock
+ }
+ }
+}
+
+void Sequencer::printDebug(){
+ //notify driver of debug
+ g_system_ptr->getDriver()->printDebug();
+}
+
+//dsm: breaks build, delayed
+// Returns true if the sequencer already has a load or store outstanding
+bool
+Sequencer::isReady(const Packet* pkt) const
+{
+
+ int cpu_number = pkt->req->contextId();
+ la_t logical_addr = pkt->req->getVaddr();
+ pa_t physical_addr = pkt->req->getPaddr();
+ CacheRequestType type_of_request;
+ if ( pkt->req->isInstFetch() ) {
+ type_of_request = CacheRequestType_IFETCH;
+ } else if ( pkt->req->isLocked() || pkt->req->isSwap() ) {
+ type_of_request = CacheRequestType_ATOMIC;
+ } else if ( pkt->isRead() ) {
+ type_of_request = CacheRequestType_LD;
+ } else if ( pkt->isWrite() ) {
+ type_of_request = CacheRequestType_ST;
+ } else {
+ assert(false);
+ }
+ int thread = pkt->req->threadId();
+
+ CacheMsg request(Address( physical_addr ),
+ Address( physical_addr ),
+ type_of_request,
+ Address(0),
+ AccessModeType_UserMode, // User/supervisor mode
+ 0, // Size in bytes of request
+ PrefetchBit_No, // Not a prefetch
+ 0, // Version number
+ Address(logical_addr), // Virtual Address
+ thread // SMT thread
+ );
+ return isReady(request);
+}
+
+bool
+Sequencer::isReady(const CacheMsg& request) const
+{
+ if (m_outstanding_count >= g_SEQUENCER_OUTSTANDING_REQUESTS) {
+ //cout << "TOO MANY OUTSTANDING: " << m_outstanding_count << " " << g_SEQUENCER_OUTSTANDING_REQUESTS << " VER " << m_version << endl;
+ //printProgress(cout);
+ return false;
+ }
+
+ // This code allows reads to be performed even when we have a write
+ // request outstanding for the line
+ bool write =
+ (request.getType() == CacheRequestType_ST) ||
+ (request.getType() == CacheRequestType_ATOMIC);
+
+ // LUKE - disallow more than one request type per address
+ // INVARIANT: at most one request type per address, per processor
+ int smt_threads = RubyConfig::numberofSMTThreads();
+ for(int p=0; p < smt_threads; ++p){
+ if( m_writeRequestTable_ptr[p]->exist(line_address(request.getAddress())) ||
+ m_readRequestTable_ptr[p]->exist(line_address(request.getAddress())) ){
+ //cout << "OUTSTANDING REQUEST EXISTS " << p << " VER " << m_version << endl;
+ //printProgress(cout);
+ return false;
+ }
+ }
+
+ if (TSO) {
+ return m_chip_ptr->m_L1Cache_storeBuffer_vec[m_version]->isReady();
+ }
+ return true;
+}
+
+//dsm: breaks build, delayed
+// Called by Driver (Simics or Tester).
+void
+Sequencer::makeRequest(Packet* pkt)
+{
+ int cpu_number = pkt->req->contextId();
+ la_t logical_addr = pkt->req->getVaddr();
+ pa_t physical_addr = pkt->req->getPaddr();
+ int request_size = pkt->getSize();
+ CacheRequestType type_of_request;
+ PrefetchBit prefetch;
+ bool write = false;
+ if ( pkt->req->isInstFetch() ) {
+ type_of_request = CacheRequestType_IFETCH;
+ } else if ( pkt->req->isLocked() || pkt->req->isSwap() ) {
+ type_of_request = CacheRequestType_ATOMIC;
+ write = true;
+ } else if ( pkt->isRead() ) {
+ type_of_request = CacheRequestType_LD;
+ } else if ( pkt->isWrite() ) {
+ type_of_request = CacheRequestType_ST;
+ write = true;
+ } else {
+ assert(false);
+ }
+ if (pkt->req->isPrefetch()) {
+ prefetch = PrefetchBit_Yes;
+ } else {
+ prefetch = PrefetchBit_No;
+ }
+ la_t virtual_pc = pkt->req->getPC();
+ int isPriv = false; // TODO: get permission data
+ int thread = pkt->req->threadId();
+
+ AccessModeType access_mode = AccessModeType_UserMode; // TODO: get actual permission
+
+ CacheMsg request(Address( physical_addr ),
+ Address( physical_addr ),
+ type_of_request,
+ Address(virtual_pc),
+ access_mode, // User/supervisor mode
+ request_size, // Size in bytes of request
+ prefetch,
+ 0, // Version number
+ Address(logical_addr), // Virtual Address
+ thread // SMT thread
+ );
+
+ if ( TSO && write && !pkt->req->isPrefetch() ) {
+ assert(m_chip_ptr->m_L1Cache_storeBuffer_vec[m_version]->isReady());
+ m_chip_ptr->m_L1Cache_storeBuffer_vec[m_version]->insertStore(pkt, request);
+ return;
+ }
+
+ m_packetTable_ptr->insert(Address( physical_addr ), pkt);
+
+ doRequest(request);
+}
+
+bool Sequencer::doRequest(const CacheMsg& request) {
+ bool hit = false;
+ // Check the fast path
+ DataBlock* data_ptr;
+
+ int thread = request.getThreadID();
+
+ hit = tryCacheAccess(line_address(request.getAddress()),
+ request.getType(),
+ request.getProgramCounter(),
+ request.getAccessMode(),
+ request.getSize(),
+ data_ptr);
+
+ if (hit && (request.getType() == CacheRequestType_IFETCH || !REMOVE_SINGLE_CYCLE_DCACHE_FAST_PATH) ) {
+ DEBUG_MSG(SEQUENCER_COMP, MedPrio, "Fast path hit");
+ hitCallback(request, *data_ptr, GenericMachineType_L1Cache, thread);
+ return true;
+ }
+
+ if (TSO && (request.getType() == CacheRequestType_LD || request.getType() == CacheRequestType_IFETCH)) {
+
+ // See if we can satisfy the load entirely from the store buffer
+ SubBlock subblock(line_address(request.getAddress()), request.getSize());
+ if (m_chip_ptr->m_L1Cache_storeBuffer_vec[m_version]->trySubBlock(subblock)) {
+ DataBlock dummy;
+ hitCallback(request, dummy, GenericMachineType_NULL, thread); // Call with an 'empty' datablock, since the data is in the store buffer
+ return true;
+ }
+ }
+
+ DEBUG_MSG(SEQUENCER_COMP, MedPrio, "Fast path miss");
+ issueRequest(request);
+ return hit;
+}
+
+void Sequencer::issueRequest(const CacheMsg& request) {
+ bool found = insertRequest(request);
+
+ if (!found) {
+ CacheMsg msg = request;
+ msg.getAddress() = line_address(request.getAddress()); // Make line address
+
+ // Fast Path L1 misses are profiled here - all non-fast path misses are profiled within the generated protocol code
+ if (!REMOVE_SINGLE_CYCLE_DCACHE_FAST_PATH) {
+ g_system_ptr->getProfiler()->addPrimaryStatSample(msg, m_chip_ptr->getID());
+ }
+
+ if (PROTOCOL_DEBUG_TRACE) {
+ g_system_ptr->getProfiler()->profileTransition("Seq", (m_chip_ptr->getID()*RubyConfig::numberOfProcsPerChip() + m_version), -1, msg.getAddress(),"", "Begin", "", CacheRequestType_to_string(request.getType()));
+ }
+
+#if 0
+ // Commented out by nate binkert because I removed the trace stuff
+ if (g_system_ptr->getTracer()->traceEnabled()) {
+ g_system_ptr->getTracer()->traceRequest((m_chip_ptr->getID()*RubyConfig::numberOfProcsPerChip()+m_version), msg.getAddress(), msg.getProgramCounter(),
+ msg.getType(), g_eventQueue_ptr->getTime());
+ }
+#endif
+
+ Time latency = 0; // initialized to a null value
+
+ latency = SEQUENCER_TO_CONTROLLER_LATENCY;
+
+ // Send the message to the cache controller
+ assert(latency > 0);
+ m_chip_ptr->m_L1Cache_mandatoryQueue_vec[m_version]->enqueue(msg, latency);
+
+ } // !found
+}
+
+bool Sequencer::tryCacheAccess(const Address& addr, CacheRequestType type,
+ const Address& pc, AccessModeType access_mode,
+ int size, DataBlock*& data_ptr) {
+ if (type == CacheRequestType_IFETCH) {
+ if (Protocol::m_TwoLevelCache) {
+ return m_chip_ptr->m_L1Cache_L1IcacheMemory_vec[m_version]->tryCacheAccess(line_address(addr), type, data_ptr);
+ }
+ else {
+ return m_chip_ptr->m_L1Cache_cacheMemory_vec[m_version]->tryCacheAccess(line_address(addr), type, data_ptr);
+ }
+ } else {
+ if (Protocol::m_TwoLevelCache) {
+ return m_chip_ptr->m_L1Cache_L1DcacheMemory_vec[m_version]->tryCacheAccess(line_address(addr), type, data_ptr);
+ }
+ else {
+ return m_chip_ptr->m_L1Cache_cacheMemory_vec[m_version]->tryCacheAccess(line_address(addr), type, data_ptr);
+ }
+ }
+}
+
+void Sequencer::resetRequestTime(const Address& addr, int thread){
+ assert(thread >= 0);
+ //reset both load and store requests, if they exist
+ if(m_readRequestTable_ptr[thread]->exist(line_address(addr))){
+ CacheMsg& request = m_readRequestTable_ptr[thread]->lookup(addr);
+ if( request.m_AccessMode != AccessModeType_UserMode){
+ cout << "resetRequestType ERROR read request addr = " << addr << " thread = "<< thread << " is SUPERVISOR MODE" << endl;
+ printProgress(cout);
+ }
+ //ASSERT(request.m_AccessMode == AccessModeType_UserMode);
+ request.setTime(g_eventQueue_ptr->getTime());
+ }
+ if(m_writeRequestTable_ptr[thread]->exist(line_address(addr))){
+ CacheMsg& request = m_writeRequestTable_ptr[thread]->lookup(addr);
+ if( request.m_AccessMode != AccessModeType_UserMode){
+ cout << "resetRequestType ERROR write request addr = " << addr << " thread = "<< thread << " is SUPERVISOR MODE" << endl;
+ printProgress(cout);
+ }
+ //ASSERT(request.m_AccessMode == AccessModeType_UserMode);
+ request.setTime(g_eventQueue_ptr->getTime());
+ }
+}
+
+// removes load request from queue
+void Sequencer::removeLoadRequest(const Address & addr, int thread){
+ removeRequest(getReadRequest(addr, thread));
+}
+
+void Sequencer::removeStoreRequest(const Address & addr, int thread){
+ removeRequest(getWriteRequest(addr, thread));
+}
+
+// returns the read CacheMsg
+CacheMsg & Sequencer::getReadRequest( const Address & addr, int thread ){
+ Address temp = addr;
+ assert(thread >= 0);
+ assert(temp == line_address(temp));
+ assert(m_readRequestTable_ptr[thread]->exist(addr));
+ return m_readRequestTable_ptr[thread]->lookup(addr);
+}
+
+CacheMsg & Sequencer::getWriteRequest( const Address & addr, int thread){
+ Address temp = addr;
+ assert(thread >= 0);
+ assert(temp == line_address(temp));
+ assert(m_writeRequestTable_ptr[thread]->exist(addr));
+ return m_writeRequestTable_ptr[thread]->lookup(addr);
+}
+
+void Sequencer::print(ostream& out) const {
+ out << "[Sequencer: " << m_chip_ptr->getID()
+ << ", outstanding requests: " << m_outstanding_count;
+
+ int smt_threads = RubyConfig::numberofSMTThreads();
+ for(int p=0; p < smt_threads; ++p){
+ out << ", read request table[ " << p << " ]: " << *m_readRequestTable_ptr[p]
+ << ", write request table[ " << p << " ]: " << *m_writeRequestTable_ptr[p];
+ }
+ out << "]";
+}
+
+// this can be called from setState whenever coherence permissions are upgraded
+// when invoked, coherence violations will be checked for the given block
+void Sequencer::checkCoherence(const Address& addr) {
+#ifdef CHECK_COHERENCE
+ g_system_ptr->checkGlobalCoherenceInvariant(addr);
+#endif
+}
+
+bool Sequencer::getRubyMemoryValue(const Address& addr, char* value,
+ unsigned int size_in_bytes ) {
+ for(unsigned int i=0; i < size_in_bytes; i++) {
+ std::cerr << __FILE__ << "(" << __LINE__ << "): Not implemented. " << std::endl;
+ value[i] = 0; // _read_physical_memory( m_chip_ptr->getID()*RubyConfig::numberOfProcsPerChip()+m_version,
+ // addr.getAddress() + i, 1 );
+ }
+ return false; // Do nothing?
+}
+
+bool Sequencer::setRubyMemoryValue(const Address& addr, char *value,
+ unsigned int size_in_bytes) {
+ char test_buffer[64];
+
+ return false; // Do nothing?
+}
+
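Note: Sequencer::wakeup() above is purely a deadlock watchdog. The first
insertRequest() arms a check g_DEADLOCK_THRESHOLD cycles out, each wakeup flags
any request older than the threshold, and the event re-arms itself only while
requests remain outstanding. A minimal sketch of that pattern, with a std::map
and printf standing in for Ruby's request tables and WARN/ERROR macros (the
names here are illustrative, not the real event-queue API):

    #include <cstdio>
    #include <map>

    typedef unsigned long long Time;
    static const Time kThreshold = 50000; // stand-in for g_DEADLOCK_THRESHOLD

    struct Watchdog {
        std::map<int, Time> outstanding;   // request id -> issue time
        bool check_scheduled = false;

        void onInsert(int id, Time now) {
            outstanding[id] = now;
            if (!check_scheduled)          // arm on the first request;
                check_scheduled = true;    // Ruby: scheduleEvent(this, kThreshold)
        }

        void wakeup(Time now) {            // fires kThreshold cycles later
            for (const auto& kv : outstanding)
                if (now - kv.second >= kThreshold)
                    std::printf("possible deadlock: request %d\n", kv.first);
            check_scheduled = !outstanding.empty(); // re-arm only if busy
        }
    };
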
diff --git a/src/mem/ruby/system/Sequencer.hh b/src/mem/ruby/system/Sequencer.hh
new file mode 100644
index 000000000..d34a2fd3e
--- /dev/null
+++ b/src/mem/ruby/system/Sequencer.hh
@@ -0,0 +1,169 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id: Sequencer.h 1.70 2006/09/27 14:56:41-05:00 bobba@s1-01.cs.wisc.edu $
+ *
+ * Description:
+ *
+ */
+
+#ifndef SEQUENCER_H
+#define SEQUENCER_H
+
+#include "mem/ruby/common/Global.hh"
+#include "mem/ruby/config/RubyConfig.hh"
+#include "mem/ruby/common/Consumer.hh"
+#include "mem/protocol/CacheRequestType.hh"
+#include "mem/protocol/AccessModeType.hh"
+#include "mem/protocol/GenericMachineType.hh"
+#include "mem/protocol/PrefetchBit.hh"
+#include "mem/gems_common/Map.hh"
+
+class DataBlock;
+class AbstractChip;
+class CacheMsg;
+class Address;
+class MachineID;
+class Packet;
+
+class Sequencer : public Consumer {
+public:
+ // Constructors
+ Sequencer(AbstractChip* chip_ptr, int version);
+
+ // Destructor
+ ~Sequencer();
+
+ // Public Methods
+ void wakeup(); // Used only for deadlock detection
+
+ static void printConfig(ostream& out);
+
+ // returns total number of outstanding request (includes prefetches)
+ int getNumberOutstanding();
+ // return only total number of outstanding demand requests
+ int getNumberOutstandingDemand();
+ // return only total number of outstanding prefetch requests
+ int getNumberOutstandingPrefetch();
+
+ // remove load/store request from queue
+ void removeLoadRequest(const Address & addr, int thread);
+ void removeStoreRequest(const Address & addr, int thread);
+
+ void printProgress(ostream& out) const;
+
+ // returns a pointer to the request in the request tables
+ CacheMsg & getReadRequest( const Address & addr, int thread );
+ CacheMsg & getWriteRequest( const Address & addr, int thread );
+
+ void writeCallback(const Address& address, DataBlock& data);
+ void readCallback(const Address& address, DataBlock& data);
+ void writeCallback(const Address& address);
+ void readCallback(const Address& address);
+ void writeCallback(const Address& address, DataBlock& data, GenericMachineType respondingMach, PrefetchBit pf, int thread);
+ void readCallback(const Address& address, DataBlock& data, GenericMachineType respondingMach, PrefetchBit pf, int thread);
+ void writeCallback(const Address& address, DataBlock& data, GenericMachineType respondingMach, int thread);
+ void readCallback(const Address& address, DataBlock& data, GenericMachineType respondingMach, int thread);
+
+ // returns the thread ID of the request
+ int getRequestThreadID(const Address & addr);
+ // returns the physical address of the request
+ Address getRequestPhysicalAddress(const Address & lineaddr);
+ // returns whether a request is a prefetch request
+ bool isPrefetchRequest(const Address & lineaddr);
+
+ //notifies driver of debug print
+ void printDebug();
+
+ // called by Tester or Simics
+ void makeRequest(Packet* pkt);
+ bool doRequest(const CacheMsg& request);
+ void issueRequest(const CacheMsg& request);
+ bool isReady(const Packet* pkt) const;
+ bool isReady(const CacheMsg& request) const; // deprecate this function
+ bool empty() const;
+ void resetRequestTime(const Address& addr, int thread);
+ Address getLogicalAddressOfRequest(Address address, int thread);
+ AccessModeType getAccessModeOfRequest(Address address, int thread);
+ //uint64 getSequenceNumberOfRequest(Address addr, int thread);
+
+ void print(ostream& out) const;
+ void checkCoherence(const Address& address);
+
+ bool getRubyMemoryValue(const Address& addr, char* value, unsigned int size_in_bytes);
+ bool setRubyMemoryValue(const Address& addr, char *value, unsigned int size_in_bytes);
+
+ void removeRequest(const CacheMsg& request);
+private:
+ // Private Methods
+ bool tryCacheAccess(const Address& addr, CacheRequestType type, const Address& pc, AccessModeType access_mode, int size, DataBlock*& data_ptr);
+ // void conflictCallback(const CacheMsg& request, GenericMachineType respondingMach, int thread);
+ void hitCallback(const CacheMsg& request, DataBlock& data, GenericMachineType respondingMach, int thread);
+ bool insertRequest(const CacheMsg& request);
+
+
+ // Private copy constructor and assignment operator
+ Sequencer(const Sequencer& obj);
+ Sequencer& operator=(const Sequencer& obj);
+
+ // Data Members (m_ prefix)
+ AbstractChip* m_chip_ptr;
+
+ // indicates what processor on the chip this sequencer is associated with
+ int m_version;
+
+ // One request table per SMT thread
+ Map<Address, CacheMsg>** m_writeRequestTable_ptr;
+ Map<Address, CacheMsg>** m_readRequestTable_ptr;
+
+ Map<Address, Packet*>* m_packetTable_ptr;
+
+ // Global outstanding request count, across all request tables
+ int m_outstanding_count;
+ bool m_deadlock_check_scheduled;
+
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const Sequencer& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const Sequencer& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //SEQUENCER_H
+
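Note: the isReady()/makeRequest() pair in this header enforces an admission
rule: a request is accepted only when the sequencer is below
g_SEQUENCER_OUTSTANDING_REQUESTS and no SMT thread already has a read or write
outstanding for the same cache line. A sketch of that check with std::map
stand-ins for Ruby's per-thread Map tables (types here are illustrative):

    #include <cstdint>
    #include <map>
    #include <vector>

    typedef uint64_t LineAddr;
    struct Msg {};                         // stand-in for CacheMsg

    // One read table and one write table per SMT thread, as in Sequencer.
    bool is_ready(const std::vector<std::map<LineAddr, Msg> >& reads,
                  const std::vector<std::map<LineAddr, Msg> >& writes,
                  int outstanding, int max_outstanding, LineAddr line)
    {
        if (outstanding >= max_outstanding)
            return false;                  // sequencer is full
        for (size_t t = 0; t < reads.size(); ++t)
            if (reads[t].count(line) || writes[t].count(line))
                return false;              // at most one request per line
        return true;
    }
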
diff --git a/src/mem/ruby/system/StoreBuffer.cc b/src/mem/ruby/system/StoreBuffer.cc
new file mode 100644
index 000000000..280decdd8
--- /dev/null
+++ b/src/mem/ruby/system/StoreBuffer.cc
@@ -0,0 +1,302 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ */
+
+#include "mem/ruby/common/Global.hh"
+#include "mem/ruby/config/RubyConfig.hh"
+#include "mem/ruby/system/StoreBuffer.hh"
+#include "mem/ruby/slicc_interface/AbstractChip.hh"
+#include "mem/ruby/system/System.hh"
+#include "mem/ruby/common/Driver.hh"
+#include "mem/gems_common/Vector.hh"
+#include "mem/ruby/eventqueue/RubyEventQueue.hh"
+#include "mem/ruby/profiler/AddressProfiler.hh"
+#include "mem/ruby/system/Sequencer.hh"
+#include "mem/ruby/common/SubBlock.hh"
+#include "mem/ruby/profiler/Profiler.hh"
+#include "mem/packet.hh"
+
+// *** Begin Helper class ***
+struct StoreBufferEntry {
+ StoreBufferEntry() {} // So we can allocate a vector of StoreBufferEntries
+ StoreBufferEntry(const SubBlock& block, CacheRequestType type, const Address& pc, AccessModeType access_mode, int size, int thread) : m_subblock(block) {
+ m_type = type;
+ m_pc = pc;
+ m_access_mode = access_mode;
+ m_size = size;
+ m_thread = thread;
+ m_time = g_eventQueue_ptr->getTime();
+ }
+
+ void print(ostream& out) const
+ {
+ out << "[StoreBufferEntry: "
+ << "SubBlock: " << m_subblock
+ << ", Type: " << m_type
+ << ", PC: " << m_pc
+ << ", AccessMode: " << m_access_mode
+ << ", Size: " << m_size
+ << ", Thread: " << m_thread
+ << ", Time: " << m_time
+ << "]";
+ }
+
+ SubBlock m_subblock;
+ CacheRequestType m_type;
+ Address m_pc;
+ AccessModeType m_access_mode;
+ int m_size;
+ int m_thread;
+ Time m_time;
+};
+
+extern inline
+ostream& operator<<(ostream& out, const StoreBufferEntry& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+// *** End Helper class ***
+
+const int MAX_ENTRIES = 128;
+
+static void inc_index(int& index)
+{
+ index++;
+ if (index >= MAX_ENTRIES) {
+ index = 0;
+ }
+}
+
+StoreBuffer::StoreBuffer(AbstractChip* chip_ptr, int version) :
+ m_store_cache()
+{
+ m_chip_ptr = chip_ptr;
+ m_version = version;
+ m_queue_ptr = new Vector<StoreBufferEntry>(MAX_ENTRIES);
+ m_queue_ptr->setSize(MAX_ENTRIES);
+ m_pending = false;
+ m_seen_atomic = false;
+ m_head = 0;
+ m_tail = 0;
+ m_size = 0;
+ m_deadlock_check_scheduled = false;
+}
+
+StoreBuffer::~StoreBuffer()
+{
+ delete m_queue_ptr;
+}
+
+// Used only to check for deadlock
+void StoreBuffer::wakeup()
+{
+ // Check for deadlock of any of the requests
+ Time current_time = g_eventQueue_ptr->getTime();
+
+ int queue_pointer = m_head;
+ for (int i=0; i<m_size; i++) {
+ if (current_time - (getEntry(queue_pointer).m_time) >= g_DEADLOCK_THRESHOLD) {
+ WARN_EXPR(getEntry(queue_pointer));
+ WARN_EXPR(m_chip_ptr->getID());
+ WARN_EXPR(current_time);
+ ERROR_MSG("Possible Deadlock detected");
+ }
+ inc_index(queue_pointer);
+ }
+
+ if (m_size > 0) { // If there are still outstanding requests, keep checking
+ g_eventQueue_ptr->scheduleEvent(this, g_DEADLOCK_THRESHOLD);
+ } else {
+ m_deadlock_check_scheduled = false;
+ }
+}
+
+void StoreBuffer::printConfig(ostream& out)
+{
+ out << "Store buffer entries: " << MAX_ENTRIES << " (Only valid if TSO is enabled)" << endl;
+}
+
+// Handle an incoming store request, this method is responsible for
+// calling hitCallback as needed
+void
+StoreBuffer::insertStore(Packet* pkt, const CacheMsg& request)
+{
+ Address addr = request.getAddress();
+ CacheRequestType type = request.getType();
+ Address pc = request.getProgramCounter();
+ AccessModeType access_mode = request.getAccessMode();
+ int size = request.getSize();
+ int threadID = request.getThreadID();
+
+ DEBUG_MSG(STOREBUFFER_COMP, MedPrio, "insertStore");
+ DEBUG_EXPR(STOREBUFFER_COMP, MedPrio, g_eventQueue_ptr->getTime());
+ assert((type == CacheRequestType_ST) || (type == CacheRequestType_ATOMIC));
+ assert(isReady());
+
+ // See if we should schedule a deadlock check
+ if (m_deadlock_check_scheduled == false) {
+ g_eventQueue_ptr->scheduleEvent(this, g_DEADLOCK_THRESHOLD);
+ m_deadlock_check_scheduled = true;
+ }
+
+ // Perform the hit-callback for the store
+ SubBlock subblock(addr, size);
+ if(type == CacheRequestType_ST) {
+ g_system_ptr->getDriver()->hitCallback(pkt);
+ assert(subblock.getSize() != 0);
+ } else {
+ // wait to perform the hitCallback until later for Atomics
+ }
+
+ // Perform possible pre-fetch
+ if(!isEmpty()) {
+ Packet new_pkt(pkt);
+ pkt->req->setFlags(Request::PREFETCH);
+ m_chip_ptr->getSequencer(m_version)->makeRequest(&new_pkt);
+ }
+
+ // Update the StoreCache
+ m_store_cache.add(subblock);
+
+ // Enqueue the entry
+ StoreBufferEntry entry(subblock, type, pc, access_mode, size, threadID); // FIXME
+ enqueue(entry);
+
+ if(type == CacheRequestType_ATOMIC) {
+ m_seen_atomic = true;
+ }
+
+ processHeadOfQueue();
+}
+
+void StoreBuffer::callBack(const Address& addr, DataBlock& data, Packet* pkt)
+{
+ DEBUG_MSG(STOREBUFFER_COMP, MedPrio, "callBack");
+ DEBUG_EXPR(STOREBUFFER_COMP, MedPrio, g_eventQueue_ptr->getTime());
+ assert(!isEmpty());
+ assert(m_pending == true);
+ assert(line_address(addr) == addr);
+ assert(line_address(m_pending_address) == addr);
+ assert(line_address(peek().m_subblock.getAddress()) == addr);
+ CacheRequestType type = peek().m_type;
+ //int threadID = peek().m_thread;
+ assert((type == CacheRequestType_ST) || (type == CacheRequestType_ATOMIC));
+ m_pending = false;
+
+ // If oldest entry was ATOMIC, perform the callback
+ if(type == CacheRequestType_ST) {
+ // We already performed the call back for the store at insert time
+ } else {
+ // We waited to perform the hitCallback until now for Atomics
+ peek().m_subblock.mergeFrom(data); // copy the correct bytes from DataBlock into the SubBlock for the Load part of the atomic Load/Store
+ g_system_ptr->getDriver()->hitCallback(pkt);
+ m_seen_atomic = false;
+
+ /// FIXME - record the time spent in the store buffer - split out ST vs ATOMIC
+ }
+ assert(peek().m_subblock.getSize() != 0);
+
+ // Apply the head entry to the datablock
+ peek().m_subblock.mergeTo(data); // For both the Store and Atomic cases
+
+ // Update the StoreCache
+ m_store_cache.remove(peek().m_subblock);
+
+ // Dequeue the entry from the store buffer
+ dequeue();
+
+ if (isEmpty()) {
+ assert(m_store_cache.isEmpty());
+ }
+
+ if(type == CacheRequestType_ATOMIC) {
+ assert(isEmpty());
+ }
+
+ // See if we can remove any more entries
+ processHeadOfQueue();
+}
+
+void StoreBuffer::processHeadOfQueue()
+{
+ if(!isEmpty() && !m_pending) {
+ StoreBufferEntry& entry = peek();
+ assert(m_pending == false);
+ m_pending = true;
+ m_pending_address = entry.m_subblock.getAddress();
+ CacheMsg request(entry.m_subblock.getAddress(), entry.m_subblock.getAddress(), entry.m_type, entry.m_pc, entry.m_access_mode, entry.m_size, PrefetchBit_No, 0, Address(0), entry.m_thread);
+ m_chip_ptr->getSequencer(m_version)->doRequest(request);
+ }
+}
+
+bool StoreBuffer::isReady() const
+{
+ return ((m_size < MAX_ENTRIES) && (!m_seen_atomic));
+}
+
+// Queue implementation methods
+
+StoreBufferEntry& StoreBuffer::peek()
+{
+ return getEntry(m_head);
+}
+
+void StoreBuffer::dequeue()
+{
+ assert(m_size > 0);
+ m_size--;
+ inc_index(m_head);
+}
+
+void StoreBuffer::enqueue(const StoreBufferEntry& entry)
+{
+ // assert(isReady());
+ (*m_queue_ptr)[m_tail] = entry;
+ m_size++;
+ g_system_ptr->getProfiler()->storeBuffer(m_size, m_store_cache.size());
+ inc_index(m_tail);
+}
+
+StoreBufferEntry& StoreBuffer::getEntry(int index)
+{
+ return (*m_queue_ptr)[index];
+}
+
+void StoreBuffer::print(ostream& out) const
+{
+ out << "[StoreBuffer]";
+}
+
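Note: the buffer above is a fixed-capacity ring: m_head and m_tail wrap through
inc_index() while m_size tracks occupancy, so full and empty are unambiguous.
A minimal sketch of the same discipline with ints in place of StoreBufferEntry
(illustrative only):

    #include <cassert>

    const int kEntries = 128;              // mirrors MAX_ENTRIES

    struct Ring {
        int buf[kEntries];
        int head = 0, tail = 0, size = 0;

        static void inc(int& i) { if (++i >= kEntries) i = 0; }

        void enqueue(int v) { assert(size < kEntries); buf[tail] = v; ++size; inc(tail); }
        int& peek()         { assert(size > 0); return buf[head]; }
        void dequeue()      { assert(size > 0); --size; inc(head); }
    };
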
diff --git a/src/mem/ruby/system/StoreBuffer.hh b/src/mem/ruby/system/StoreBuffer.hh
new file mode 100644
index 000000000..2c9283f4b
--- /dev/null
+++ b/src/mem/ruby/system/StoreBuffer.hh
@@ -0,0 +1,121 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ * Description:
+ *
+ */
+
+#ifndef StoreBuffer_H
+#define StoreBuffer_H
+
+#include "mem/ruby/common/Global.hh"
+#include "mem/ruby/common/Consumer.hh"
+#include "mem/ruby/common/Address.hh"
+#include "mem/protocol/AccessModeType.hh"
+#include "mem/protocol/CacheRequestType.hh"
+#include "mem/ruby/system/StoreCache.hh"
+
+class CacheMsg;
+class DataBlock;
+class SubBlock;
+class StoreBufferEntry;
+class AbstractChip;
+class Packet;
+
+template <class TYPE> class Vector;
+
+class StoreBuffer : public Consumer {
+public:
+ // Constructors
+ StoreBuffer(AbstractChip* chip_ptr, int version);
+
+ // Destructor
+ ~StoreBuffer();
+
+ // Public Methods
+ void wakeup(); // Used only for deadlock detection
+ void callBack(const Address& addr, DataBlock& data, Packet* pkt);
+ void insertStore(Packet* pkt, const CacheMsg& request);
+ void updateSubBlock(SubBlock& sub_block) const { m_store_cache.update(sub_block); }
+ bool trySubBlock(const SubBlock& sub_block) const { assert(isReady()); return m_store_cache.check(sub_block); }
+ void print(ostream& out) const;
+ bool isEmpty() const { return (m_size == 0); }
+ bool isReady() const;
+
+ // Class methods
+ static void printConfig(ostream& out);
+
+private:
+ // Private Methods
+ void processHeadOfQueue();
+
+ StoreBufferEntry& peek();
+ void dequeue();
+ void enqueue(const StoreBufferEntry& entry);
+ StoreBufferEntry& getEntry(int index);
+
+ // Private copy constructor and assignment operator
+ StoreBuffer(const StoreBuffer& obj);
+ StoreBuffer& operator=(const StoreBuffer& obj);
+
+ // Data Members (m_ prefix)
+ int m_version;
+
+ Vector<StoreBufferEntry>* m_queue_ptr;
+ int m_head;
+ int m_tail;
+ int m_size;
+
+ StoreCache m_store_cache;
+
+ AbstractChip* m_chip_ptr;
+ bool m_pending;
+ Address m_pending_address;
+ bool m_seen_atomic;
+ bool m_deadlock_check_scheduled;
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const StoreBuffer& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const StoreBuffer& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //StoreBuffer_H
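
Note: trySubBlock() and updateSubBlock() form the TSO load path. A load first
asks whether buffered stores cover every requested byte and, if so, completes
from the buffer; otherwise it misses to the hierarchy, and the buffered bytes
are later overlaid on the returned line so younger stores still win. A sketch
of that flow with stub types standing in for SubBlock and the store buffer
(compare Sequencer::doRequest and hitCallback; all names are illustrative, and
the overlay really happens on the miss reply rather than inline):

    struct SubBlockLike { /* the bytes one load wants */ };
    struct BufferLike {
        bool trySubBlock(const SubBlockLike&) const { return false; } // stub
        void updateSubBlock(SubBlockLike&) const {}                   // stub
    };
    void issueMiss(const SubBlockLike&) {}                            // stub

    bool tso_load(const BufferLike& sb, SubBlockLike& load) {
        if (sb.trySubBlock(load))   // every byte covered by buffered stores?
            return true;            // yes: forward entirely from the buffer
        issueMiss(load);            // no: fetch the line ...
        sb.updateSubBlock(load);    // ... then overlay buffered bytes on it
        return false;
    }
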
diff --git a/src/mem/ruby/system/StoreCache.cc b/src/mem/ruby/system/StoreCache.cc
new file mode 100644
index 000000000..a11b2ac50
--- /dev/null
+++ b/src/mem/ruby/system/StoreCache.cc
@@ -0,0 +1,178 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ */
+
+#include "mem/ruby/system/StoreCache.hh"
+#include "mem/ruby/system/System.hh"
+#include "mem/ruby/common/Driver.hh"
+#include "mem/gems_common/Vector.hh"
+#include "mem/ruby/common/DataBlock.hh"
+#include "mem/ruby/common/SubBlock.hh"
+#include "mem/gems_common/Map.hh"
+
+// Helper class
+struct StoreCacheEntry {
+ StoreCacheEntry() {
+ m_byte_counters.setSize(RubyConfig::dataBlockBytes());
+ for(int i=0; i<m_byte_counters.size(); i++) {
+ m_byte_counters[i] = 0;
+ }
+ m_line_counter = 0;
+
+ }
+ Address m_addr;
+ DataBlock m_datablock;
+ Vector<int> m_byte_counters;
+ int m_line_counter;
+};
+
+StoreCache::StoreCache()
+{
+ m_internal_cache_ptr = new Map<Address, StoreCacheEntry>;
+}
+
+StoreCache::~StoreCache()
+{
+ delete m_internal_cache_ptr;
+}
+
+bool StoreCache::isEmpty() const
+{
+ return m_internal_cache_ptr->size() == 0;
+}
+
+int StoreCache::size() const { return m_internal_cache_ptr->size(); }
+
+void StoreCache::add(const SubBlock& block)
+{
+ if (m_internal_cache_ptr->exist(line_address(block.getAddress())) == false) {
+ m_internal_cache_ptr->allocate(line_address(block.getAddress()));
+ }
+
+ StoreCacheEntry& entry = m_internal_cache_ptr->lookup(line_address(block.getAddress()));
+
+  // For each byte in the block, record the data and increment its counter
+ int starting_offset = block.getAddress().getOffset();
+ int size = block.getSize();
+ for (int index=0; index < size; index++) {
+ // Update counter
+ entry.m_byte_counters[starting_offset+index]++;
+
+ // Record data
+ entry.m_datablock.setByte(starting_offset+index, block.getByte(index));
+
+ DEBUG_EXPR(SEQUENCER_COMP, LowPrio, block.getAddress());
+ DEBUG_EXPR(SEQUENCER_COMP, LowPrio, int(block.getByte(index)));
+ DEBUG_EXPR(SEQUENCER_COMP, LowPrio, starting_offset+index);
+ }
+
+ // Increment the counter
+ entry.m_line_counter++;
+}
+
+void StoreCache::remove(const SubBlock& block)
+{
+ assert(m_internal_cache_ptr->exist(line_address(block.getAddress())));
+
+ StoreCacheEntry& entry = m_internal_cache_ptr->lookup(line_address(block.getAddress()));
+
+ // Decrement the byte counters
+ int starting_offset = block.getAddress().getOffset();
+ int size = block.getSize();
+ for (int index=0; index < size; index++) {
+ // Update counter
+ entry.m_byte_counters[starting_offset+index]--;
+ }
+
+ // Decrement the line counter
+ entry.m_line_counter--;
+ assert(entry.m_line_counter >= 0);
+
+ // Check to see if we should de-allocate this entry
+ if (entry.m_line_counter == 0) {
+ m_internal_cache_ptr->deallocate(line_address(block.getAddress()));
+ }
+}
+
+bool StoreCache::check(const SubBlock& block) const
+{
+ if (m_internal_cache_ptr->exist(line_address(block.getAddress())) == false) {
+ return false;
+ } else {
+ // Lookup the entry
+ StoreCacheEntry& entry = m_internal_cache_ptr->lookup(line_address(block.getAddress()));
+
+ // See if all the bytes are valid
+ int starting_offset = block.getAddress().getOffset();
+ int size = block.getSize();
+    for (int index=0; index < size; index++) {
+      if (entry.m_byte_counters[starting_offset+index] == 0) {
+        // not all the bytes were valid
+        return false;
+      }
+    }
+ }
+ return true;
+}
+
+void StoreCache::update(SubBlock& block) const
+{
+ if (m_internal_cache_ptr->exist(line_address(block.getAddress()))) {
+ // Lookup the entry
+ StoreCacheEntry& entry = m_internal_cache_ptr->lookup(line_address(block.getAddress()));
+
+ // Copy all appropriate and valid bytes from the store cache to
+ // the SubBlock
+ int starting_offset = block.getAddress().getOffset();
+ int size = block.getSize();
+ for (int index=0; index < size; index++) {
+
+ DEBUG_EXPR(SEQUENCER_COMP, LowPrio, block.getAddress());
+ DEBUG_EXPR(SEQUENCER_COMP, LowPrio, int(entry.m_datablock.getByte(starting_offset+index)));
+ DEBUG_EXPR(SEQUENCER_COMP, LowPrio, starting_offset+index);
+
+ // If this byte is valid, copy the data into the sub-block
+ if (entry.m_byte_counters[starting_offset+index] > 0) {
+ block.setByte(index, entry.m_datablock.getByte(starting_offset+index));
+ }
+ }
+ }
+}
+
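+// Usage sketch (illustrative; "store_block" and "load_block" are assumed
+// SubBlock values describing a store's and a load's byte range): a store
+// buffer adds each outstanding store, forwards buffered bytes to matching
+// loads, and removes stores on completion:
+//
+//   StoreCache cache;
+//   cache.add(store_block);            // bump per-byte counters, record data
+//   if (cache.check(load_block)) {     // are all requested bytes buffered?
+//     cache.update(load_block);        // merge the buffered bytes in
+//   }
+//   cache.remove(store_block);         // store completed; may free the line
+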
+void StoreCache::print(ostream& out) const
+{
+ out << "[StoreCache]";
+}
+
diff --git a/src/mem/ruby/system/StoreCache.hh b/src/mem/ruby/system/StoreCache.hh
new file mode 100644
index 000000000..81eecde38
--- /dev/null
+++ b/src/mem/ruby/system/StoreCache.hh
@@ -0,0 +1,85 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ * Description: Tracks the data bytes written by outstanding stores with
+ * per-byte reference counters so that buffered store data can be merged
+ * into matching SubBlocks (see StoreCache::update).
+ *
+ */
+
+#ifndef StoreCache_H
+#define StoreCache_H
+
+#include "mem/ruby/common/Global.hh"
+#include "mem/ruby/common/Address.hh"
+
+
+class DataBlock;
+class SubBlock;
+class StoreCacheEntry;
+
+template <class KEY_TYPE, class VALUE_TYPE> class Map;
+
+class StoreCache {
+public:
+ // Constructors
+ StoreCache();
+
+ // Destructor
+ ~StoreCache();
+
+ // Public Methods
+ void add(const SubBlock& block);
+ void remove(const SubBlock& block);
+ bool check(const SubBlock& block) const;
+ void update(SubBlock& block) const;
+ bool isEmpty() const;
+ int size() const;
+ void print(ostream& out) const;
+
+private:
+ Map<Address, StoreCacheEntry>* m_internal_cache_ptr;
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const StoreCache& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const StoreCache& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //StoreCache_H
diff --git a/src/mem/ruby/system/System.cc b/src/mem/ruby/system/System.cc
new file mode 100644
index 000000000..877a894fc
--- /dev/null
+++ b/src/mem/ruby/system/System.cc
@@ -0,0 +1,270 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * System.cc
+ *
+ * Description: See System.hh
+ *
+ * $Id$
+ *
+ */
+
+
+#include "mem/ruby/system/System.hh"
+#include "mem/ruby/profiler/Profiler.hh"
+#include "mem/ruby/network/Network.hh"
+#include "mem/ruby/tester/Tester.hh"
+#include "mem/ruby/tester/SyntheticDriver.hh"
+#include "mem/ruby/tester/DeterministicDriver.hh"
+#include "mem/protocol/Chip.hh"
+//#include "mem/ruby/recorder/Tracer.hh"
+#include "mem/protocol/Protocol.hh"
+
+RubySystem::RubySystem()
+{
+ init();
+ m_preinitialized_driver = false;
+ createDriver();
+
+  /* gem5:Binkert for decommissioning of tracer
+ m_tracer_ptr = new Tracer;
+ */
+
+  /* gem5:Arka for decommissioning of log_tm
+  if (XACT_MEMORY) {
+    m_xact_isolation_checker = new XactIsolationChecker;
+    m_xact_commit_arbiter = new XactCommitArbiter;
+    m_xact_visualizer = new XactVisualizer;
+  }
+  */
+}
+
+RubySystem::RubySystem(Driver* _driver)
+{
+ init();
+ m_preinitialized_driver = true;
+ m_driver_ptr = _driver;
+}
+
+RubySystem::~RubySystem()
+{
+ for (int i = 0; i < m_chip_vector.size(); i++) {
+ delete m_chip_vector[i];
+ }
+ if (!m_preinitialized_driver)
+ delete m_driver_ptr;
+ delete m_network_ptr;
+ delete m_profiler_ptr;
+  /* gem5:Binkert for decommissioning of tracer
+ delete m_tracer_ptr;
+ */
+}
+
+void RubySystem::init()
+{
+ DEBUG_MSG(SYSTEM_COMP, MedPrio,"initializing");
+
+ m_driver_ptr = NULL;
+ m_profiler_ptr = new Profiler;
+
+ // NETWORK INITIALIZATION
+ // create the network by calling a function that calls new
+ m_network_ptr = Network::createNetwork(RubyConfig::numberOfChips());
+
+ DEBUG_MSG(SYSTEM_COMP, MedPrio,"Constructed network");
+
+ // CHIP INITIALIZATION
+  m_chip_vector.setSize(RubyConfig::numberOfChips()); // create the vector of pointers to chips
+ for(int i=0; i<RubyConfig::numberOfChips(); i++) { // for each chip
+ // create the chip
+ m_chip_vector[i] = new Chip(i, m_network_ptr);
+ DEBUG_MSG(SYSTEM_COMP, MedPrio,"Constructed a chip");
+ }
+
+ // These must be after the chips are constructed
+
+#if 0
+ if (!g_SIMICS) {
+ if (g_SYNTHETIC_DRIVER && !g_DETERMINISTIC_DRIVER) {
+ m_driver_ptr = new SyntheticDriver(this);
+ } else if (!g_SYNTHETIC_DRIVER && g_DETERMINISTIC_DRIVER) {
+ m_driver_ptr = new DeterministicDriver(this);
+ } else if (g_SYNTHETIC_DRIVER && g_DETERMINISTIC_DRIVER) {
+ ERROR_MSG("SYNTHETIC and DETERMINISTIC DRIVERS are exclusive and cannot be both enabled");
+ } else {
+ // normally make tester object, otherwise make an opal interface object.
+ if (!OpalInterface::isOpalLoaded()) {
+ m_driver_ptr = new Tester(this);
+ } else {
+ m_driver_ptr = new OpalInterface(this);
+ }
+ }
+ } else {
+ // detect if opal is loaded or not
+ if (OpalInterface::isOpalLoaded()) {
+ m_driver_ptr = new OpalInterface(this);
+ } else {
+ assert(0);
+ /* Need to allocate a driver here */
+ // m_driver_ptr = new SimicsDriver(this);
+ }
+ }
+#endif
+ DEBUG_MSG(SYSTEM_COMP, MedPrio,"finished initializing");
+ DEBUG_NEWLINE(SYSTEM_COMP, MedPrio);
+}
+
+void RubySystem::createDriver()
+{
+ if (g_SYNTHETIC_DRIVER && !g_DETERMINISTIC_DRIVER) {
+ cerr << "Creating Synthetic Driver" << endl;
+ m_driver_ptr = new SyntheticDriver(this);
+ } else if (!g_SYNTHETIC_DRIVER && g_DETERMINISTIC_DRIVER) {
+ cerr << "Creating Deterministic Driver" << endl;
+ m_driver_ptr = new DeterministicDriver(this);
+ }
+}
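+
+// Construction sketch (illustrative): a standalone run lets RubySystem pick
+// a driver from the g_SYNTHETIC_DRIVER / g_DETERMINISTIC_DRIVER flags, while
+// an embedding simulator hands in a preinitialized driver that RubySystem
+// will not delete:
+//
+//   RubySystem* standalone = new RubySystem();        // driver created here
+//   RubySystem* embedded   = new RubySystem(driver);  // e.g. M5's RubyMem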
+
+void RubySystem::printConfig(ostream& out) const
+{
+ out << "\n================ Begin RubySystem Configuration Print ================\n\n";
+ RubyConfig::printConfiguration(out);
+ out << endl;
+ getChip(0)->printConfig(out);
+ m_network_ptr->printConfig(out);
+ m_driver_ptr->printConfig(out);
+ m_profiler_ptr->printConfig(out);
+ out << "\n================ End RubySystem Configuration Print ================\n\n";
+}
+
+void RubySystem::printStats(ostream& out)
+{
+ const time_t T = time(NULL);
+ tm *localTime = localtime(&T);
+ char buf[100];
+ strftime(buf, 100, "%b/%d/%Y %H:%M:%S", localTime);
+
+ out << "Real time: " << buf << endl;
+
+ m_profiler_ptr->printStats(out);
+ for(int i=0; i<RubyConfig::numberOfChips(); i++) { // for each chip
+ for(int p=0; p<RubyConfig::numberOfProcsPerChip(); p++) {
+ m_chip_vector[i]->m_L1Cache_mandatoryQueue_vec[p]->printStats(out);
+ }
+ }
+ m_network_ptr->printStats(out);
+ m_driver_ptr->printStats(out);
+ Chip::printStats(out);
+}
+
+void RubySystem::clearStats() const
+{
+ m_profiler_ptr->clearStats();
+ m_network_ptr->clearStats();
+ m_driver_ptr->clearStats();
+ Chip::clearStats();
+ for(int i=0; i<RubyConfig::numberOfChips(); i++) { // for each chip
+ for(int p=0; p<RubyConfig::numberOfProcsPerChip(); p++) {
+ m_chip_vector[i]->m_L1Cache_mandatoryQueue_vec[p]->clearStats();
+ }
+ }
+}
+
+void RubySystem::recordCacheContents(CacheRecorder& tr) const
+{
+ for (int i = 0; i < m_chip_vector.size(); i++) {
+    // note: "version" is a local, so it should not carry the m_ prefix
+    for (int version = 0; version < RubyConfig::numberOfProcsPerChip(); version++) {
+      if (Protocol::m_TwoLevelCache) {
+        m_chip_vector[i]->m_L1Cache_L1IcacheMemory_vec[version]->setAsInstructionCache(true);
+        m_chip_vector[i]->m_L1Cache_L1DcacheMemory_vec[version]->setAsInstructionCache(false);
+      } else {
+        m_chip_vector[i]->m_L1Cache_cacheMemory_vec[version]->setAsInstructionCache(false);
+      }
+ }
+ m_chip_vector[i]->recordCacheContents(tr);
+ }
+}
+
+#ifdef CHECK_COHERENCE
+// This code checks for cases where the given cache block is exclusive in
+// one node and shared in another -- a coherence violation.
+//
+// To use it, the SLICC specification must call sequencer.checkCoherence(address)
+// whenever a controller changes to a state with new permissions; do this
+// in setState. The SLICC spec must also define the protocol-specific
+// methods "isBlockShared" and "isBlockExclusive".
+//
+void RubySystem::checkGlobalCoherenceInvariant(const Address& addr) {
+
+ NodeID exclusive = -1;
+ bool sharedDetected = false;
+ NodeID lastShared = -1;
+
+ for (int i = 0; i < m_chip_vector.size(); i++) {
+
+ if (m_chip_vector[i]->isBlockExclusive(addr)) {
+ if (exclusive != -1) {
+ // coherence violation
+ WARN_EXPR(exclusive);
+ WARN_EXPR(m_chip_vector[i]->getID());
+ WARN_EXPR(addr);
+ WARN_EXPR(g_eventQueue_ptr->getTime());
+ ERROR_MSG("Coherence Violation Detected -- 2 exclusive chips");
+ }
+ else if (sharedDetected) {
+ WARN_EXPR(lastShared);
+ WARN_EXPR(m_chip_vector[i]->getID());
+ WARN_EXPR(addr);
+ WARN_EXPR(g_eventQueue_ptr->getTime());
+ ERROR_MSG("Coherence Violation Detected -- exclusive chip with >=1 shared");
+ }
+ else {
+ exclusive = m_chip_vector[i]->getID();
+ }
+ }
+ else if (m_chip_vector[i]->isBlockShared(addr)) {
+ sharedDetected = true;
+ lastShared = m_chip_vector[i]->getID();
+
+ if (exclusive != -1) {
+ WARN_EXPR(lastShared);
+ WARN_EXPR(exclusive);
+ WARN_EXPR(addr);
+ WARN_EXPR(g_eventQueue_ptr->getTime());
+ ERROR_MSG("Coherence Violation Detected -- exclusive chip with >=1 shared");
+ }
+ }
+ }
+}
+#endif
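+
+// Hook sketch (illustrative SLICC-like pseudocode; the exact setState
+// signature and the state names are protocol-specific assumptions):
+//
+//   void setState(Address addr, State state) {
+//     ...
+//     sequencer.checkCoherence(addr);  // drives the invariant check above
+//   }
+//   bool isBlockExclusive(Address addr) { return getState(addr) == State:M; }
+//   bool isBlockShared(Address addr)    { return getState(addr) == State:S; }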
+
+
+
+
diff --git a/src/mem/ruby/system/System.hh b/src/mem/ruby/system/System.hh
new file mode 100644
index 000000000..8679b55c3
--- /dev/null
+++ b/src/mem/ruby/system/System.hh
@@ -0,0 +1,138 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * System.hh
+ *
+ * Description: Contains all of the various parts of the system we are
+ * simulating. Performs allocation, deallocation, and setup of all
+ * the major components of the system.
+ *
+ * $Id$
+ *
+ */
+
+#ifndef SYSTEM_H
+#define SYSTEM_H
+
+#include "mem/ruby/common/Global.hh"
+#include "mem/gems_common/Vector.hh"
+#include "mem/ruby/common/Address.hh"
+#include "mem/ruby/config/RubyConfig.hh"
+#include "mem/protocol/MachineType.hh"
+#include "mem/ruby/slicc_interface/AbstractChip.hh"
+
+class Profiler;
+class Network;
+class Driver;
+class CacheRecorder;
+class Tracer;
+class Sequencer;
+class XactIsolationChecker;
+class XactCommitArbiter;
+class XactVisualizer;
+class TransactionInterfaceManager;
+
+class RubySystem {
+public:
+ // Constructors
+ RubySystem();
+ RubySystem(Driver* _driver); // used when driver is already instantiated (e.g. M5's RubyMem)
+
+ // Destructor
+ ~RubySystem();
+
+ // Public Methods
+ int getNumProcessors() { return RubyConfig::numberOfProcessors(); }
+ int getNumMemories() { return RubyConfig::numberOfMemories(); }
+ Profiler* getProfiler() { return m_profiler_ptr; }
+ Driver* getDriver() { assert(m_driver_ptr != NULL); return m_driver_ptr; }
+ Tracer* getTracer() { assert(m_tracer_ptr != NULL); return m_tracer_ptr; }
+ Network* getNetwork() { assert(m_network_ptr != NULL); return m_network_ptr; }
+  XactIsolationChecker* getXactIsolationChecker() { assert(m_xact_isolation_checker != NULL); return m_xact_isolation_checker; }
+  XactCommitArbiter* getXactCommitArbiter() { assert(m_xact_commit_arbiter != NULL); return m_xact_commit_arbiter; }
+  XactVisualizer* getXactVisualizer() { assert(m_xact_visualizer != NULL); return m_xact_visualizer; }
+
+ AbstractChip* getChip(int chipNumber) const { assert(m_chip_vector[chipNumber] != NULL); return m_chip_vector[chipNumber];}
+ Sequencer* getSequencer(int procNumber) const {
+ assert(procNumber < RubyConfig::numberOfProcessors());
+ return m_chip_vector[procNumber/RubyConfig::numberOfProcsPerChip()]->getSequencer(procNumber%RubyConfig::numberOfProcsPerChip());
+ }
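+  // Worked example for getSequencer above (illustrative): with 2 processors
+  // per chip, procNumber 5 maps to chip 5/2 = 2 and local sequencer 5%2 = 1.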
+ TransactionInterfaceManager* getTransactionInterfaceManager(int procNumber) const {
+ return m_chip_vector[procNumber/RubyConfig::numberOfProcsPerChip()]->getTransactionInterfaceManager(procNumber%RubyConfig::numberOfProcsPerChip());
+ }
+ void recordCacheContents(CacheRecorder& tr) const;
+ void printConfig(ostream& out) const;
+ void printStats(ostream& out);
+ void clearStats() const;
+
+ void print(ostream& out) const;
+#ifdef CHECK_COHERENCE
+ void checkGlobalCoherenceInvariant(const Address& addr);
+#endif
+
+private:
+ // Private Methods
+ void init();
+ void createDriver();
+
+ // Private copy constructor and assignment operator
+ RubySystem(const RubySystem& obj);
+ RubySystem& operator=(const RubySystem& obj);
+
+ // Data Members (m_ prefix)
+ Network* m_network_ptr;
+ Vector<AbstractChip*> m_chip_vector;
+ Profiler* m_profiler_ptr;
+ bool m_preinitialized_driver;
+ Driver* m_driver_ptr;
+ Tracer* m_tracer_ptr;
+ XactIsolationChecker *m_xact_isolation_checker;
+ XactCommitArbiter *m_xact_commit_arbiter;
+ XactVisualizer *m_xact_visualizer;
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const RubySystem& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+inline
+ostream& operator<<(ostream& out, const RubySystem& obj)
+{
+  // RubySystem::print() is declared but not defined in this patch, so the
+  // call stays commented out to avoid an undefined reference at link time
+  // obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //SYSTEM_H
+
+
+
diff --git a/src/mem/ruby/system/TBETable.hh b/src/mem/ruby/system/TBETable.hh
new file mode 100644
index 000000000..aa7e0df6e
--- /dev/null
+++ b/src/mem/ruby/system/TBETable.hh
@@ -0,0 +1,165 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * TBETable.h
+ *
+ * Description: A table of TBEs (Transaction Buffer Entries) indexed by
+ * line address and bounded by NUMBER_OF_TBES entries.
+ *
+ * $Id$
+ *
+ */
+
+#ifndef TBETABLE_H
+#define TBETABLE_H
+
+#include "mem/ruby/common/Global.hh"
+#include "mem/gems_common/Map.hh"
+#include "mem/ruby/common/Address.hh"
+#include "mem/ruby/profiler/Profiler.hh"
+#include "mem/ruby/slicc_interface/AbstractChip.hh"
+#include "mem/ruby/system/System.hh"
+
+template<class ENTRY>
+class TBETable {
+public:
+
+ // Constructors
+ TBETable(AbstractChip* chip_ptr);
+
+ // Destructor
+ //~TBETable();
+
+ // Public Methods
+
+ static void printConfig(ostream& out) { out << "TBEs_per_TBETable: " << NUMBER_OF_TBES << endl; }
+
+ bool isPresent(const Address& address) const;
+ void allocate(const Address& address);
+ void deallocate(const Address& address);
+ bool areNSlotsAvailable(int n) const { return (NUMBER_OF_TBES - m_map.size()) >= n; }
+
+ ENTRY& lookup(const Address& address);
+ const ENTRY& lookup(const Address& address) const;
+
+ // Print cache contents
+ void print(ostream& out) const;
+private:
+ // Private Methods
+
+ // Private copy constructor and assignment operator
+ TBETable(const TBETable& obj);
+ TBETable& operator=(const TBETable& obj);
+
+ // Data Members (m_prefix)
+ Map<Address, ENTRY> m_map;
+ AbstractChip* m_chip_ptr;
+};
+
+// Output operator declaration
+//ostream& operator<<(ostream& out, const TBETable<ENTRY>& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+template<class ENTRY>
+extern inline
+ostream& operator<<(ostream& out, const TBETable<ENTRY>& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+
+// ****************************************************************
+
+template<class ENTRY>
+extern inline
+TBETable<ENTRY>::TBETable(AbstractChip* chip_ptr)
+{
+ m_chip_ptr = chip_ptr;
+}
+
+// PUBLIC METHODS
+
+// tests to see if an address is present in the cache
+template<class ENTRY>
+extern inline
+bool TBETable<ENTRY>::isPresent(const Address& address) const
+{
+ assert(address == line_address(address));
+ assert(m_map.size() <= NUMBER_OF_TBES);
+ return m_map.exist(address);
+}
+
+template<class ENTRY>
+extern inline
+void TBETable<ENTRY>::allocate(const Address& address)
+{
+ assert(isPresent(address) == false);
+ assert(m_map.size() < NUMBER_OF_TBES);
+ g_system_ptr->getProfiler()->L2tbeUsageSample(m_map.size());
+ m_map.add(address, ENTRY());
+}
+
+template<class ENTRY>
+extern inline
+void TBETable<ENTRY>::deallocate(const Address& address)
+{
+ assert(isPresent(address) == true);
+ assert(m_map.size() > 0);
+ m_map.erase(address);
+}
+
+// looks an address up in the cache
+template<class ENTRY>
+extern inline
+ENTRY& TBETable<ENTRY>::lookup(const Address& address)
+{
+ assert(isPresent(address) == true);
+ return m_map.lookup(address);
+}
+
+// looks an address up in the cache
+template<class ENTRY>
+extern inline
+const ENTRY& TBETable<ENTRY>::lookup(const Address& address) const
+{
+ assert(isPresent(address) == true);
+ return m_map.lookup(address);
+}
+
+template<class ENTRY>
+extern inline
+void TBETable<ENTRY>::print(ostream& out) const
+{
+}
+
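+// Usage sketch (illustrative; "L2Cache_TBE" and its m_DataBlk field are
+// placeholder names for a SLICC-generated entry type):
+//
+//   TBETable<L2Cache_TBE> tbes(chip_ptr);
+//   Address line = line_address(addr);
+//   if (!tbes.isPresent(line) && tbes.areNSlotsAvailable(1)) {
+//     tbes.allocate(line);
+//     tbes.lookup(line).m_DataBlk = data;  // fill in transaction state
+//     ...
+//     tbes.deallocate(line);               // transaction complete
+//   }
+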
+#endif //TBETABLE_H
diff --git a/src/mem/ruby/system/TimerTable.cc b/src/mem/ruby/system/TimerTable.cc
new file mode 100644
index 000000000..edc2de230
--- /dev/null
+++ b/src/mem/ruby/system/TimerTable.cc
@@ -0,0 +1,129 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ */
+
+#include "mem/ruby/common/Global.hh"
+#include "mem/ruby/system/TimerTable.hh"
+#include "mem/ruby/eventqueue/RubyEventQueue.hh"
+
+TimerTable::TimerTable(Chip* chip_ptr)
+{
+ assert(chip_ptr != NULL);
+ m_consumer_ptr = NULL;
+ m_chip_ptr = chip_ptr;
+ m_next_valid = false;
+ m_next_address = Address(0);
+ m_next_time = 0;
+}
+
+
+bool TimerTable::isReady() const
+{
+ if (m_map.size() == 0) {
+ return false;
+ }
+
+ if (!m_next_valid) {
+ updateNext();
+ }
+ assert(m_next_valid);
+ return (g_eventQueue_ptr->getTime() >= m_next_time);
+}
+
+const Address& TimerTable::readyAddress() const
+{
+ assert(isReady());
+
+ if (!m_next_valid) {
+ updateNext();
+ }
+ assert(m_next_valid);
+ return m_next_address;
+}
+
+void TimerTable::set(const Address& address, Time relative_latency)
+{
+ assert(address == line_address(address));
+ assert(relative_latency > 0);
+ assert(m_map.exist(address) == false);
+ Time ready_time = g_eventQueue_ptr->getTime() + relative_latency;
+ m_map.add(address, ready_time);
+ assert(m_consumer_ptr != NULL);
+ g_eventQueue_ptr->scheduleEventAbsolute(m_consumer_ptr, ready_time);
+
+  // Don't always recalculate the next ready address; only invalidate the
+  // cached minimum when the new entry could be the next one to expire
+  if (ready_time <= m_next_time) {
+    m_next_valid = false;
+  }
+}
+
+void TimerTable::unset(const Address& address)
+{
+ assert(address == line_address(address));
+ assert(m_map.exist(address) == true);
+ m_map.remove(address);
+
+ // Don't always recalculate the next ready address
+ if (address == m_next_address) {
+ m_next_valid = false;
+ }
+}
+
+void TimerTable::print(ostream& out) const
+{
+}
+
+
+void TimerTable::updateNext() const
+{
+ if (m_map.size() == 0) {
+ assert(m_next_valid == false);
+ return;
+ }
+
+ Vector<Address> addresses = m_map.keys();
+ m_next_address = addresses[0];
+ m_next_time = m_map.lookup(m_next_address);
+
+ // Search for the minimum time
+ int size = addresses.size();
+ for (int i=1; i<size; i++) {
+ Address maybe_next_address = addresses[i];
+ Time maybe_next_time = m_map.lookup(maybe_next_address);
+ if (maybe_next_time < m_next_time) {
+ m_next_time = maybe_next_time;
+      m_next_address = maybe_next_address;
+ }
+ }
+ m_next_valid = true;
+}
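+
+// Usage sketch (illustrative): a controller arms a per-address timeout and
+// services it once the event queue reaches the earliest ready time:
+//
+//   timer_table.setConsumer(this);    // wakeup() target; may be set once
+//   timer_table.set(line_addr, 100);  // fire 100 cycles from now
+//   ...
+//   if (timer_table.isReady()) {
+//     const Address& addr = timer_table.readyAddress();
+//     // handle the timeout for addr, then:
+//     timer_table.unset(addr);
+//   }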
diff --git a/src/mem/ruby/system/TimerTable.hh b/src/mem/ruby/system/TimerTable.hh
new file mode 100644
index 000000000..36ac83fc6
--- /dev/null
+++ b/src/mem/ruby/system/TimerTable.hh
@@ -0,0 +1,98 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * TimerTable.h
+ *
+ * Description: Tracks pending per-address timers and signals the registered
+ * Consumer when the earliest one becomes ready.
+ *
+ * $Id$
+ *
+ */
+
+#ifndef TIMERTABLE_H
+#define TIMERTABLE_H
+
+#include "mem/ruby/common/Global.hh"
+#include "mem/gems_common/Map.hh"
+#include "mem/ruby/common/Address.hh"
+class Consumer;
+class Chip;
+
+class TimerTable {
+public:
+
+ // Constructors
+ TimerTable(Chip* chip_ptr);
+
+ // Destructor
+ //~TimerTable();
+
+ // Class Methods
+ static void printConfig(ostream& out) {}
+
+ // Public Methods
+ void setConsumer(Consumer* consumer_ptr) { ASSERT(m_consumer_ptr==NULL); m_consumer_ptr = consumer_ptr; }
+ void setDescription(const string& name) { m_name = name; }
+
+ bool isReady() const;
+ const Address& readyAddress() const;
+ bool isSet(const Address& address) const { return m_map.exist(address); }
+ void set(const Address& address, Time relative_latency);
+ void unset(const Address& address);
+ void print(ostream& out) const;
+private:
+ // Private Methods
+ void updateNext() const;
+
+ // Private copy constructor and assignment operator
+ TimerTable(const TimerTable& obj);
+ TimerTable& operator=(const TimerTable& obj);
+
+ // Data Members (m_prefix)
+ Map<Address, Time> m_map;
+ Chip* m_chip_ptr;
+ mutable bool m_next_valid;
+ mutable Time m_next_time; // Only valid if m_next_valid is true
+ mutable Address m_next_address; // Only valid if m_next_valid is true
+ Consumer* m_consumer_ptr; // Consumer to signal a wakeup()
+ string m_name;
+};
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const TimerTable& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+#endif //TIMERTABLE_H