summaryrefslogtreecommitdiff
path: root/src/mem/ruby/structures
diff options
context:
space:
mode:
Diffstat (limited to 'src/mem/ruby/structures')
-rw-r--r--src/mem/ruby/structures/AbstractReplacementPolicy.hh89
-rw-r--r--src/mem/ruby/structures/BankedArray.cc85
-rw-r--r--src/mem/ruby/structures/BankedArray.hh72
-rw-r--r--src/mem/ruby/structures/Cache.py49
-rw-r--r--src/mem/ruby/structures/CacheMemory.cc565
-rw-r--r--src/mem/ruby/structures/CacheMemory.hh173
-rw-r--r--src/mem/ruby/structures/DirectoryMemory.cc212
-rw-r--r--src/mem/ruby/structures/DirectoryMemory.hh104
-rw-r--r--src/mem/ruby/structures/DirectoryMemory.py44
-rw-r--r--src/mem/ruby/structures/LRUPolicy.hh95
-rw-r--r--src/mem/ruby/structures/MemoryControl.cc49
-rw-r--r--src/mem/ruby/structures/MemoryControl.hh114
-rw-r--r--src/mem/ruby/structures/MemoryControl.py39
-rw-r--r--src/mem/ruby/structures/MemoryNode.cc41
-rw-r--r--src/mem/ruby/structures/MemoryNode.hh92
-rw-r--r--src/mem/ruby/structures/MemoryVector.hh237
-rw-r--r--src/mem/ruby/structures/PerfectCacheMemory.hh192
-rw-r--r--src/mem/ruby/structures/PersistentTable.cc219
-rw-r--r--src/mem/ruby/structures/PersistentTable.hh100
-rw-r--r--src/mem/ruby/structures/Prefetcher.hh2
-rw-r--r--src/mem/ruby/structures/PseudoLRUPolicy.hh137
-rw-r--r--src/mem/ruby/structures/RubyMemoryControl.cc791
-rw-r--r--src/mem/ruby/structures/RubyMemoryControl.hh172
-rw-r--r--src/mem/ruby/structures/RubyMemoryControl.py55
-rw-r--r--src/mem/ruby/structures/SConscript16
-rw-r--r--src/mem/ruby/structures/SparseMemory.cc417
-rw-r--r--src/mem/ruby/structures/SparseMemory.hh98
-rw-r--r--src/mem/ruby/structures/TBETable.hh124
-rw-r--r--src/mem/ruby/structures/TimerTable.cc129
-rw-r--r--src/mem/ruby/structures/TimerTable.hh107
-rw-r--r--src/mem/ruby/structures/WireBuffer.cc158
-rw-r--r--src/mem/ruby/structures/WireBuffer.hh102
-rw-r--r--src/mem/ruby/structures/WireBuffer.py35
33 files changed, 4913 insertions, 1 deletions
diff --git a/src/mem/ruby/structures/AbstractReplacementPolicy.hh b/src/mem/ruby/structures/AbstractReplacementPolicy.hh
new file mode 100644
index 000000000..3c492377e
--- /dev/null
+++ b/src/mem/ruby/structures/AbstractReplacementPolicy.hh
@@ -0,0 +1,89 @@
+/*
+ * Copyright (c) 2007 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __MEM_RUBY_SYSTEM_ABSTRACTREPLACEMENTPOLICY_HH__
+#define __MEM_RUBY_SYSTEM_ABSTRACTREPLACEMENTPOLICY_HH__
+
+#include "base/types.hh"
+
+class AbstractReplacementPolicy
+{
+ public:
+ AbstractReplacementPolicy(Index num_sets, Index assoc);
+ virtual ~AbstractReplacementPolicy();
+
+ /* touch a block. a.k.a. update timestamp */
+ virtual void touch(Index set, Index way, Tick time) = 0;
+
+ /* returns the way to replace */
+ virtual Index getVictim(Index set) const = 0;
+
+ /* get the time of the last access */
+ Tick getLastAccess(Index set, Index way);
+
+ protected:
+ unsigned m_num_sets; /** total number of sets */
+ unsigned m_assoc; /** set associativity */
+ Tick **m_last_ref_ptr; /** timestamp of last reference */
+};
+
+inline
+AbstractReplacementPolicy::AbstractReplacementPolicy(Index num_sets,
+ Index assoc)
+{
+ m_num_sets = num_sets;
+ m_assoc = assoc;
+ m_last_ref_ptr = new Tick*[m_num_sets];
+ for(unsigned i = 0; i < m_num_sets; i++){
+ m_last_ref_ptr[i] = new Tick[m_assoc];
+ for(unsigned j = 0; j < m_assoc; j++){
+ m_last_ref_ptr[i][j] = 0;
+ }
+ }
+}
+
+inline
+AbstractReplacementPolicy::~AbstractReplacementPolicy()
+{
+ if (m_last_ref_ptr != NULL){
+ for (unsigned i = 0; i < m_num_sets; i++){
+ if (m_last_ref_ptr[i] != NULL){
+ delete[] m_last_ref_ptr[i];
+ }
+ }
+ delete[] m_last_ref_ptr;
+ }
+}
+
+inline Tick
+AbstractReplacementPolicy::getLastAccess(Index set, Index way)
+{
+ return m_last_ref_ptr[set][way];
+}
+
+#endif // __MEM_RUBY_SYSTEM_ABSTRACTREPLACEMENTPOLICY_HH__
diff --git a/src/mem/ruby/structures/BankedArray.cc b/src/mem/ruby/structures/BankedArray.cc
new file mode 100644
index 000000000..0644ffe8b
--- /dev/null
+++ b/src/mem/ruby/structures/BankedArray.cc
@@ -0,0 +1,85 @@
+/*
+ * Copyright (c) 2012 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Author: Brad Beckmann
+ *
+ */
+
+#include "base/intmath.hh"
+#include "mem/ruby/structures/BankedArray.hh"
+#include "mem/ruby/system/System.hh"
+
+BankedArray::BankedArray(unsigned int banks, Cycles accessLatency,
+ unsigned int startIndexBit)
+{
+ this->banks = banks;
+ this->accessLatency = accessLatency;
+ this->startIndexBit = startIndexBit;
+
+ if (banks != 0) {
+ bankBits = floorLog2(banks);
+ }
+
+ busyBanks.resize(banks);
+}
+
+bool
+BankedArray::tryAccess(Index idx)
+{
+ if (accessLatency == 0)
+ return true;
+
+ unsigned int bank = mapIndexToBank(idx);
+ assert(bank < banks);
+
+ if (busyBanks[bank].endAccess >= curTick()) {
+ if (!(busyBanks[bank].startAccess == curTick() &&
+ busyBanks[bank].idx == idx)) {
+ return false;
+ } else {
+ // We tried to allocate resources twice
+ // in the same cycle for the same addr
+ return true;
+ }
+ }
+
+ busyBanks[bank].idx = idx;
+ busyBanks[bank].startAccess = curTick();
+ busyBanks[bank].endAccess = curTick() +
+ (accessLatency-1) * g_system_ptr->clockPeriod();
+
+ return true;
+}
+
+unsigned int
+BankedArray::mapIndexToBank(Index idx)
+{
+ if (banks == 1) {
+ return 0;
+ }
+ return idx % banks;
+}
diff --git a/src/mem/ruby/structures/BankedArray.hh b/src/mem/ruby/structures/BankedArray.hh
new file mode 100644
index 000000000..89007befa
--- /dev/null
+++ b/src/mem/ruby/structures/BankedArray.hh
@@ -0,0 +1,72 @@
+/*
+ * Copyright (c) 2012 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Author: Brad Beckmann
+ *
+ */
+
+#ifndef __MEM_RUBY_SYSTEM_BANKEDARRAY_HH__
+#define __MEM_RUBY_SYSTEM_BANKEDARRAY_HH__
+
+#include <vector>
+
+#include "mem/ruby/common/TypeDefines.hh"
+#include "sim/core.hh"
+
+class BankedArray
+{
+ private:
+ unsigned int banks;
+ Cycles accessLatency;
+ unsigned int bankBits;
+ unsigned int startIndexBit;
+
+ class AccessRecord
+ {
+ public:
+ AccessRecord() : idx(0), startAccess(0), endAccess(0) {}
+ Index idx;
+ Tick startAccess;
+ Tick endAccess;
+ };
+
+ // If the tick event is scheduled then the bank is busy
+ // otherwise, schedule the event and wait for it to complete
+ std::vector<AccessRecord> busyBanks;
+
+ unsigned int mapIndexToBank(Index idx);
+
+ public:
+ BankedArray(unsigned int banks, Cycles accessLatency, unsigned int startIndexBit);
+
+ // Note: We try the access based on the cache index, not the address
+ // This is so we don't get aliasing on blocks being replaced
+ bool tryAccess(Index idx);
+
+};
+
+#endif
diff --git a/src/mem/ruby/structures/Cache.py b/src/mem/ruby/structures/Cache.py
new file mode 100644
index 000000000..14a359233
--- /dev/null
+++ b/src/mem/ruby/structures/Cache.py
@@ -0,0 +1,49 @@
+# Copyright (c) 2009 Advanced Micro Devices, Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met: redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer;
+# redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution;
+# neither the name of the copyright holders nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Authors: Steve Reinhardt
+# Brad Beckmann
+
+from m5.params import *
+from m5.SimObject import SimObject
+from Controller import RubyController
+
+class RubyCache(SimObject):
+ type = 'RubyCache'
+ cxx_class = 'CacheMemory'
+ cxx_header = "mem/ruby/structures/CacheMemory.hh"
+ size = Param.MemorySize("capacity in bytes");
+ latency = Param.Cycles("");
+ assoc = Param.Int("");
+ replacement_policy = Param.String("PSEUDO_LRU", "");
+ start_index_bit = Param.Int(6, "index start, default 6 for 64-byte line");
+ is_icache = Param.Bool(False, "is instruction only cache");
+
+ dataArrayBanks = Param.Int(1, "Number of banks for the data array")
+ tagArrayBanks = Param.Int(1, "Number of banks for the tag array")
+ dataAccessLatency = Param.Cycles(1, "cycles for a data array access")
+ tagAccessLatency = Param.Cycles(1, "cycles for a tag array access")
+ resourceStalls = Param.Bool(False, "stall if there is a resource failure")
diff --git a/src/mem/ruby/structures/CacheMemory.cc b/src/mem/ruby/structures/CacheMemory.cc
new file mode 100644
index 000000000..7ce6cd584
--- /dev/null
+++ b/src/mem/ruby/structures/CacheMemory.cc
@@ -0,0 +1,565 @@
+/*
+ * Copyright (c) 1999-2012 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "base/intmath.hh"
+#include "debug/RubyCache.hh"
+#include "debug/RubyCacheTrace.hh"
+#include "debug/RubyResourceStalls.hh"
+#include "debug/RubyStats.hh"
+#include "mem/protocol/AccessPermission.hh"
+#include "mem/ruby/structures/CacheMemory.hh"
+#include "mem/ruby/system/System.hh"
+
+using namespace std;
+
+ostream&
+operator<<(ostream& out, const CacheMemory& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+CacheMemory *
+RubyCacheParams::create()
+{
+ return new CacheMemory(this);
+}
+
+CacheMemory::CacheMemory(const Params *p)
+ : SimObject(p),
+ dataArray(p->dataArrayBanks, p->dataAccessLatency, p->start_index_bit),
+ tagArray(p->tagArrayBanks, p->tagAccessLatency, p->start_index_bit)
+{
+ m_cache_size = p->size;
+ m_latency = p->latency;
+ m_cache_assoc = p->assoc;
+ m_policy = p->replacement_policy;
+ m_start_index_bit = p->start_index_bit;
+ m_is_instruction_only_cache = p->is_icache;
+ m_resource_stalls = p->resourceStalls;
+}
+
+void
+CacheMemory::init()
+{
+ m_cache_num_sets = (m_cache_size / m_cache_assoc) /
+ RubySystem::getBlockSizeBytes();
+ assert(m_cache_num_sets > 1);
+ m_cache_num_set_bits = floorLog2(m_cache_num_sets);
+ assert(m_cache_num_set_bits > 0);
+
+ if (m_policy == "PSEUDO_LRU")
+ m_replacementPolicy_ptr =
+ new PseudoLRUPolicy(m_cache_num_sets, m_cache_assoc);
+ else if (m_policy == "LRU")
+ m_replacementPolicy_ptr =
+ new LRUPolicy(m_cache_num_sets, m_cache_assoc);
+ else
+ assert(false);
+
+ m_cache.resize(m_cache_num_sets);
+ for (int i = 0; i < m_cache_num_sets; i++) {
+ m_cache[i].resize(m_cache_assoc);
+ for (int j = 0; j < m_cache_assoc; j++) {
+ m_cache[i][j] = NULL;
+ }
+ }
+}
+
+CacheMemory::~CacheMemory()
+{
+ if (m_replacementPolicy_ptr != NULL)
+ delete m_replacementPolicy_ptr;
+ for (int i = 0; i < m_cache_num_sets; i++) {
+ for (int j = 0; j < m_cache_assoc; j++) {
+ delete m_cache[i][j];
+ }
+ }
+}
+
+// convert a Address to its location in the cache
+Index
+CacheMemory::addressToCacheSet(const Address& address) const
+{
+ assert(address == line_address(address));
+ return address.bitSelect(m_start_index_bit,
+ m_start_index_bit + m_cache_num_set_bits - 1);
+}
+
+// Given a cache index: returns the index of the tag in a set.
+// returns -1 if the tag is not found.
+int
+CacheMemory::findTagInSet(Index cacheSet, const Address& tag) const
+{
+ assert(tag == line_address(tag));
+ // search the set for the tags
+ m5::hash_map<Address, int>::const_iterator it = m_tag_index.find(tag);
+ if (it != m_tag_index.end())
+ if (m_cache[cacheSet][it->second]->m_Permission !=
+ AccessPermission_NotPresent)
+ return it->second;
+ return -1; // Not found
+}
+
+// Given a cache index: returns the index of the tag in a set.
+// returns -1 if the tag is not found.
+int
+CacheMemory::findTagInSetIgnorePermissions(Index cacheSet,
+ const Address& tag) const
+{
+ assert(tag == line_address(tag));
+ // search the set for the tags
+ m5::hash_map<Address, int>::const_iterator it = m_tag_index.find(tag);
+ if (it != m_tag_index.end())
+ return it->second;
+ return -1; // Not found
+}
+
+bool
+CacheMemory::tryCacheAccess(const Address& address, RubyRequestType type,
+ DataBlock*& data_ptr)
+{
+ assert(address == line_address(address));
+ DPRINTF(RubyCache, "address: %s\n", address);
+ Index cacheSet = addressToCacheSet(address);
+ int loc = findTagInSet(cacheSet, address);
+ if (loc != -1) {
+ // Do we even have a tag match?
+ AbstractCacheEntry* entry = m_cache[cacheSet][loc];
+ m_replacementPolicy_ptr->touch(cacheSet, loc, curTick());
+ data_ptr = &(entry->getDataBlk());
+
+ if (entry->m_Permission == AccessPermission_Read_Write) {
+ return true;
+ }
+ if ((entry->m_Permission == AccessPermission_Read_Only) &&
+ (type == RubyRequestType_LD || type == RubyRequestType_IFETCH)) {
+ return true;
+ }
+ // The line must not be accessible
+ }
+ data_ptr = NULL;
+ return false;
+}
+
+bool
+CacheMemory::testCacheAccess(const Address& address, RubyRequestType type,
+ DataBlock*& data_ptr)
+{
+ assert(address == line_address(address));
+ DPRINTF(RubyCache, "address: %s\n", address);
+ Index cacheSet = addressToCacheSet(address);
+ int loc = findTagInSet(cacheSet, address);
+
+ if (loc != -1) {
+ // Do we even have a tag match?
+ AbstractCacheEntry* entry = m_cache[cacheSet][loc];
+ m_replacementPolicy_ptr->touch(cacheSet, loc, curTick());
+ data_ptr = &(entry->getDataBlk());
+
+ return m_cache[cacheSet][loc]->m_Permission !=
+ AccessPermission_NotPresent;
+ }
+
+ data_ptr = NULL;
+ return false;
+}
+
+// tests to see if an address is present in the cache
+bool
+CacheMemory::isTagPresent(const Address& address) const
+{
+ assert(address == line_address(address));
+ Index cacheSet = addressToCacheSet(address);
+ int loc = findTagInSet(cacheSet, address);
+
+ if (loc == -1) {
+ // We didn't find the tag
+ DPRINTF(RubyCache, "No tag match for address: %s\n", address);
+ return false;
+ }
+ DPRINTF(RubyCache, "address: %s found\n", address);
+ return true;
+}
+
+// Returns true if there is:
+// a) a tag match on this address or there is
+// b) an unused line in the same cache "way"
+bool
+CacheMemory::cacheAvail(const Address& address) const
+{
+ assert(address == line_address(address));
+
+ Index cacheSet = addressToCacheSet(address);
+
+ for (int i = 0; i < m_cache_assoc; i++) {
+ AbstractCacheEntry* entry = m_cache[cacheSet][i];
+ if (entry != NULL) {
+ if (entry->m_Address == address ||
+ entry->m_Permission == AccessPermission_NotPresent) {
+ // Already in the cache or we found an empty entry
+ return true;
+ }
+ } else {
+ return true;
+ }
+ }
+ return false;
+}
+
+AbstractCacheEntry*
+CacheMemory::allocate(const Address& address, AbstractCacheEntry* entry)
+{
+ assert(address == line_address(address));
+ assert(!isTagPresent(address));
+ assert(cacheAvail(address));
+ DPRINTF(RubyCache, "address: %s\n", address);
+
+ // Find the first open slot
+ Index cacheSet = addressToCacheSet(address);
+ std::vector<AbstractCacheEntry*> &set = m_cache[cacheSet];
+ for (int i = 0; i < m_cache_assoc; i++) {
+ if (!set[i] || set[i]->m_Permission == AccessPermission_NotPresent) {
+ set[i] = entry; // Init entry
+ set[i]->m_Address = address;
+ set[i]->m_Permission = AccessPermission_Invalid;
+ DPRINTF(RubyCache, "Allocate clearing lock for addr: %x\n",
+ address);
+ set[i]->m_locked = -1;
+ m_tag_index[address] = i;
+
+ m_replacementPolicy_ptr->touch(cacheSet, i, curTick());
+
+ return entry;
+ }
+ }
+ panic("Allocate didn't find an available entry");
+}
+
+void
+CacheMemory::deallocate(const Address& address)
+{
+ assert(address == line_address(address));
+ assert(isTagPresent(address));
+ DPRINTF(RubyCache, "address: %s\n", address);
+ Index cacheSet = addressToCacheSet(address);
+ int loc = findTagInSet(cacheSet, address);
+ if (loc != -1) {
+ delete m_cache[cacheSet][loc];
+ m_cache[cacheSet][loc] = NULL;
+ m_tag_index.erase(address);
+ }
+}
+
+// Returns with the physical address of the conflicting cache line
+Address
+CacheMemory::cacheProbe(const Address& address) const
+{
+ assert(address == line_address(address));
+ assert(!cacheAvail(address));
+
+ Index cacheSet = addressToCacheSet(address);
+ return m_cache[cacheSet][m_replacementPolicy_ptr->getVictim(cacheSet)]->
+ m_Address;
+}
+
+// looks an address up in the cache
+AbstractCacheEntry*
+CacheMemory::lookup(const Address& address)
+{
+ assert(address == line_address(address));
+ Index cacheSet = addressToCacheSet(address);
+ int loc = findTagInSet(cacheSet, address);
+ if(loc == -1) return NULL;
+ return m_cache[cacheSet][loc];
+}
+
+// looks an address up in the cache
+const AbstractCacheEntry*
+CacheMemory::lookup(const Address& address) const
+{
+ assert(address == line_address(address));
+ Index cacheSet = addressToCacheSet(address);
+ int loc = findTagInSet(cacheSet, address);
+ if(loc == -1) return NULL;
+ return m_cache[cacheSet][loc];
+}
+
+// Sets the most recently used bit for a cache block
+void
+CacheMemory::setMRU(const Address& address)
+{
+ Index cacheSet = addressToCacheSet(address);
+ int loc = findTagInSet(cacheSet, address);
+
+ if(loc != -1)
+ m_replacementPolicy_ptr->touch(cacheSet, loc, curTick());
+}
+
+void
+CacheMemory::recordCacheContents(int cntrl, CacheRecorder* tr) const
+{
+ uint64 warmedUpBlocks = 0;
+ uint64 totalBlocks M5_VAR_USED = (uint64)m_cache_num_sets
+ * (uint64)m_cache_assoc;
+
+ for (int i = 0; i < m_cache_num_sets; i++) {
+ for (int j = 0; j < m_cache_assoc; j++) {
+ if (m_cache[i][j] != NULL) {
+ AccessPermission perm = m_cache[i][j]->m_Permission;
+ RubyRequestType request_type = RubyRequestType_NULL;
+ if (perm == AccessPermission_Read_Only) {
+ if (m_is_instruction_only_cache) {
+ request_type = RubyRequestType_IFETCH;
+ } else {
+ request_type = RubyRequestType_LD;
+ }
+ } else if (perm == AccessPermission_Read_Write) {
+ request_type = RubyRequestType_ST;
+ }
+
+ if (request_type != RubyRequestType_NULL) {
+ tr->addRecord(cntrl, m_cache[i][j]->m_Address.getAddress(),
+ 0, request_type,
+ m_replacementPolicy_ptr->getLastAccess(i, j),
+ m_cache[i][j]->getDataBlk());
+ warmedUpBlocks++;
+ }
+ }
+ }
+ }
+
+ DPRINTF(RubyCacheTrace, "%s: %lli blocks of %lli total blocks"
+ "recorded %.2f%% \n", name().c_str(), warmedUpBlocks,
+ (uint64)m_cache_num_sets * (uint64)m_cache_assoc,
+ (float(warmedUpBlocks)/float(totalBlocks))*100.0);
+}
+
+void
+CacheMemory::print(ostream& out) const
+{
+ out << "Cache dump: " << name() << endl;
+ for (int i = 0; i < m_cache_num_sets; i++) {
+ for (int j = 0; j < m_cache_assoc; j++) {
+ if (m_cache[i][j] != NULL) {
+ out << " Index: " << i
+ << " way: " << j
+ << " entry: " << *m_cache[i][j] << endl;
+ } else {
+ out << " Index: " << i
+ << " way: " << j
+ << " entry: NULL" << endl;
+ }
+ }
+ }
+}
+
+void
+CacheMemory::printData(ostream& out) const
+{
+ out << "printData() not supported" << endl;
+}
+
+void
+CacheMemory::setLocked(const Address& address, int context)
+{
+ DPRINTF(RubyCache, "Setting Lock for addr: %x to %d\n", address, context);
+ assert(address == line_address(address));
+ Index cacheSet = addressToCacheSet(address);
+ int loc = findTagInSet(cacheSet, address);
+ assert(loc != -1);
+ m_cache[cacheSet][loc]->m_locked = context;
+}
+
+void
+CacheMemory::clearLocked(const Address& address)
+{
+ DPRINTF(RubyCache, "Clear Lock for addr: %x\n", address);
+ assert(address == line_address(address));
+ Index cacheSet = addressToCacheSet(address);
+ int loc = findTagInSet(cacheSet, address);
+ assert(loc != -1);
+ m_cache[cacheSet][loc]->m_locked = -1;
+}
+
+bool
+CacheMemory::isLocked(const Address& address, int context)
+{
+ assert(address == line_address(address));
+ Index cacheSet = addressToCacheSet(address);
+ int loc = findTagInSet(cacheSet, address);
+ assert(loc != -1);
+ DPRINTF(RubyCache, "Testing Lock for addr: %llx cur %d con %d\n",
+ address, m_cache[cacheSet][loc]->m_locked, context);
+ return m_cache[cacheSet][loc]->m_locked == context;
+}
+
+void
+CacheMemory::regStats()
+{
+ m_demand_hits
+ .name(name() + ".demand_hits")
+ .desc("Number of cache demand hits")
+ ;
+
+ m_demand_misses
+ .name(name() + ".demand_misses")
+ .desc("Number of cache demand misses")
+ ;
+
+ m_demand_accesses
+ .name(name() + ".demand_accesses")
+ .desc("Number of cache demand accesses")
+ ;
+
+ m_demand_accesses = m_demand_hits + m_demand_misses;
+
+ m_sw_prefetches
+ .name(name() + ".total_sw_prefetches")
+ .desc("Number of software prefetches")
+ .flags(Stats::nozero)
+ ;
+
+ m_hw_prefetches
+ .name(name() + ".total_hw_prefetches")
+ .desc("Number of hardware prefetches")
+ .flags(Stats::nozero)
+ ;
+
+ m_prefetches
+ .name(name() + ".total_prefetches")
+ .desc("Number of prefetches")
+ .flags(Stats::nozero)
+ ;
+
+ m_prefetches = m_sw_prefetches + m_hw_prefetches;
+
+ m_accessModeType
+ .init(RubyRequestType_NUM)
+ .name(name() + ".access_mode")
+ .flags(Stats::pdf | Stats::total)
+ ;
+ for (int i = 0; i < RubyAccessMode_NUM; i++) {
+ m_accessModeType
+ .subname(i, RubyAccessMode_to_string(RubyAccessMode(i)))
+ .flags(Stats::nozero)
+ ;
+ }
+
+ numDataArrayReads
+ .name(name() + ".num_data_array_reads")
+ .desc("number of data array reads")
+ .flags(Stats::nozero)
+ ;
+
+ numDataArrayWrites
+ .name(name() + ".num_data_array_writes")
+ .desc("number of data array writes")
+ .flags(Stats::nozero)
+ ;
+
+ numTagArrayReads
+ .name(name() + ".num_tag_array_reads")
+ .desc("number of tag array reads")
+ .flags(Stats::nozero)
+ ;
+
+ numTagArrayWrites
+ .name(name() + ".num_tag_array_writes")
+ .desc("number of tag array writes")
+ .flags(Stats::nozero)
+ ;
+
+ numTagArrayStalls
+ .name(name() + ".num_tag_array_stalls")
+ .desc("number of stalls caused by tag array")
+ .flags(Stats::nozero)
+ ;
+
+ numDataArrayStalls
+ .name(name() + ".num_data_array_stalls")
+ .desc("number of stalls caused by data array")
+ .flags(Stats::nozero)
+ ;
+}
+
+void
+CacheMemory::recordRequestType(CacheRequestType requestType)
+{
+ DPRINTF(RubyStats, "Recorded statistic: %s\n",
+ CacheRequestType_to_string(requestType));
+ switch(requestType) {
+ case CacheRequestType_DataArrayRead:
+ numDataArrayReads++;
+ return;
+ case CacheRequestType_DataArrayWrite:
+ numDataArrayWrites++;
+ return;
+ case CacheRequestType_TagArrayRead:
+ numTagArrayReads++;
+ return;
+ case CacheRequestType_TagArrayWrite:
+ numTagArrayWrites++;
+ return;
+ default:
+ warn("CacheMemory access_type not found: %s",
+ CacheRequestType_to_string(requestType));
+ }
+}
+
+bool
+CacheMemory::checkResourceAvailable(CacheResourceType res, Address addr)
+{
+ if (!m_resource_stalls) {
+ return true;
+ }
+
+ if (res == CacheResourceType_TagArray) {
+ if (tagArray.tryAccess(addressToCacheSet(addr))) return true;
+ else {
+ DPRINTF(RubyResourceStalls,
+ "Tag array stall on addr %s in set %d\n",
+ addr, addressToCacheSet(addr));
+ numTagArrayStalls++;
+ return false;
+ }
+ } else if (res == CacheResourceType_DataArray) {
+ if (dataArray.tryAccess(addressToCacheSet(addr))) return true;
+ else {
+ DPRINTF(RubyResourceStalls,
+ "Data array stall on addr %s in set %d\n",
+ addr, addressToCacheSet(addr));
+ numDataArrayStalls++;
+ return false;
+ }
+ } else {
+ assert(false);
+ return true;
+ }
+}
diff --git a/src/mem/ruby/structures/CacheMemory.hh b/src/mem/ruby/structures/CacheMemory.hh
new file mode 100644
index 000000000..87a0b40c0
--- /dev/null
+++ b/src/mem/ruby/structures/CacheMemory.hh
@@ -0,0 +1,173 @@
+/*
+ * Copyright (c) 1999-2012 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __MEM_RUBY_SYSTEM_CACHEMEMORY_HH__
+#define __MEM_RUBY_SYSTEM_CACHEMEMORY_HH__
+
+#include <string>
+#include <vector>
+
+#include "base/hashmap.hh"
+#include "base/statistics.hh"
+#include "mem/protocol/CacheRequestType.hh"
+#include "mem/protocol/CacheResourceType.hh"
+#include "mem/protocol/RubyRequest.hh"
+#include "mem/ruby/common/DataBlock.hh"
+#include "mem/ruby/slicc_interface/AbstractCacheEntry.hh"
+#include "mem/ruby/slicc_interface/RubySlicc_ComponentMapping.hh"
+#include "mem/ruby/structures/BankedArray.hh"
+#include "mem/ruby/structures/LRUPolicy.hh"
+#include "mem/ruby/structures/PseudoLRUPolicy.hh"
+#include "mem/ruby/system/CacheRecorder.hh"
+#include "params/RubyCache.hh"
+#include "sim/sim_object.hh"
+
+class CacheMemory : public SimObject
+{
+ public:
+ typedef RubyCacheParams Params;
+ CacheMemory(const Params *p);
+ ~CacheMemory();
+
+ void init();
+
+ // Public Methods
+ // perform a cache access and see if we hit or not. Return true on a hit.
+ bool tryCacheAccess(const Address& address, RubyRequestType type,
+ DataBlock*& data_ptr);
+
+ // similar to above, but doesn't require full access check
+ bool testCacheAccess(const Address& address, RubyRequestType type,
+ DataBlock*& data_ptr);
+
+ // tests to see if an address is present in the cache
+ bool isTagPresent(const Address& address) const;
+
+ // Returns true if there is:
+ // a) a tag match on this address or there is
+ // b) an unused line in the same cache "way"
+ bool cacheAvail(const Address& address) const;
+
+ // finds an unused entry and sets the tag appropriate for the address
+ AbstractCacheEntry* allocate(const Address& address, AbstractCacheEntry* new_entry);
+ void allocateVoid(const Address& address, AbstractCacheEntry* new_entry)
+ {
+ allocate(address, new_entry);
+ }
+
+ // Explicitly free up this address
+ void deallocate(const Address& address);
+
+ // Returns with the physical address of the conflicting cache line
+ Address cacheProbe(const Address& address) const;
+
+ // looks an address up in the cache
+ AbstractCacheEntry* lookup(const Address& address);
+ const AbstractCacheEntry* lookup(const Address& address) const;
+
+ Cycles getLatency() const { return m_latency; }
+
+ // Hook for checkpointing the contents of the cache
+ void recordCacheContents(int cntrl, CacheRecorder* tr) const;
+
+ // Set this address to most recently used
+ void setMRU(const Address& address);
+
+ void setLocked (const Address& addr, int context);
+ void clearLocked (const Address& addr);
+ bool isLocked (const Address& addr, int context);
+
+ // Print cache contents
+ void print(std::ostream& out) const;
+ void printData(std::ostream& out) const;
+
+ void regStats();
+ bool checkResourceAvailable(CacheResourceType res, Address addr);
+ void recordRequestType(CacheRequestType requestType);
+
+ public:
+ Stats::Scalar m_demand_hits;
+ Stats::Scalar m_demand_misses;
+ Stats::Formula m_demand_accesses;
+
+ Stats::Scalar m_sw_prefetches;
+ Stats::Scalar m_hw_prefetches;
+ Stats::Formula m_prefetches;
+
+ Stats::Vector m_accessModeType;
+
+ Stats::Scalar numDataArrayReads;
+ Stats::Scalar numDataArrayWrites;
+ Stats::Scalar numTagArrayReads;
+ Stats::Scalar numTagArrayWrites;
+
+ Stats::Scalar numTagArrayStalls;
+ Stats::Scalar numDataArrayStalls;
+
+ private:
+ // convert an Address to its location in the cache
+ Index addressToCacheSet(const Address& address) const;
+
+ // Given a cache tag: returns the index of the tag in a set.
+ // returns -1 if the tag is not found.
+ int findTagInSet(Index line, const Address& tag) const;
+ int findTagInSetIgnorePermissions(Index cacheSet,
+ const Address& tag) const;
+
+ // Private copy constructor and assignment operator
+ CacheMemory(const CacheMemory& obj);
+ CacheMemory& operator=(const CacheMemory& obj);
+
+ private:
+ Cycles m_latency;
+
+ // Data Members (m_prefix)
+ bool m_is_instruction_only_cache;
+
+ // The first index is the # of cache lines.
+ // The second index is the set associativity.
+ m5::hash_map<Address, int> m_tag_index;
+ std::vector<std::vector<AbstractCacheEntry*> > m_cache;
+
+ AbstractReplacementPolicy *m_replacementPolicy_ptr;
+
+ BankedArray dataArray;
+ BankedArray tagArray;
+
+ int m_cache_size;
+ std::string m_policy;
+ int m_cache_num_sets;
+ int m_cache_num_set_bits;
+ int m_cache_assoc;
+ int m_start_index_bit;
+ bool m_resource_stalls;
+};
+
+std::ostream& operator<<(std::ostream& out, const CacheMemory& obj);
+
+#endif // __MEM_RUBY_SYSTEM_CACHEMEMORY_HH__
diff --git a/src/mem/ruby/structures/DirectoryMemory.cc b/src/mem/ruby/structures/DirectoryMemory.cc
new file mode 100644
index 000000000..db165460c
--- /dev/null
+++ b/src/mem/ruby/structures/DirectoryMemory.cc
@@ -0,0 +1,212 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "base/intmath.hh"
+#include "debug/RubyCache.hh"
+#include "debug/RubyStats.hh"
+#include "mem/ruby/slicc_interface/RubySlicc_Util.hh"
+#include "mem/ruby/structures/DirectoryMemory.hh"
+#include "mem/ruby/system/System.hh"
+
+using namespace std;
+
+int DirectoryMemory::m_num_directories = 0;
+int DirectoryMemory::m_num_directories_bits = 0;
+uint64_t DirectoryMemory::m_total_size_bytes = 0;
+int DirectoryMemory::m_numa_high_bit = 0;
+
+DirectoryMemory::DirectoryMemory(const Params *p)
+ : SimObject(p)
+{
+ m_version = p->version;
+ m_size_bytes = p->size;
+ m_size_bits = floorLog2(m_size_bytes);
+ m_num_entries = 0;
+ m_use_map = p->use_map;
+ m_map_levels = p->map_levels;
+ m_numa_high_bit = p->numa_high_bit;
+}
+
+void
+DirectoryMemory::init()
+{
+ m_num_entries = m_size_bytes / RubySystem::getBlockSizeBytes();
+
+ if (m_use_map) {
+ m_sparseMemory = new SparseMemory(m_map_levels);
+ g_system_ptr->registerSparseMemory(m_sparseMemory);
+ } else {
+ m_entries = new AbstractEntry*[m_num_entries];
+ for (int i = 0; i < m_num_entries; i++)
+ m_entries[i] = NULL;
+ m_ram = g_system_ptr->getMemoryVector();
+ }
+
+ m_num_directories++;
+ m_num_directories_bits = ceilLog2(m_num_directories);
+ m_total_size_bytes += m_size_bytes;
+
+ if (m_numa_high_bit == 0) {
+ m_numa_high_bit = RubySystem::getMemorySizeBits() - 1;
+ }
+ assert(m_numa_high_bit != 0);
+}
+
+DirectoryMemory::~DirectoryMemory()
+{
+ // free up all the directory entries
+ if (m_entries != NULL) {
+ for (uint64 i = 0; i < m_num_entries; i++) {
+ if (m_entries[i] != NULL) {
+ delete m_entries[i];
+ }
+ }
+ delete [] m_entries;
+ } else if (m_use_map) {
+ delete m_sparseMemory;
+ }
+}
+
+uint64
+DirectoryMemory::mapAddressToDirectoryVersion(PhysAddress address)
+{
+ if (m_num_directories_bits == 0)
+ return 0;
+
+ uint64 ret = address.bitSelect(m_numa_high_bit - m_num_directories_bits + 1,
+ m_numa_high_bit);
+ return ret;
+}
+
+bool
+DirectoryMemory::isPresent(PhysAddress address)
+{
+ bool ret = (mapAddressToDirectoryVersion(address) == m_version);
+ return ret;
+}
+
+uint64
+DirectoryMemory::mapAddressToLocalIdx(PhysAddress address)
+{
+ uint64 ret;
+ if (m_num_directories_bits > 0) {
+ ret = address.bitRemove(m_numa_high_bit - m_num_directories_bits + 1,
+ m_numa_high_bit);
+ } else {
+ ret = address.getAddress();
+ }
+
+ return ret >> (RubySystem::getBlockSizeBits());
+}
+
+AbstractEntry*
+DirectoryMemory::lookup(PhysAddress address)
+{
+ assert(isPresent(address));
+ DPRINTF(RubyCache, "Looking up address: %s\n", address);
+
+ if (m_use_map) {
+ return m_sparseMemory->lookup(address);
+ } else {
+ uint64_t idx = mapAddressToLocalIdx(address);
+ assert(idx < m_num_entries);
+ return m_entries[idx];
+ }
+}
+
+AbstractEntry*
+DirectoryMemory::allocate(const PhysAddress& address, AbstractEntry* entry)
+{
+ assert(isPresent(address));
+ uint64 idx;
+ DPRINTF(RubyCache, "Looking up address: %s\n", address);
+
+ if (m_use_map) {
+ m_sparseMemory->add(address, entry);
+ entry->changePermission(AccessPermission_Read_Write);
+ } else {
+ idx = mapAddressToLocalIdx(address);
+ assert(idx < m_num_entries);
+ entry->getDataBlk().assign(m_ram->getBlockPtr(address));
+ entry->changePermission(AccessPermission_Read_Only);
+ m_entries[idx] = entry;
+ }
+
+ return entry;
+}
+
+void
+DirectoryMemory::invalidateBlock(PhysAddress address)
+{
+ if (m_use_map) {
+ assert(m_sparseMemory->exist(address));
+ m_sparseMemory->remove(address);
+ }
+#if 0
+ else {
+ assert(isPresent(address));
+
+ Index index = address.memoryModuleIndex();
+
+ if (index < 0 || index > m_size) {
+ ERROR_MSG("Directory Memory Assertion: "
+ "accessing memory out of range.");
+ }
+
+ if (m_entries[index] != NULL){
+ delete m_entries[index];
+ m_entries[index] = NULL;
+ }
+ }
+#endif
+}
+
+void
+DirectoryMemory::print(ostream& out) const
+{
+}
+
+void
+DirectoryMemory::regStats()
+{
+ if (m_use_map) {
+ m_sparseMemory->regStats(name());
+ }
+}
+
+void
+DirectoryMemory::recordRequestType(DirectoryRequestType requestType) {
+ DPRINTF(RubyStats, "Recorded statistic: %s\n",
+ DirectoryRequestType_to_string(requestType));
+}
+
+DirectoryMemory *
+RubyDirectoryMemoryParams::create()
+{
+ return new DirectoryMemory(this);
+}
diff --git a/src/mem/ruby/structures/DirectoryMemory.hh b/src/mem/ruby/structures/DirectoryMemory.hh
new file mode 100644
index 000000000..cc390e428
--- /dev/null
+++ b/src/mem/ruby/structures/DirectoryMemory.hh
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __MEM_RUBY_SYSTEM_DIRECTORYMEMORY_HH__
+#define __MEM_RUBY_SYSTEM_DIRECTORYMEMORY_HH__
+
+#include <iostream>
+#include <string>
+
+#include "mem/protocol/DirectoryRequestType.hh"
+#include "mem/ruby/common/Address.hh"
+#include "mem/ruby/slicc_interface/AbstractEntry.hh"
+#include "mem/ruby/structures/MemoryVector.hh"
+#include "mem/ruby/structures/SparseMemory.hh"
+#include "params/RubyDirectoryMemory.hh"
+#include "sim/sim_object.hh"
+
+class DirectoryMemory : public SimObject
+{
+ public:
+ typedef RubyDirectoryMemoryParams Params;
+ DirectoryMemory(const Params *p);
+ ~DirectoryMemory();
+
+ void init();
+
+ uint64 mapAddressToLocalIdx(PhysAddress address);
+ static uint64 mapAddressToDirectoryVersion(PhysAddress address);
+
+ bool isSparseImplementation() { return m_use_map; }
+ uint64 getSize() { return m_size_bytes; }
+
+ bool isPresent(PhysAddress address);
+ AbstractEntry* lookup(PhysAddress address);
+ AbstractEntry* allocate(const PhysAddress& address,
+ AbstractEntry* new_entry);
+
+ void invalidateBlock(PhysAddress address);
+
+ void print(std::ostream& out) const;
+ void regStats();
+
+ void recordRequestType(DirectoryRequestType requestType);
+
+ private:
+ // Private copy constructor and assignment operator
+ DirectoryMemory(const DirectoryMemory& obj);
+ DirectoryMemory& operator=(const DirectoryMemory& obj);
+
+ private:
+ const std::string m_name;
+ AbstractEntry **m_entries;
+ // int m_size; // # of memory module blocks this directory is
+ // responsible for
+ uint64 m_size_bytes;
+ uint64 m_size_bits;
+ uint64 m_num_entries;
+ int m_version;
+
+ static int m_num_directories;
+ static int m_num_directories_bits;
+ static uint64_t m_total_size_bytes;
+ static int m_numa_high_bit;
+
+ MemoryVector* m_ram;
+ SparseMemory* m_sparseMemory;
+ bool m_use_map;
+ int m_map_levels;
+};
+
+inline std::ostream&
+operator<<(std::ostream& out, const DirectoryMemory& obj)
+{
+ obj.print(out);
+ out << std::flush;
+ return out;
+}
+
+#endif // __MEM_RUBY_SYSTEM_DIRECTORYMEMORY_HH__
diff --git a/src/mem/ruby/structures/DirectoryMemory.py b/src/mem/ruby/structures/DirectoryMemory.py
new file mode 100644
index 000000000..c64439ce5
--- /dev/null
+++ b/src/mem/ruby/structures/DirectoryMemory.py
@@ -0,0 +1,44 @@
+# Copyright (c) 2009 Advanced Micro Devices, Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met: redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer;
+# redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution;
+# neither the name of the copyright holders nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Authors: Steve Reinhardt
+# Brad Beckmann
+
+from m5.params import *
+from m5.proxy import *
+from m5.SimObject import SimObject
+
+class RubyDirectoryMemory(SimObject):
+ type = 'RubyDirectoryMemory'
+ cxx_class = 'DirectoryMemory'
+ cxx_header = "mem/ruby/structures/DirectoryMemory.hh"
+ version = Param.Int(0, "")
+ size = Param.MemorySize("1GB", "capacity in bytes")
+ use_map = Param.Bool(False, "enable sparse memory")
+ map_levels = Param.Int(4, "sparse memory map levels")
+ # the default value of the numa high bit is specified in the command line
+ # option and must be passed into the directory memory sim object
+ numa_high_bit = Param.Int("numa high bit")
diff --git a/src/mem/ruby/structures/LRUPolicy.hh b/src/mem/ruby/structures/LRUPolicy.hh
new file mode 100644
index 000000000..bb61b9d50
--- /dev/null
+++ b/src/mem/ruby/structures/LRUPolicy.hh
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2007 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __MEM_RUBY_SYSTEM_LRUPOLICY_HH__
+#define __MEM_RUBY_SYSTEM_LRUPOLICY_HH__
+
+#include "mem/ruby/structures/AbstractReplacementPolicy.hh"
+
+/* Simple true LRU replacement policy */
+
+class LRUPolicy : public AbstractReplacementPolicy
+{
+ public:
+ LRUPolicy(Index num_sets, Index assoc);
+ ~LRUPolicy();
+
+ void touch(Index set, Index way, Tick time);
+ Index getVictim(Index set) const;
+};
+
+inline
+LRUPolicy::LRUPolicy(Index num_sets, Index assoc)
+ : AbstractReplacementPolicy(num_sets, assoc)
+{
+}
+
+inline
+LRUPolicy::~LRUPolicy()
+{
+}
+
+inline void
+LRUPolicy::touch(Index set, Index index, Tick time)
+{
+ assert(index >= 0 && index < m_assoc);
+ assert(set >= 0 && set < m_num_sets);
+
+ m_last_ref_ptr[set][index] = time;
+}
+
+inline Index
+LRUPolicy::getVictim(Index set) const
+{
+ // assert(m_assoc != 0);
+ Tick time, smallest_time;
+ Index smallest_index;
+
+ smallest_index = 0;
+ smallest_time = m_last_ref_ptr[set][0];
+
+ for (unsigned i = 0; i < m_assoc; i++) {
+ time = m_last_ref_ptr[set][i];
+ // assert(m_cache[cacheSet][i].m_Permission !=
+ // AccessPermission_NotPresent);
+
+ if (time < smallest_time) {
+ smallest_index = i;
+ smallest_time = time;
+ }
+ }
+
+ // DEBUG_EXPR(CACHE_COMP, MedPrio, cacheSet);
+ // DEBUG_EXPR(CACHE_COMP, MedPrio, smallest_index);
+ // DEBUG_EXPR(CACHE_COMP, MedPrio, m_cache[cacheSet][smallest_index]);
+ // DEBUG_EXPR(CACHE_COMP, MedPrio, *this);
+
+ return smallest_index;
+}
+
+#endif // __MEM_RUBY_SYSTEM_LRUPOLICY_HH__
diff --git a/src/mem/ruby/structures/MemoryControl.cc b/src/mem/ruby/structures/MemoryControl.cc
new file mode 100644
index 000000000..6c933b4d4
--- /dev/null
+++ b/src/mem/ruby/structures/MemoryControl.cc
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * Copyright (c) 2012 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "debug/RubyStats.hh"
+#include "mem/ruby/common/Global.hh"
+#include "mem/ruby/slicc_interface/RubySlicc_ComponentMapping.hh"
+#include "mem/ruby/structures/MemoryControl.hh"
+#include "mem/ruby/system/System.hh"
+
+using namespace std;
+MemoryControl::MemoryControl(const Params *p)
+ : ClockedObject(p), Consumer(this), m_event(this)
+{
+ g_system_ptr->registerMemController(this);
+}
+
+MemoryControl::~MemoryControl() {};
+
+void
+MemoryControl::recordRequestType(MemoryControlRequestType request) {
+ DPRINTF(RubyStats, "Recorded request: %s\n",
+ MemoryControlRequestType_to_string(request));
+}
diff --git a/src/mem/ruby/structures/MemoryControl.hh b/src/mem/ruby/structures/MemoryControl.hh
new file mode 100644
index 000000000..7285e0021
--- /dev/null
+++ b/src/mem/ruby/structures/MemoryControl.hh
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * Copyright (c) 2012 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __MEM_RUBY_SYSTEM_ABSTRACT_MEMORY_CONTROL_HH__
+#define __MEM_RUBY_SYSTEM_ABSTRACT_MEMORY_CONTROL_HH__
+
+#include <iostream>
+#include <list>
+#include <string>
+
+#include "mem/protocol/MemoryControlRequestType.hh"
+#include "mem/ruby/common/Consumer.hh"
+#include "mem/ruby/slicc_interface/Message.hh"
+#include "mem/ruby/structures/MemoryNode.hh"
+#include "params/MemoryControl.hh"
+#include "sim/clocked_object.hh"
+
+//////////////////////////////////////////////////////////////////////////////
+
+class MemoryControl : public ClockedObject, public Consumer
+{
+ public:
+ typedef MemoryControlParams Params;
+ const Params *params() const
+ { return dynamic_cast<const Params *>(_params); }
+
+ MemoryControl(const Params *p);
+ virtual void init() = 0;
+ virtual void reset() = 0;
+
+ ~MemoryControl();
+
+ virtual void wakeup() = 0;
+
+ virtual void setConsumer(Consumer* consumer_ptr) = 0;
+ virtual Consumer* getConsumer() = 0;
+ virtual void setClockObj(ClockedObject* consumer_ptr) {}
+
+ virtual void setDescription(const std::string& name) = 0;
+ virtual std::string getDescription() = 0;
+
+ // Called from the directory:
+ virtual void enqueue(const MsgPtr& message, Cycles latency) = 0;
+ virtual void enqueueMemRef(MemoryNode *memRef) = 0;
+ virtual void dequeue() = 0;
+ virtual const Message* peek() = 0;
+ virtual MemoryNode *peekNode() = 0;
+ virtual bool isReady() = 0;
+ virtual bool areNSlotsAvailable(int n) = 0; // infinite queue length
+
+ virtual void print(std::ostream& out) const = 0;
+ virtual void regStats() {};
+
+ virtual const int getChannel(const physical_address_t addr) const = 0;
+ virtual const int getBank(const physical_address_t addr) const = 0;
+ virtual const int getRank(const physical_address_t addr) const = 0;
+ virtual const int getRow(const physical_address_t addr) const = 0;
+
+ //added by SS
+ virtual int getBanksPerRank() = 0;
+ virtual int getRanksPerDimm() = 0;
+ virtual int getDimmsPerChannel() = 0;
+
+ virtual void recordRequestType(MemoryControlRequestType requestType);
+
+ virtual bool functionalReadBuffers(Packet *pkt)
+ { fatal("Functional read access not implemented!");}
+ virtual uint32_t functionalWriteBuffers(Packet *pkt)
+ { fatal("Functional write access not implemented!");}
+
+protected:
+ class MemCntrlEvent : public Event
+ {
+ public:
+ MemCntrlEvent(MemoryControl* _mem_cntrl)
+ {
+ mem_cntrl = _mem_cntrl;
+ }
+ private:
+ void process() { mem_cntrl->wakeup(); }
+
+ MemoryControl* mem_cntrl;
+ };
+
+ MemCntrlEvent m_event;
+};
+
+#endif // __MEM_RUBY_SYSTEM_ABSTRACT_MEMORY_CONTROL_HH__
diff --git a/src/mem/ruby/structures/MemoryControl.py b/src/mem/ruby/structures/MemoryControl.py
new file mode 100644
index 000000000..8a6879cb9
--- /dev/null
+++ b/src/mem/ruby/structures/MemoryControl.py
@@ -0,0 +1,39 @@
+# Copyright (c) 2009 Advanced Micro Devices, Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met: redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer;
+# redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution;
+# neither the name of the copyright holders nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Authors: Steve Reinhardt
+# Brad Beckmann
+
+from m5.params import *
+from ClockedObject import ClockedObject
+
+class MemoryControl(ClockedObject):
+ abstract = True
+ type = 'MemoryControl'
+ cxx_class = 'MemoryControl'
+ cxx_header = "mem/ruby/structures/MemoryControl.hh"
+ version = Param.Int("");
+ ruby_system = Param.RubySystem("")
diff --git a/src/mem/ruby/structures/MemoryNode.cc b/src/mem/ruby/structures/MemoryNode.cc
new file mode 100644
index 000000000..2a5cbb189
--- /dev/null
+++ b/src/mem/ruby/structures/MemoryNode.cc
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 1999 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "mem/ruby/structures/MemoryNode.hh"
+
+using namespace std;
+
+void
+MemoryNode::print(ostream& out) const
+{
+    // Dump the node as "[time, counter, msg; ]" in one chained write.
+    out << "[" << m_time << ", " << m_msg_counter << ", "
+        << m_msgptr << "; " << "]";
+}
diff --git a/src/mem/ruby/structures/MemoryNode.hh b/src/mem/ruby/structures/MemoryNode.hh
new file mode 100644
index 000000000..f215ab649
--- /dev/null
+++ b/src/mem/ruby/structures/MemoryNode.hh
@@ -0,0 +1,92 @@
+/*
+ * Copyright (c) 2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Description:
+ * This structure records everything known about a single
+ * memory request that is queued in the memory controller.
+ * It is created when the memory request first arrives
+ * at a memory controller and is deleted when the underlying
+ * message is enqueued to be sent back to the directory.
+ */
+
+#ifndef __MEM_RUBY_SYSTEM_MEMORYNODE_HH__
+#define __MEM_RUBY_SYSTEM_MEMORYNODE_HH__
+
+#include <iostream>
+
+#include "mem/ruby/common/TypeDefines.hh"
+#include "mem/ruby/slicc_interface/Message.hh"
+
+class MemoryNode
+{
+  public:
+    // Old-style constructor: the caller supplies an explicit message
+    // counter; a request that is not a memory read is treated as a
+    // dirty writeback.
+    MemoryNode(const Cycles& time, int counter, const MsgPtr& msgptr,
+               const physical_address_t addr, const bool is_mem_read)
+        : m_time(time), m_msg_counter(counter), m_msgptr(msgptr),
+          m_addr(addr), m_is_mem_read(is_mem_read),
+          m_is_dirty_wb(!is_mem_read)
+    {
+    }
+
+    // New-style constructor: the message counter defaults to 0 and the
+    // dirty-writeback flag is given explicitly.
+    MemoryNode(const Cycles& time, const MsgPtr& msgptr,
+               const physical_address_t addr, const bool is_mem_read,
+               const bool is_dirty_wb)
+        : m_time(time), m_msg_counter(0), m_msgptr(msgptr),
+          m_addr(addr), m_is_mem_read(is_mem_read),
+          m_is_dirty_wb(is_dirty_wb)
+    {
+    }
+
+    void print(std::ostream& out) const;
+
+    Cycles m_time;              // cycle the request arrived at the controller
+    int m_msg_counter;          // arrival counter (0 when unused)
+    MsgPtr m_msgptr;            // message to send back to the directory
+    physical_address_t m_addr;  // target physical address
+    bool m_is_mem_read;         // true for memory reads
+    bool m_is_dirty_wb;         // true for dirty writebacks
+};
+
+inline std::ostream&
+operator<<(std::ostream& out, const MemoryNode& obj)
+{
+    // Delegate to print(), then flush so debug output stays ordered.
+    obj.print(out);
+    return out << std::flush;
+}
+
+#endif // __MEM_RUBY_SYSTEM_MEMORYNODE_HH__
diff --git a/src/mem/ruby/structures/MemoryVector.hh b/src/mem/ruby/structures/MemoryVector.hh
new file mode 100644
index 000000000..f2488b591
--- /dev/null
+++ b/src/mem/ruby/structures/MemoryVector.hh
@@ -0,0 +1,237 @@
+/*
+ * Copyright (c) 2009 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __MEM_RUBY_SYSTEM_MEMORYVECTOR_HH__
+#define __MEM_RUBY_SYSTEM_MEMORYVECTOR_HH__
+
+#include "base/trace.hh"
+#include "debug/RubyCacheTrace.hh"
+#include "mem/ruby/common/Address.hh"
+
+class DirectoryMemory;
+
+/**
+ * MemoryVector holds memory data (DRAM only)
+ */
+class MemoryVector
+{
+  public:
+    MemoryVector();
+    MemoryVector(uint64 size);
+    ~MemoryVector();
+    friend class DirectoryMemory;
+
+    void resize(uint64 size);  // destructive
+
+    // Copy len bytes into / out of the backing store at paddr.  Pages
+    // are allocated lazily on first non-zero write; a NULL page reads
+    // back as all zeros.
+    void write(const Address & paddr, uint8_t *data, int len);
+    uint8_t *read(const Address & paddr, uint8_t *data, int len);
+    // Checkpoint support: serialize all pages into raw_data, or
+    // restore them from a previously collated buffer.
+    uint32_t collatePages(uint8_t *&raw_data);
+    void populatePages(uint8_t *raw_data);
+
+  private:
+    // Pointer to the byte at addr, allocating its page if needed.
+    uint8_t *getBlockPtr(const PhysAddress & addr);
+
+    uint64 m_size;                      // backing-store size in bytes
+    uint8_t **m_pages;                  // page table; NULL => all-zero page
+    uint32_t m_num_pages;               // number of entries in m_pages
+    const uint32_t m_page_offset_mask;  // mask of offset bits within a page
+    static const uint32_t PAGE_SIZE = 4096;
+};
+
+inline
+MemoryVector::MemoryVector()
+    : m_page_offset_mask(4095)
+{
+    // Empty vector: no backing pages exist until resize() is called.
+    m_size = 0;
+    m_num_pages = 0;
+    m_pages = NULL;
+}
+
+inline
+MemoryVector::MemoryVector(uint64 size)
+    : m_page_offset_mask(4095)
+{
+    // resize() inspects m_pages/m_num_pages in order to free a
+    // previous allocation; zero them first so it does not read
+    // uninitialized members and delete a garbage pointer (UB).
+    m_size = 0;
+    m_num_pages = 0;
+    m_pages = NULL;
+    resize(size);
+}
+
+inline
+MemoryVector::~MemoryVector()
+{
+    // Free every lazily-allocated page, then the page table itself.
+    // Both deletes are no-ops for a default-constructed empty vector.
+    for (int i = 0; i < m_num_pages; i++) {
+        if (m_pages[i] != 0) {
+            delete [] m_pages[i];
+        }
+    }
+    delete [] m_pages;
+}
+
+inline void
+MemoryVector::resize(uint64 size)
+{
+    // Destructive resize: any previously stored data is discarded.
+    if (m_pages != NULL){
+        for (int i = 0; i < m_num_pages; i++) {
+            if (m_pages[i] != 0) {
+                delete [] m_pages[i];
+            }
+        }
+        delete [] m_pages;
+    }
+    m_size = size;
+    assert(size % PAGE_SIZE == 0);
+    // Derive the page count from PAGE_SIZE instead of a hard-coded
+    // ">> 12" so the page size is defined in exactly one place.
+    m_num_pages = size / PAGE_SIZE;
+    m_pages = new uint8_t*[m_num_pages];
+    memset(m_pages, 0, m_num_pages * sizeof(uint8_t*));
+}
+
+inline void
+MemoryVector::write(const Address & paddr, uint8_t *data, int len)
+{
+    // NOTE(review): assumes [paddr, paddr+len) lies within a single
+    // 4 KB page -- a write crossing a page boundary would run off the
+    // end of the page.  TODO confirm callers only write within a block.
+    assert(paddr.getAddress() + len <= m_size);
+    uint32_t page_num = paddr.getAddress() >> 12;
+    if (m_pages[page_num] == 0) {
+        // Page not allocated yet.  Writing zeros to an implicit
+        // all-zero page is a no-op, so scan the data first and skip
+        // the allocation entirely when possible.
+        bool all_zeros = true;
+        for (int i = 0; i < len;i++) {
+            if (data[i] != 0) {
+                all_zeros = false;
+                break;
+            }
+        }
+        if (all_zeros)
+            return;
+        // First non-zero write: allocate and zero-fill the page.
+        m_pages[page_num] = new uint8_t[PAGE_SIZE];
+        memset(m_pages[page_num], 0, PAGE_SIZE);
+        uint32_t offset = paddr.getAddress() & m_page_offset_mask;
+        memcpy(&m_pages[page_num][offset], data, len);
+    } else {
+        memcpy(&m_pages[page_num][paddr.getAddress()&m_page_offset_mask],
+               data, len);
+    }
+}
+
+inline uint8_t*
+MemoryVector::read(const Address & paddr, uint8_t *data, int len)
+{
+    // Copy len bytes starting at paddr into the caller's buffer and
+    // return that buffer for convenience.
+    assert(paddr.getAddress() + len <= m_size);
+    const uint32_t page_num = paddr.getAddress() >> 12;
+    const uint8_t *page = m_pages[page_num];
+    if (page == 0) {
+        // Never-written pages are implicitly all-zero.
+        memset(data, 0, len);
+    } else {
+        memcpy(data, page + (paddr.getAddress() & m_page_offset_mask),
+               len);
+    }
+    return data;
+}
+
+inline uint8_t*
+MemoryVector::getBlockPtr(const PhysAddress & paddr)
+{
+    // Return a writable pointer to the byte at paddr, materializing
+    // the backing page (zero-filled) on first touch.
+    const uint32_t page_num = paddr.getAddress() >> 12;
+    uint8_t *&page = m_pages[page_num];
+    if (page == 0) {
+        page = new uint8_t[PAGE_SIZE];
+        memset(page, 0, PAGE_SIZE);
+    }
+    return page + (paddr.getAddress() & m_page_offset_mask);
+}
+
+/*!
+ * Function for collating all the pages of the physical memory together.
+ * In case a pointer for a page is NULL, this page needs only a single byte
+ * to represent that the pointer is NULL. Otherwise, it needs 1 + PAGE_SIZE
+ * bytes. The first represents that the page pointer is not NULL, and rest of
+ * the bytes represent the data on the page.
+ */
+
+inline uint32_t
+MemoryVector::collatePages(uint8_t *&raw_data)
+{
+    uint32_t num_zero_pages = 0;
+    uint32_t data_size = 0;
+
+    // First pass: count the NULL (all-zero) pages, which cost only a
+    // single flag byte in the serialized buffer.
+    for (uint32_t i = 0;i < m_num_pages; ++i)
+    {
+        if (m_pages[i] == 0) num_zero_pages++;
+    }
+
+    // Layout: page count, then per page a 0/1 flag byte followed by
+    // PAGE_SIZE bytes of data only when the flag is 1.
+    raw_data = new uint8_t[sizeof(uint32_t) /* number of pages*/ +
+                           m_num_pages /* whether the page is all zeros */ +
+                           PAGE_SIZE * (m_num_pages - num_zero_pages)];
+
+    /* Write the number of pages to be stored. */
+    memcpy(raw_data, &m_num_pages, sizeof(uint32_t));
+    data_size = sizeof(uint32_t);
+
+    DPRINTF(RubyCacheTrace, "collating %d pages\n", m_num_pages);
+
+    for (uint32_t i = 0;i < m_num_pages; ++i)
+    {
+        if (m_pages[i] == 0) {
+            // Flag byte 0: page was never allocated (all zeros).
+            raw_data[data_size] = 0;
+        } else {
+            // Flag byte 1 followed by the raw page contents.
+            raw_data[data_size] = 1;
+            memcpy(raw_data + data_size + 1, m_pages[i], PAGE_SIZE);
+            data_size += PAGE_SIZE;
+        }
+        data_size += 1;
+    }
+
+    // Returns the number of bytes actually used in raw_data.
+    return data_size;
+}
+
+/*!
+ * Function for populating the pages of the memory using the available raw
+ * data. Each page has a byte associate with it, which represents whether the
+ * page was NULL or not, when all the pages were collated. The function assumes
+ * that the number of pages in the memory are same as those that were recorded
+ * in the checkpoint.
+ */
+inline void
+MemoryVector::populatePages(uint8_t *raw_data)
+{
+    uint32_t data_size = 0;
+    uint32_t num_pages = 0;
+
+    /* Read the number of pages that were stored. */
+    memcpy(&num_pages, raw_data, sizeof(uint32_t));
+    data_size = sizeof(uint32_t);
+    // The memory must already be sized to match the checkpoint.
+    assert(num_pages == m_num_pages);
+
+    DPRINTF(RubyCacheTrace, "Populating %d pages\n", num_pages);
+
+    for (uint32_t i = 0;i < m_num_pages; ++i)
+    {
+        // Pages must be empty (freshly resized) before restoring.
+        assert(m_pages[i] == 0);
+        // Non-zero flag byte => PAGE_SIZE bytes of page data follow;
+        // zero flag byte => leave the page NULL (reads as zeros).
+        if (raw_data[data_size] != 0) {
+            m_pages[i] = new uint8_t[PAGE_SIZE];
+            memcpy(m_pages[i], raw_data + data_size + 1, PAGE_SIZE);
+            data_size += PAGE_SIZE;
+        }
+        data_size += 1;
+    }
+}
+
+#endif // __MEM_RUBY_SYSTEM_MEMORYVECTOR_HH__
diff --git a/src/mem/ruby/structures/PerfectCacheMemory.hh b/src/mem/ruby/structures/PerfectCacheMemory.hh
new file mode 100644
index 000000000..b56543c41
--- /dev/null
+++ b/src/mem/ruby/structures/PerfectCacheMemory.hh
@@ -0,0 +1,192 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __MEM_RUBY_SYSTEM_PERFECTCACHEMEMORY_HH__
+#define __MEM_RUBY_SYSTEM_PERFECTCACHEMEMORY_HH__
+
+#include "base/hashmap.hh"
+#include "mem/protocol/AccessPermission.hh"
+#include "mem/ruby/common/Address.hh"
+
+template<class ENTRY>
+struct PerfectCacheLineState
+{
+    // Permission starts as the sentinel AccessPermission_NUM, meaning
+    // "no permission assigned yet".
+    PerfectCacheLineState() { m_permission = AccessPermission_NUM; }
+    AccessPermission m_permission;
+    ENTRY m_entry;
+};
+
+template<class ENTRY>
+inline std::ostream&
+operator<<(std::ostream& out, const PerfectCacheLineState<ENTRY>& obj)
+{
+    // Intentionally prints nothing; exists so generated protocol code
+    // can stream any line state.
+    return out;
+}
+
+// An unbounded, conflict-free "cache": every line always fits, so
+// allocation never needs a victim.  Backed by a hash map keyed on the
+// line address.
+template<class ENTRY>
+class PerfectCacheMemory
+{
+  public:
+    PerfectCacheMemory();
+
+    // tests to see if an address is present in the cache
+    bool isTagPresent(const Address& address) const;
+
+    // Returns true if there is:
+    //   a) a tag match on this address or there is
+    //   b) an Invalid line in the same cache "way"
+    bool cacheAvail(const Address& address) const;
+
+    // find an Invalid entry and sets the tag appropriate for the address
+    void allocate(const Address& address);
+
+    void deallocate(const Address& address);
+
+    // Returns with the physical address of the conflicting cache line
+    Address cacheProbe(const Address& newAddress) const;
+
+    // looks an address up in the cache
+    ENTRY& lookup(const Address& address);
+    const ENTRY& lookup(const Address& address) const;
+
+    // Get/Set permission of cache block
+    AccessPermission getPermission(const Address& address) const;
+    void changePermission(const Address& address, AccessPermission new_perm);
+
+    // Print cache contents
+    void print(std::ostream& out) const;
+
+  private:
+    // Private copy constructor and assignment operator
+    PerfectCacheMemory(const PerfectCacheMemory& obj);
+    PerfectCacheMemory& operator=(const PerfectCacheMemory& obj);
+
+    // Data Members (m_prefix)
+    m5::hash_map<Address, PerfectCacheLineState<ENTRY> > m_map;
+};
+
+template<class ENTRY>
+inline std::ostream&
+operator<<(std::ostream& out, const PerfectCacheMemory<ENTRY>& obj)
+{
+    // Delegate to print(), then flush so debug output stays ordered.
+    obj.print(out);
+    return out << std::flush;
+}
+
+// Nothing to initialize eagerly; the map starts empty and lines are
+// created on demand.
+template<class ENTRY>
+inline
+PerfectCacheMemory<ENTRY>::PerfectCacheMemory()
+{
+}
+
+// tests to see if an address is present in the cache
+template<class ENTRY>
+inline bool
+PerfectCacheMemory<ENTRY>::isTagPresent(const Address& address) const
+{
+    // A line is present iff its line-aligned address is in the map.
+    return m_map.find(line_address(address)) != m_map.end();
+}
+
+template<class ENTRY>
+inline bool
+PerfectCacheMemory<ENTRY>::cacheAvail(const Address& address) const
+{
+    // A perfect cache has unbounded capacity: space is always
+    // available and no victim ever needs to be chosen.
+    return true;
+}
+
+// find an Invalid or already allocated entry and sets the tag
+// appropriate for the address
+template<class ENTRY>
+inline void
+PerfectCacheMemory<ENTRY>::allocate(const Address& address)
+{
+    // Create (or overwrite) the line with a fresh Invalid entry.
+    PerfectCacheLineState<ENTRY> &line = m_map[line_address(address)];
+    line.m_permission = AccessPermission_Invalid;
+    line.m_entry = ENTRY();
+}
+
+// deallocate entry
+template<class ENTRY>
+inline void
+PerfectCacheMemory<ENTRY>::deallocate(const Address& address)
+{
+    // Erasing a line that is not present is a harmless no-op.
+    m_map.erase(line_address(address));
+}
+
+// Returns with the physical address of the conflicting cache line
+template<class ENTRY>
+inline Address
+PerfectCacheMemory<ENTRY>::cacheProbe(const Address& newAddress) const
+{
+    // Never reached: a perfect cache has no conflicts, so asking for
+    // a victim is a programming error.
+    panic("cacheProbe called in perfect cache");
+    return newAddress;
+}
+
+// looks an address up in the cache
+template<class ENTRY>
+inline ENTRY&
+PerfectCacheMemory<ENTRY>::lookup(const Address& address)
+{
+    // NOTE(review): operator[] default-constructs a line when the tag
+    // is absent -- callers are expected to check isTagPresent() first.
+    return m_map[line_address(address)].m_entry;
+}
+
+// looks an address up in the cache
+template<class ENTRY>
+inline const ENTRY&
+PerfectCacheMemory<ENTRY>::lookup(const Address& address) const
+{
+    // hash_map::operator[] is non-const (it may insert), so it cannot
+    // be used in this const accessor; find() keeps the method
+    // compilable and truly read-only.  The line must already exist.
+    typename m5::hash_map<Address,
+        PerfectCacheLineState<ENTRY> >::const_iterator i =
+        m_map.find(line_address(address));
+    assert(i != m_map.end());
+    return i->second.m_entry;
+}
+
+template<class ENTRY>
+inline AccessPermission
+PerfectCacheMemory<ENTRY>::getPermission(const Address& address) const
+{
+    // m_map[...] would not compile in a const member function
+    // (operator[] is non-const and may insert); look the line up with
+    // find() instead.  The line must already exist.
+    typename m5::hash_map<Address,
+        PerfectCacheLineState<ENTRY> >::const_iterator i =
+        m_map.find(line_address(address));
+    assert(i != m_map.end());
+    return i->second.m_permission;
+}
+
+template<class ENTRY>
+inline void
+PerfectCacheMemory<ENTRY>::changePermission(const Address& address,
+                                            AccessPermission new_perm)
+{
+    // Work on the line-aligned address.  (Local renamed so it no
+    // longer shadows the line_address() helper function.)
+    Address line_addr = address;
+    line_addr.makeLineAddress();
+    m_map[line_addr].m_permission = new_perm;
+}
+
+template<class ENTRY>
+inline void
+PerfectCacheMemory<ENTRY>::print(std::ostream& out) const
+{
+    // Intentionally empty: there is no bounded state worth dumping.
+}
+
+#endif // __MEM_RUBY_SYSTEM_PERFECTCACHEMEMORY_HH__
diff --git a/src/mem/ruby/structures/PersistentTable.cc b/src/mem/ruby/structures/PersistentTable.cc
new file mode 100644
index 000000000..57b06946e
--- /dev/null
+++ b/src/mem/ruby/structures/PersistentTable.cc
@@ -0,0 +1,219 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "mem/ruby/structures/PersistentTable.hh"
+
+using namespace std;
+
+// randomize so that handoffs are not locality-aware
+#if 0
+int persistent_randomize[] = {0, 4, 8, 12, 1, 5, 9, 13, 2, 6,
+ 10, 14, 3, 7, 11, 15};
+int persistent_randomize[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ 10, 11, 12, 13, 14, 15};
+#endif
+
+// The table starts empty; entries are created on the first lock
+// request for a line and destroyed when the last locker releases it.
+PersistentTable::PersistentTable()
+{
+}
+
+PersistentTable::~PersistentTable()
+{
+}
+
+// Record that 'locker' wants a persistent lock on 'address' for the
+// given access type, creating the table entry on first use.
+void
+PersistentTable::persistentRequestLock(const Address& address,
+                                       MachineID locker,
+                                       AccessType type)
+{
+#if 0
+    if (locker == m_chip_ptr->getID())
+        cout << "Chip " << m_chip_ptr->getID() << ": " << llocker
+             << " requesting lock for " << address << endl;
+
+    MachineID locker = (MachineID) persistent_randomize[llocker];
+#endif
+
+    assert(address == line_address(address));
+
+    // insert() either creates a fresh empty entry or finds the
+    // existing one; r.second distinguishes the two cases.
+    static const PersistentTableEntry dflt;
+    pair<AddressMap::iterator, bool> r =
+        m_map.insert(AddressMap::value_type(address, dflt));
+    bool present = !r.second;
+    AddressMap::iterator i = r.first;
+    PersistentTableEntry &entry = i->second;
+
+    if (present) {
+        // Make sure we're not already in the locked set
+        assert(!(entry.m_starving.isElement(locker)));
+    }
+
+    entry.m_starving.add(locker);
+    if (type == AccessType_Write)
+        entry.m_request_to_write.add(locker);
+
+    if (present)
+        assert(entry.m_marked.isSubset(entry.m_starving));
+}
+
+// Release 'unlocker's persistent lock on 'address'; the table entry
+// is removed once the last starving machine has released it.
+void
+PersistentTable::persistentRequestUnlock(const Address& address,
+                                         MachineID unlocker)
+{
+#if 0
+    if (unlocker == m_chip_ptr->getID())
+        cout << "Chip " << m_chip_ptr->getID() << ": " << uunlocker
+             << " requesting unlock for " << address << endl;
+
+    MachineID unlocker = (MachineID) persistent_randomize[uunlocker];
+#endif
+
+    assert(address == line_address(address));
+    assert(m_map.count(address));
+    PersistentTableEntry& entry = m_map[address];
+
+    //
+    // Make sure we're in the locked set
+    //
+    assert(entry.m_starving.isElement(unlocker));
+    assert(entry.m_marked.isSubset(entry.m_starving));
+    entry.m_starving.remove(unlocker);
+    entry.m_marked.remove(unlocker);
+    entry.m_request_to_write.remove(unlocker);
+    assert(entry.m_marked.isSubset(entry.m_starving));
+
+    // Deallocate if empty
+    if (entry.m_starving.isEmpty()) {
+        assert(entry.m_marked.isEmpty());
+        m_map.erase(address);
+    }
+}
+
+// A machine may issue a new starving (persistent) request only if it
+// holds no outstanding lock on the line and no marked starvers remain.
+bool
+PersistentTable::okToIssueStarving(const Address& address,
+                                   MachineID machId) const
+{
+    assert(address == line_address(address));
+
+    AddressMap::const_iterator i = m_map.find(address);
+    if (i == m_map.end()) {
+        // No entry present
+        return true;
+    }
+
+    const PersistentTableEntry &entry = i->second;
+
+    if (entry.m_starving.isElement(machId)) {
+        // We can't issue another lockdown until our previous unlock
+        // has occurred
+        return false;
+    }
+
+    return entry.m_marked.isEmpty();
+}
+
+MachineID
+PersistentTable::findSmallest(const Address& address) const
+{
+    assert(address == line_address(address));
+    AddressMap::const_iterator it = m_map.find(address);
+    assert(it != m_map.end());
+    // The lowest-numbered starving machine wins the arbitration.
+    return it->second.m_starving.smallestElement();
+}
+
+AccessType
+PersistentTable::typeOfSmallest(const Address& address) const
+{
+    assert(address == line_address(address));
+    AddressMap::const_iterator it = m_map.find(address);
+    assert(it != m_map.end());
+    const PersistentTableEntry& entry = it->second;
+    // Write access iff the winning (smallest) starver asked to write.
+    return entry.m_request_to_write.isElement(
+        entry.m_starving.smallestElement()) ?
+        AccessType_Write : AccessType_Read;
+}
+
+// Snapshot the current set of starving machines into m_marked; a
+// no-op when the line has no entry.
+void
+PersistentTable::markEntries(const Address& address)
+{
+    assert(address == line_address(address));
+    AddressMap::iterator i = m_map.find(address);
+    if (i == m_map.end())
+        return;
+
+    PersistentTableEntry& entry = i->second;
+
+    // None should be marked
+    assert(entry.m_marked.isEmpty());
+
+    // Mark all the nodes currently in the table
+    entry.m_marked = entry.m_starving;
+}
+
+bool
+PersistentTable::isLocked(const Address& address) const
+{
+    assert(address == line_address(address));
+    // An entry exists iff at least one locker is outstanding, so
+    // presence in the map is the lock indicator.
+    return m_map.find(address) != m_map.end();
+}
+
+int
+PersistentTable::countStarvingForAddress(const Address& address) const
+{
+    assert(address == line_address(address));
+    AddressMap::const_iterator it = m_map.find(address);
+    // No entry means nobody is starving for this line.
+    return (it == m_map.end()) ? 0 : it->second.m_starving.count();
+}
+
+int
+PersistentTable::countReadStarvingForAddress(const Address& address) const
+{
+    assert(address == line_address(address));
+    AddressMap::const_iterator it = m_map.find(address);
+    if (it == m_map.end())
+        return 0;
+
+    // Readers are the starvers that did not request write access.
+    const PersistentTableEntry& e = it->second;
+    return e.m_starving.count() - e.m_request_to_write.count();
+}
+
+void
+PersistentTable::print(ostream& out) const
+{
+    // Intentionally empty; provided to satisfy the streaming interface.
+}
+
diff --git a/src/mem/ruby/structures/PersistentTable.hh b/src/mem/ruby/structures/PersistentTable.hh
new file mode 100644
index 000000000..b023987a4
--- /dev/null
+++ b/src/mem/ruby/structures/PersistentTable.hh
@@ -0,0 +1,100 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __MEM_RUBY_SYSTEM_PERSISTENTTABLE_HH__
+#define __MEM_RUBY_SYSTEM_PERSISTENTTABLE_HH__
+
+#include <iostream>
+
+#include "base/hashmap.hh"
+#include "mem/protocol/AccessType.hh"
+#include "mem/ruby/common/Address.hh"
+#include "mem/ruby/common/MachineID.hh"
+#include "mem/ruby/common/NetDest.hh"
+
+class PersistentTableEntry
+{
+  public:
+    PersistentTableEntry() {}
+    void print(std::ostream& out) const {}
+
+    NetDest m_starving;          // machines currently starving for the line
+    NetDest m_marked;            // starvers captured by markEntries()
+    NetDest m_request_to_write;  // subset of m_starving wanting write access
+};
+
+// Tracks persistent (starvation) lock requests per cache line.  An
+// entry exists iff the line is locked by at least one machine.
+class PersistentTable
+{
+  public:
+    // Constructors
+    PersistentTable();
+
+    // Destructor
+    ~PersistentTable();
+
+    // Public Methods
+    void persistentRequestLock(const Address& address, MachineID locker,
+                               AccessType type);
+    void persistentRequestUnlock(const Address& address, MachineID unlocker);
+    bool okToIssueStarving(const Address& address, MachineID machID) const;
+    MachineID findSmallest(const Address& address) const;
+    AccessType typeOfSmallest(const Address& address) const;
+    void markEntries(const Address& address);
+    bool isLocked(const Address& addr) const;
+    int countStarvingForAddress(const Address& addr) const;
+    int countReadStarvingForAddress(const Address& addr) const;
+
+    void print(std::ostream& out) const;
+
+  private:
+    // Private copy constructor and assignment operator
+    PersistentTable(const PersistentTable& obj);
+    PersistentTable& operator=(const PersistentTable& obj);
+
+    // Data Members (m_prefix)
+    typedef m5::hash_map<Address, PersistentTableEntry> AddressMap;
+    AddressMap m_map;
+};
+
+inline std::ostream&
+operator<<(std::ostream& out, const PersistentTable& obj)
+{
+    // Delegate to print(), then flush so debug output stays ordered.
+    obj.print(out);
+    return out << std::flush;
+}
+
+inline std::ostream&
+operator<<(std::ostream& out, const PersistentTableEntry& obj)
+{
+    // Delegate to print(), then flush so debug output stays ordered.
+    obj.print(out);
+    return out << std::flush;
+}
+
+#endif // __MEM_RUBY_SYSTEM_PERSISTENTTABLE_HH__
diff --git a/src/mem/ruby/structures/Prefetcher.hh b/src/mem/ruby/structures/Prefetcher.hh
index 967d96086..2bc7d812e 100644
--- a/src/mem/ruby/structures/Prefetcher.hh
+++ b/src/mem/ruby/structures/Prefetcher.hh
@@ -34,8 +34,8 @@
#include <bitset>
#include "base/statistics.hh"
-#include "mem/ruby/buffers/MessageBuffer.hh"
#include "mem/ruby/common/Address.hh"
+#include "mem/ruby/network/MessageBuffer.hh"
#include "mem/ruby/slicc_interface/AbstractController.hh"
#include "mem/ruby/slicc_interface/RubyRequest.hh"
#include "mem/ruby/system/System.hh"
diff --git a/src/mem/ruby/structures/PseudoLRUPolicy.hh b/src/mem/ruby/structures/PseudoLRUPolicy.hh
new file mode 100644
index 000000000..e464bbeac
--- /dev/null
+++ b/src/mem/ruby/structures/PseudoLRUPolicy.hh
@@ -0,0 +1,137 @@
+/*
+ * Copyright (c) 2007 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __MEM_RUBY_SYSTEM_PSEUDOLRUPOLICY_HH__
+#define __MEM_RUBY_SYSTEM_PSEUDOLRUPOLICY_HH__
+
+#include "mem/ruby/structures/AbstractReplacementPolicy.hh"
+
+/**
+ * Implementation of tree-based pseudo-LRU replacement
+ *
+ * Works for associativities from 2 up to sizeof(uint64)*4, the bound
+ * enforced by the constructor's assertion.
+ *
+ * Also implements associativities that are not a power of 2 by
+ * ignoring paths that lead to a larger index (i.e. truncating the
+ * tree). Note that when this occurs, the algorithm becomes less
+ * fair, as it will favor indices in the larger (by index) half of
+ * the associative set.  This is most unfair when the nearest power of
+ * 2 is one below the associativity, and most fair when it is one above.
+ */
+
+class PseudoLRUPolicy : public AbstractReplacementPolicy
+{
+  public:
+    PseudoLRUPolicy(Index num_sets, Index assoc);
+    ~PseudoLRUPolicy();
+
+    // Record a reference to (set, way): steer the tree bits toward this way
+    // and stamp m_last_ref_ptr with the reference time.
+    void touch(Index set, Index way, Tick time);
+    // Walk the tree away from recently-touched ways and return the victim
+    // way index for the given set.
+    Index getVictim(Index set) const;
+
+  private:
+    unsigned int m_effective_assoc;    /** nearest (to ceiling) power of 2 */
+    unsigned int m_num_levels;    /** number of levels in the tree */
+    uint64* m_trees;    /** bit representation of the
+                         * trees, one for each set */
+};
+
+// Constructor: round the associativity up to a power of 2 (the "effective"
+// associativity), derive the tree depth from it, and allocate one zeroed
+// uint64 tree per set.
+// NOTE(review): the file-header comment claims associativities up to 128,
+// but this assert caps assoc at sizeof(uint64)*4 == 32 — confirm which
+// limit is intended.
+inline
+PseudoLRUPolicy::PseudoLRUPolicy(Index num_sets, Index assoc)
+    : AbstractReplacementPolicy(num_sets, assoc)
+{
+    // associativity cannot exceed capacity of tree representation
+    assert(num_sets > 0 && assoc > 1 && assoc <= (Index) sizeof(uint64)*4);
+
+    m_trees = NULL;
+    m_num_levels = 0;
+
+    m_effective_assoc = 1;
+    while (m_effective_assoc < assoc) {
+        // effective associativity is ceiling power of 2
+        m_effective_assoc <<= 1;
+    }
+    assoc = m_effective_assoc;
+    // tree depth = log2(effective associativity)
+    while (true) {
+        assoc /= 2;
+        if(!assoc) break;
+        m_num_levels++;
+    }
+    assert(m_num_levels < sizeof(unsigned int)*4);
+    m_trees = new uint64[m_num_sets];
+    for (unsigned i = 0; i < m_num_sets; i++) {
+        m_trees[i] = 0;
+    }
+}
+
+// Destructor: release the per-set tree array allocated in the constructor.
+inline
+PseudoLRUPolicy::~PseudoLRUPolicy()
+{
+    if (m_trees != NULL)
+        delete[] m_trees;
+}
+
+// Record a reference to way `index` in `set`: walk the tree from the root,
+// setting each internal node's bit to the corresponding bit of the way
+// index so the tree points *toward* this way (getVictim later walks away
+// from set bits). The shifts stay within int range because the constructor
+// caps associativity at 32, bounding tree_index below 31.
+inline void
+PseudoLRUPolicy::touch(Index set, Index index, Tick time)
+{
+    assert(index >= 0 && index < m_assoc);
+    assert(set >= 0 && set < m_num_sets);
+
+    int tree_index = 0;
+    int node_val;
+    for (int i = m_num_levels - 1; i >= 0; i--) {
+        node_val = (index >> i)&1;
+        if (node_val)
+            m_trees[set] |= node_val << tree_index;  // node_val is 1 here
+        else
+            m_trees[set] &= ~(1 << tree_index);
+        // left child when the bit is 0, right child when it is 1
+        tree_index = node_val ? (tree_index*2)+2 : (tree_index*2)+1;
+    }
+    m_last_ref_ptr[set][index] = time;
+}
+
+// Choose a victim way for `set`: descend the tree taking the branch
+// opposite the stored bit (i.e. away from the recently-used half),
+// accumulating the way index one bit per level.
+inline Index
+PseudoLRUPolicy::getVictim(Index set) const
+{
+    // assert(m_assoc != 0);
+    Index index = 0;
+
+    int tree_index = 0;
+    int node_val;
+    for (unsigned i = 0; i < m_num_levels; i++){
+        node_val = (m_trees[set] >> tree_index) & 1;
+        // a 0 bit means the right (higher-index) half is older
+        index += node_val ? 0 : (m_effective_assoc >> (i + 1));
+        tree_index = node_val ? (tree_index * 2) + 1 : (tree_index * 2) + 2;
+    }
+    assert(index >= 0 && index < m_effective_assoc);
+
+    /* return either the found index or the max possible index */
+    /* NOTE: this is not a fair replacement when assoc is not a power of 2 */
+    return (index > (m_assoc - 1)) ? m_assoc - 1 : index;
+}
+
+#endif // __MEM_RUBY_SYSTEM_PSEUDOLRUPOLICY_HH__
diff --git a/src/mem/ruby/structures/RubyMemoryControl.cc b/src/mem/ruby/structures/RubyMemoryControl.cc
new file mode 100644
index 000000000..bc01c7f94
--- /dev/null
+++ b/src/mem/ruby/structures/RubyMemoryControl.cc
@@ -0,0 +1,791 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * Copyright (c) 2012 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * Description: This module simulates a basic DDR-style memory controller
+ * (and can easily be extended to do FB-DIMM as well).
+ *
+ * This module models a single channel, connected to any number of
+ * DIMMs with any number of ranks of DRAMs each. If you want multiple
+ * address/data channels, you need to instantiate multiple copies of
+ * this module.
+ *
+ * Each memory request is placed in a queue associated with a specific
+ * memory bank. This queue is of finite size; if the queue is full
+ * the request will back up in an (infinite) common queue and will
+ * effectively throttle the whole system. This sort of behavior is
+ * intended to be closer to real system behavior than if we had an
+ * infinite queue on each bank. If you want the latter, just make
+ * the bank queues unreasonably large.
+ *
+ * The head item on a bank queue is issued when all of the
+ * following are true:
+ * the bank is available
+ * the address path to the DIMM is available
+ * the data path to or from the DIMM is available
+ *
+ * Note that we are not concerned about fixed offsets in time. The bank
+ * will not be used at the same moment as the address path, but since
+ * there is no queue in the DIMM or the DRAM it will be used at a constant
+ * number of cycles later, so it is treated as if it is used at the same
+ * time.
+ *
+ * We are assuming closed bank policy; that is, we automatically close
+ * each bank after a single read or write. Adding an option for open
+ * bank policy is for future work.
+ *
+ * We are assuming "posted CAS"; that is, we send the READ or WRITE
+ * immediately after the ACTIVATE. This makes scheduling the address
+ * bus trivial; we always schedule a fixed set of cycles. For DDR-400,
+ * this is a set of two cycles; for some configurations such as
+ * DDR-800 the parameter tRRD forces this to be set to three cycles.
+ *
+ * We assume a four-bit-time transfer on the data wires. This is
+ * the minimum burst length for DDR-2. This would correspond
+ * to (for example) a memory where each DIMM is 72 bits wide
+ * and DIMMs are ganged in pairs to deliver 64 bytes at a shot.
+ * This gives us the same occupancy on the data wires as on the
+ * address wires (for the two-address-cycle case).
+ *
+ * The only non-trivial scheduling problem is the data wires.
+ * A write will use the wires earlier in the operation than a read
+ * will; typically one cycle earlier as seen at the DRAM, but earlier
+ * by a worst-case round-trip wire delay when seen at the memory controller.
+ * So, while reads from one rank can be scheduled back-to-back
+ * every two cycles, and writes (to any rank) scheduled every two cycles,
+ * when a read is followed by a write we need to insert a bubble.
+ * Furthermore, consecutive reads from two different ranks may need
+ * to insert a bubble due to skew between when one DRAM stops driving the
+ * wires and when the other one starts. (These bubbles are parameters.)
+ *
+ * This means that when some number of reads and writes are at the
+ * heads of their queues, reads could starve writes, and/or reads
+ * to the same rank could starve out other requests, since the others
+ * would never see the data bus ready.
+ * For this reason, we have implemented an anti-starvation feature.
+ * A group of requests is marked "old", and a counter is incremented
+ * each cycle as long as any request from that batch has not issued.
+ * if the counter reaches twice the bank busy time, we hold off any
+ * newer requests until all of the "old" requests have issued.
+ *
+ * We also model tFAW. This is an obscure DRAM parameter that says
+ * that no more than four activate requests can happen within a window
+ * of a certain size. For most configurations this does not come into play,
+ * or has very little effect, but it could be used to throttle the power
+ * consumption of the DRAM. In this implementation (unlike in a DRAM
+ * data sheet) TFAW is measured in memory bus cycles; i.e. if TFAW = 16
+ * then no more than four activates may happen within any 16 cycle window.
+ * Refreshes are included in the activates.
+ *
+ */
+
+#include "base/cast.hh"
+#include "base/cprintf.hh"
+#include "debug/RubyMemory.hh"
+#include "mem/ruby/common/Address.hh"
+#include "mem/ruby/common/Global.hh"
+#include "mem/ruby/profiler/Profiler.hh"
+#include "mem/ruby/slicc_interface/NetworkMessage.hh"
+#include "mem/ruby/slicc_interface/RubySlicc_ComponentMapping.hh"
+#include "mem/ruby/structures/RubyMemoryControl.hh"
+#include "mem/ruby/system/System.hh"
+
+using namespace std;
+
+// Value to reset watchdog timer to.
+// If we're idle for this many memory control cycles,
+// shut down our clock (our rescheduling of ourselves).
+// Refresh shuts down as well.
+// When we restart, we'll be in a different phase
+// with respect to ruby cycles, so this introduces
+// a slight inaccuracy. But it is necessary or the
+// ruby tester never terminates because the event
+// queue is never empty.
+#define IDLECOUNT_MAX_VALUE 1000
+
+// Output operator definition
+
+// Stream-insertion operator: delegates to RubyMemoryControl::print()
+// (currently a no-op) and flushes the stream.
+ostream&
+operator<<(ostream& out, const RubyMemoryControl& obj)
+{
+    obj.print(out);
+    out << flush;
+    return out;
+}
+
+
+// ****************************************************************
+
+// CONSTRUCTOR
+// Copies the DRAM geometry (banks/ranks/DIMMs and their address-bit
+// positions), queue sizing, and timing/arbitration parameters out of the
+// python-generated Params object, then creates the per-controller
+// profiler. Array/queue allocation is deferred to init().
+RubyMemoryControl::RubyMemoryControl(const Params *p)
+    : MemoryControl(p)
+{
+    m_banks_per_rank = p->banks_per_rank;
+    m_ranks_per_dimm = p->ranks_per_dimm;
+    m_dimms_per_channel = p->dimms_per_channel;
+    m_bank_bit_0 = p->bank_bit_0;
+    m_rank_bit_0 = p->rank_bit_0;
+    m_dimm_bit_0 = p->dimm_bit_0;
+    m_bank_queue_size = p->bank_queue_size;
+    m_bank_busy_time = p->bank_busy_time;
+    m_rank_rank_delay = p->rank_rank_delay;
+    m_read_write_delay = p->read_write_delay;
+    m_basic_bus_busy_time = p->basic_bus_busy_time;
+    m_mem_ctl_latency = p->mem_ctl_latency;
+    m_refresh_period = p->refresh_period;
+    m_tFaw = p->tFaw;
+    m_mem_random_arbitrate = p->mem_random_arbitrate;
+    m_mem_fixed_delay = p->mem_fixed_delay;
+
+    m_profiler_ptr = new MemCntrlProfiler(name(),
+                                          m_banks_per_rank,
+                                          m_ranks_per_dimm,
+                                          m_dimms_per_channel);
+}
+
+// init(): derive totals from the geometry parameters, allocate the
+// per-bank queues/counters and per-rank tFAW shift registers, and zero
+// all scheduling state. Called once after construction; reset() below
+// re-zeroes the same state without reallocating.
+void
+RubyMemoryControl::init()
+{
+    m_msg_counter = 0;
+
+    assert(m_tFaw <= 62); // must fit in a uint64 shift register
+
+    m_total_banks = m_banks_per_rank * m_ranks_per_dimm * m_dimms_per_channel;
+    m_total_ranks = m_ranks_per_dimm * m_dimms_per_channel;
+    // spread refreshes evenly: one bank refreshed per system refresh tick
+    m_refresh_period_system = m_refresh_period / m_total_banks;
+
+    m_bankQueues = new list<MemoryNode *> [m_total_banks];
+    assert(m_bankQueues);
+
+    m_bankBusyCounter = new int [m_total_banks];
+    assert(m_bankBusyCounter);
+
+    m_oldRequest = new int [m_total_banks];
+    assert(m_oldRequest);
+
+    for (int i = 0; i < m_total_banks; i++) {
+        m_bankBusyCounter[i] = 0;
+        m_oldRequest[i] = 0;
+    }
+
+    m_busBusyCounter_Basic = 0;
+    m_busBusyCounter_Write = 0;
+    m_busBusyCounter_ReadNewRank = 0;
+    m_busBusy_WhichRank = 0;
+
+    m_roundRobin = 0;
+    m_refresh_count = 1;
+    m_need_refresh = 0;
+    m_refresh_bank = 0;
+    m_idleCount = 0;
+    m_ageCounter = 0;
+
+    // Each tfaw shift register keeps a moving bit pattern
+    // which shows when recent activates have occurred.
+    // m_tfaw_count keeps track of how many 1 bits are set
+    // in each shift register.  When m_tfaw_count is >= 4,
+    // new activates are not allowed.
+    m_tfaw_shift = new uint64[m_total_ranks];
+    m_tfaw_count = new int[m_total_ranks];
+    for (int i = 0; i < m_total_ranks; i++) {
+        m_tfaw_shift[i] = 0;
+        m_tfaw_count[i] = 0;
+    }
+}
+
+// reset(): re-zero all scheduling state established by init() without
+// reallocating the arrays (the asserts check init() ran first).
+// NOTE(review): this duplicates init() almost line-for-line; a shared
+// private helper would keep the two from drifting apart.
+void
+RubyMemoryControl::reset()
+{
+    m_msg_counter = 0;
+
+    assert(m_tFaw <= 62); // must fit in a uint64 shift register
+
+    m_total_banks = m_banks_per_rank * m_ranks_per_dimm * m_dimms_per_channel;
+    m_total_ranks = m_ranks_per_dimm * m_dimms_per_channel;
+    m_refresh_period_system = m_refresh_period / m_total_banks;
+
+    assert(m_bankQueues);
+
+    assert(m_bankBusyCounter);
+
+    assert(m_oldRequest);
+
+    for (int i = 0; i < m_total_banks; i++) {
+        m_bankBusyCounter[i] = 0;
+        m_oldRequest[i] = 0;
+    }
+
+    m_busBusyCounter_Basic = 0;
+    m_busBusyCounter_Write = 0;
+    m_busBusyCounter_ReadNewRank = 0;
+    m_busBusy_WhichRank = 0;
+
+    m_roundRobin = 0;
+    m_refresh_count = 1;
+    m_need_refresh = 0;
+    m_refresh_bank = 0;
+    m_idleCount = 0;
+    m_ageCounter = 0;
+
+    // Each tfaw shift register keeps a moving bit pattern
+    // which shows when recent activates have occurred.
+    // m_tfaw_count keeps track of how many 1 bits are set
+    // in each shift register.  When m_tfaw_count is >= 4,
+    // new activates are not allowed.
+    for (int i = 0; i < m_total_ranks; i++) {
+        m_tfaw_shift[i] = 0;
+        m_tfaw_count[i] = 0;
+    }
+}
+
+// Destructor: free the arrays allocated in init() and the profiler.
+// NOTE(review): m_tfaw_shift / m_tfaw_count (allocated in init()) are
+// not freed here — confirm whether that leak is intentional.
+RubyMemoryControl::~RubyMemoryControl()
+{
+    delete [] m_bankQueues;
+    delete [] m_bankBusyCounter;
+    delete [] m_oldRequest;
+    delete m_profiler_ptr;
+}
+
+// enqueue new request from directory
+// Unpacks the MemoryMsg (address, read/write type), wraps it in a
+// MemoryNode stamped to arrive `latency` cycles from now, and hands it
+// to enqueueMemRef() for scheduling.
+void
+RubyMemoryControl::enqueue(const MsgPtr& message, Cycles latency)
+{
+    Cycles arrival_time = curCycle() + latency;
+    const MemoryMsg* memMess = safe_cast<const MemoryMsg*>(message.get());
+    physical_address_t addr = memMess->getAddr().getAddress();
+    MemoryRequestType type = memMess->getType();
+    bool is_mem_read = (type == MemoryRequestType_MEMORY_READ);
+    MemoryNode *thisReq = new MemoryNode(arrival_time, message, addr,
+                                         is_mem_read, !is_mem_read);
+    enqueueMemRef(thisReq);
+}
+
+// Alternate entry point used when we already have a MemoryNode
+// structure built.
+// Tags the request with a serial number, profiles it against its target
+// bank, appends it to the (unbounded) common input queue, and kicks the
+// controller's tick event if it is not already running.
+void
+RubyMemoryControl::enqueueMemRef(MemoryNode *memRef)
+{
+    m_msg_counter++;
+    memRef->m_msg_counter = m_msg_counter;
+    physical_address_t addr = memRef->m_addr;
+    int bank = getBank(addr);
+
+    DPRINTF(RubyMemory,
+            "New memory request%7d: %#08x %c arrived at %10d bank = %3x sched %c\n",
+            m_msg_counter, addr, memRef->m_is_mem_read ? 'R':'W',
+            memRef->m_time * g_system_ptr->clockPeriod(),
+            bank, m_event.scheduled() ? 'Y':'N');
+
+    m_profiler_ptr->profileMemReq(bank);
+    m_input_queue.push_back(memRef);
+
+    if (!m_event.scheduled()) {
+        schedule(m_event, clockEdge());
+    }
+}
+
+// dequeue, peek, and isReady are used to transfer completed requests
+// back to the directory
+// dequeue(): pop and free the head of the response queue; the caller
+// must have consumed the message (via peek) first.
+void
+RubyMemoryControl::dequeue()
+{
+    assert(isReady());
+    MemoryNode *req = m_response_queue.front();
+    m_response_queue.pop_front();
+    delete req;
+}
+
+// peek(): return the message carried by the head response without
+// removing it from the queue.
+const Message*
+RubyMemoryControl::peek()
+{
+    MemoryNode *node = peekNode();
+    Message* msg_ptr = node->m_msgptr.get();
+    assert(msg_ptr != NULL);
+    return msg_ptr;
+}
+
+// peekNode(): return (without removing) the head MemoryNode of the
+// response queue; only valid when isReady() holds.
+MemoryNode *
+RubyMemoryControl::peekNode()
+{
+    assert(isReady());
+    MemoryNode *req = m_response_queue.front();
+    DPRINTF(RubyMemory, "Peek: memory request%7d: %#08x %c sched %c\n",
+            req->m_msg_counter, req->m_addr, req->m_is_mem_read ? 'R':'W',
+            m_event.scheduled() ? 'Y':'N');
+
+    return req;
+}
+
+// isReady(): true when a completed response exists whose delivery time
+// has been reached (compared against the global Ruby cycle count, not
+// this controller's own clock).
+bool
+RubyMemoryControl::isReady()
+{
+    return ((!m_response_queue.empty()) &&
+            (m_response_queue.front()->m_time <= g_system_ptr->curCycle()));
+}
+
+// Register the consumer (the directory) to be woken when responses are
+// queued in enqueueToDirectory().
+void
+RubyMemoryControl::setConsumer(Consumer* consumer_ptr)
+{
+    m_consumer_ptr = consumer_ptr;
+}
+
+// print(): intentionally empty; kept so the operator<< overload and the
+// base-class interface remain satisfied.
+void
+RubyMemoryControl::print(ostream& out) const
+{
+}
+
+// Queue up a completed request to send back to directory
+// Stamps the node with its ruby-cycle arrival time, appends it to the
+// response queue, and schedules the consumer's wakeup at that tick.
+void
+RubyMemoryControl::enqueueToDirectory(MemoryNode *req, Cycles latency)
+{
+    Tick arrival_time = clockEdge(latency);
+    Cycles ruby_arrival_time = g_system_ptr->ticksToCycles(arrival_time);
+    req->m_time = ruby_arrival_time;
+    m_response_queue.push_back(req);
+
+    DPRINTF(RubyMemory, "Enqueueing msg %#08x %c back to directory at %15d\n",
+            req->m_addr, req->m_is_mem_read ? 'R':'W', arrival_time);
+
+    // schedule the wake up
+    m_consumer_ptr->scheduleEventAbsolute(arrival_time);
+}
+
+// getBank returns an integer that is unique for each
+// bank across this memory controller.
+// Extracts the dimm/rank/bank fields from the address at the configured
+// bit positions (the masks assume each count is a power of 2) and
+// flattens them into a single index: dimm-major, then rank, then bank.
+const int
+RubyMemoryControl::getBank(const physical_address_t addr) const
+{
+    int dimm = (addr >> m_dimm_bit_0) & (m_dimms_per_channel - 1);
+    int rank = (addr >> m_rank_bit_0) & (m_ranks_per_dimm - 1);
+    int bank = (addr >> m_bank_bit_0) & (m_banks_per_rank - 1);
+    return (dimm * m_ranks_per_dimm * m_banks_per_rank)
+        + (rank * m_banks_per_rank)
+        + bank;
+}
+
+// Map a physical address to its flat rank index by way of the flat bank
+// index (ranks are m_banks_per_rank consecutive banks).
+const int
+RubyMemoryControl::getRank(const physical_address_t addr) const
+{
+    int bank = getBank(addr);
+    int rank = (bank / m_banks_per_rank);
+    assert (rank < (m_ranks_per_dimm * m_dimms_per_channel));
+    return rank;
+}
+
+// getRank returns an integer that is unique for each rank
+// and independent of individual bank.
+// Overload taking a flat bank index directly (see getBank for layout).
+const int
+RubyMemoryControl::getRank(int bank) const
+{
+    int rank = (bank / m_banks_per_rank);
+    assert (rank < (m_ranks_per_dimm * m_dimms_per_channel));
+    return rank;
+}
+
+// Not used!
+// Stubbed interface method: this model is single-channel, so asking for
+// a channel index is a programming error (assert(false)).
+const int
+RubyMemoryControl::getChannel(const physical_address_t addr) const
+{
+    assert(false);
+    return -1;
+}
+
+// Not used!
+// Stubbed interface method: the closed-bank model never needs the row,
+// so calling this is a programming error (assert(false)).
+const int
+RubyMemoryControl::getRow(const physical_address_t addr) const
+{
+    assert(false);
+    return -1;
+}
+
+// queueReady determines if the head item in a bank queue
+// can be issued this cycle
+// Checks, in order: bank busy, random back-off, fixed-delay shortcut,
+// anti-starvation ("old" batch) gating, address/data bus availability,
+// tFAW activate budget, and read/write or rank-turnaround bus bubbles.
+// NOTE(review): random() depends on the global libc seed, so results
+// with m_mem_random_arbitrate set are only reproducible if the seed is.
+bool
+RubyMemoryControl::queueReady(int bank)
+{
+    if ((m_bankBusyCounter[bank] > 0) && !m_mem_fixed_delay) {
+        m_profiler_ptr->profileMemBankBusy();
+
+        DPRINTF(RubyMemory, "bank %x busy %d\n", bank, m_bankBusyCounter[bank]);
+        return false;
+    }
+
+    if (m_mem_random_arbitrate >= 2) {
+        if ((random() % 100) < m_mem_random_arbitrate) {
+            m_profiler_ptr->profileMemRandBusy();
+            return false;
+        }
+    }
+
+    if (m_mem_fixed_delay)
+        return true;
+
+    // hold newer requests while an "old" batch is still pending
+    if ((m_ageCounter > (2 * m_bank_busy_time)) && !m_oldRequest[bank]) {
+        m_profiler_ptr->profileMemNotOld();
+        return false;
+    }
+
+    if (m_busBusyCounter_Basic == m_basic_bus_busy_time) {
+        // Another bank must have issued this same cycle.  For
+        // profiling, we count this as an arb wait rather than a bus
+        // wait.  This is a little inaccurate since it MIGHT have also
+        // been blocked waiting for a read-write or a read-read
+        // instead, but it's pretty close.
+        m_profiler_ptr->profileMemArbWait(1);
+        return false;
+    }
+
+    if (m_busBusyCounter_Basic > 0) {
+        m_profiler_ptr->profileMemBusBusy();
+        return false;
+    }
+
+    int rank = getRank(bank);
+    if (m_tfaw_count[rank] >= ACTIVATE_PER_TFAW) {
+        m_profiler_ptr->profileMemTfawBusy();
+        return false;
+    }
+
+    bool write = !m_bankQueues[bank].front()->m_is_mem_read;
+    if (write && (m_busBusyCounter_Write > 0)) {
+        m_profiler_ptr->profileMemReadWriteBusy();
+        return false;
+    }
+
+    // consecutive reads from different ranks need a turnaround bubble
+    if (!write && (rank != m_busBusy_WhichRank)
+        && (m_busBusyCounter_ReadNewRank > 0)) {
+        m_profiler_ptr->profileMemDataBusBusy();
+        return false;
+    }
+
+    return true;
+}
+
+// issueRefresh checks to see if this bank has a refresh scheduled
+// and, if so, does the refresh and returns true
+// A refresh occupies the bank like a normal access and reserves all
+// three bus-busy counters for the basic bus time; it also counts as an
+// activate against the rank's tFAW window.
+bool
+RubyMemoryControl::issueRefresh(int bank)
+{
+    if (!m_need_refresh || (m_refresh_bank != bank))
+        return false;
+    if (m_bankBusyCounter[bank] > 0)
+        return false;
+    // Note that m_busBusyCounter will prevent multiple issues during
+    // the same cycle, as well as on different but close cycles:
+    if (m_busBusyCounter_Basic > 0)
+        return false;
+    int rank = getRank(bank);
+    if (m_tfaw_count[rank] >= ACTIVATE_PER_TFAW)
+        return false;
+
+    // Issue it:
+    DPRINTF(RubyMemory, "Refresh bank %3x\n", bank);
+
+    m_profiler_ptr->profileMemRefresh();
+    m_need_refresh--;
+    // advance the refresh pointer round-robin over all banks
+    m_refresh_bank++;
+    if (m_refresh_bank >= m_total_banks)
+        m_refresh_bank = 0;
+    m_bankBusyCounter[bank] = m_bank_busy_time;
+    m_busBusyCounter_Basic = m_basic_bus_busy_time;
+    m_busBusyCounter_Write = m_basic_bus_busy_time;
+    m_busBusyCounter_ReadNewRank = m_basic_bus_busy_time;
+    markTfaw(rank);
+    return true;
+}
+
+// Mark the activate in the tFaw shift register
+// Sets the bit that will reach position 0 after m_tFaw cycles of the
+// right-shift in executeCycle(), and bumps the rank's live count.
+// NOTE(review): `1 << (m_tFaw-1)` is an int shift, but init() allows
+// m_tFaw up to 62 — shifts past 31 overflow int (UB). This should
+// likely be `(uint64)1 << (m_tFaw-1)`; confirm against upstream.
+void
+RubyMemoryControl::markTfaw(int rank)
+{
+    if (m_tFaw) {
+        m_tfaw_shift[rank] |= (1 << (m_tFaw-1));
+        m_tfaw_count[rank]++;
+    }
+}
+
+// Issue a memory request:  Activate the bank, reserve the address and
+// data buses, and queue the request for return to the requesting
+// processor after a fixed latency.
+// Reads also add the read->write and rank-turnaround bubbles on top of
+// the basic bus time; writes reserve all three counters equally.
+void
+RubyMemoryControl::issueRequest(int bank)
+{
+    int rank = getRank(bank);
+    MemoryNode *req = m_bankQueues[bank].front();
+    m_bankQueues[bank].pop_front();
+
+    DPRINTF(RubyMemory, "Mem issue request%7d: %#08x %c "
+            "bank=%3x sched %c\n", req->m_msg_counter, req->m_addr,
+            req->m_is_mem_read? 'R':'W',
+            bank, m_event.scheduled() ? 'Y':'N');
+
+    if (req->m_msgptr) {  // don't enqueue L3 writebacks
+        enqueueToDirectory(req, Cycles(m_mem_ctl_latency + m_mem_fixed_delay));
+    }
+    m_oldRequest[bank] = 0;
+    markTfaw(rank);
+    m_bankBusyCounter[bank] = m_bank_busy_time;
+    m_busBusy_WhichRank = rank;
+    if (req->m_is_mem_read) {
+        m_profiler_ptr->profileMemRead();
+        m_busBusyCounter_Basic = m_basic_bus_busy_time;
+        m_busBusyCounter_Write = m_basic_bus_busy_time + m_read_write_delay;
+        m_busBusyCounter_ReadNewRank =
+            m_basic_bus_busy_time + m_rank_rank_delay;
+    } else {
+        m_profiler_ptr->profileMemWrite();
+        m_busBusyCounter_Basic = m_basic_bus_busy_time;
+        m_busBusyCounter_Write = m_basic_bus_busy_time;
+        m_busBusyCounter_ReadNewRank = m_basic_bus_busy_time;
+    }
+}
+
+// executeCycle:  This function is called once per memory clock cycle
+// to simulate all the periodic hardware.
+// Order per cycle: decrement busy counters, age the tFAW windows,
+// latch refresh needs, manage the anti-starvation batch, then scan all
+// banks round-robin issuing refreshes/requests, and finally move at
+// most one request from the common input queue into a bank queue.
+void
+RubyMemoryControl::executeCycle()
+{
+    // Keep track of time by counting down the busy counters:
+    for (int bank=0; bank < m_total_banks; bank++) {
+        if (m_bankBusyCounter[bank] > 0) m_bankBusyCounter[bank]--;
+    }
+    if (m_busBusyCounter_Write > 0)
+        m_busBusyCounter_Write--;
+    if (m_busBusyCounter_ReadNewRank > 0)
+        m_busBusyCounter_ReadNewRank--;
+    if (m_busBusyCounter_Basic > 0)
+        m_busBusyCounter_Basic--;
+
+    // Count down the tFAW shift registers:
+    for (int rank=0; rank < m_total_ranks; rank++) {
+        if (m_tfaw_shift[rank] & 1) m_tfaw_count[rank]--;
+        m_tfaw_shift[rank] >>= 1;
+    }
+
+    // After time period expires, latch an indication that we need a refresh.
+    // Disable refresh if in mem_fixed_delay mode.
+    if (!m_mem_fixed_delay) m_refresh_count--;
+    if (m_refresh_count == 0) {
+        m_refresh_count = m_refresh_period_system;
+
+        // Are we overrunning our ability to refresh?
+        assert(m_need_refresh < 10);
+        m_need_refresh++;
+    }
+
+    // If this batch of requests is all done, make a new batch:
+    m_ageCounter++;
+    int anyOld = 0;
+    for (int bank=0; bank < m_total_banks; bank++) {
+        anyOld |= m_oldRequest[bank];
+    }
+    if (!anyOld) {
+        // mark every currently-queued head as "old" (anti-starvation)
+        for (int bank=0; bank < m_total_banks; bank++) {
+            if (!m_bankQueues[bank].empty()) m_oldRequest[bank] = 1;
+        }
+        m_ageCounter = 0;
+    }
+
+    // If randomness desired, re-randomize round-robin position each cycle
+    if (m_mem_random_arbitrate) {
+        m_roundRobin = random() % m_total_banks;
+    }
+
+    // For each channel, scan round-robin, and pick an old, ready
+    // request and issue it. Treat a refresh request as if it were at
+    // the head of its bank queue. After we issue something, keep
+    // scanning the queues just to gather statistics about how many
+    // are waiting. If in mem_fixed_delay mode, we can issue more
+    // than one request per cycle.
+    int queueHeads = 0;
+    int banksIssued = 0;
+    for (int i = 0; i < m_total_banks; i++) {
+        m_roundRobin++;
+        if (m_roundRobin >= m_total_banks) m_roundRobin = 0;
+        issueRefresh(m_roundRobin);
+        int qs = m_bankQueues[m_roundRobin].size();
+        if (qs > 1) {
+            m_profiler_ptr->profileMemBankQ(qs-1);
+        }
+        if (qs > 0) {
+            // we're not idle if anything is queued
+            m_idleCount = IDLECOUNT_MAX_VALUE;
+            queueHeads++;
+            if (queueReady(m_roundRobin)) {
+                issueRequest(m_roundRobin);
+                banksIssued++;
+                if (m_mem_fixed_delay) {
+                    m_profiler_ptr->profileMemWaitCycles(m_mem_fixed_delay);
+                }
+            }
+        }
+    }
+
+    // memWaitCycles is a redundant catch-all for the specific
+    // counters in queueReady
+    m_profiler_ptr->profileMemWaitCycles(queueHeads - banksIssued);
+
+    // Check input queue and move anything to bank queues if not full.
+    // Since this is done here at the end of the cycle, there will
+    // always be at least one cycle of latency in the bank queue. We
+    // deliberately move at most one request per cycle (to simulate
+    // typical hardware). Note that if one bank queue fills up, other
+    // requests can get stuck behind it here.
+    if (!m_input_queue.empty()) {
+        // we're not idle if anything is pending
+        m_idleCount = IDLECOUNT_MAX_VALUE;
+        MemoryNode *req = m_input_queue.front();
+        int bank = getBank(req->m_addr);
+        if (m_bankQueues[bank].size() < m_bank_queue_size) {
+            m_input_queue.pop_front();
+            m_bankQueues[bank].push_back(req);
+        }
+        m_profiler_ptr->profileMemInputQ(m_input_queue.size());
+    }
+}
+
+// drain(): deschedule our tick event so the simulator can quiesce;
+// returns 0 (nothing outstanding to signal the DrainManager about).
+unsigned int
+RubyMemoryControl::drain(DrainManager *dm)
+{
+    DPRINTF(RubyMemory, "MemoryController drain\n");
+    if(m_event.scheduled()) {
+        deschedule(m_event);
+    }
+    return 0;
+}
+
+// wakeup: This function is called once per memory controller clock cycle.
+// Runs one executeCycle() and reschedules itself for the next cycle
+// unless the idle watchdog has expired (see IDLECOUNT_MAX_VALUE above);
+// enqueueMemRef() restarts the clock when new work arrives.
+void
+RubyMemoryControl::wakeup()
+{
+    DPRINTF(RubyMemory, "MemoryController wakeup\n");
+    // execute everything
+    executeCycle();
+
+    m_idleCount--;
+    if (m_idleCount > 0) {
+        assert(!m_event.scheduled());
+        schedule(m_event, clockEdge(Cycles(1)));
+    }
+}
+
+/**
+ * This function reads the different buffers that exist in the Ruby Memory
+ * Controller, and figures out if any of the buffers hold a message that
+ * contains the data for the address provided in the packet. True is returned
+ * if any of the messages was read, otherwise false is returned.
+ *
+ * Search order: common input queue, then response queue, then every
+ * per-bank queue; stops at the first message that satisfies the read.
+ *
+ * I think we should move these buffers to being message buffers, instead of
+ * being lists.
+ */
+bool
+RubyMemoryControl::functionalReadBuffers(Packet *pkt)
+{
+    for (std::list<MemoryNode *>::iterator it = m_input_queue.begin();
+         it != m_input_queue.end(); ++it) {
+        Message* msg_ptr = (*it)->m_msgptr.get();
+        if (msg_ptr->functionalRead(pkt)) {
+            return true;
+        }
+    }
+
+    for (std::list<MemoryNode *>::iterator it = m_response_queue.begin();
+         it != m_response_queue.end(); ++it) {
+        Message* msg_ptr = (*it)->m_msgptr.get();
+        if (msg_ptr->functionalRead(pkt)) {
+            return true;
+        }
+    }
+
+    for (uint32_t bank = 0; bank < m_total_banks; ++bank) {
+        for (std::list<MemoryNode *>::iterator it = m_bankQueues[bank].begin();
+             it != m_bankQueues[bank].end(); ++it) {
+            Message* msg_ptr = (*it)->m_msgptr.get();
+            if (msg_ptr->functionalRead(pkt)) {
+                return true;
+            }
+        }
+    }
+
+    return false;
+}
+
+/**
+ * This function reads the different buffers that exist in the Ruby Memory
+ * Controller, and figures out if any of the buffers hold a message that
+ * needs to functionally written with the data in the packet.
+ *
+ * Unlike functionalReadBuffers(), this visits every message in the
+ * input, response, and per-bank queues (no early exit), so all stale
+ * copies of the line are updated.
+ *
+ * The number of messages written is returned at the end. This is required
+ * for debugging purposes.
+ */
+uint32_t
+RubyMemoryControl::functionalWriteBuffers(Packet *pkt)
+{
+    uint32_t num_functional_writes = 0;
+
+    for (std::list<MemoryNode *>::iterator it = m_input_queue.begin();
+         it != m_input_queue.end(); ++it) {
+        Message* msg_ptr = (*it)->m_msgptr.get();
+        if (msg_ptr->functionalWrite(pkt)) {
+            num_functional_writes++;
+        }
+    }
+
+    for (std::list<MemoryNode *>::iterator it = m_response_queue.begin();
+         it != m_response_queue.end(); ++it) {
+        Message* msg_ptr = (*it)->m_msgptr.get();
+        if (msg_ptr->functionalWrite(pkt)) {
+            num_functional_writes++;
+        }
+    }
+
+    for (uint32_t bank = 0; bank < m_total_banks; ++bank) {
+        for (std::list<MemoryNode *>::iterator it = m_bankQueues[bank].begin();
+             it != m_bankQueues[bank].end(); ++it) {
+            Message* msg_ptr = (*it)->m_msgptr.get();
+            if (msg_ptr->functionalWrite(pkt)) {
+                num_functional_writes++;
+            }
+        }
+    }
+
+    return num_functional_writes;
+}
+
+// Register statistics: forwarded to the memory-controller profiler.
+void
+RubyMemoryControl::regStats()
+{
+    m_profiler_ptr->regStats();
+}
+
+// SimObject factory hook: python-generated Params construct the C++ object.
+RubyMemoryControl *
+RubyMemoryControlParams::create()
+{
+    return new RubyMemoryControl(this);
+}
diff --git a/src/mem/ruby/structures/RubyMemoryControl.hh b/src/mem/ruby/structures/RubyMemoryControl.hh
new file mode 100644
index 000000000..f7fb17975
--- /dev/null
+++ b/src/mem/ruby/structures/RubyMemoryControl.hh
@@ -0,0 +1,172 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * Copyright (c) 2012 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __MEM_RUBY_SYSTEM_MEMORY_CONTROL_HH__
+#define __MEM_RUBY_SYSTEM_MEMORY_CONTROL_HH__
+
+#include <iostream>
+#include <list>
+#include <string>
+
+#include "mem/protocol/MemoryMsg.hh"
+#include "mem/ruby/common/Address.hh"
+#include "mem/ruby/common/Consumer.hh"
+#include "mem/ruby/common/Global.hh"
+#include "mem/ruby/profiler/MemCntrlProfiler.hh"
+#include "mem/ruby/slicc_interface/Message.hh"
+#include "mem/ruby/structures/MemoryControl.hh"
+#include "mem/ruby/structures/MemoryNode.hh"
+#include "mem/ruby/system/System.hh"
+#include "params/RubyMemoryControl.hh"
+#include "sim/sim_object.hh"
+
+// This constant is part of the definition of tFAW; see
+// the comments in header to RubyMemoryControl.cc
+#define ACTIVATE_PER_TFAW 4
+
+//////////////////////////////////////////////////////////////////////////////
+
+// Detailed DRAM-style memory controller model for Ruby: schedules
+// requests across banks/ranks/DIMMs, modelling bank busy time, bus
+// contention, refresh, and tFAW constraints.
+class RubyMemoryControl : public MemoryControl
+{
+  public:
+    typedef RubyMemoryControlParams Params;
+    RubyMemoryControl(const Params *p);
+    void init();
+    void reset();
+
+    ~RubyMemoryControl();
+
+    unsigned int drain(DrainManager *dm);
+
+    void wakeup();
+
+    void setConsumer(Consumer* consumer_ptr);
+    Consumer* getConsumer() { return m_consumer_ptr; };
+    void setDescription(const std::string& name) { m_description = name; };
+    std::string getDescription() { return m_description; };
+
+    // Called from the directory:
+    void enqueue(const MsgPtr& message, Cycles latency);
+    void enqueueMemRef(MemoryNode *memRef);
+    void dequeue();
+    const Message* peek();
+    MemoryNode *peekNode();
+    bool isReady();
+    bool areNSlotsAvailable(int n) { return true; };  // infinite queue length
+
+    void print(std::ostream& out) const;
+    void regStats();
+
+    // Address-to-geometry mapping helpers.
+    const int getBank(const physical_address_t addr) const;
+    const int getRank(const physical_address_t addr) const;
+
+    // not used in Ruby memory controller
+    const int getChannel(const physical_address_t addr) const;
+    const int getRow(const physical_address_t addr) const;
+
+    //added by SS
+    int getBanksPerRank() { return m_banks_per_rank; };
+    int getRanksPerDimm() { return m_ranks_per_dimm; };
+    int getDimmsPerChannel() { return m_dimms_per_channel; }
+
+    // Functional (timing-free) access to any messages buffered here.
+    bool functionalReadBuffers(Packet *pkt);
+    uint32_t functionalWriteBuffers(Packet *pkt);
+
+  private:
+    void enqueueToDirectory(MemoryNode *req, Cycles latency);
+    const int getRank(int bank) const;
+    bool queueReady(int bank);
+    void issueRequest(int bank);
+    bool issueRefresh(int bank);
+    void markTfaw(int rank);
+    void executeCycle();
+
+    // Private copy constructor and assignment operator
+    RubyMemoryControl (const RubyMemoryControl& obj);
+    RubyMemoryControl& operator=(const RubyMemoryControl& obj);
+
+    // data members
+    Consumer* m_consumer_ptr;  // Consumer to signal a wakeup()
+    std::string m_description;
+    int m_msg_counter;
+
+    // DRAM geometry and address-map bit positions (from Python params).
+    int m_banks_per_rank;
+    int m_ranks_per_dimm;
+    int m_dimms_per_channel;
+    int m_bank_bit_0;
+    int m_rank_bit_0;
+    int m_dimm_bit_0;
+    // Timing parameters (from Python params).
+    unsigned int m_bank_queue_size;
+    int m_bank_busy_time;
+    int m_rank_rank_delay;
+    int m_read_write_delay;
+    int m_basic_bus_busy_time;
+    Cycles m_mem_ctl_latency;
+    int m_refresh_period;
+    int m_mem_random_arbitrate;
+    int m_tFaw;
+    Cycles m_mem_fixed_delay;
+
+    // Derived totals computed at init time.
+    int m_total_banks;
+    int m_total_ranks;
+    int m_refresh_period_system;
+
+    // queues where memory requests live
+    std::list<MemoryNode *> m_response_queue;
+    std::list<MemoryNode *> m_input_queue;
+    std::list<MemoryNode *>* m_bankQueues;  // array of per-bank queues
+
+    // Each entry indicates number of address-bus cycles until bank
+    // is reschedulable:
+    int* m_bankBusyCounter;
+    int* m_oldRequest;
+
+    // Per-rank tFAW tracking (sliding window of recent activates).
+    uint64* m_tfaw_shift;
+    int* m_tfaw_count;
+
+    // Each of these indicates number of address-bus cycles until
+    // we can issue a new request of the corresponding type:
+    int m_busBusyCounter_Write;
+    int m_busBusyCounter_ReadNewRank;
+    int m_busBusyCounter_Basic;
+
+    int m_busBusy_WhichRank;  // which rank last granted
+    int m_roundRobin;         // which bank queue was last granted
+    int m_refresh_count;      // cycles until next refresh
+    int m_need_refresh;       // set whenever m_refresh_count goes to zero
+    int m_refresh_bank;       // which bank to refresh next
+    int m_ageCounter;         // age of old requests; to detect starvation
+    int m_idleCount;          // watchdog timer for shutting down
+
+    MemCntrlProfiler* m_profiler_ptr;
+};
+
+std::ostream& operator<<(std::ostream& out, const RubyMemoryControl& obj);
+
+#endif // __MEM_RUBY_SYSTEM_MEMORY_CONTROL_HH__
diff --git a/src/mem/ruby/structures/RubyMemoryControl.py b/src/mem/ruby/structures/RubyMemoryControl.py
new file mode 100644
index 000000000..f0828fb19
--- /dev/null
+++ b/src/mem/ruby/structures/RubyMemoryControl.py
@@ -0,0 +1,55 @@
+# Copyright (c) 2009 Advanced Micro Devices, Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met: redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer;
+# redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution;
+# neither the name of the copyright holders nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Authors: Steve Reinhardt
+# Brad Beckmann
+
+from m5.params import *
+from m5.SimObject import SimObject
+from MemoryControl import MemoryControl
+
+class RubyMemoryControl(MemoryControl):
+    # SimObject wrapper for the detailed Ruby DRAM controller model.
+    type = 'RubyMemoryControl'
+    cxx_class = 'RubyMemoryControl'
+    cxx_header = "mem/ruby/structures/RubyMemoryControl.hh"
+    # Instance number; distinguishes multiple controllers in one system.
+    version = Param.Int("");
+
+    # DRAM geometry.
+    banks_per_rank = Param.Int(8, "");
+    ranks_per_dimm = Param.Int(2, "");
+    dimms_per_channel = Param.Int(2, "");
+    # Bit positions where the bank / rank / DIMM fields start in the
+    # physical address.
+    bank_bit_0 = Param.Int(8, "");
+    rank_bit_0 = Param.Int(11, "");
+    dimm_bit_0 = Param.Int(12, "");
+    # Queueing and timing parameters (in controller cycles).
+    bank_queue_size = Param.Int(12, "");
+    bank_busy_time = Param.Int(11, "");
+    rank_rank_delay = Param.Int(1, "");
+    read_write_delay = Param.Int(2, "");
+    basic_bus_busy_time = Param.Int(2, "");
+    mem_ctl_latency = Param.Cycles(12, "");
+    refresh_period = Param.Cycles(1560, "");
+    tFaw = Param.Int(0, "");
+    # Non-zero enables random (rather than round-robin) arbitration.
+    mem_random_arbitrate = Param.Int(0, "");
+    # Non-zero replaces detailed timing with a fixed delay.
+    mem_fixed_delay = Param.Cycles(0, "");
diff --git a/src/mem/ruby/structures/SConscript b/src/mem/ruby/structures/SConscript
index 170f61e88..a5abbf449 100644
--- a/src/mem/ruby/structures/SConscript
+++ b/src/mem/ruby/structures/SConscript
@@ -33,5 +33,21 @@ Import('*')
if env['PROTOCOL'] == 'None':
Return()
+SimObject('Cache.py')
+SimObject('DirectoryMemory.py')
+SimObject('MemoryControl.py')
+SimObject('RubyMemoryControl.py')
SimObject('RubyPrefetcher.py')
+SimObject('WireBuffer.py')
+
+Source('DirectoryMemory.cc')
+Source('SparseMemory.cc')
+Source('CacheMemory.cc')
+Source('MemoryControl.cc')
+Source('WireBuffer.cc')
+Source('RubyMemoryControl.cc')
+Source('MemoryNode.cc')
+Source('PersistentTable.cc')
Source('Prefetcher.cc')
+Source('TimerTable.cc')
+Source('BankedArray.cc')
diff --git a/src/mem/ruby/structures/SparseMemory.cc b/src/mem/ruby/structures/SparseMemory.cc
new file mode 100644
index 000000000..a63790502
--- /dev/null
+++ b/src/mem/ruby/structures/SparseMemory.cc
@@ -0,0 +1,417 @@
+/*
+ * Copyright (c) 2009 Advanced Micro Devices, Inc.
+ * Copyright (c) 2012 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <queue>
+
+#include "debug/RubyCache.hh"
+#include "mem/ruby/structures/SparseMemory.hh"
+#include "mem/ruby/system/System.hh"
+
+using namespace std;
+
+// Build a sparse memory with the given number of trie levels; the tag
+// bits of the line address are split (as evenly as possible) across the
+// levels.
+SparseMemory::SparseMemory(int number_of_levels)
+{
+    int even_level_bits;
+    int extra;
+    // Total number of tag bits = memory-size bits minus block-offset bits.
+    m_total_number_of_bits = RubySystem::getMemorySizeBits()
+        - RubySystem::getBlockSizeBits();;
+
+    m_number_of_levels = number_of_levels;
+
+    //
+    // Create the array that describes the bits per level; when the split
+    // is not exact, the earlier levels each get one extra bit.
+    //
+    m_number_of_bits_per_level = new int[m_number_of_levels];
+    even_level_bits = m_total_number_of_bits / m_number_of_levels;
+    extra = m_total_number_of_bits % m_number_of_levels;
+    for (int level = 0; level < m_number_of_levels; level++) {
+        if (level < extra)
+            m_number_of_bits_per_level[level] = even_level_bits + 1;
+        else
+            m_number_of_bits_per_level[level] = even_level_bits;
+    }
+    m_map_head = new SparseMapType;
+}
+
+// Destroy all lower-level tables and their entries first, then the head
+// table and the per-level bit-count array.
+SparseMemory::~SparseMemory()
+{
+    recursivelyRemoveTables(m_map_head, 0);
+    delete m_map_head;
+    delete [] m_number_of_bits_per_level;
+}
+
+// Recursively search table hierarchy for the lowest level table.
+// Delete the lowest table first, then the tables above it.
+void
+SparseMemory::recursivelyRemoveTables(SparseMapType* curTable, int curLevel)
+{
+    SparseMapType::iterator iter;
+
+    for (iter = curTable->begin(); iter != curTable->end(); iter++) {
+        SparseMemEntry entry = (*iter).second;
+
+        if (curLevel != (m_number_of_levels - 1)) {
+            // If not at the last level, recurse into the lower-level
+            // table first, then delete that table itself
+            SparseMapType* nextTable = (SparseMapType*)(entry);
+            recursivelyRemoveTables(nextTable, (curLevel + 1));
+            delete nextTable;
+        } else {
+            // If at the last level, delete the directory entry
+            delete (AbstractEntry*)(entry);
+        }
+        entry = NULL;
+    }
+
+    // Once all entries have been deleted, erase the entries
+    curTable->erase(curTable->begin(), curTable->end());
+}
+
+// tests to see if an address is present in the memory
+bool
+SparseMemory::exist(const Address& address) const
+{
+    SparseMapType* curTable = m_map_head;
+    Address curAddress;
+
+    // Initialize the high bit to be the total number of bits plus
+    // the block offset. However the highest bit index is one less
+    // than this value.
+    int highBit = m_total_number_of_bits + RubySystem::getBlockSizeBits();
+    int lowBit;
+    assert(address == line_address(address));
+    DPRINTF(RubyCache, "address: %s\n", address);
+
+    for (int level = 0; level < m_number_of_levels; level++) {
+        // Create the appropriate sub address for this level
+        // Note: that set Address is inclusive of the specified range,
+        // thus the high bit is one less than the total number of bits
+        // used to create the address.
+        lowBit = highBit - m_number_of_bits_per_level[level];
+        curAddress.setAddress(address.bitSelect(lowBit, highBit - 1));
+
+        DPRINTF(RubyCache, "level: %d, lowBit: %d, highBit - 1: %d, "
+                "curAddress: %s\n",
+                level, lowBit, highBit - 1, curAddress);
+
+        // Adjust the highBit value for the next level
+        highBit -= m_number_of_bits_per_level[level];
+
+        // If the address is found, move on to the next level.
+        // Otherwise, return not found
+        if (curTable->count(curAddress) != 0) {
+            curTable = (SparseMapType*)((*curTable)[curAddress]);
+        } else {
+            DPRINTF(RubyCache, "Not found\n");
+            return false;
+        }
+    }
+
+    DPRINTF(RubyCache, "Entry found\n");
+    return true;
+}
+
+// add an address to memory; intermediate tables are created on demand,
+// and the entry's data block is cleared before insertion.
+void
+SparseMemory::add(const Address& address, AbstractEntry* entry)
+{
+    assert(address == line_address(address));
+    assert(!exist(address));
+
+    m_total_adds++;
+
+    Address curAddress;
+    SparseMapType* curTable = m_map_head;
+
+    // Initialize the high bit to be the total number of bits plus
+    // the block offset. However the highest bit index is one less
+    // than this value.
+    int highBit = m_total_number_of_bits + RubySystem::getBlockSizeBits();
+    int lowBit;
+    void* newEntry = NULL;
+
+    for (int level = 0; level < m_number_of_levels; level++) {
+        // create the appropriate address for this level
+        // Note: that set Address is inclusive of the specified range,
+        // thus the high bit is one less than the total number of bits
+        // used to create the address.
+        lowBit = highBit - m_number_of_bits_per_level[level];
+        curAddress.setAddress(address.bitSelect(lowBit, highBit - 1));
+
+        // Adjust the highBit value for the next level
+        highBit -= m_number_of_bits_per_level[level];
+
+        // if the address exists in the cur table, move on. Otherwise
+        // create a new table.
+        if (curTable->count(curAddress) != 0) {
+            curTable = (SparseMapType*)((*curTable)[curAddress]);
+        } else {
+            m_adds_per_level[level]++;
+
+            // if the last level, add a directory entry. Otherwise add a map.
+            if (level == (m_number_of_levels - 1)) {
+                entry->getDataBlk().clear();
+                newEntry = (void*)entry;
+            } else {
+                SparseMapType* tempMap = new SparseMapType;
+                newEntry = (void*)(tempMap);
+            }
+
+            // Create the pointer container SparseMemEntry and add it
+            // to the table.
+            (*curTable)[curAddress] = newEntry;
+
+            // Move to the next level of the hierarchy
+            curTable = (SparseMapType*)newEntry;
+        }
+    }
+
+    assert(exist(address));
+    return;
+}
+
+// recursively search table hierarchy for the lowest level table.
+// remove the lowest entry and any empty tables above it.
+// Returns the size of the current table after removal so the caller can
+// decide whether this table is now empty and should itself be deleted.
+int
+SparseMemory::recursivelyRemoveLevels(const Address& address,
+                                      CurNextInfo& curInfo)
+{
+    Address curAddress;
+    CurNextInfo nextInfo;
+    SparseMemEntry entry;
+
+    // create the appropriate address for this level
+    // Note: that set Address is inclusive of the specified range,
+    // thus the high bit is one less than the total number of bits
+    // used to create the address.
+    curAddress.setAddress(address.bitSelect(curInfo.lowBit,
+                                            curInfo.highBit - 1));
+
+    DPRINTF(RubyCache, "address: %s, curInfo.level: %d, curInfo.lowBit: %d, "
+            "curInfo.highBit - 1: %d, curAddress: %s\n",
+            address, curInfo.level, curInfo.lowBit,
+            curInfo.highBit - 1, curAddress);
+
+    assert(curInfo.curTable->count(curAddress) != 0);
+
+    entry = (*(curInfo.curTable))[curAddress];
+
+    if (curInfo.level < (m_number_of_levels - 1)) {
+        // set up next level's info
+        nextInfo.curTable = (SparseMapType*)(entry);
+        nextInfo.level = curInfo.level + 1;
+
+        nextInfo.highBit = curInfo.highBit -
+            m_number_of_bits_per_level[curInfo.level];
+
+        nextInfo.lowBit = curInfo.lowBit -
+            m_number_of_bits_per_level[curInfo.level + 1];
+
+        // recursively search the table hierarchy
+        int tableSize = recursivelyRemoveLevels(address, nextInfo);
+
+        // If the table below is now empty, we must delete it and
+        // erase it from our table.
+        if (tableSize == 0) {
+            m_removes_per_level[curInfo.level]++;
+            delete nextInfo.curTable;
+            entry = NULL;
+            curInfo.curTable->erase(curAddress);
+        }
+    } else {
+        // if this is the last level, we have reached the Directory
+        // Entry and thus we should delete it including the
+        // SparseMemEntry container struct.
+        delete (AbstractEntry*)(entry);
+        entry = NULL;
+        curInfo.curTable->erase(curAddress);
+        m_removes_per_level[curInfo.level]++;
+    }
+    return curInfo.curTable->size();
+}
+
+// remove an entry from the table, deleting any tables left empty as a
+// result (except the head table, which is never deleted).
+void
+SparseMemory::remove(const Address& address)
+{
+    assert(address == line_address(address));
+    assert(exist(address));
+
+    m_total_removes++;
+
+    CurNextInfo nextInfo;
+
+    // Initialize table pointer and level value
+    nextInfo.curTable = m_map_head;
+    nextInfo.level = 0;
+
+    // Initialize the high bit to be the total number of bits plus
+    // the block offset. However the highest bit index is one less
+    // than this value.
+    nextInfo.highBit = m_total_number_of_bits + RubySystem::getBlockSizeBits();
+    nextInfo.lowBit = nextInfo.highBit - m_number_of_bits_per_level[0];;
+
+    // recursively search the table hierarchy for empty tables
+    // starting from level 0. Note we do not check the return
+    // value because the head table is never deleted.
+    recursivelyRemoveLevels(address, nextInfo);
+
+    assert(!exist(address));
+    return;
+}
+
+// looks an address up in memory; returns NULL if the address is absent.
+AbstractEntry*
+SparseMemory::lookup(const Address& address)
+{
+    assert(address == line_address(address));
+
+    Address curAddress;
+    SparseMapType* curTable = m_map_head;
+    AbstractEntry* entry = NULL;
+
+    // Initialize the high bit to be the total number of bits plus
+    // the block offset. However the highest bit index is one less
+    // than this value.
+    int highBit = m_total_number_of_bits + RubySystem::getBlockSizeBits();
+    int lowBit;
+
+    for (int level = 0; level < m_number_of_levels; level++) {
+        // create the appropriate address for this level
+        // Note: that set Address is inclusive of the specified range,
+        // thus the high bit is one less than the total number of bits
+        // used to create the address.
+        lowBit = highBit - m_number_of_bits_per_level[level];
+        curAddress.setAddress(address.bitSelect(lowBit, highBit - 1));
+
+        DPRINTF(RubyCache, "level: %d, lowBit: %d, highBit - 1: %d, "
+                "curAddress: %s\n",
+                level, lowBit, highBit - 1, curAddress);
+
+        // Adjust the highBit value for the next level
+        highBit -= m_number_of_bits_per_level[level];
+
+        // If the address is found, move on to the next level.
+        // Otherwise, return not found
+        if (curTable->count(curAddress) != 0) {
+            curTable = (SparseMapType*)((*curTable)[curAddress]);
+        } else {
+            DPRINTF(RubyCache, "Not found\n");
+            return NULL;
+        }
+    }
+
+    // The last entry actually points to the Directory entry not a table
+    entry = (AbstractEntry*)curTable;
+
+    return entry;
+}
+
+// Walk every level of the sparse memory breadth-first (two queues,
+// alternating by level parity) and add a trace record for each leaf
+// (directory) entry found.
+void
+SparseMemory::recordBlocks(int cntrl_id, CacheRecorder* tr) const
+{
+    queue<SparseMapType*> unexplored_nodes[2];
+    queue<physical_address_t> address_of_nodes[2];
+
+    unexplored_nodes[0].push(m_map_head);
+    address_of_nodes[0].push(0);
+
+    int parity_of_level = 0;
+    physical_address_t address, temp_address;
+    Address curAddress;
+
+    // Initialize the high bit to be the total number of bits plus
+    // the block offset. However the highest bit index is one less
+    // than this value.
+    int highBit = m_total_number_of_bits + RubySystem::getBlockSizeBits();
+    int lowBit;
+
+    for (int cur_level = 0; cur_level < m_number_of_levels; cur_level++) {
+
+        // create the appropriate address for this level
+        // Note: that set Address is inclusive of the specified range,
+        // thus the high bit is one less than the total number of bits
+        // used to create the address.
+        lowBit = highBit - m_number_of_bits_per_level[cur_level];
+
+        while (!unexplored_nodes[parity_of_level].empty()) {
+
+            SparseMapType* node = unexplored_nodes[parity_of_level].front();
+            unexplored_nodes[parity_of_level].pop();
+
+            address = address_of_nodes[parity_of_level].front();
+            address_of_nodes[parity_of_level].pop();
+
+            SparseMapType::iterator iter;
+
+            for (iter = node->begin(); iter != node->end(); iter++) {
+                SparseMemEntry entry = (*iter).second;
+                curAddress = (*iter).first;
+
+                if (cur_level != (m_number_of_levels - 1)) {
+                    // If not at the last level, put this node in the queue
+                    // along with the partial address accumulated so far.
+                    unexplored_nodes[1 - parity_of_level].push(
+                        (SparseMapType*)(entry));
+                    address_of_nodes[1 - parity_of_level].push(address |
+                        (curAddress.getAddress() << lowBit));
+                } else {
+                    // If at the last level, add a trace record
+                    temp_address = address | (curAddress.getAddress()
+                        << lowBit);
+                    DataBlock block = ((AbstractEntry*)entry)->getDataBlk();
+                    tr->addRecord(cntrl_id, temp_address, 0, RubyRequestType_ST, 0,
+                                  block);
+                }
+            }
+        }
+
+        // Adjust the highBit value for the next level
+        highBit -= m_number_of_bits_per_level[cur_level];
+        parity_of_level = 1 - parity_of_level;
+    }
+}
+
+// Register the add/remove statistics under the given name prefix; the
+// per-level vectors are sized to the number of trie levels.
+void
+SparseMemory::regStats(const string &name)
+{
+    m_total_adds.name(name + ".total_adds");
+
+    m_adds_per_level
+        .init(m_number_of_levels)
+        .name(name + ".adds_per_level")
+        .flags(Stats::pdf | Stats::total)
+        ;
+
+    m_total_removes.name(name + ".total_removes");
+    m_removes_per_level
+        .init(m_number_of_levels)
+        .name(name + ".removes_per_level")
+        .flags(Stats::pdf | Stats::total)
+        ;
+}
diff --git a/src/mem/ruby/structures/SparseMemory.hh b/src/mem/ruby/structures/SparseMemory.hh
new file mode 100644
index 000000000..9d3c6a844
--- /dev/null
+++ b/src/mem/ruby/structures/SparseMemory.hh
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2009 Advanced Micro Devices, Inc.
+ * Copyright (c) 2012 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __MEM_RUBY_SYSTEM_SPARSEMEMORY_HH__
+#define __MEM_RUBY_SYSTEM_SPARSEMEMORY_HH__
+
+#include <iostream>
+#include <string>
+
+#include "base/hashmap.hh"
+#include "base/statistics.hh"
+#include "mem/ruby/common/Address.hh"
+#include "mem/ruby/slicc_interface/AbstractEntry.hh"
+#include "mem/ruby/system/CacheRecorder.hh"
+
+// A SparseMemEntry is either a pointer to the next-level SparseMapType
+// table or, at the last level, a pointer to an AbstractEntry.
+typedef void* SparseMemEntry;
+typedef m5::hash_map<Address, SparseMemEntry> SparseMapType;
+
+// Bookkeeping passed down the recursive remove: the table being
+// examined, its level, and the address bit range that indexes it.
+struct CurNextInfo
+{
+    SparseMapType* curTable;
+    int level;
+    int highBit;
+    int lowBit;
+};
+
+// A multi-level hash-map trie keyed by slices of the line address;
+// stores AbstractEntry objects only for blocks that actually exist,
+// avoiding a flat array over the whole address space.
+class SparseMemory
+{
+  public:
+    SparseMemory(int number_of_levels);
+    ~SparseMemory();
+
+    bool exist(const Address& address) const;
+    void add(const Address& address, AbstractEntry*);
+    void remove(const Address& address);
+
+    /*!
+     * Function for recording the contents of memory. This function walks
+     * through all the levels of the sparse memory in a breadth first
+     * fashion. This might need more memory than a depth first approach.
+     * But breadth first seems easier to me than a depth first approach.
+     */
+    void recordBlocks(int cntrl_id, CacheRecorder *) const;
+
+    AbstractEntry* lookup(const Address& address);
+    void regStats(const std::string &name);
+
+  private:
+    // Private copy constructor and assignment operator
+    SparseMemory(const SparseMemory& obj);
+    SparseMemory& operator=(const SparseMemory& obj);
+
+    // Used by destructor to recursively remove all tables
+    void recursivelyRemoveTables(SparseMapType* currentTable, int level);
+
+    // recursive search for address and remove associated entries
+    int recursivelyRemoveLevels(const Address& address, CurNextInfo& curInfo);
+
+    // Data Members (m_prefix)
+    SparseMapType* m_map_head;  // root table of the trie
+
+    int m_total_number_of_bits;       // tag bits split across all levels
+    int m_number_of_levels;
+    int* m_number_of_bits_per_level;  // array of size m_number_of_levels
+
+    Stats::Scalar m_total_adds;
+    Stats::Vector m_adds_per_level;
+    Stats::Scalar m_total_removes;
+    Stats::Vector m_removes_per_level;
+};
+
+#endif // __MEM_RUBY_SYSTEM_SPARSEMEMORY_HH__
diff --git a/src/mem/ruby/structures/TBETable.hh b/src/mem/ruby/structures/TBETable.hh
new file mode 100644
index 000000000..018da6cbb
--- /dev/null
+++ b/src/mem/ruby/structures/TBETable.hh
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __MEM_RUBY_SYSTEM_TBETABLE_HH__
+#define __MEM_RUBY_SYSTEM_TBETABLE_HH__
+
+#include <iostream>
+
+#include "base/hashmap.hh"
+#include "mem/ruby/common/Address.hh"
+
+// Table of Transaction Buffer Entries (TBEs), keyed by line address,
+// with a fixed capacity checked via areNSlotsAvailable().
+template<class ENTRY>
+class TBETable
+{
+  public:
+    TBETable(int number_of_TBEs)
+        : m_number_of_TBEs(number_of_TBEs)
+    {
+    }
+
+    bool isPresent(const Address& address) const;
+    void allocate(const Address& address);
+    void deallocate(const Address& address);
+    // True when at least n TBEs remain unallocated.
+    bool
+    areNSlotsAvailable(int n) const
+    {
+        return (m_number_of_TBEs - m_map.size()) >= n;
+    }
+
+    ENTRY* lookup(const Address& address);
+
+    // Print cache contents
+    void print(std::ostream& out) const;
+
+  private:
+    // Private copy constructor and assignment operator
+    TBETable(const TBETable& obj);
+    TBETable& operator=(const TBETable& obj);
+
+    // Data Members (m_prefix)
+    m5::hash_map<Address, ENTRY> m_map;
+
+  private:
+    int m_number_of_TBEs;  // fixed capacity of the table
+};
+
+// Stream insertion: delegate to print() and flush the stream.
+template<class ENTRY>
+inline std::ostream&
+operator<<(std::ostream& out, const TBETable<ENTRY>& obj)
+{
+    obj.print(out);
+    out << std::flush;
+    return out;
+}
+
+// Returns true if a TBE is currently allocated for this line address.
+template<class ENTRY>
+inline bool
+TBETable<ENTRY>::isPresent(const Address& address) const
+{
+    // TBEs are tracked at cache-line granularity only.
+    assert(address == line_address(address));
+    assert(m_map.size() <= m_number_of_TBEs);
+    return !!m_map.count(address);
+}
+
+// Allocate a default-constructed TBE for the line; the line must not
+// already have one, and capacity must not be exceeded.
+template<class ENTRY>
+inline void
+TBETable<ENTRY>::allocate(const Address& address)
+{
+    assert(!isPresent(address));
+    assert(m_map.size() < m_number_of_TBEs);
+    m_map[address] = ENTRY();
+}
+
+// Free the TBE for the line; the line must currently have one.
+template<class ENTRY>
+inline void
+TBETable<ENTRY>::deallocate(const Address& address)
+{
+    assert(isPresent(address));
+    assert(m_map.size() > 0);
+    m_map.erase(address);
+}
+
+// looks an address up in the cache; returns NULL when no TBE exists.
+// NOTE(review): performs the hash lookup twice on a hit; caching the
+// iterator would avoid the second find().
+template<class ENTRY>
+inline ENTRY*
+TBETable<ENTRY>::lookup(const Address& address)
+{
+    if(m_map.find(address) != m_map.end()) return &(m_map.find(address)->second);
+    return NULL;
+}
+
+
+// Intentionally empty: printing TBE contents is not implemented.
+template<class ENTRY>
+inline void
+TBETable<ENTRY>::print(std::ostream& out) const
+{
+}
+
+#endif // __MEM_RUBY_SYSTEM_TBETABLE_HH__
diff --git a/src/mem/ruby/structures/TimerTable.cc b/src/mem/ruby/structures/TimerTable.cc
new file mode 100644
index 000000000..84c096b05
--- /dev/null
+++ b/src/mem/ruby/structures/TimerTable.cc
@@ -0,0 +1,129 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "mem/ruby/common/Global.hh"
+#include "mem/ruby/structures/TimerTable.hh"
+#include "mem/ruby/system/System.hh"
+
+// Constructs an empty timer table. The consumer and clock object must be
+// supplied later via setConsumer()/setClockObj() before timers are set.
+TimerTable::TimerTable()
+    : m_next_time(0)
+{
+    m_consumer_ptr = NULL;
+    m_clockobj_ptr = NULL;
+
+    // No cached "next expiring entry" until updateNext() runs.
+    m_next_valid = false;
+    m_next_address = Address(0);
+}
+
+// Returns true if at least one timer has expired (its ready time is at or
+// before the current cycle). Refreshes the cached next entry if stale.
+bool
+TimerTable::isReady() const
+{
+    if (m_map.empty())
+        return false;
+
+    if (!m_next_valid) {
+        updateNext();
+    }
+    assert(m_next_valid);
+    return (m_clockobj_ptr->curCycle() >= m_next_time);
+}
+
+// Returns the address whose timer expires first. Only valid when
+// isReady() is true (asserted below).
+const Address&
+TimerTable::readyAddress() const
+{
+    assert(isReady());
+
+    // isReady() already refreshed the cache, but keep this defensive
+    // refresh in case the assert is compiled out.
+    if (!m_next_valid) {
+        updateNext();
+    }
+    assert(m_next_valid);
+    return m_next_address;
+}
+
+// Arms a timer for the given line address that expires relative_latency
+// cycles from now, and schedules a consumer wakeup at that time.
+// The address must not already have a pending timer.
+void
+TimerTable::set(const Address& address, Cycles relative_latency)
+{
+    assert(address == line_address(address));
+    assert(relative_latency > 0);
+    assert(!m_map.count(address));
+
+    Cycles ready_time = m_clockobj_ptr->curCycle() + relative_latency;
+    m_map[address] = ready_time;
+    assert(m_consumer_ptr != NULL);
+    m_consumer_ptr->
+        scheduleEventAbsolute(m_clockobj_ptr->clockPeriod() * ready_time);
+
+    // Don't always recalculate the next ready address: the cached entry
+    // only becomes stale if the new timer expires no later than it.
+    // (Previously m_next_valid was also cleared unconditionally just
+    // above, which made this check dead code and forced a full rescan in
+    // updateNext() after every set().)
+    if (ready_time <= m_next_time) {
+        m_next_valid = false;
+    }
+}
+
+// Cancels the pending timer for the given line address (must exist).
+void
+TimerTable::unset(const Address& address)
+{
+    assert(address == line_address(address));
+    assert(m_map.count(address));
+    m_map.erase(address);
+
+    // Don't always recalculate the next ready address; only invalidate
+    // the cache if the removed entry was the cached next-to-expire one.
+    if (address == m_next_address) {
+        m_next_valid = false;
+    }
+}
+
+// Intentionally a no-op; kept so operator<< has something to call.
+void
+TimerTable::print(std::ostream& out) const
+{
+}
+
+// Rebuilds the cached next-to-expire entry (m_next_address/m_next_time)
+// with a linear scan over all pending timers. O(n) in table size; called
+// lazily only when m_next_valid is false.
+void
+TimerTable::updateNext() const
+{
+    if (m_map.empty()) {
+        assert(!m_next_valid);
+        return;
+    }
+
+    AddressMap::const_iterator i = m_map.begin();
+    AddressMap::const_iterator end = m_map.end();
+
+    // Seed with the first entry, then keep the minimum expiry time.
+    m_next_address = i->first;
+    m_next_time = i->second;
+    ++i;
+
+    for (; i != end; ++i) {
+        // Strict '<' keeps the earliest-iterated (lowest address, since
+        // std::map is sorted) entry on ties, for deterministic behavior.
+        if (i->second < m_next_time) {
+            m_next_address = i->first;
+            m_next_time = i->second;
+        }
+    }
+
+    m_next_valid = true;
+}
diff --git a/src/mem/ruby/structures/TimerTable.hh b/src/mem/ruby/structures/TimerTable.hh
new file mode 100644
index 000000000..b271d3e37
--- /dev/null
+++ b/src/mem/ruby/structures/TimerTable.hh
@@ -0,0 +1,107 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __MEM_RUBY_SYSTEM_TIMERTABLE_HH__
+#define __MEM_RUBY_SYSTEM_TIMERTABLE_HH__
+
+#include <cassert>
+#include <iostream>
+#include <map>
+#include <string>
+
+#include "mem/ruby/common/Address.hh"
+#include "mem/ruby/common/Consumer.hh"
+
+// A table of per-cache-line countdown timers, used by protocols (e.g.
+// token coherence) to trigger delayed actions. Timers are set relative to
+// the current cycle of the attached ClockedObject; expiry signals the
+// registered Consumer. The next-to-expire entry is cached lazily.
+class TimerTable
+{
+  public:
+    TimerTable();
+
+    // Registers the Consumer to wake when a timer expires; may be set
+    // exactly once (asserted).
+    void
+    setConsumer(Consumer* consumer_ptr)
+    {
+        assert(m_consumer_ptr == NULL);
+        m_consumer_ptr = consumer_ptr;
+    }
+
+    // Registers the clock source used for curCycle()/clockPeriod();
+    // may be set exactly once (asserted).
+    void setClockObj(ClockedObject* obj)
+    {
+        assert(m_clockobj_ptr == NULL);
+        m_clockobj_ptr = obj;
+    }
+
+    void
+    setDescription(const std::string& name)
+    {
+        m_name = name;
+    }
+
+    bool isReady() const;
+    const Address& readyAddress() const;
+    bool isSet(const Address& address) const { return !!m_map.count(address); }
+    void set(const Address& address, Cycles relative_latency);
+    // Convenience overload for protocol code passing raw cycle counts.
+    void set(const Address& address, uint64_t relative_latency)
+    { set(address, Cycles(relative_latency)); }
+
+    void unset(const Address& address);
+    void print(std::ostream& out) const;
+
+  private:
+    // Recomputes the cached next-to-expire entry; const because it is
+    // called from const accessors and touches only mutable cache members.
+    void updateNext() const;
+
+    // Private copy constructor and assignment operator
+    TimerTable(const TimerTable& obj);
+    TimerTable& operator=(const TimerTable& obj);
+
+    // Data Members (m_prefix)
+
+    // use a std::map for the address map as this container is sorted
+    // and ensures a well-defined iteration order
+    typedef std::map<Address, Cycles> AddressMap;
+    AddressMap m_map;
+    mutable bool m_next_valid;
+    mutable Cycles m_next_time; // Only valid if m_next_valid is true
+    mutable Address m_next_address; // Only valid if m_next_valid is true
+
+    //! Object used for querying time.
+    ClockedObject* m_clockobj_ptr;
+    //! Consumer to signal a wakeup()
+    Consumer* m_consumer_ptr;
+
+    std::string m_name;
+};
+
+// Stream-insertion operator: delegates to TimerTable::print() and flushes.
+inline std::ostream&
+operator<<(std::ostream& out, const TimerTable& obj)
+{
+    obj.print(out);
+    out << std::flush;
+    return out;
+}
+
+#endif // __MEM_RUBY_SYSTEM_TIMERTABLE_HH__
diff --git a/src/mem/ruby/structures/WireBuffer.cc b/src/mem/ruby/structures/WireBuffer.cc
new file mode 100644
index 000000000..702a53f16
--- /dev/null
+++ b/src/mem/ruby/structures/WireBuffer.cc
@@ -0,0 +1,158 @@
+/*
+ * Copyright (c) 2010 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Author: Lisa Hsu
+ *
+ */
+
+#include <algorithm>
+#include <functional>
+
+#include "base/cprintf.hh"
+#include "base/stl_helpers.hh"
+#include "mem/ruby/common/Global.hh"
+#include "mem/ruby/structures/WireBuffer.hh"
+#include "mem/ruby/system/System.hh"
+
+using namespace std;
+
+// Output operator definition
+
+// Stream-insertion operator: delegates to WireBuffer::print() and flushes.
+ostream&
+operator<<(ostream& out, const WireBuffer& obj)
+{
+    obj.print(out);
+    out << flush;
+    return out;
+}
+
+
+// ****************************************************************
+
+// CONSTRUCTOR
+WireBuffer::WireBuffer(const Params *p)
+    : SimObject(p)
+{
+    m_msg_counter = 0;
+    // Initialize the consumer pointer: enqueue() compares it against NULL
+    // to decide whether to panic, which would otherwise read an
+    // uninitialized pointer if setConsumer() was never called.
+    m_consumer_ptr = NULL;
+}
+
+// SimObject init hook; the WireBuffer needs no post-construction setup.
+void
+WireBuffer::init()
+{
+}
+
+// Destructor: no owned resources beyond the containers, which clean
+// themselves up.
+WireBuffer::~WireBuffer()
+{
+}
+
+// Queues a message to arrive 'latency' cycles from now and schedules the
+// consumer's wakeup at that absolute time. Panics if no consumer is set.
+void
+WireBuffer::enqueue(MsgPtr message, Cycles latency)
+{
+    m_msg_counter++;
+    Cycles current_time = g_system_ptr->curCycle();
+    Cycles arrival_time = current_time + latency;
+    assert(arrival_time > current_time);
+
+    MessageBufferNode thisNode(arrival_time, m_msg_counter, message);
+    m_message_queue.push_back(thisNode);
+    // dequeue()/recycle() manipulate m_message_queue with pop_heap/
+    // push_heap using greater<> (a min-heap on arrival time), so restore
+    // the heap invariant after appending; a plain push_back of a
+    // shorter-latency message would otherwise break pop_heap().
+    push_heap(m_message_queue.begin(), m_message_queue.end(),
+              greater<MessageBufferNode>());
+    if (m_consumer_ptr != NULL) {
+        m_consumer_ptr->
+            scheduleEventAbsolute(g_system_ptr->clockPeriod() * arrival_time);
+    } else {
+        panic("No Consumer for WireBuffer! %s\n", *this);
+    }
+}
+
+// Removes the earliest-arriving (ready) message from the min-heap.
+void
+WireBuffer::dequeue()
+{
+    assert(isReady());
+    // greater<> makes this a min-heap on arrival time: pop_heap moves the
+    // earliest message to the back, then pop_back discards it.
+    pop_heap(m_message_queue.begin(), m_message_queue.end(),
+             greater<MessageBufferNode>());
+    m_message_queue.pop_back();
+}
+
+// Returns the earliest ready message without removing it. The pointer is
+// non-owning; the message stays in the queue until dequeue().
+const Message*
+WireBuffer::peek()
+{
+    MessageBufferNode node = peekNode();
+    Message* msg_ptr = node.m_msgptr.get();
+    assert(msg_ptr != NULL);
+    return msg_ptr;
+}
+
+// Returns (a copy of) the queue node at the top of the min-heap, i.e. the
+// message with the earliest arrival time. Requires isReady().
+MessageBufferNode
+WireBuffer::peekNode()
+{
+    assert(isReady());
+    MessageBufferNode req = m_message_queue.front();
+    return req;
+}
+
+// Re-queues the ready message to retry exactly one cycle later, and
+// schedules a wakeup for that time.
+void
+WireBuffer::recycle()
+{
+    // Because you don't want anything reordered, make sure the recycle latency
+    // is just 1 cycle. As a result, you really want to use this only in
+    // Wire-like situations because you don't want to deadlock as a result of
+    // being stuck behind something if you're not actually supposed to.
+    assert(isReady());
+    MessageBufferNode node = m_message_queue.front();
+    // Pop the ready node off the min-heap (it ends up at back()), bump its
+    // time by one cycle, overwrite the back slot, and re-heapify.
+    pop_heap(m_message_queue.begin(), m_message_queue.end(),
+        greater<MessageBufferNode>());
+
+    node.m_time = g_system_ptr->curCycle() + Cycles(1);
+    m_message_queue.back() = node;
+    push_heap(m_message_queue.begin(), m_message_queue.end(),
+        greater<MessageBufferNode>());
+    m_consumer_ptr->
+        scheduleEventAbsolute(g_system_ptr->clockPeriod() * node.m_time);
+}
+
+// True if a message exists whose arrival time has been reached; front()
+// is the earliest-arriving message in the min-heap ordering.
+bool
+WireBuffer::isReady()
+{
+    return ((!m_message_queue.empty()) &&
+            (m_message_queue.front().m_time <= g_system_ptr->curCycle()));
+}
+
+// Intentionally a no-op; kept so operator<< has something to call.
+void
+WireBuffer::print(ostream& out) const
+{
+}
+
+// Consumer wakeup hook: intentionally empty — the controllers attached to
+// this buffer poll it via isReady()/peek() rather than reacting here.
+void
+WireBuffer::wakeup()
+{
+}
+
+// Factory hook used by gem5's generated Python param code to instantiate
+// the C++ WireBuffer from a RubyWireBuffer SimObject.
+WireBuffer *
+RubyWireBufferParams::create()
+{
+    return new WireBuffer(this);
+}
+
diff --git a/src/mem/ruby/structures/WireBuffer.hh b/src/mem/ruby/structures/WireBuffer.hh
new file mode 100644
index 000000000..6dee01ae0
--- /dev/null
+++ b/src/mem/ruby/structures/WireBuffer.hh
@@ -0,0 +1,102 @@
+/*
+ * Copyright (c) 2010 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Author: Lisa Hsu
+ *
+ */
+
+#ifndef __MEM_RUBY_SYSTEM_WIREBUFFER_HH__
+#define __MEM_RUBY_SYSTEM_WIREBUFFER_HH__
+
+#include <iostream>
+#include <string>
+#include <vector>
+
+#include "mem/ruby/common/Consumer.hh"
+#include "mem/ruby/network/MessageBufferNode.hh"
+#include "params/RubyWireBuffer.hh"
+#include "sim/sim_object.hh"
+
+//////////////////////////////////////////////////////////////////////////////
+// This object was written to literally mimic a Wire in Ruby, in the sense
+// that there is no way for messages to get reordered en route on the WireBuffer.
+// With Message Buffers, even if randomization is off and ordered is on,
+// messages can arrive in different orders than they were sent because of
+// network issues. This mimics a Wire, such that that is not possible. This can
+// allow for messages between closely coupled controllers that are not actually
+// separated by a network in real systems to simplify coherence.
+/////////////////////////////////////////////////////////////////////////////
+
+class Message;
+
+// A wire-like, strictly-ordered message conduit between closely coupled
+// controllers (see the banner above). Messages carry an absolute arrival
+// cycle and are stored in a vector manipulated with heap operations
+// keyed on arrival time.
+class WireBuffer : public SimObject
+{
+  public:
+    typedef RubyWireBufferParams Params;
+    WireBuffer(const Params *p);
+    void init();
+
+    ~WireBuffer();
+
+    // Consumer wakeup hook (currently a no-op in the .cc file).
+    void wakeup();
+
+    void setConsumer(Consumer* consumer_ptr)
+    {
+        m_consumer_ptr = consumer_ptr;
+    }
+    Consumer* getConsumer() { return m_consumer_ptr; };
+    void setDescription(const std::string& name) { m_description = name; };
+    std::string getDescription() { return m_description; };
+
+    void enqueue(MsgPtr message, Cycles latency);
+    void dequeue();
+    const Message* peek();
+    MessageBufferNode peekNode();
+    void recycle();
+    bool isReady();
+    bool areNSlotsAvailable(int n) { return true; }; // infinite queue length
+
+    void print(std::ostream& out) const;
+    // Monotonic per-buffer message sequence number; public because
+    // protocol-facing code accesses it directly.
+    uint64_t m_msg_counter;
+
+  private:
+    // Private copy constructor and assignment operator
+    WireBuffer (const WireBuffer& obj);
+    WireBuffer& operator=(const WireBuffer& obj);
+
+    // data members
+    Consumer* m_consumer_ptr; // Consumer to signal a wakeup()
+    std::string m_description;
+
+    // queues where memory requests live
+    std::vector<MessageBufferNode> m_message_queue;
+
+};
+
+std::ostream& operator<<(std::ostream& out, const WireBuffer& obj);
+
+#endif // __MEM_RUBY_SYSTEM_WIREBUFFER_HH__
diff --git a/src/mem/ruby/structures/WireBuffer.py b/src/mem/ruby/structures/WireBuffer.py
new file mode 100644
index 000000000..441947adf
--- /dev/null
+++ b/src/mem/ruby/structures/WireBuffer.py
@@ -0,0 +1,35 @@
+# Copyright (c) 2010 Advanced Micro Devices, Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met: redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer;
+# redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution;
+# neither the name of the copyright holders nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Author: Lisa Hsu
+
+from m5.params import *
+from m5.SimObject import SimObject
+
+class RubyWireBuffer(SimObject):
+    # SimObject wrapper exposing the C++ WireBuffer (a wire-like, in-order
+    # message conduit between closely coupled controllers) to gem5's
+    # Python configuration system.
+    type = 'RubyWireBuffer'
+    cxx_class = 'WireBuffer'
+    cxx_header = "mem/ruby/structures/WireBuffer.hh"