summaryrefslogtreecommitdiff
path: root/src/mem/ruby/system
diff options
context:
space:
mode:
authorNathan Binkert <nate@binkert.org>2009-05-11 10:38:43 -0700
committerNathan Binkert <nate@binkert.org>2009-05-11 10:38:43 -0700
commit2f30950143cc70bc42a3c8a4111d7cf8198ec881 (patch)
tree708f6c22edb3c6feb31dd82866c26623a5329580 /src/mem/ruby/system
parentc70241810d4e4f523f173c1646b008dc40faad8e (diff)
downloadgem5-2f30950143cc70bc42a3c8a4111d7cf8198ec881.tar.xz
ruby: Import ruby and slicc from GEMS
We eventually plan to replace the m5 cache hierarchy with the GEMS hierarchy, but for now we will make both live alongside each other.
Diffstat (limited to 'src/mem/ruby/system')
-rw-r--r--src/mem/ruby/system/AbstractBloomFilter.hh72
-rw-r--r--src/mem/ruby/system/AbstractMemOrCache.hh42
-rw-r--r--src/mem/ruby/system/AbstractReplacementPolicy.hh62
-rw-r--r--src/mem/ruby/system/BlockBloomFilter.cc147
-rw-r--r--src/mem/ruby/system/BlockBloomFilter.hh83
-rw-r--r--src/mem/ruby/system/BulkBloomFilter.cc233
-rw-r--r--src/mem/ruby/system/BulkBloomFilter.hh88
-rw-r--r--src/mem/ruby/system/CacheMemory.hh559
-rw-r--r--src/mem/ruby/system/DirectoryMemory.cc175
-rw-r--r--src/mem/ruby/system/DirectoryMemory.hh91
-rw-r--r--src/mem/ruby/system/GenericBloomFilter.cc154
-rw-r--r--src/mem/ruby/system/GenericBloomFilter.hh96
-rw-r--r--src/mem/ruby/system/H3BloomFilter.cc210
-rw-r--r--src/mem/ruby/system/H3BloomFilter.hh1259
-rw-r--r--src/mem/ruby/system/LRUPolicy.hh65
-rw-r--r--src/mem/ruby/system/LSB_CountingBloomFilter.cc141
-rw-r--r--src/mem/ruby/system/LSB_CountingBloomFilter.hh83
-rw-r--r--src/mem/ruby/system/MachineID.hh89
-rw-r--r--src/mem/ruby/system/MemoryControl.cc632
-rw-r--r--src/mem/ruby/system/MemoryControl.hh176
-rw-r--r--src/mem/ruby/system/MemoryNode.cc37
-rw-r--r--src/mem/ruby/system/MemoryNode.hh95
-rw-r--r--src/mem/ruby/system/MultiBitSelBloomFilter.cc191
-rw-r--r--src/mem/ruby/system/MultiBitSelBloomFilter.hh98
-rw-r--r--src/mem/ruby/system/MultiGrainBloomFilter.cc172
-rw-r--r--src/mem/ruby/system/MultiGrainBloomFilter.hh89
-rw-r--r--src/mem/ruby/system/NodeID.hh50
-rw-r--r--src/mem/ruby/system/NodePersistentTable.cc194
-rw-r--r--src/mem/ruby/system/NodePersistentTable.hh99
-rw-r--r--src/mem/ruby/system/NonCountingBloomFilter.cc145
-rw-r--r--src/mem/ruby/system/NonCountingBloomFilter.hh89
-rw-r--r--src/mem/ruby/system/PerfectCacheMemory.hh239
-rw-r--r--src/mem/ruby/system/PersistentArbiter.cc165
-rw-r--r--src/mem/ruby/system/PersistentArbiter.hh108
-rw-r--r--src/mem/ruby/system/PersistentTable.cc195
-rw-r--r--src/mem/ruby/system/PersistentTable.hh99
-rw-r--r--src/mem/ruby/system/PseudoLRUPolicy.hh110
-rw-r--r--src/mem/ruby/system/Sequencer.cc1161
-rw-r--r--src/mem/ruby/system/Sequencer.hh170
-rw-r--r--src/mem/ruby/system/StoreBuffer.cc300
-rw-r--r--src/mem/ruby/system/StoreBuffer.hh120
-rw-r--r--src/mem/ruby/system/StoreCache.cc178
-rw-r--r--src/mem/ruby/system/StoreCache.hh85
-rw-r--r--src/mem/ruby/system/System.cc269
-rw-r--r--src/mem/ruby/system/System.hh137
-rw-r--r--src/mem/ruby/system/TBETable.hh165
-rw-r--r--src/mem/ruby/system/TimerTable.cc129
-rw-r--r--src/mem/ruby/system/TimerTable.hh98
48 files changed, 9444 insertions, 0 deletions
diff --git a/src/mem/ruby/system/AbstractBloomFilter.hh b/src/mem/ruby/system/AbstractBloomFilter.hh
new file mode 100644
index 000000000..3b0c703ae
--- /dev/null
+++ b/src/mem/ruby/system/AbstractBloomFilter.hh
@@ -0,0 +1,72 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * AbstractBloomFilter.h
+ *
+ * Description:
+ *
+ *
+ */
+
+#ifndef ABSTRACT_BLOOM_FILTER_H
+#define ABSTRACT_BLOOM_FILTER_H
+
+#include "Global.hh"
+#include "AbstractChip.hh"
+#include "RubyConfig.hh"
+#include "Address.hh"
+
+class AbstractBloomFilter {
+public:
+
+ virtual ~AbstractBloomFilter() {};
+ virtual void clear() = 0;
+ virtual void increment(const Address& addr) = 0;
+ virtual void decrement(const Address& addr) = 0;
+ virtual void merge(AbstractBloomFilter * other_filter) = 0;
+ virtual void set(const Address& addr) = 0;
+ virtual void unset(const Address& addr) = 0;
+
+ virtual bool isSet(const Address& addr) = 0;
+ virtual int getCount(const Address& addr) = 0;
+ virtual int getTotalCount() = 0;
+
+ virtual void print(ostream& out) const = 0;
+
+ virtual int getIndex(const Address& addr) = 0;
+ virtual int readBit(const int index) = 0;
+ virtual void writeBit(const int index, const int value) = 0;
+
+private:
+
+};
+
+
+#endif
diff --git a/src/mem/ruby/system/AbstractMemOrCache.hh b/src/mem/ruby/system/AbstractMemOrCache.hh
new file mode 100644
index 000000000..a96a1328f
--- /dev/null
+++ b/src/mem/ruby/system/AbstractMemOrCache.hh
@@ -0,0 +1,42 @@
+
+/*
+ * AbstractMemOrCache.h
+ *
+ * Description:
+ *
+ *
+ */
+
+#ifndef ABSTRACT_MEM_OR_CACHE_H
+#define ABSTRACT_MEM_OR_CACHE_H
+
+#include "Global.hh"
+#include "AbstractChip.hh"
+#include "RubyConfig.hh"
+#include "Address.hh"
+
+class AbstractMemOrCache {
+public:
+
+ virtual ~AbstractMemOrCache() {};
+ virtual void setConsumer(Consumer* consumer_ptr) = 0;
+ virtual Consumer* getConsumer() = 0;
+
+ virtual void enqueue (const MsgPtr& message, int latency ) = 0;
+ virtual void enqueueMemRef (MemoryNode& memRef) = 0;
+ virtual void dequeue () = 0;
+ virtual const Message* peek () = 0;
+ virtual bool isReady () = 0;
+ virtual MemoryNode peekNode () = 0;
+ virtual bool areNSlotsAvailable (int n) = 0;
+ virtual void printConfig (ostream& out) = 0;
+ virtual void print (ostream& out) const = 0;
+ virtual void setDebug (int debugFlag) = 0;
+
+private:
+
+};
+
+
+#endif
+
diff --git a/src/mem/ruby/system/AbstractReplacementPolicy.hh b/src/mem/ruby/system/AbstractReplacementPolicy.hh
new file mode 100644
index 000000000..497226fad
--- /dev/null
+++ b/src/mem/ruby/system/AbstractReplacementPolicy.hh
@@ -0,0 +1,62 @@
+
+#ifndef ABSTRACTREPLACEMENTPOLICY_H
+#define ABSTRACTREPLACEMENTPOLICY_H
+
+#include "Global.hh"
+
+class AbstractReplacementPolicy {
+
+public:
+
+ AbstractReplacementPolicy(Index num_sets, Index assoc);
+ virtual ~AbstractReplacementPolicy();
+
+ /* touch a block. a.k.a. update timestamp */
+ virtual void touch(Index set, Index way, Time time) = 0;
+
+ /* returns the way to replace */
+ virtual Index getVictim(Index set) const = 0;
+
+ /* get the time of the last access */
+ Time getLastAccess(Index set, Index way);
+
+ protected:
+ unsigned int m_num_sets; /** total number of sets */
+ unsigned int m_assoc; /** set associativity */
+ Time **m_last_ref_ptr; /** timestamp of last reference */
+};
+
+inline
+AbstractReplacementPolicy::AbstractReplacementPolicy(Index num_sets, Index assoc)
+{
+ m_num_sets = num_sets;
+ m_assoc = assoc;
+ m_last_ref_ptr = new Time*[m_num_sets];
+ for(unsigned int i = 0; i < m_num_sets; i++){
+ m_last_ref_ptr[i] = new Time[m_assoc];
+ for(unsigned int j = 0; j < m_assoc; j++){
+ m_last_ref_ptr[i][j] = 0;
+ }
+ }
+}
+
+inline
+AbstractReplacementPolicy::~AbstractReplacementPolicy()
+{
+ if(m_last_ref_ptr != NULL){
+ for(unsigned int i = 0; i < m_num_sets; i++){
+ if(m_last_ref_ptr[i] != NULL){
+ delete[] m_last_ref_ptr[i];
+ }
+ }
+ delete[] m_last_ref_ptr;
+ }
+}
+
+inline
+Time AbstractReplacementPolicy::getLastAccess(Index set, Index way)
+{
+ return m_last_ref_ptr[set][way];
+}
+
+#endif // ABSTRACTREPLACEMENTPOLICY_H
diff --git a/src/mem/ruby/system/BlockBloomFilter.cc b/src/mem/ruby/system/BlockBloomFilter.cc
new file mode 100644
index 000000000..dbb0b5458
--- /dev/null
+++ b/src/mem/ruby/system/BlockBloomFilter.cc
@@ -0,0 +1,147 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * BlockBloomFilter.C
+ *
+ * Description:
+ *
+ *
+ */
+
+#include "BlockBloomFilter.hh"
+#include "Map.hh"
+#include "Address.hh"
+
+BlockBloomFilter::BlockBloomFilter(string str)
+{
+ string tail(str);
+ string head = string_split(tail, '_');
+
+ m_filter_size = atoi(head.c_str());
+ m_filter_size_bits = log_int(m_filter_size);
+
+ m_filter.setSize(m_filter_size);
+
+ clear();
+}
+
+BlockBloomFilter::~BlockBloomFilter(){
+}
+
+void BlockBloomFilter::clear()
+{
+ for (int i = 0; i < m_filter_size; i++) {
+ m_filter[i] = 0;
+ }
+}
+
+void BlockBloomFilter::increment(const Address& addr)
+{
+ // Not used
+}
+
+
+void BlockBloomFilter::decrement(const Address& addr)
+{
+ // Not used
+}
+
+void BlockBloomFilter::merge(AbstractBloomFilter * other_filter)
+{
+ // TODO
+}
+
+void BlockBloomFilter::set(const Address& addr)
+{
+ int i = get_index(addr);
+ m_filter[i] = 1;
+}
+
+void BlockBloomFilter::unset(const Address& addr)
+{
+ int i = get_index(addr);
+ m_filter[i] = 0;
+}
+
+bool BlockBloomFilter::isSet(const Address& addr)
+{
+ int i = get_index(addr);
+ return (m_filter[i]);
+}
+
+
+int BlockBloomFilter::getCount(const Address& addr)
+{
+ return m_filter[get_index(addr)];
+}
+
+int BlockBloomFilter::getTotalCount()
+{
+ int count = 0;
+
+ for (int i = 0; i < m_filter_size; i++) {
+ if (m_filter[i]) {
+ count++;
+ }
+ }
+ return count;
+}
+
+int BlockBloomFilter::getIndex(const Address& addr)
+{
+ return get_index(addr);
+}
+
+void BlockBloomFilter::print(ostream& out) const
+{
+}
+
+int BlockBloomFilter::readBit(const int index) {
+ return m_filter[index];
+}
+
+void BlockBloomFilter::writeBit(const int index, const int value) {
+ m_filter[index] = value;
+}
+
+int BlockBloomFilter::get_index(const Address& addr)
+{
+ // Pull out some bit field ==> B1
+ // Pull out additional bits, not the same as B1 ==> B2
+ // XOR B1 and B2 to get hash index
+ physical_address_t block_bits = addr.bitSelect( RubyConfig::dataBlockBits(), 2*RubyConfig::dataBlockBits() - 1);
+ int offset = 5;
+ physical_address_t other_bits = addr.bitSelect( 2*RubyConfig::dataBlockBits() + offset, 2*RubyConfig::dataBlockBits() + offset + m_filter_size_bits - 1);
+ int index = block_bits ^ other_bits;
+ assert(index < m_filter_size);
+ return index;
+}
+
+
diff --git a/src/mem/ruby/system/BlockBloomFilter.hh b/src/mem/ruby/system/BlockBloomFilter.hh
new file mode 100644
index 000000000..82f457157
--- /dev/null
+++ b/src/mem/ruby/system/BlockBloomFilter.hh
@@ -0,0 +1,83 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * BlockBloomFilter.h
+ *
+ * Description:
+ *
+ *
+ */
+
+#ifndef BLOCK_BLOOM_FILTER_H
+#define BLOCK_BLOOM_FILTER_H
+
+#include "Map.hh"
+#include "Global.hh"
+#include "AbstractChip.hh"
+#include "RubyConfig.hh"
+#include "Address.hh"
+#include "AbstractBloomFilter.hh"
+
+class BlockBloomFilter : public AbstractBloomFilter {
+public:
+
+ ~BlockBloomFilter();
+ BlockBloomFilter(string config);
+
+ void clear();
+ void increment(const Address& addr);
+ void decrement(const Address& addr);
+ void merge(AbstractBloomFilter * other_filter);
+ void set(const Address& addr);
+ void unset(const Address& addr);
+
+ bool isSet(const Address& addr);
+ int getCount(const Address& addr);
+ int getTotalCount();
+ int getIndex(const Address& addr);
+ int readBit(const int index);
+ void writeBit(const int index, const int value);
+
+ void print(ostream& out) const;
+
+private:
+
+ int get_index(const Address& addr);
+
+ Vector<int> m_filter;
+ int m_filter_size;
+ int m_filter_size_bits;
+
+ int m_count_bits;
+ int m_count;
+};
+
+
+#endif
diff --git a/src/mem/ruby/system/BulkBloomFilter.cc b/src/mem/ruby/system/BulkBloomFilter.cc
new file mode 100644
index 000000000..3408dfada
--- /dev/null
+++ b/src/mem/ruby/system/BulkBloomFilter.cc
@@ -0,0 +1,233 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * BulkBloomFilter.C
+ *
+ * Description:
+ *
+ *
+ */
+
+#include "BulkBloomFilter.hh"
+#include "Map.hh"
+#include "Address.hh"
+
+BulkBloomFilter::BulkBloomFilter(string str)
+{
+ string tail(str);
+ string head = string_split(tail, '_');
+
+ int smt_threads = RubyConfig::numberofSMTThreads();
+ m_filter_size = atoi(head.c_str());
+ m_filter_size_bits = log_int(m_filter_size);
+ // split the filter bits in half, c0 and c1
+ m_sector_bits = m_filter_size_bits - 1;
+
+ m_temp_filter.setSize(m_filter_size);
+ m_filter.setSize(m_filter_size);
+ clear();
+
+ // clear temp filter
+ for(int i=0; i < m_filter_size; ++i){
+ m_temp_filter[i] = 0;
+ }
+}
+
+BulkBloomFilter::~BulkBloomFilter(){
+
+}
+
+void BulkBloomFilter::clear()
+{
+ for (int i = 0; i < m_filter_size; i++) {
+ m_filter[i] = 0;
+ }
+}
+
+void BulkBloomFilter::increment(const Address& addr)
+{
+ // Not used
+}
+
+
+void BulkBloomFilter::decrement(const Address& addr)
+{
+ // Not used
+}
+
+void BulkBloomFilter::merge(AbstractBloomFilter * other_filter)
+{
+ // TODO
+}
+
+void BulkBloomFilter::set(const Address& addr)
+{
+ // c0 contains the cache index bits
+ int set_bits = m_sector_bits;
+ int block_bits = RubyConfig::dataBlockBits();
+ int c0 = addr.bitSelect( block_bits, block_bits + set_bits - 1);
+ // c1 contains the lower m_sector_bits permuted bits
+ //Address permuted_bits = permute(addr);
+ //int c1 = permuted_bits.bitSelect(0, set_bits-1);
+ int c1 = addr.bitSelect( block_bits+set_bits, (block_bits+2*set_bits) - 1);
+ //ASSERT(c0 < (m_filter_size/2));
+ //ASSERT(c0 + (m_filter_size/2) < m_filter_size);
+ //ASSERT(c1 < (m_filter_size/2));
+ // set v0 bit
+ m_filter[c0 + (m_filter_size/2)] = 1;
+ // set v1 bit
+ m_filter[c1] = 1;
+}
+
+void BulkBloomFilter::unset(const Address& addr)
+{
+ // not used
+}
+
+bool BulkBloomFilter::isSet(const Address& addr)
+{
+ // c0 contains the cache index bits
+ int set_bits = m_sector_bits;
+ int block_bits = RubyConfig::dataBlockBits();
+ int c0 = addr.bitSelect( block_bits, block_bits + set_bits - 1);
+ // c1 contains the lower 10 permuted bits
+ //Address permuted_bits = permute(addr);
+ //int c1 = permuted_bits.bitSelect(0, set_bits-1);
+ int c1 = addr.bitSelect( block_bits+set_bits, (block_bits+2*set_bits) - 1);
+ //ASSERT(c0 < (m_filter_size/2));
+ //ASSERT(c0 + (m_filter_size/2) < m_filter_size);
+ //ASSERT(c1 < (m_filter_size/2));
+ // set v0 bit
+ m_temp_filter[c0 + (m_filter_size/2)] = 1;
+ // set v1 bit
+ m_temp_filter[c1] = 1;
+
+ // perform filter intersection. If any c part is 0, no possibility of address being in signature.
+ // get first c intersection part
+ bool zero = false;
+ for(int i=0; i < m_filter_size/2; ++i){
+ // get intersection of signatures
+ m_temp_filter[i] = m_temp_filter[i] && m_filter[i];
+ zero = zero || m_temp_filter[i];
+ }
+ zero = !zero;
+ if(zero){
+ // one section is zero, no possibility of address in signature
+ // reset bits we just set
+ m_temp_filter[c0 + (m_filter_size/2)] = 0;
+ m_temp_filter[c1] = 0;
+ return false;
+ }
+
+ // check second section
+ zero = false;
+ for(int i=m_filter_size/2; i < m_filter_size; ++i){
+ // get intersection of signatures
+ m_temp_filter[i] = m_temp_filter[i] && m_filter[i];
+ zero = zero || m_temp_filter[i];
+ }
+ zero = !zero;
+ if(zero){
+ // one section is zero, no possibility of address in signature
+ m_temp_filter[c0 + (m_filter_size/2)] = 0;
+ m_temp_filter[c1] = 0;
+ return false;
+ }
+ // one section has at least one bit set
+ m_temp_filter[c0 + (m_filter_size/2)] = 0;
+ m_temp_filter[c1] = 0;
+ return true;
+}
+
+
+int BulkBloomFilter::getCount(const Address& addr)
+{
+ // not used
+ return 0;
+}
+
+int BulkBloomFilter::getTotalCount()
+{
+ int count = 0;
+ for (int i = 0; i < m_filter_size; i++) {
+ if (m_filter[i]) {
+ count++;
+ }
+ }
+ return count;
+}
+
+int BulkBloomFilter::getIndex(const Address& addr)
+{
+ return get_index(addr);
+}
+
+int BulkBloomFilter::readBit(const int index) {
+ return 0;
+ // TODO
+}
+
+void BulkBloomFilter::writeBit(const int index, const int value) {
+ // TODO
+}
+
+void BulkBloomFilter::print(ostream& out) const
+{
+}
+
+int BulkBloomFilter::get_index(const Address& addr)
+{
+ return addr.bitSelect( RubyConfig::dataBlockBits(), RubyConfig::dataBlockBits() + m_filter_size_bits - 1);
+}
+
+Address BulkBloomFilter::permute(const Address & addr){
+ // permutes the original address bits according to Table 5
+ int block_offset = RubyConfig::dataBlockBits();
+ physical_address_t part1 = addr.bitSelect( block_offset, block_offset + 6 );
+ physical_address_t part2 = addr.bitSelect( block_offset + 9, block_offset + 9 );
+ physical_address_t part3 = addr.bitSelect( block_offset + 11, block_offset + 11 );
+ physical_address_t part4 = addr.bitSelect( block_offset + 17, block_offset + 17 );
+ physical_address_t part5 = addr.bitSelect( block_offset + 7, block_offset + 8 );
+ physical_address_t part6 = addr.bitSelect( block_offset + 10, block_offset + 10 );
+ physical_address_t part7 = addr.bitSelect( block_offset + 12, block_offset + 12 );
+ physical_address_t part8 = addr.bitSelect( block_offset + 13, block_offset + 13 );
+ physical_address_t part9 = addr.bitSelect( block_offset + 15, block_offset + 16 );
+ physical_address_t part10 = addr.bitSelect( block_offset + 18, block_offset + 20 );
+ physical_address_t part11 = addr.bitSelect( block_offset + 14, block_offset + 14 );
+
+ physical_address_t result = (part1 << 14 ) | (part2 << 13 ) | (part3 << 12 ) | (part4 << 11 ) | (part5 << 9) | (part6 << 8)
+ | (part7 << 7) | (part8 << 6) | (part9 << 4) | (part10 << 1) | (part11);
+ // assume 32 bit addresses (both virtual and physical)
+ // select the remaining high-order 11 bits
+ physical_address_t remaining_bits = (addr.bitSelect( block_offset + 21, 31 )) << 21;
+ result = result | remaining_bits;
+
+ return Address(result);
+}
diff --git a/src/mem/ruby/system/BulkBloomFilter.hh b/src/mem/ruby/system/BulkBloomFilter.hh
new file mode 100644
index 000000000..f05b83a87
--- /dev/null
+++ b/src/mem/ruby/system/BulkBloomFilter.hh
@@ -0,0 +1,88 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * BulkBloomFilter.h
+ *
+ * Description:
+ *
+ *
+ */
+
+#ifndef BULK_BLOOM_FILTER_H
+#define BULK_BLOOM_FILTER_H
+
+#include "Map.hh"
+#include "Global.hh"
+#include "AbstractChip.hh"
+#include "RubyConfig.hh"
+#include "Address.hh"
+#include "AbstractBloomFilter.hh"
+
+class BulkBloomFilter : public AbstractBloomFilter {
+public:
+
+ ~BulkBloomFilter();
+ BulkBloomFilter(string config);
+
+ void clear();
+ void increment(const Address& addr);
+ void decrement(const Address& addr);
+ void merge(AbstractBloomFilter * other_filter);
+ void set(const Address& addr);
+ void unset(const Address& addr);
+
+ bool isSet(const Address& addr);
+ int getCount(const Address& addr);
+ int getTotalCount();
+ int getIndex(const Address& addr);
+ int readBit(const int index);
+ void writeBit(const int index, const int value);
+
+ void print(ostream& out) const;
+
+private:
+
+ int get_index(const Address& addr);
+ Address permute(const Address & addr);
+
+ Vector<int> m_filter;
+ Vector<int> m_temp_filter;
+
+ int m_filter_size;
+ int m_filter_size_bits;
+
+ int m_sector_bits;
+
+ int m_count_bits;
+ int m_count;
+};
+
+
+#endif
diff --git a/src/mem/ruby/system/CacheMemory.hh b/src/mem/ruby/system/CacheMemory.hh
new file mode 100644
index 000000000..9344f1463
--- /dev/null
+++ b/src/mem/ruby/system/CacheMemory.hh
@@ -0,0 +1,559 @@
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * CacheMemory.h
+ *
+ * Description:
+ *
+ * $Id: CacheMemory.h,v 3.7 2004/06/18 20:15:15 beckmann Exp $
+ *
+ */
+
+#ifndef CACHEMEMORY_H
+#define CACHEMEMORY_H
+
+#include "AbstractChip.hh"
+#include "Global.hh"
+#include "AccessPermission.hh"
+#include "Address.hh"
+#include "CacheRecorder.hh"
+#include "CacheRequestType.hh"
+#include "Vector.hh"
+#include "DataBlock.hh"
+#include "MachineType.hh"
+#include "RubySlicc_ComponentMapping.hh"
+#include "PseudoLRUPolicy.hh"
+#include "LRUPolicy.hh"
+#include <vector>
+
// Set-associative cache model, templated on the cache-entry type ENTRY.
// ENTRY is expected to expose m_Address, m_Permission, getDataBlk() and
// m_DataBlk (see the accessors below).  Set indexing is delegated to the
// RubySlicc component-mapping helpers, selected by the machine type.
template<class ENTRY>
class CacheMemory {
public:

  // Constructors
  CacheMemory(AbstractChip* chip_ptr, int numSetBits, int cacheAssoc, const MachineType machType, const string& description);

  // Destructor
  ~CacheMemory();

  // Public Methods

  // Dumps geometry/size information to the given stream.
  void printConfig(ostream& out);

  // perform a cache access and see if we hit or not.  Return true on a hit.
  bool tryCacheAccess(const Address& address, CacheRequestType type, DataBlock*& data_ptr);

  // similar to above, but doesn't require full access check
  bool testCacheAccess(const Address& address, CacheRequestType type, DataBlock*& data_ptr);

  // tests to see if an address is present in the cache
  bool isTagPresent(const Address& address) const;

  // Returns true if there is:
  //   a) a tag match on this address or there is
  //   b) an unused line in the same cache "way"
  bool cacheAvail(const Address& address) const;

  // find an unused entry and sets the tag appropriate for the address
  void allocate(const Address& address);

  // Explicitly free up this address
  void deallocate(const Address& address);

  // Returns with the physical address of the conflicting cache line
  Address cacheProbe(const Address& address) const;

  // looks an address up in the cache; the tag must be present (asserted)
  ENTRY& lookup(const Address& address);
  const ENTRY& lookup(const Address& address) const;

  // Get/Set permission of cache block
  AccessPermission getPermission(const Address& address) const;
  void changePermission(const Address& address, AccessPermission new_perm);

  // Hook for checkpointing the contents of the cache
  void recordCacheContents(CacheRecorder& tr) const;
  // Marks this cache as an I-cache, which changes how recordCacheContents
  // classifies Read_Only lines (IFETCH vs LD).
  void setAsInstructionCache(bool is_icache) { m_is_instruction_cache = is_icache; }

  // Set this address to most recently used
  void setMRU(const Address& address);

  // Byte-granularity read/write access to the data block of a cached line.
  void getMemoryValue(const Address& addr, char* value,
                      unsigned int size_in_bytes );
  void setMemoryValue(const Address& addr, char* value,
                      unsigned int size_in_bytes );

  // Print cache contents
  void print(ostream& out) const;
  void printData(ostream& out) const;

private:
  // Private Methods

  // convert a Address to its location in the cache
  Index addressToCacheSet(const Address& address) const;

  // Given a cache tag: returns the index of the tag in a set.
  // returns -1 if the tag is not found.
  int findTagInSet(Index line, const Address& tag) const;
  int findTagInSetIgnorePermissions(Index cacheSet, const Address& tag) const;

  // Private copy constructor and assignment operator (non-copyable)
  CacheMemory(const CacheMemory& obj);
  CacheMemory& operator=(const CacheMemory& obj);

  // Data Members (m_prefix)
  AbstractChip* m_chip_ptr;
  MachineType m_machType;
  string m_description;
  bool m_is_instruction_cache;

  // The first index is the cache set.
  // The second index is the way within the set (associativity).
  Vector<Vector<ENTRY> > m_cache;

  AbstractReplacementPolicy *m_replacementPolicy_ptr;

  int m_cache_num_sets;
  int m_cache_num_set_bits;
  int m_cache_assoc;
};
+
+// Output operator declaration
+//ostream& operator<<(ostream& out, const CacheMemory<ENTRY>& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+template<class ENTRY>
+inline
+ostream& operator<<(ostream& out, const CacheMemory<ENTRY>& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+
+// ****************************************************************
+
// Constructor: sizes the set/way arrays, selects the replacement policy from
// the global g_REPLACEMENT_POLICY string, and marks every entry NotPresent.
template<class ENTRY>
inline
CacheMemory<ENTRY>::CacheMemory(AbstractChip* chip_ptr, int numSetBits,
                                int cacheAssoc, const MachineType machType, const string& description)

{
  //cout << "CacheMemory constructor numThreads = " << numThreads << endl;
  m_chip_ptr = chip_ptr;
  m_machType = machType;
  // Description is prefixed with the machine type, e.g. "L1Cache_<desc>".
  m_description = MachineType_to_string(m_machType)+"_"+description;
  m_cache_num_set_bits = numSetBits;
  m_cache_num_sets = 1 << numSetBits;
  m_cache_assoc = cacheAssoc;
  m_is_instruction_cache = false;

  m_cache.setSize(m_cache_num_sets);
  // NOTE(review): "PSEDUO_LRU" is (mis)spelled this way here; presumably the
  // config value it is compared against uses the same spelling -- confirm
  // before correcting it.
  if(strcmp(g_REPLACEMENT_POLICY, "PSEDUO_LRU") == 0)
    m_replacementPolicy_ptr = new PseudoLRUPolicy(m_cache_num_sets, m_cache_assoc);
  else if(strcmp(g_REPLACEMENT_POLICY, "LRU") == 0)
    m_replacementPolicy_ptr = new LRUPolicy(m_cache_num_sets, m_cache_assoc);
  else
    assert(false);  // unrecognized replacement policy string
  // Initialize every way of every set to an empty (NotPresent) entry.
  for (int i = 0; i < m_cache_num_sets; i++) {
    m_cache[i].setSize(m_cache_assoc);
    for (int j = 0; j < m_cache_assoc; j++) {
      m_cache[i][j].m_Address.setAddress(0);
      m_cache[i][j].m_Permission = AccessPermission_NotPresent;
    }
  }


  // cout << "Before setting trans address list size" << endl;
  //create a trans address for each SMT thread
//   m_trans_address_list.setSize(numThreads);
//   for(int i=0; i < numThreads; ++i){
//     cout << "Setting list size for list " << i << endl;
//     m_trans_address_list[i].setSize(30);
//   }
  //cout << "CacheMemory constructor finished" << endl;
}
+
+template<class ENTRY>
+inline
+CacheMemory<ENTRY>::~CacheMemory()
+{
+ if(m_replacementPolicy_ptr != NULL)
+ delete m_replacementPolicy_ptr;
+}
+
// Prints the cache geometry (associativity, set count) and derived sizes in
// bytes/KB/MB, using the global data-block size from RubyConfig.
template<class ENTRY>
inline
void CacheMemory<ENTRY>::printConfig(ostream& out)
{
  out << "Cache config: " << m_description << endl;
  out << "  cache_associativity: " << m_cache_assoc << endl;
  out << "  num_cache_sets_bits: " << m_cache_num_set_bits << endl;
  const int cache_num_sets = 1 << m_cache_num_set_bits;
  out << "  num_cache_sets: " << cache_num_sets << endl;
  // Per-set sizes (one way across all sets)...
  out << "  cache_set_size_bytes: " << cache_num_sets * RubyConfig::dataBlockBytes() << endl;
  out << "  cache_set_size_Kbytes: "
      << double(cache_num_sets * RubyConfig::dataBlockBytes()) / (1<<10) << endl;
  out << "  cache_set_size_Mbytes: "
      << double(cache_num_sets * RubyConfig::dataBlockBytes()) / (1<<20) << endl;
  // ...and total sizes including associativity.
  out << "  cache_size_bytes: "
      << cache_num_sets * RubyConfig::dataBlockBytes() * m_cache_assoc << endl;
  out << "  cache_size_Kbytes: "
      << double(cache_num_sets * RubyConfig::dataBlockBytes() * m_cache_assoc) / (1<<10) << endl;
  out << "  cache_size_Mbytes: "
      << double(cache_num_sets * RubyConfig::dataBlockBytes() * m_cache_assoc) / (1<<20) << endl;
}
+
+// PRIVATE METHODS
+
+// convert a Address to its location in the cache
+template<class ENTRY>
+inline
+Index CacheMemory<ENTRY>::addressToCacheSet(const Address& address) const
+{
+ assert(address == line_address(address));
+ Index temp = -1;
+ switch (m_machType) {
+ case MACHINETYPE_L1CACHE_ENUM:
+ temp = map_address_to_L1CacheSet(address, m_cache_num_set_bits);
+ break;
+ case MACHINETYPE_L2CACHE_ENUM:
+ temp = map_address_to_L2CacheSet(address, m_cache_num_set_bits);
+ break;
+ default:
+ ERROR_MSG("Don't recognize m_machType");
+ }
+ assert(temp < m_cache_num_sets);
+ assert(temp >= 0);
+ return temp;
+}
+
+// Given a cache index: returns the index of the tag in a set.
+// returns -1 if the tag is not found.
+template<class ENTRY>
+inline
+int CacheMemory<ENTRY>::findTagInSet(Index cacheSet, const Address& tag) const
+{
+ assert(tag == line_address(tag));
+ // search the set for the tags
+ for (int i=0; i < m_cache_assoc; i++) {
+ if ((m_cache[cacheSet][i].m_Address == tag) &&
+ (m_cache[cacheSet][i].m_Permission != AccessPermission_NotPresent)) {
+ return i;
+ }
+ }
+ return -1; // Not found
+}
+
+// Given a cache index: returns the index of the tag in a set.
+// returns -1 if the tag is not found.
+template<class ENTRY>
+inline
+int CacheMemory<ENTRY>::findTagInSetIgnorePermissions(Index cacheSet, const Address& tag) const
+{
+ assert(tag == line_address(tag));
+ // search the set for the tags
+ for (int i=0; i < m_cache_assoc; i++) {
+ if (m_cache[cacheSet][i].m_Address == tag)
+ return i;
+ }
+ return -1; // Not found
+}
+
+// PUBLIC METHODS
+template<class ENTRY>
+inline
+bool CacheMemory<ENTRY>::tryCacheAccess(const Address& address,
+ CacheRequestType type,
+ DataBlock*& data_ptr)
+{
+ assert(address == line_address(address));
+ DEBUG_EXPR(CACHE_COMP, HighPrio, address);
+ Index cacheSet = addressToCacheSet(address);
+ int loc = findTagInSet(cacheSet, address);
+ if(loc != -1){ // Do we even have a tag match?
+ ENTRY& entry = m_cache[cacheSet][loc];
+ m_replacementPolicy_ptr->touch(cacheSet, loc, g_eventQueue_ptr->getTime());
+ data_ptr = &(entry.getDataBlk());
+
+ if(entry.m_Permission == AccessPermission_Read_Write) {
+ return true;
+ }
+ if ((entry.m_Permission == AccessPermission_Read_Only) &&
+ (type == CacheRequestType_LD || type == CacheRequestType_IFETCH)) {
+ return true;
+ }
+ // The line must not be accessible
+ }
+ data_ptr = NULL;
+ return false;
+}
+
+template<class ENTRY>
+inline
+bool CacheMemory<ENTRY>::testCacheAccess(const Address& address,
+ CacheRequestType type,
+ DataBlock*& data_ptr)
+{
+ assert(address == line_address(address));
+ DEBUG_EXPR(CACHE_COMP, HighPrio, address);
+ Index cacheSet = addressToCacheSet(address);
+ int loc = findTagInSet(cacheSet, address);
+ if(loc != -1){ // Do we even have a tag match?
+ ENTRY& entry = m_cache[cacheSet][loc];
+ m_replacementPolicy_ptr->touch(cacheSet, loc, g_eventQueue_ptr->getTime());
+ data_ptr = &(entry.getDataBlk());
+
+ return (m_cache[cacheSet][loc].m_Permission != AccessPermission_NotPresent);
+ }
+ data_ptr = NULL;
+ return false;
+}
+
+// tests to see if an address is present in the cache
+template<class ENTRY>
+inline
+bool CacheMemory<ENTRY>::isTagPresent(const Address& address) const
+{
+ assert(address == line_address(address));
+ Index cacheSet = addressToCacheSet(address);
+ int location = findTagInSet(cacheSet, address);
+
+ if (location == -1) {
+ // We didn't find the tag
+ DEBUG_EXPR(CACHE_COMP, LowPrio, address);
+ DEBUG_MSG(CACHE_COMP, LowPrio, "No tag match");
+ return false;
+ }
+ DEBUG_EXPR(CACHE_COMP, LowPrio, address);
+ DEBUG_MSG(CACHE_COMP, LowPrio, "found");
+ return true;
+}
+
+// Returns true if there is:
+// a) a tag match on this address or there is
+// b) an unused line in the same cache "way"
+template<class ENTRY>
+inline
+bool CacheMemory<ENTRY>::cacheAvail(const Address& address) const
+{
+ assert(address == line_address(address));
+
+ Index cacheSet = addressToCacheSet(address);
+
+ for (int i=0; i < m_cache_assoc; i++) {
+ if (m_cache[cacheSet][i].m_Address == address) {
+ // Already in the cache
+ return true;
+ }
+
+ if (m_cache[cacheSet][i].m_Permission == AccessPermission_NotPresent) {
+ // We found an empty entry
+ return true;
+ }
+ }
+ return false;
+}
+
// Claims the first free way of the address's set for this line.  The new
// entry starts default-constructed with permission Invalid and is made MRU.
// Preconditions: line-aligned address, tag not present, a free way exists.
template<class ENTRY>
inline
void CacheMemory<ENTRY>::allocate(const Address& address)
{
  assert(address == line_address(address));
  assert(!isTagPresent(address));
  assert(cacheAvail(address));
  DEBUG_EXPR(CACHE_COMP, HighPrio, address);

  // Find the first open slot
  Index cacheSet = addressToCacheSet(address);
  for (int i=0; i < m_cache_assoc; i++) {
    if (m_cache[cacheSet][i].m_Permission == AccessPermission_NotPresent) {
      m_cache[cacheSet][i] = ENTRY(); // Init entry (reset to default-constructed state)
      m_cache[cacheSet][i].m_Address = address;
      m_cache[cacheSet][i].m_Permission = AccessPermission_Invalid;

      // Freshly allocated lines enter as most-recently-used.
      m_replacementPolicy_ptr->touch(cacheSet, i, g_eventQueue_ptr->getTime());

      return;
    }
  }
  // Unreachable: cacheAvail() above guaranteed a free way.
  ERROR_MSG("Allocate didn't find an available entry");
}
+
+template<class ENTRY>
+inline
+void CacheMemory<ENTRY>::deallocate(const Address& address)
+{
+ assert(address == line_address(address));
+ assert(isTagPresent(address));
+ DEBUG_EXPR(CACHE_COMP, HighPrio, address);
+ lookup(address).m_Permission = AccessPermission_NotPresent;
+}
+
+// Returns with the physical address of the conflicting cache line
+template<class ENTRY>
+inline
+Address CacheMemory<ENTRY>::cacheProbe(const Address& address) const
+{
+ assert(address == line_address(address));
+ assert(!cacheAvail(address));
+
+ Index cacheSet = addressToCacheSet(address);
+ return m_cache[cacheSet][m_replacementPolicy_ptr->getVictim(cacheSet)].m_Address;
+}
+
+// looks an address up in the cache
+template<class ENTRY>
+inline
+ENTRY& CacheMemory<ENTRY>::lookup(const Address& address)
+{
+ assert(address == line_address(address));
+ Index cacheSet = addressToCacheSet(address);
+ int loc = findTagInSet(cacheSet, address);
+ assert(loc != -1);
+ return m_cache[cacheSet][loc];
+}
+
+// looks an address up in the cache
+template<class ENTRY>
+inline
+const ENTRY& CacheMemory<ENTRY>::lookup(const Address& address) const
+{
+ assert(address == line_address(address));
+ Index cacheSet = addressToCacheSet(address);
+ int loc = findTagInSet(cacheSet, address);
+ assert(loc != -1);
+ return m_cache[cacheSet][loc];
+}
+
+template<class ENTRY>
+inline
+AccessPermission CacheMemory<ENTRY>::getPermission(const Address& address) const
+{
+ assert(address == line_address(address));
+ return lookup(address).m_Permission;
+}
+
+template<class ENTRY>
+inline
+void CacheMemory<ENTRY>::changePermission(const Address& address, AccessPermission new_perm)
+{
+ assert(address == line_address(address));
+ lookup(address).m_Permission = new_perm;
+ assert(getPermission(address) == new_perm);
+}
+
+// Sets the most recently used bit for a cache block
+template<class ENTRY>
+inline
+void CacheMemory<ENTRY>::setMRU(const Address& address)
+{
+ Index cacheSet;
+
+ cacheSet = addressToCacheSet(address);
+ m_replacementPolicy_ptr->touch(cacheSet,
+ findTagInSet(cacheSet, address),
+ g_eventQueue_ptr->getTime());
+}
+
// Checkpointing hook: walks every set/way and records each present line into
// the CacheRecorder.  Read_Only lines are recorded as IFETCH (I-cache) or LD;
// Read_Write lines as ST; all other permissions are skipped.
template<class ENTRY>
inline
void CacheMemory<ENTRY>::recordCacheContents(CacheRecorder& tr) const
{
  for (int i = 0; i < m_cache_num_sets; i++) {
    for (int j = 0; j < m_cache_assoc; j++) {
      AccessPermission perm = m_cache[i][j].m_Permission;
      CacheRequestType request_type = CacheRequestType_NULL;
      if (perm == AccessPermission_Read_Only) {
        if (m_is_instruction_cache) {
          request_type = CacheRequestType_IFETCH;
        } else {
          request_type = CacheRequestType_LD;
        }
      } else if (perm == AccessPermission_Read_Write) {
        request_type = CacheRequestType_ST;
      }

      // request_type stays NULL for NotPresent/other permissions: not recorded.
      if (request_type != CacheRequestType_NULL) {
        tr.addRecord(m_chip_ptr->getID(), m_cache[i][j].m_Address,
                     Address(0), request_type, m_replacementPolicy_ptr->getLastAccess(i, j));
      }
    }
  }
}
+
+template<class ENTRY>
+inline
+void CacheMemory<ENTRY>::print(ostream& out) const
+{
+ out << "Cache dump: " << m_description << endl;
+ for (int i = 0; i < m_cache_num_sets; i++) {
+ for (int j = 0; j < m_cache_assoc; j++) {
+ out << " Index: " << i
+ << " way: " << j
+ << " entry: " << m_cache[i][j] << endl;
+ }
+ }
+}
+
+template<class ENTRY>
+inline
+void CacheMemory<ENTRY>::printData(ostream& out) const
+{
+ out << "printData() not supported" << endl;
+}
+
+template<class ENTRY>
+void CacheMemory<ENTRY>::getMemoryValue(const Address& addr, char* value,
+ unsigned int size_in_bytes ){
+ ENTRY entry = lookup(line_address(addr));
+ unsigned int startByte = addr.getAddress() - line_address(addr).getAddress();
+ for(unsigned int i=0; i<size_in_bytes; ++i){
+ value[i] = entry.m_DataBlk.getByte(i + startByte);
+ }
+}
+
+template<class ENTRY>
+void CacheMemory<ENTRY>::setMemoryValue(const Address& addr, char* value,
+ unsigned int size_in_bytes ){
+ ENTRY& entry = lookup(line_address(addr));
+ unsigned int startByte = addr.getAddress() - line_address(addr).getAddress();
+ assert(size_in_bytes > 0);
+ for(unsigned int i=0; i<size_in_bytes; ++i){
+ entry.m_DataBlk.setByte(i + startByte, value[i]);
+ }
+
+ entry = lookup(line_address(addr));
+}
+
+#endif //CACHEMEMORY_H
+
diff --git a/src/mem/ruby/system/DirectoryMemory.cc b/src/mem/ruby/system/DirectoryMemory.cc
new file mode 100644
index 000000000..a1ec38cd2
--- /dev/null
+++ b/src/mem/ruby/system/DirectoryMemory.cc
@@ -0,0 +1,175 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * DirectoryMemory.C
+ *
+ * Description: See DirectoryMemory.h
+ *
+ * $Id$
+ *
+ */
+
+#include "System.hh"
+#include "Driver.hh"
+#include "DirectoryMemory.hh"
+#include "RubySlicc_Util.hh"
+#include "RubyConfig.hh"
+#include "Chip.hh"
+#include "interface.hh"
+
// Constructor: sizes the directory to the full memory-module block count and
// creates a table of entry pointers, all initially NULL (entries are
// allocated lazily in lookup()).
DirectoryMemory::DirectoryMemory(Chip* chip_ptr, int version)
{
  m_chip_ptr = chip_ptr;
  m_version = version;
  // THIS DOESN'T SEEM TO WORK -- MRM
  // m_size = RubyConfig::memoryModuleBlocks()/RubyConfig::numberOfDirectory();
  m_size = RubyConfig::memoryModuleBlocks();
  assert(m_size > 0);
  // allocates an array of directory entry pointers & sets them to NULL
  // NOTE(review): operator new throws std::bad_alloc on failure rather than
  // returning NULL, so the check below is dead code unless a non-throwing
  // allocator is in use -- confirm before relying on it.
  m_entries = new Directory_Entry*[m_size];
  if (m_entries == NULL) {
    ERROR_MSG("Directory Memory: unable to allocate memory.");
  }

  for (int i=0; i < m_size; i++) {
    m_entries[i] = NULL;
  }
}
+
+DirectoryMemory::~DirectoryMemory()
+{
+ // free up all the directory entries
+ for (int i=0; i < m_size; i++) {
+ if (m_entries[i] != NULL) {
+ delete m_entries[i];
+ m_entries[i] = NULL;
+ }
+ }
+
+ // free up the array of directory entries
+ delete[] m_entries;
+}
+
// Static method
// Prints the global memory configuration (sizes derived from RubyConfig);
// static because nothing here depends on a particular directory instance.
void DirectoryMemory::printConfig(ostream& out)
{
  out << "Memory config:" << endl;
  out << "  memory_bits: " << RubyConfig::memorySizeBits() << endl;
  out << "  memory_size_bytes: " << RubyConfig::memorySizeBytes() << endl;
  out << "  memory_size_Kbytes: " << double(RubyConfig::memorySizeBytes()) / (1<<10) << endl;
  out << "  memory_size_Mbytes: " << double(RubyConfig::memorySizeBytes()) / (1<<20) << endl;
  out << "  memory_size_Gbytes: " << double(RubyConfig::memorySizeBytes()) / (1<<30) << endl;

  out << "  module_bits: " << RubyConfig::memoryModuleBits() << endl;
  out << "  module_size_lines: " << RubyConfig::memoryModuleBlocks() << endl;
  out << "  module_size_bytes: " << RubyConfig::memoryModuleBlocks() * RubyConfig::dataBlockBytes() << endl;
  out << "  module_size_Kbytes: " << double(RubyConfig::memoryModuleBlocks() * RubyConfig::dataBlockBytes()) / (1<<10) << endl;
  out << "  module_size_Mbytes: " << double(RubyConfig::memoryModuleBlocks() * RubyConfig::dataBlockBytes()) / (1<<20) << endl;
}
+
// Public method
// True when this directory instance (identified as
// chip-id * directories-per-chip + version) is the home node for `address`.
bool DirectoryMemory::isPresent(PhysAddress address)
{
  return (map_Address_to_DirectoryNode(address) == m_chip_ptr->getID()*RubyConfig::numberOfDirectoryPerChip()+m_version);
}
+
+Directory_Entry& DirectoryMemory::lookup(PhysAddress address)
+{
+ assert(isPresent(address));
+ Index index = address.memoryModuleIndex();
+
+ if (index < 0 || index > m_size) {
+ WARN_EXPR(m_chip_ptr->getID());
+ WARN_EXPR(address.getAddress());
+ WARN_EXPR(index);
+ WARN_EXPR(m_size);
+ ERROR_MSG("Directory Memory Assertion: accessing memory out of range.");
+ }
+ Directory_Entry* entry = m_entries[index];
+
+ // allocate the directory entry on demand.
+ if (entry == NULL) {
+ entry = new Directory_Entry;
+
+ // entry->getProcOwner() = m_chip_ptr->getID(); // FIXME - This should not be hard coded
+ // entry->getDirOwner() = true; // FIXME - This should not be hard-coded
+
+ // load the data from SimICS when first initalizing
+ if (g_SIMICS) {
+ if (DATA_BLOCK) {
+ physical_address_t physAddr = address.getAddress();
+
+ for(int j=0; j < RubyConfig::dataBlockBytes(); j++) {
+ int8 data_byte = (int8) SIMICS_read_physical_memory( m_chip_ptr->getID(),
+ physAddr + j, 1 );
+ //printf("SimICS, byte %d: %lld\n", j, data_byte );
+ entry->getDataBlk().setByte(j, data_byte);
+ }
+ DEBUG_EXPR(NODE_COMP, MedPrio,entry->getDataBlk());
+ }
+ }
+
+ // store entry to the table
+ m_entries[index] = entry;
+ }
+
+ return (*entry);
+}
+
+/*
+void DirectoryMemory::invalidateBlock(PhysAddress address)
+{
+ assert(isPresent(address));
+
+ Index index = address.memoryModuleIndex();
+
+ if (index < 0 || index > m_size) {
+ ERROR_MSG("Directory Memory Assertion: accessing memory out of range.");
+ }
+
+ if(m_entries[index] != NULL){
+ delete m_entries[index];
+ m_entries[index] = NULL;
+ }
+
+}
+*/
+
+void DirectoryMemory::print(ostream& out) const
+{
+ out << "Directory dump: " << endl;
+ for (int i=0; i < m_size; i++) {
+ if (m_entries[i] != NULL) {
+ out << i << ": ";
+ out << *m_entries[i] << endl;
+ }
+ }
+}
+
diff --git a/src/mem/ruby/system/DirectoryMemory.hh b/src/mem/ruby/system/DirectoryMemory.hh
new file mode 100644
index 000000000..7c0831af6
--- /dev/null
+++ b/src/mem/ruby/system/DirectoryMemory.hh
@@ -0,0 +1,91 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * DirectoryMemory.h
+ *
+ * Description:
+ *
+ * $Id$
+ *
+ */
+
+#ifndef DIRECTORYMEMORY_H
+#define DIRECTORYMEMORY_H
+
+#include "Global.hh"
+#include "Address.hh"
+#include "Directory_Entry.hh"
+
+class Chip;
+
+class DirectoryMemory {
+public:
+ // Constructors
+ DirectoryMemory(Chip* chip_ptr, int version);
+
+ // Destructor
+ ~DirectoryMemory();
+
+ // Public Methods
+ static void printConfig(ostream& out);
+ bool isPresent(PhysAddress address);
+ Directory_Entry& lookup(PhysAddress address);
+
+ void print(ostream& out) const;
+
+private:
+ // Private Methods
+
+ // Private copy constructor and assignment operator
+ DirectoryMemory(const DirectoryMemory& obj);
+ DirectoryMemory& operator=(const DirectoryMemory& obj);
+
+ // Data Members (m_ prefix)
+ Directory_Entry **m_entries;
+ Chip* m_chip_ptr;
+ int m_size; // # of memory module blocks for this directory
+ int m_version;
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const DirectoryMemory& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const DirectoryMemory& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //DIRECTORYMEMORY_H
diff --git a/src/mem/ruby/system/GenericBloomFilter.cc b/src/mem/ruby/system/GenericBloomFilter.cc
new file mode 100644
index 000000000..38dd7f437
--- /dev/null
+++ b/src/mem/ruby/system/GenericBloomFilter.cc
@@ -0,0 +1,154 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * GenericBloomFilter.h
+ *
+ * Description:
+ *
+ *
+ */
+
+#include "Global.hh"
+#include "AbstractChip.hh"
+#include "RubyConfig.hh"
+#include "Address.hh"
+
+#include "GenericBloomFilter.hh"
+#include "LSB_CountingBloomFilter.hh"
+#include "NonCountingBloomFilter.hh"
+#include "BulkBloomFilter.hh"
+#include "BlockBloomFilter.hh"
+#include "MultiGrainBloomFilter.hh"
+#include "MultiBitSelBloomFilter.hh"
+#include "H3BloomFilter.hh"
+
// Constructor: picks the concrete filter implementation from the leading
// token of `config` (e.g. "Bulk_...", "H3_...") and forwards the remainder
// of the string to its constructor.
GenericBloomFilter::GenericBloomFilter(AbstractChip* chip_ptr, string config)
{
  m_chip_ptr = chip_ptr;


  // string_split appears to strip the token before the first '_' out of
  // `tail` and return it -- confirm against its definition.
  string tail(config);
  string head = string_split(tail,'_');

  if (head == "LSB_Counting" ) {
    m_filter = new LSB_CountingBloomFilter(tail);
  }
  else if(head == "NonCounting" ) {
    m_filter = new NonCountingBloomFilter(tail);
  }
  else if(head == "Bulk" ) {
    m_filter = new BulkBloomFilter(tail);
  }
  else if(head == "Block") {
    m_filter = new BlockBloomFilter(tail);
  }
  else if(head == "Multigrain"){
    m_filter = new MultiGrainBloomFilter(tail);
  }
  else if(head == "MultiBitSel"){
    m_filter = new MultiBitSelBloomFilter(tail);
  }
  else if(head == "H3"){
    m_filter = new H3BloomFilter(tail);
  }
  else {
    assert(0);  // unrecognized filter type in config string
  }
}
+
// Destructor: owns and releases the concrete filter chosen in the constructor.
GenericBloomFilter::~GenericBloomFilter()
{
  delete m_filter;
}

// The methods below are thin forwarders to the concrete AbstractBloomFilter
// implementation selected at construction time.

void GenericBloomFilter::clear()
{
  m_filter->clear();
}

void GenericBloomFilter::increment(const Address& addr)
{
  m_filter->increment(addr);
}

void GenericBloomFilter::decrement(const Address& addr)
{
  m_filter->decrement(addr);
}

// Merges the other wrapper's underlying filter into ours.
void GenericBloomFilter::merge(GenericBloomFilter * other_filter)
{
  m_filter->merge(other_filter->getFilter());
}

void GenericBloomFilter::set(const Address& addr)
{
  m_filter->set(addr);
}

void GenericBloomFilter::unset(const Address& addr)
{
  m_filter->unset(addr);
}

bool GenericBloomFilter::isSet(const Address& addr)
{
  return m_filter->isSet(addr);
}

int GenericBloomFilter::getCount(const Address& addr)
{
  return m_filter->getCount(addr);
}

int GenericBloomFilter::getTotalCount()
{
  return m_filter->getTotalCount();
}

int GenericBloomFilter::getIndex(const Address& addr)
{
  return m_filter->getIndex(addr);
}

int GenericBloomFilter::readBit(const int index) {
  return m_filter->readBit(index);
}

void GenericBloomFilter::writeBit(const int index, const int value) {
  m_filter->writeBit(index, value);
}

// Note: "return" of a void expression is legal C++ and equivalent to a plain
// call followed by return.
void GenericBloomFilter::print(ostream& out) const
{
  return m_filter->print(out);
}
+
+
diff --git a/src/mem/ruby/system/GenericBloomFilter.hh b/src/mem/ruby/system/GenericBloomFilter.hh
new file mode 100644
index 000000000..91cfdfd6e
--- /dev/null
+++ b/src/mem/ruby/system/GenericBloomFilter.hh
@@ -0,0 +1,96 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * GenericBloomFilter.h
+ *
+ * Description:
+ *
+ *
+ */
+
+#ifndef GENERIC_BLOOM_FILTER_H
+#define GENERIC_BLOOM_FILTER_H
+
+#include "Global.hh"
+#include "AbstractChip.hh"
+#include "RubyConfig.hh"
+#include "Address.hh"
+#include "AbstractBloomFilter.hh"
+
+// Facade over the family of AbstractBloomFilter implementations.  The
+// leading token of the config string selects the concrete filter type
+// (LSB_Counting, NonCounting, Bulk, Block, Multigrain, MultiBitSel,
+// H3 -- see the constructor in GenericBloomFilter.cc); every member
+// function simply forwards to that filter.
+class GenericBloomFilter {
+public:
+
+ // Constructors
+ // chip_ptr: owning chip context; config: "<Type>..." selector string.
+ GenericBloomFilter(AbstractChip* chip_ptr, string config);
+
+ void clear();
+ void increment(const Address& addr);
+ void decrement(const Address& addr);
+ void merge(GenericBloomFilter * other_filter);
+ void set(const Address& addr);
+ void unset(const Address& addr);
+ // Expose the wrapped filter (used by merge()); non-owning pointer.
+ AbstractBloomFilter * getFilter(){
+ return m_filter;
+ }
+
+ bool isSet(const Address& addr);
+
+ int getCount(const Address& addr);
+
+ int getTotalCount();
+
+ int getIndex(const Address& addr);
+ int readBit(const int index);
+ void writeBit(const int index, const int value);
+
+ void print(ostream& out) const;
+ void printConfig(ostream& out) { out << "GenericBloomFilter" << endl; }
+
+ // Destructor
+ // Deletes m_filter, which this class owns.
+ ~GenericBloomFilter();
+
+
+private:
+
+ AbstractChip* m_chip_ptr;   // context passed in at construction (not used by the forwarding methods)
+ AbstractBloomFilter* m_filter;  // owned concrete filter instance
+};
+
+// Output operator definition
+// Stream-insertion operator: prints the filter via print() and flushes.
+// 'extern inline' is this codebase's legacy idiom for header-defined
+// free functions.
+extern inline
+ostream& operator<<(ostream& out, const GenericBloomFilter& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+
+#endif
diff --git a/src/mem/ruby/system/H3BloomFilter.cc b/src/mem/ruby/system/H3BloomFilter.cc
new file mode 100644
index 000000000..43a47e873
--- /dev/null
+++ b/src/mem/ruby/system/H3BloomFilter.cc
@@ -0,0 +1,210 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * H3BloomFilter.cc
+ *
+ * Description:
+ *
+ *
+ */
+
+#include "H3BloomFilter.hh"
+#include "Map.hh"
+#include "Address.hh"
+
+// Construct from a config string "<filter_size>_<num_hashes>_<mode>",
+// where <mode> is "Regular" (all hashes index the full bit vector) or
+// "Parallel" (the vector is split into num_hashes disjoint banks).
+H3BloomFilter::H3BloomFilter(string str)
+{
+ //TODO: change this ugly init code...
+ // Legacy parameters of the older multiplicative hash; the H3 hash
+ // itself does not read them (see the commented-out formula in
+ // get_index()).
+ primes_list[0] = 9323;
+ primes_list[1] = 11279;
+ primes_list[2] = 10247;
+ primes_list[3] = 30637;
+ primes_list[4] = 25717;
+ primes_list[5] = 43711;
+
+ mults_list[0] = 255;
+ mults_list[1] = 29;
+ mults_list[2] = 51;
+ mults_list[3] = 3;
+ mults_list[4] = 77;
+ mults_list[5] = 43;
+
+ adds_list[0] = 841;
+ adds_list[1] = 627;
+ adds_list[2] = 1555;
+ adds_list[3] = 241;
+ adds_list[4] = 7777;
+ adds_list[5] = 65931;
+
+
+
+ string tail(str);
+ string head = string_split(tail, '_');
+
+ // head now holds the filter size; tail still holds "<num_hashes>_<mode>"
+ m_filter_size = atoi(head.c_str());
+
+ head = string_split(tail, '_');
+ m_num_hashes = atoi(head.c_str());
+
+ // After the second split, tail is the mode keyword.
+ if(tail == "Regular") {
+ isParallel = false;
+ } else if (tail == "Parallel") {
+ isParallel = true;
+ } else {
+ cout << "ERROR: Incorrect config string for MultiHash Bloom! :" << str << endl;
+ assert(0);
+ }
+
+ m_filter_size_bits = log_int(m_filter_size);
+
+ // Bank size for Parallel mode (assumes num_hashes divides filter_size
+ // evenly -- TODO confirm; a remainder would leave unused tail bits).
+ m_par_filter_size = m_filter_size/m_num_hashes;
+ m_par_filter_size_bits = log_int(m_par_filter_size);
+
+ m_filter.setSize(m_filter_size);
+ clear();
+}
+
+// Destructor: nothing to release -- m_filter is a value member.
+H3BloomFilter::~H3BloomFilter(){
+}
+
+// Reset every bit of the filter vector to the empty (0) state.
+void H3BloomFilter::clear()
+{
+  int idx = m_filter_size;
+  while (idx-- > 0) {
+    m_filter[idx] = 0;
+  }
+}
+
+// Counting-filter hooks: an H3 filter stores plain bits, not counters,
+// so increment/decrement are deliberate no-ops.
+void H3BloomFilter::increment(const Address& addr)
+{
+  // Not used
+}
+
+
+void H3BloomFilter::decrement(const Address& addr)
+{
+  // Not used
+}
+
+// Merge other_filter into this one by OR-ing the two bit vectors.
+// Precondition (unchecked): other_filter is an H3BloomFilter with the
+// same m_filter_size -- callers must guarantee this.
+void H3BloomFilter::merge(AbstractBloomFilter * other_filter){
+  // assumes both filters are the same size!
+  // static_cast replaces the old C-style cast: same unchecked downcast,
+  // but greppable and intent-revealing.
+  H3BloomFilter * temp = static_cast<H3BloomFilter*>(other_filter);
+  for(int i=0; i < m_filter_size; ++i){
+    m_filter[i] |= (*temp)[i];
+  }
+}
+
+// Insert addr: set one bit per hash function.
+void H3BloomFilter::set(const Address& addr)
+{
+  for (int i = 0; i < m_num_hashes; i++) {
+    int idx = get_index(addr, i);
+    m_filter[idx] = 1;
+
+    //Profile hash value distribution
+    //g_system_ptr->getProfiler()->getXactProfiler()->profileHashValue(i, idx); // gem5:Arka decomissiong of log_tm
+  }
+}
+
+// Removal is impossible in a plain (non-counting) Bloom filter, since
+// a bit may be shared by several inserted addresses -- hard error.
+void H3BloomFilter::unset(const Address& addr)
+{
+  cout << "ERROR: Unset should never be called in a Bloom filter";
+  assert(0);
+}
+
+// Membership test: addr is (possibly) present only if every one of the
+// m_num_hashes probe bits is set.  May return false positives, never
+// false negatives.
+bool H3BloomFilter::isSet(const Address& addr)
+{
+  for (int i=0; i < m_num_hashes; i++) {
+    int idx = get_index(addr, i);
+    if (!m_filter[idx]) {
+      // One clear probe bit proves the address was never inserted;
+      // stop early instead of scanning the remaining hashes (the old
+      // loop always evaluated all of them).
+      return false;
+    }
+  }
+  return true;
+}
+
+
+// A plain (non-counting) Bloom filter can only report presence, so the
+// per-address "count" is 1 when the address tests as present, else 0.
+int H3BloomFilter::getCount(const Address& addr)
+{
+  if (isSet(addr)) {
+    return 1;
+  }
+  return 0;
+}
+
+// Single-index access is not meaningful for a multi-hash filter; these
+// three are stubs kept only to satisfy the AbstractBloomFilter API.
+int H3BloomFilter::getIndex(const Address& addr)
+{
+  return 0;
+}
+
+int H3BloomFilter::readBit(const int index) {
+  return 0;
+}
+
+void H3BloomFilter::writeBit(const int index, const int value) {
+
+}
+
+// Number of set bits in the filter -- its current occupancy.
+int H3BloomFilter::getTotalCount()
+{
+  int num_set = 0;
+
+  for (int bit = m_filter_size - 1; bit >= 0; --bit) {
+    num_set += m_filter[bit];
+  }
+  return num_set;
+}
+
+// Printing intentionally produces no output for this filter type.
+void H3BloomFilter::print(ostream& out) const
+{
+}
+
+// Map (line address, hash number i) to a bit index.  In Parallel mode
+// each hash owns its own bank of m_par_filter_size bits; in Regular
+// mode every hash indexes the whole vector.
+int H3BloomFilter::get_index(const Address& addr, int i)
+{
+  uint64 x = addr.getLineAddress();
+  // Older multiplicative hash, kept for reference:
+  //uint64 y = (x*mults_list[i] + adds_list[i]) % primes_list[i];
+  int y = hash_H3(x,i);
+
+  if(isParallel) {
+    return (y % m_par_filter_size) + i*m_par_filter_size;
+  } else {
+    return y % m_filter_size;
+  }
+}
+
+// H3 universal hash: XOR together the random words H3[i][index] for
+// every set bit i of 'value'.  'index' selects which of the 16 hash
+// functions (columns of the static H3 table) to evaluate.
+int H3BloomFilter::hash_H3(uint64 value, int index) {
+  uint64 val = value;
+  int result = 0;
+
+  // Zero bits contribute nothing to the XOR, so stop once the remaining
+  // bits of 'val' are all zero instead of always running 64 iterations
+  // as the original loop did -- the result is identical.
+  for (int i = 0; i < 64 && val != 0; i++) {
+    if (val & 1) result ^= H3[i][index];
+    val = val >> 1;
+  }
+  return result;
+}
+
diff --git a/src/mem/ruby/system/H3BloomFilter.hh b/src/mem/ruby/system/H3BloomFilter.hh
new file mode 100644
index 000000000..9da6cdef5
--- /dev/null
+++ b/src/mem/ruby/system/H3BloomFilter.hh
@@ -0,0 +1,1259 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * H3BloomFilter.h
+ *
+ * Description:
+ *
+ *
+ */
+
+#ifndef H3_BLOOM_FILTER_H
+#define H3_BLOOM_FILTER_H
+
+#include "Map.hh"
+#include "Global.hh"
+#include "AbstractChip.hh"
+#include "System.hh"
+#include "Profiler.hh"
+#include "RubyConfig.hh"
+#include "Address.hh"
+#include "AbstractBloomFilter.hh"
+
+static int H3[64][16] = {
+{
+33268410,
+395488709,
+311024285,
+456111753,
+181495008,
+119997521,
+220697869,
+433891432,
+755927921,
+515226970,
+719448198,
+349842774,
+269183649,
+463275672,
+429800228,
+521598937
+},
+{
+628677802,
+820947732,
+809435975,
+1024657192,
+887631270,
+412050215,
+391365090,
+324227279,
+318338329,
+1038393087,
+489807930,
+387366128,
+518096428,
+324184340,
+429376066,
+447109279
+},
+{
+599747653,
+404960623,
+103933604,
+946416030,
+656460913,
+925957005,
+1047665689,
+163552053,
+88359290,
+841315415,
+899833584,
+1067336680,
+348549994,
+464045876,
+270252128,
+829897652
+},
+{
+215495230,
+966696438,
+82589012,
+750102795,
+909780866,
+920285789,
+769759214,
+331966823,
+939936006,
+439950703,
+883794828,
+1009277508,
+61634610,
+741444350,
+98689608,
+524144422
+},
+{
+93868534,
+196958667,
+774076619,
+327921978,
+122538783,
+879785030,
+690748527,
+3498564,
+83163077,
+1027963025,
+582088444,
+466152216,
+312424878,
+550064499,
+646612667,
+561099434
+},
+{
+1002047931,
+395477707,
+821317480,
+890482112,
+697094476,
+263813044,
+840275189,
+469664185,
+795625845,
+211504898,
+99204277,
+1004491153,
+725930417,
+1064479221,
+893834767,
+839719181
+},
+{
+278507126,
+985111995,
+706462983,
+1042178726,
+123281719,
+963778122,
+500881056,
+726291104,
+134293026,
+568379664,
+317050609,
+533470307,
+1022365922,
+197645211,
+315125721,
+634827678
+},
+{
+219227366,
+553960647,
+870169525,
+322232839,
+508322497,
+648672696,
+249405795,
+883596102,
+476433133,
+541372919,
+646647793,
+1042679515,
+43242483,
+600187508,
+499866821,
+135713210
+},
+{
+52837162,
+96966684,
+401840460,
+1071661176,
+733560065,
+150035417,
+341319946,
+811582750,
+636173904,
+519054065,
+196321433,
+1028294565,
+882204070,
+522965093,
+48884074,
+117810166
+},
+{
+650860353,
+789534698,
+328813544,
+473250022,
+143128306,
+173196006,
+846958825,
+174632187,
+683273509,
+405459497,
+787235556,
+773873501,
+240110267,
+426797736,
+92043842,
+711789240
+},
+{
+586637493,
+5059646,
+398035664,
+6686087,
+498300175,
+948278148,
+681227731,
+592751744,
+572019677,
+558044722,
+589368271,
+695745538,
+1073416749,
+529192035,
+550984939,
+1070620580
+},
+{
+102904663,
+647598516,
+758863940,
+313426443,
+76504114,
+1050747783,
+708436441,
+563815069,
+224107668,
+875925186,
+167675944,
+926209739,
+279737287,
+1040288182,
+768184312,
+371708956
+},
+{
+683968868,
+1027427757,
+180781926,
+742898864,
+624078545,
+645659833,
+577225838,
+987150210,
+723410002,
+224013421,
+993286634,
+33188488,
+247264323,
+888018697,
+38048664,
+189037096
+},
+{
+475612146,
+426739285,
+873726278,
+529192871,
+607715202,
+388486246,
+987001312,
+474493980,
+259747270,
+417465536,
+217062395,
+392858482,
+563810075,
+137852805,
+1051814153,
+72895217
+},
+{
+71277086,
+785496675,
+500608842,
+89633426,
+274085706,
+248467935,
+838061983,
+48106147,
+773662506,
+49545328,
+9071573,
+100739031,
+602018002,
+904371654,
+534132064,
+332211304
+},
+{
+401893602,
+735125342,
+775548339,
+210224843,
+256081130,
+482894412,
+350801633,
+1035713633,
+429458128,
+327281409,
+739927752,
+359327650,
+886942880,
+847691759,
+752417993,
+359445596
+},
+{
+267472014,
+1050659620,
+1068232362,
+1049684368,
+17130239,
+690524969,
+793224378,
+14455158,
+423092885,
+873853424,
+430535778,
+7867877,
+309731959,
+370260786,
+862353083,
+403906850
+},
+{
+993077283,
+218812656,
+389234651,
+393202875,
+413116501,
+263300295,
+470013158,
+592730725,
+441847172,
+732392823,
+407574059,
+875664777,
+271347307,
+792954404,
+554774761,
+1022424300
+},
+{
+675919719,
+637054073,
+784720745,
+149714381,
+813144874,
+502525801,
+635436670,
+1003196587,
+160786091,
+947509775,
+969788637,
+26854073,
+257964369,
+63898568,
+539767732,
+772364518
+},
+{
+943076868,
+1021732472,
+697575075,
+15843624,
+617573396,
+534113303,
+122953324,
+964873912,
+942995378,
+87830944,
+1012914818,
+455484661,
+592160054,
+599844284,
+810394353,
+836812568
+},
+{
+688992674,
+279465370,
+731582262,
+687883235,
+438178468,
+80493001,
+342701501,
+663561405,
+23360106,
+531315007,
+508931618,
+36294623,
+231216223,
+840438413,
+255665680,
+663205938
+},
+{
+857265418,
+552630887,
+8173237,
+792122963,
+210140052,
+823124938,
+667709953,
+751538219,
+991957789,
+462064153,
+19070176,
+726604748,
+714567823,
+151147895,
+1012619677,
+697114353
+},
+{
+467105652,
+683256174,
+702387467,
+28730434,
+549942998,
+48712701,
+960519696,
+1008345587,
+679267717,
+370932249,
+880419471,
+352141567,
+331640403,
+598772468,
+95160685,
+812053015
+},
+{
+1053491323,
+430526562,
+1014938507,
+109685515,
+765949103,
+177288303,
+1034642653,
+485421658,
+71850281,
+981034542,
+61620389,
+601367920,
+504420930,
+220599168,
+583051998,
+158735752
+},
+{
+103033901,
+522494916,
+658494760,
+959206022,
+931348143,
+834510661,
+21542994,
+189699884,
+679327018,
+171983002,
+96774168,
+456133168,
+543103352,
+923945936,
+970074188,
+643658485
+},
+{
+566379913,
+805798263,
+840662512,
+820206124,
+796507494,
+223712542,
+118811519,
+662246595,
+809326534,
+416471323,
+748027186,
+161169753,
+739149488,
+276330378,
+924837051,
+964873733
+},
+{
+585882743,
+135502711,
+3386031,
+625631285,
+1068193307,
+270342640,
+432739484,
+556606453,
+826419155,
+1038540977,
+158000202,
+69109538,
+207087256,
+298111218,
+678046259,
+184611498
+},
+{
+305310710,
+46237988,
+855726974,
+735975153,
+930663798,
+425764232,
+104362407,
+391371443,
+867622101,
+71645091,
+61824734,
+661902640,
+293738633,
+309416189,
+281710675,
+879317360
+},
+{
+398146324,
+398293087,
+689145387,
+1038451703,
+521637478,
+516134620,
+314658937,
+830334981,
+583400300,
+340083705,
+68029852,
+675389876,
+994635780,
+788959180,
+406967042,
+74403607
+},
+{
+69463153,
+744427484,
+191639960,
+590927798,
+969916795,
+546846769,
+728756758,
+889355646,
+520855076,
+136068426,
+776132410,
+189663815,
+252051082,
+533662856,
+362198652,
+1026161384
+},
+{
+584984279,
+1004834381,
+568439705,
+834508761,
+21812513,
+670870173,
+1052043300,
+341868768,
+473755574,
+124339439,
+36193947,
+437997647,
+137419489,
+58705193,
+337793711,
+340738909
+},
+{
+898051466,
+512792906,
+234874060,
+655358775,
+683745319,
+671676404,
+428888546,
+639928192,
+672697722,
+176477579,
+747020991,
+758211282,
+443045009,
+205395173,
+1016944273,
+5584717
+},
+{
+156038300,
+138620174,
+588466825,
+1061494056,
+1013672100,
+1064257198,
+881417791,
+839470738,
+83519030,
+100875683,
+237486447,
+461483733,
+681527127,
+777996147,
+574635362,
+815974538
+},
+{
+184168473,
+519509808,
+62531892,
+51821173,
+43787358,
+385711644,
+141325169,
+36069511,
+584183031,
+571372909,
+671503175,
+226486781,
+194932686,
+1045460970,
+753718579,
+331442433
+},
+{
+73065106,
+1015327221,
+630916840,
+1058053470,
+306737587,
+296343219,
+907194989,
+920172546,
+224516225,
+818625553,
+551143849,
+634570650,
+432966225,
+756438259,
+939564853,
+767999933
+},
+{
+884775648,
+394862257,
+446787794,
+219833788,
+727195727,
+728122304,
+249888353,
+732947974,
+289908868,
+448282580,
+618161877,
+898939716,
+739554163,
+860631799,
+1058977530,
+86916736
+},
+{
+143850006,
+352708694,
+200194048,
+979764914,
+629404175,
+546279766,
+72106714,
+860980514,
+313190585,
+897143111,
+308425797,
+953791785,
+349924906,
+221457005,
+950588925,
+908254505
+},
+{
+950032043,
+829868728,
+68623614,
+714624605,
+69760597,
+297275854,
+355894016,
+985369737,
+882852618,
+864071289,
+958512902,
+950910111,
+991368991,
+829645051,
+434698210,
+771350575
+},
+{
+552695074,
+319195551,
+80297396,
+496413831,
+944046531,
+621525571,
+617653363,
+416729825,
+441842808,
+9847464,
+99420657,
+1033914550,
+812966458,
+937053011,
+673390195,
+934577365
+},
+{
+1034695843,
+190969665,
+332900185,
+51897434,
+523888639,
+883512843,
+146908572,
+506785674,
+565814307,
+692255649,
+314052926,
+826386588,
+430691325,
+866927620,
+413880214,
+936474339
+},
+{
+129380164,
+741739952,
+1013703462,
+494392795,
+957214600,
+1010879043,
+931790677,
+94551922,
+988065869,
+120637871,
+882506912,
+395075379,
+210570485,
+812422692,
+910383687,
+817722285
+},
+{
+51850866,
+283408630,
+1053047202,
+858940389,
+818507731,
+477082181,
+353546901,
+993324368,
+407093779,
+231608253,
+1067319867,
+73159811,
+429792535,
+971320614,
+565699344,
+718823399
+},
+{
+408185106,
+491493570,
+596050720,
+310776444,
+703628192,
+454438809,
+523988035,
+728512200,
+686012353,
+976339656,
+72816924,
+116926720,
+165866591,
+452043792,
+866943072,
+968545481
+},
+{
+443231195,
+905907843,
+1061421320,
+746360489,
+1043120338,
+1069659155,
+463359031,
+688303227,
+186550710,
+155347339,
+1044842421,
+1005904570,
+69332909,
+706951903,
+422513657,
+882038450
+},
+{
+430990623,
+946501980,
+742556791,
+278398643,
+183759217,
+659404315,
+279754382,
+1069347846,
+843746517,
+222777670,
+990835599,
+548741637,
+129220580,
+1392170,
+1032654091,
+894058935
+},
+{
+452042227,
+751640705,
+259481376,
+765824585,
+145991469,
+1013683228,
+1055491225,
+536379588,
+392593350,
+913368594,
+1029429776,
+226857786,
+31505342,
+1054416381,
+32341741,
+687106649
+},
+{
+404750944,
+811417027,
+869530820,
+773491060,
+810901282,
+979340397,
+1036910290,
+461764404,
+834235095,
+765695033,
+604692390,
+452158120,
+928988098,
+442719218,
+1024059719,
+167723114
+},
+{
+974245177,
+1046377300,
+1003424287,
+787349855,
+336314155,
+875074696,
+1018462718,
+890313003,
+367376809,
+86355556,
+1020618772,
+890710345,
+444741481,
+373230261,
+767064947,
+840920177
+},
+{
+719581124,
+431808156,
+138301690,
+668222575,
+497413494,
+740492013,
+485033226,
+125301442,
+831265111,
+879071459,
+341690480,
+152975256,
+850330086,
+717444507,
+694225877,
+785340566
+},
+{
+1032766252,
+140959364,
+737474726,
+1062767538,
+364464647,
+331414723,
+356152634,
+642832379,
+158733632,
+374691640,
+285504811,
+345349905,
+876599880,
+476392727,
+479589210,
+606376325
+},
+{
+174997730,
+778177086,
+319164313,
+163614456,
+10331364,
+599358958,
+8331663,
+237538058,
+159173957,
+174533880,
+65588684,
+878222844,
+424467599,
+901803515,
+187504218,
+776690353
+},
+{
+803856182,
+965850321,
+694948067,
+218315960,
+358416571,
+683713254,
+178069303,
+428076035,
+686176454,
+579553217,
+357306738,
+315018080,
+886852373,
+568563910,
+896839725,
+257416821
+},
+{
+401650013,
+183289141,
+497957228,
+879734476,
+265024455,
+825794561,
+889237440,
+323359863,
+100258491,
+991414783,
+313986632,
+85847250,
+362520248,
+276103512,
+1041630342,
+525981595
+},
+{
+487732740,
+46201705,
+990837834,
+62744493,
+1067364756,
+58015363,
+690846283,
+680262648,
+997278956,
+469357861,
+432164624,
+996763915,
+211907847,
+167824295,
+144928194,
+454839915
+},
+{
+41404232,
+514493300,
+259546924,
+578217256,
+972345130,
+123299213,
+346040332,
+1014668104,
+520910639,
+579955198,
+36627803,
+179072921,
+547684341,
+598950511,
+269497394,
+854352266
+},
+{
+603906768,
+100863318,
+708837659,
+204175569,
+375560904,
+908375384,
+28314106,
+6303733,
+175283124,
+749851198,
+308667367,
+415293931,
+225365403,
+1032188331,
+977112710,
+819705229
+},
+{
+399767123,
+697985692,
+356790426,
+643687584,
+298624218,
+185095167,
+381653926,
+876816342,
+296720023,
+2205879,
+235816616,
+521850105,
+622753786,
+1021421218,
+726349744,
+256504902
+},
+{
+851245024,
+1022500222,
+511909628,
+313809625,
+99776025,
+39710175,
+798739932,
+741832408,
+140631966,
+898295927,
+607660421,
+870669312,
+1051422478,
+789055529,
+669113756,
+681943450
+},
+{
+853872755,
+491465269,
+503341472,
+98019440,
+258267420,
+335602837,
+320687824,
+1053324395,
+24932389,
+955011453,
+934255131,
+435625663,
+501568768,
+238967025,
+549987406,
+248619780
+},
+{
+411151284,
+576471205,
+757985419,
+544137226,
+968135693,
+877548443,
+194586894,
+74882373,
+248353663,
+21207540,
+273789651,
+853653916,
+861267970,
+533253322,
+3739570,
+661358586
+},
+{
+271430986,
+71390029,
+257643671,
+949329860,
+348156406,
+251939238,
+445808698,
+48269799,
+907589462,
+105677619,
+635451508,
+20805932,
+464874661,
+7542147,
+243619464,
+288304568
+},
+{
+368215982,
+530288964,
+770090421,
+660961164,
+614935537,
+630760399,
+931299233,
+794519275,
+779918979,
+401746493,
+561237006,
+1027202224,
+258968003,
+339508073,
+1050610516,
+1064307013
+},
+{
+1039172162,
+448331205,
+928997884,
+49813151,
+198712120,
+992335354,
+671024050,
+879525220,
+745915336,
+1038822580,
+138669665,
+917958819,
+681422342,
+792868818,
+924762727,
+816386174
+},
+{
+515190336,
+313808618,
+441296783,
+1022120897,
+792325033,
+354387581,
+59273006,
+280075434,
+411357221,
+665274694,
+4054464,
+1059046246,
+394261773,
+848616745,
+15446017,
+517723271
+}};
+
+
+// Bloom filter using the H3 family of universal hash functions (the
+// static H3[64][16] table above).  Supports up to 16 hash functions,
+// in "Regular" (shared bit vector) or "Parallel" (one bank per hash)
+// layout.  Non-counting: increment/decrement/unset are no-ops/errors.
+class H3BloomFilter : public AbstractBloomFilter {
+public:
+
+ ~H3BloomFilter();
+ // config: "<filter_size>_<num_hashes>_<Regular|Parallel>"
+ H3BloomFilter(string config);
+
+ void clear();
+ void increment(const Address& addr);
+ void decrement(const Address& addr);
+ void merge(AbstractBloomFilter * other_filter);
+ void set(const Address& addr);
+ void unset(const Address& addr);
+
+ bool isSet(const Address& addr);
+ int getCount(const Address& addr);
+ int getTotalCount();
+ void print(ostream& out) const;
+
+ int getIndex(const Address& addr);
+ int readBit(const int index);
+ void writeBit(const int index, const int value);
+
+ // Raw read of one bit; used by merge() on the source filter.
+ int operator[](const int index) const{
+ return this->m_filter[index];
+ }
+
+private:
+
+ // Bit index for hash function 'hashNumber' applied to addr.
+ int get_index(const Address& addr, int hashNumber);
+
+ int hash_H3(uint64 value, int index);
+
+ Vector<int> m_filter;       // the bit vector (one int per bit)
+ int m_filter_size;          // total number of bits
+ int m_num_hashes;           // hash functions used per address
+ int m_filter_size_bits;     // log2(m_filter_size)
+
+ int m_par_filter_size;      // bank size in Parallel mode
+ int m_par_filter_size_bits; // log2 of the above
+
+ // Unused here; kept for interface symmetry with counting filters.
+ int m_count_bits;
+ int m_count;
+
+
+
+ // Legacy multiplicative-hash parameters (unused by the H3 hash).
+ int primes_list[6];// = {9323,11279,10247,30637,25717,43711};
+ int mults_list[6]; //= {255,29,51,3,77,43};
+ int adds_list[6]; //= {841,627,1555,241,7777,65931};
+
+ bool isParallel;  // true => banked ("Parallel") layout
+
+};
+
+
+#endif
diff --git a/src/mem/ruby/system/LRUPolicy.hh b/src/mem/ruby/system/LRUPolicy.hh
new file mode 100644
index 000000000..ea621bf4b
--- /dev/null
+++ b/src/mem/ruby/system/LRUPolicy.hh
@@ -0,0 +1,65 @@
+
+#ifndef LRUPOLICY_H
+#define LRUPOLICY_H
+
+#include "AbstractReplacementPolicy.hh"
+
+/* Simple true LRU replacement policy */
+
+// True LRU replacement: every touch() records a timestamp per (set,
+// way); getVictim() returns the way with the oldest timestamp.
+class LRUPolicy : public AbstractReplacementPolicy {
+ public:
+
+ LRUPolicy(Index num_sets, Index assoc);
+ ~LRUPolicy();
+
+ // Record a reference to (set, way) at the given time.
+ void touch(Index set, Index way, Time time);
+ // Least-recently-used way in 'set'; ties go to the lowest way number.
+ Index getVictim(Index set) const;
+};
+
+// Constructor: all state (m_last_ref_ptr timestamp matrix, m_num_sets,
+// m_assoc) lives in the AbstractReplacementPolicy base.
+inline
+LRUPolicy::LRUPolicy(Index num_sets, Index assoc)
+ : AbstractReplacementPolicy(num_sets, assoc)
+{
+}
+
+inline
+LRUPolicy::~LRUPolicy()
+{
+}
+
+// Record a reference to (set, way) at the given time; getVictim()
+// later evicts the way with the smallest recorded timestamp.
+// (Parameter renamed from 'index' to 'way' to match the declaration
+// in the class definition above.)
+inline
+void LRUPolicy::touch(Index set, Index way, Time time){
+  assert(way >= 0 && way < m_assoc);
+  assert(set >= 0 && set < m_num_sets);
+
+  m_last_ref_ptr[set][way] = time;
+}
+
+// Return the least-recently-used way in 'set' -- the one whose last
+// reference timestamp is smallest.  Ties keep the lowest-numbered way.
+inline
+Index LRUPolicy::getVictim(Index set) const {
+  // assert(m_assoc != 0);
+  // Seed with way 0, then scan the rest.  Using Index for the loop
+  // variable avoids the signed/unsigned comparison the original
+  // 'unsigned int i' produced against the Index-typed m_assoc, and
+  // starting at 1 skips redundantly re-examining the seed element.
+  Index smallest_index = 0;
+  Time smallest_time = m_last_ref_ptr[set][0];
+
+  for (Index i = 1; i < m_assoc; i++) {
+    Time time = m_last_ref_ptr[set][i];
+    //assert(m_cache[cacheSet][i].m_Permission != AccessPermission_NotPresent);
+
+    if (time < smallest_time){
+      smallest_index = i;
+      smallest_time = time;
+    }
+  }
+
+  // DEBUG_EXPR(CACHE_COMP, MedPrio, cacheSet);
+  // DEBUG_EXPR(CACHE_COMP, MedPrio, smallest_index);
+  // DEBUG_EXPR(CACHE_COMP, MedPrio, m_cache[cacheSet][smallest_index]);
+  // DEBUG_EXPR(CACHE_COMP, MedPrio, *this);
+
+  return smallest_index;
+}
+
+#endif // PSEUDOLRUBITS_H
diff --git a/src/mem/ruby/system/LSB_CountingBloomFilter.cc b/src/mem/ruby/system/LSB_CountingBloomFilter.cc
new file mode 100644
index 000000000..ddfa97f5f
--- /dev/null
+++ b/src/mem/ruby/system/LSB_CountingBloomFilter.cc
@@ -0,0 +1,141 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * LSB_CountingBloomFilter.C
+ *
+ * Description:
+ *
+ *
+ */
+
+#include "LSB_CountingBloomFilter.hh"
+#include "Map.hh"
+#include "Address.hh"
+
+// Construct from a config string "<filter_size>:<max_count>".
+// filter_size buckets, each a saturating counter capped at max_count.
+LSB_CountingBloomFilter::LSB_CountingBloomFilter(string str)
+{
+ string tail(str);
+ string head = string_split(tail, ':');
+
+ // head holds the bucket count; tail now holds the counter cap.
+ m_filter_size = atoi(head.c_str());
+ m_filter_size_bits = log_int(m_filter_size);
+
+ m_count = atoi(tail.c_str());
+ m_count_bits = log_int(m_count);
+
+ m_filter.setSize(m_filter_size);
+ clear();
+}
+
+// Destructor: nothing to release -- m_filter is a value member.
+LSB_CountingBloomFilter::~LSB_CountingBloomFilter(){
+}
+
+// Zero every bucket counter.
+void LSB_CountingBloomFilter::clear()
+{
+  int bucket = m_filter_size;
+  while (bucket-- > 0) {
+    m_filter[bucket] = 0;
+  }
+}
+
+// Saturating increment: bump the counter for addr's bucket, but never
+// above the configured cap m_count.
+void LSB_CountingBloomFilter::increment(const Address& addr)
+{
+  int i = get_index(addr);
+  // BUG FIX: the original read "if (m_filter[i] < m_count);" -- the
+  // stray semicolon terminated the if, making the guard a no-op, so
+  // the counter was incremented unconditionally and could grow past
+  // its m_count saturation cap.
+  if (m_filter[i] < m_count)
+    m_filter[i] += 1;
+}
+
+
+// Decrement addr's bucket counter, saturating at zero.
+void LSB_CountingBloomFilter::decrement(const Address& addr)
+{
+  int bucket = get_index(addr);
+  if (m_filter[bucket] == 0) {
+    return;  // already empty -- never go negative
+  }
+  m_filter[bucket] -= 1;
+}
+
+// Unimplemented portions of the AbstractBloomFilter API: this filter
+// is driven purely through increment()/decrement().  NOTE(review):
+// merge/set/unset silently do nothing -- callers relying on them get
+// no effect; confirm no protocol uses them with this filter type.
+void LSB_CountingBloomFilter::merge(AbstractBloomFilter * other_filter)
+{
+ // TODO
+}
+
+void LSB_CountingBloomFilter::set(const Address& addr)
+{
+ // TODO
+}
+
+void LSB_CountingBloomFilter::unset(const Address& addr)
+{
+ // TODO
+}
+
+// Membership test for a counting filter: present iff the bucket
+// counter is non-zero.
+// BUG FIX: the original body was an empty "// TODO", so this non-void
+// function fell off the end without returning -- undefined behavior
+// (a garbage bool) for any caller.
+bool LSB_CountingBloomFilter::isSet(const Address& addr)
+{
+  return m_filter[get_index(addr)] > 0;
+}
+
+
+// Current counter value for addr's bucket.
+int LSB_CountingBloomFilter::getCount(const Address& addr)
+{
+  int bucket = get_index(addr);
+  return m_filter[bucket];
+}
+
+// Sum of all bucket counters -- the filter's total population.
+int LSB_CountingBloomFilter::getTotalCount()
+{
+  int total = 0;
+
+  for (int bucket = m_filter_size - 1; bucket >= 0; --bucket) {
+    total += m_filter[bucket];
+  }
+  return total;
+}
+
+// Public wrapper exposing the bucket index addr maps to.
+int LSB_CountingBloomFilter::getIndex(const Address& addr)
+{
+ return get_index(addr);
+}
+
+// Printing intentionally produces no output for this filter type.
+void LSB_CountingBloomFilter::print(ostream& out) const
+{
+}
+
+// Bit-level access is unimplemented for this counting filter; readBit
+// always reports 0 and writeBit is a no-op.
+int LSB_CountingBloomFilter::readBit(const int index) {
+ return 0;
+ // TODO
+}
+
+void LSB_CountingBloomFilter::writeBit(const int index, const int value) {
+ // TODO
+}
+
+// Map an address to a bucket: select m_filter_size_bits bits of the
+// address starting just above the data-block offset bits, i.e. the
+// least-significant line-address bits (hence "LSB" in the class name).
+int LSB_CountingBloomFilter::get_index(const Address& addr)
+{
+ return addr.bitSelect( RubyConfig::dataBlockBits(), RubyConfig::dataBlockBits() + m_filter_size_bits - 1);
+}
+
+
diff --git a/src/mem/ruby/system/LSB_CountingBloomFilter.hh b/src/mem/ruby/system/LSB_CountingBloomFilter.hh
new file mode 100644
index 000000000..5b0cdc87c
--- /dev/null
+++ b/src/mem/ruby/system/LSB_CountingBloomFilter.hh
@@ -0,0 +1,83 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * LSB_CountingBloomFilter.h
+ *
+ * Description:
+ *
+ *
+ */
+
+#ifndef LSB_COUNTING_BLOOM_FILTER_H
+#define LSB_COUNTING_BLOOM_FILTER_H
+
+#include "Map.hh"
+#include "Global.hh"
+#include "AbstractChip.hh"
+#include "RubyConfig.hh"
+#include "Address.hh"
+#include "AbstractBloomFilter.hh"
+
+// Counting Bloom filter indexed by the least-significant bits of the
+// line address.  Each bucket is a saturating counter in [0, m_count];
+// see the .cc file -- merge/set/unset and bit access are unimplemented.
+class LSB_CountingBloomFilter : public AbstractBloomFilter {
+public:
+
+ ~LSB_CountingBloomFilter();
+ // config: "<filter_size>:<max_count>"
+ LSB_CountingBloomFilter(string config);
+
+ void clear();
+ void increment(const Address& addr);
+ void decrement(const Address& addr);
+ void merge(AbstractBloomFilter * other_filter);
+ void set(const Address& addr);
+ void unset(const Address& addr);
+
+ bool isSet(const Address& addr);
+ int getCount(const Address& addr);
+ int getTotalCount();
+ int getIndex(const Address& addr);
+ int readBit(const int index);
+ void writeBit(const int index, const int value);
+
+ void print(ostream& out) const;
+
+private:
+
+ // Bucket index derived from the low line-address bits.
+ int get_index(const Address& addr);
+
+ Vector<int> m_filter;    // one saturating counter per bucket
+ int m_filter_size;       // number of buckets
+ int m_filter_size_bits;  // log2(m_filter_size)
+
+ int m_count_bits;        // log2(m_count)
+ int m_count;             // saturation cap per counter
+};
+
+
+#endif
diff --git a/src/mem/ruby/system/MachineID.hh b/src/mem/ruby/system/MachineID.hh
new file mode 100644
index 000000000..2f294dc54
--- /dev/null
+++ b/src/mem/ruby/system/MachineID.hh
@@ -0,0 +1,89 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * MachineID.hh
+ *
+ * Description:
+ *
+ * $Id$
+ *
+ */
+
+#ifndef MACHINEID_H
+#define MACHINEID_H
+
+#include "Global.hh"
+#include "util.hh"
+#include "MachineType.hh"
+
+// Identifies one machine (component) in the system: a component type
+// plus a per-type index.
+struct MachineID {
+  MachineType type;
+  int num; // range: 0 ... number of this machine's components in the system - 1
+};
+
+extern inline
+// Render a MachineID as "<TypeName>_<index>".
+string MachineIDToString (MachineID machine) {
+  string result = MachineType_to_string(machine.type);
+  result += "_";
+  result += int_to_string(machine.num);
+  return result;
+}
+
+extern inline
+bool operator==(const MachineID & obj1, const MachineID & obj2)
+{
+  // Equal only when both the component type and the per-type index agree.
+  if (obj1.type != obj2.type) {
+    return false;
+  }
+  return obj1.num == obj2.num;
+}
+
+extern inline
+bool operator!=(const MachineID & obj1, const MachineID & obj2)
+{
+  // Unequal when either the type or the index differs.
+  return !(obj1.type == obj2.type && obj1.num == obj2.num);
+}
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const MachineID& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const MachineID& obj)
+{
+  // Print as "<TypeName>-<index>"; an out-of-range type prints "NULL".
+  bool valid_type =
+    (obj.type >= MachineType_FIRST) && (obj.type < MachineType_NUM);
+  if (valid_type) {
+    out << MachineType_to_string(obj.type);
+  } else {
+    out << "NULL";
+  }
+  out << "-" << obj.num << flush;
+  return out;
+}
+
+
+#endif //MACHINEID_H
diff --git a/src/mem/ruby/system/MemoryControl.cc b/src/mem/ruby/system/MemoryControl.cc
new file mode 100644
index 000000000..e9f8a5ca8
--- /dev/null
+++ b/src/mem/ruby/system/MemoryControl.cc
@@ -0,0 +1,632 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * MemoryControl.C
+ *
+ * Description: This module simulates a basic DDR-style memory controller
+ * (and can easily be extended to do FB-DIMM as well).
+ *
+ * This module models a single channel, connected to any number of
+ * DIMMs with any number of ranks of DRAMs each. If you want multiple
+ * address/data channels, you need to instantiate multiple copies of
+ * this module.
+ *
+ * Each memory request is placed in a queue associated with a specific
+ * memory bank. This queue is of finite size; if the queue is full
+ * the request will back up in an (infinite) common queue and will
+ * effectively throttle the whole system. This sort of behavior is
+ * intended to be closer to real system behavior than if we had an
+ * infinite queue on each bank. If you want the latter, just make
+ * the bank queues unreasonably large.
+ *
+ * The head item on a bank queue is issued when all of the
+ * following are true:
+ * the bank is available
+ * the address path to the DIMM is available
+ * the data path to or from the DIMM is available
+ *
+ * Note that we are not concerned about fixed offsets in time. The bank
+ * will not be used at the same moment as the address path, but since
+ * there is no queue in the DIMM or the DRAM it will be used at a constant
+ * number of cycles later, so it is treated as if it is used at the same
+ * time.
+ *
+ * We are assuming closed bank policy; that is, we automatically close
+ * each bank after a single read or write. Adding an option for open
+ * bank policy is for future work.
+ *
+ * We are assuming "posted CAS"; that is, we send the READ or WRITE
+ * immediately after the ACTIVATE. This makes scheduling the address
+ * bus trivial; we always schedule a fixed set of cycles. For DDR-400,
+ * this is a set of two cycles; for some configurations such as
+ * DDR-800 the parameter tRRD forces this to be set to three cycles.
+ *
+ * We assume a four-bit-time transfer on the data wires. This is
+ * the minimum burst length for DDR-2. This would correspond
+ * to (for example) a memory where each DIMM is 72 bits wide
+ * and DIMMs are ganged in pairs to deliver 64 bytes at a shot.
+ * This gives us the same occupancy on the data wires as on the
+ * address wires (for the two-address-cycle case).
+ *
+ * The only non-trivial scheduling problem is the data wires.
+ * A write will use the wires earlier in the operation than a read
+ * will; typically one cycle earlier as seen at the DRAM, but earlier
+ * by a worst-case round-trip wire delay when seen at the memory controller.
+ * So, while reads from one rank can be scheduled back-to-back
+ * every two cycles, and writes (to any rank) scheduled every two cycles,
+ * when a read is followed by a write we need to insert a bubble.
+ * Furthermore, consecutive reads from two different ranks may need
+ * to insert a bubble due to skew between when one DRAM stops driving the
+ * wires and when the other one starts. (These bubbles are parameters.)
+ *
+ * This means that when some number of reads and writes are at the
+ * heads of their queues, reads could starve writes, and/or reads
+ * to the same rank could starve out other requests, since the others
+ * would never see the data bus ready.
+ * For this reason, we have implemented an anti-starvation feature.
+ * A group of requests is marked "old", and a counter is incremented
+ * each cycle as long as any request from that batch has not issued.
+ * if the counter reaches twice the bank busy time, we hold off any
+ * newer requests until all of the "old" requests have issued.
+ *
+ * We also model tFAW. This is an obscure DRAM parameter that says
+ * that no more than four activate requests can happen within a window
+ * of a certain size. For most configurations this does not come into play,
+ * or has very little effect, but it could be used to throttle the power
+ * consumption of the DRAM. In this implementation (unlike in a DRAM
+ * data sheet) TFAW is measured in memory bus cycles; i.e. if TFAW = 16
+ * then no more than four activates may happen within any 16 cycle window.
+ * Refreshes are included in the activates.
+ *
+ *
+ * $Id: $
+ *
+ */
+
+#include "Global.hh"
+#include "Map.hh"
+#include "Address.hh"
+#include "Profiler.hh"
+#include "AbstractChip.hh"
+#include "System.hh"
+#include "RubySlicc_ComponentMapping.hh"
+#include "NetworkMessage.hh"
+#include "Network.hh"
+
+#include "Consumer.hh"
+
+#include "MemoryControl.hh"
+
+#include <list>
+
+class Consumer;
+
+// Value to reset watchdog timer to.
+// If we're idle for this many memory control cycles,
+// shut down our clock (our rescheduling of ourselves).
+// Refresh shuts down as well.
+// When we restart, we'll be in a different phase
+// with respect to ruby cycles, so this introduces
+// a slight inaccuracy. But it is necessary or the
+// ruby tester never terminates because the event
+// queue is never empty.
+#define IDLECOUNT_MAX_VALUE 1000
+
+// Output operator definition
+
+ostream& operator<<(ostream& out, const MemoryControl& obj)
+{
+  // Delegate to the object's own print method, then flush.
+  obj.print(out);
+  out << flush;
+  return out;
+}
+
+
+// ****************************************************************
+
+// CONSTRUCTOR
+
+// Construct the controller for one memory channel.  All geometry and
+// timing parameters are snapshotted from RubyConfig here, and the
+// per-bank and per-rank bookkeeping arrays are allocated and zeroed.
+MemoryControl::MemoryControl (AbstractChip* chip_ptr, int version) {
+  m_chip_ptr = chip_ptr;
+  m_version = version;
+  m_msg_counter = 0;
+
+  m_debug = 0;
+  //if (m_version == 0) m_debug = 1;
+
+  // Geometry and timing knobs (see MemoryControl.hh for brief notes).
+  m_mem_bus_cycle_multiplier = RubyConfig::memBusCycleMultiplier();
+  m_banks_per_rank = RubyConfig::banksPerRank();
+  m_ranks_per_dimm = RubyConfig::ranksPerDimm();
+  m_dimms_per_channel = RubyConfig::dimmsPerChannel();
+  m_bank_bit_0 = RubyConfig::bankBit0();
+  m_rank_bit_0 = RubyConfig::rankBit0();
+  m_dimm_bit_0 = RubyConfig::dimmBit0();
+  m_bank_queue_size = RubyConfig::bankQueueSize();
+  m_bank_busy_time = RubyConfig::bankBusyTime();
+  m_rank_rank_delay = RubyConfig::rankRankDelay();
+  m_read_write_delay = RubyConfig::readWriteDelay();
+  m_basic_bus_busy_time = RubyConfig::basicBusBusyTime();
+  m_mem_ctl_latency = RubyConfig::memCtlLatency();
+  m_refresh_period = RubyConfig::refreshPeriod();
+  m_memRandomArbitrate = RubyConfig::memRandomArbitrate();
+  m_tFaw = RubyConfig::tFaw();
+  m_memFixedDelay = RubyConfig::memFixedDelay();
+
+  assert(m_tFaw <= 62); // must fit in a uint64 shift register
+
+  m_total_banks = m_banks_per_rank * m_ranks_per_dimm * m_dimms_per_channel;
+  m_total_ranks = m_ranks_per_dimm * m_dimms_per_channel;
+  // One bank is refreshed at a time, so dividing the per-bank refresh
+  // interval by the bank count gives the whole-channel interval.
+  m_refresh_period_system = m_refresh_period / m_total_banks;
+
+  m_bankQueues = new list<MemoryNode> [m_total_banks];
+  assert(m_bankQueues);
+
+  m_bankBusyCounter = new int [m_total_banks];
+  assert(m_bankBusyCounter);
+
+  m_oldRequest = new int [m_total_banks];
+  assert(m_oldRequest);
+
+  for (int i=0; i<m_total_banks; i++) {
+    m_bankBusyCounter[i] = 0;
+    m_oldRequest[i] = 0;
+  }
+
+  // All bus-scheduling counters start free.
+  m_busBusyCounter_Basic = 0;
+  m_busBusyCounter_Write = 0;
+  m_busBusyCounter_ReadNewRank = 0;
+  m_busBusy_WhichRank = 0;
+
+  m_roundRobin = 0;
+  m_refresh_count = 1;
+  m_need_refresh = 0;
+  m_refresh_bank = 0;
+  m_awakened = 0;
+  m_idleCount = 0;
+  m_ageCounter = 0;
+
+  // Each tfaw shift register keeps a moving bit pattern
+  // which shows when recent activates have occurred.
+  // m_tfaw_count keeps track of how many 1 bits are set
+  // in each shift register. When m_tfaw_count is >= 4,
+  // new activates are not allowed.
+  m_tfaw_shift = new uint64 [m_total_ranks];
+  m_tfaw_count = new int [m_total_ranks];
+  for (int i=0; i<m_total_ranks; i++) {
+    m_tfaw_shift[i] = 0;
+    m_tfaw_count[i] = 0;
+  }
+}
+
+
+// DESTRUCTOR
+
+// Release all arrays allocated in the constructor.
+MemoryControl::~MemoryControl () {
+  delete [] m_bankQueues;
+  delete [] m_bankBusyCounter;
+  delete [] m_oldRequest;
+  // Bug fix: m_tfaw_shift and m_tfaw_count are also new[]'ed in the
+  // constructor but were never freed, leaking one allocation per rank
+  // every time a MemoryControl is destroyed.
+  delete [] m_tfaw_shift;
+  delete [] m_tfaw_count;
+}
+
+
+// PUBLIC METHODS
+
+// enqueue new request from directory
+
+void MemoryControl::enqueue (const MsgPtr& message, int latency) {
+  // "latency" is added to the current time to form the request's
+  // arrival time at the controller.
+  Time current_time = g_eventQueue_ptr->getTime();
+  Time arrival_time = current_time + latency;
+  const MemoryMsg* memMess = dynamic_cast<const MemoryMsg*>(message.ref());
+  physical_address_t addr = memMess->getAddress().getAddress();
+  MemoryRequestType type = memMess->getType();
+  bool is_mem_read = (type == MemoryRequestType_MEMORY_READ);
+  // Anything that is not a read is treated as a dirty writeback.
+  MemoryNode thisReq(arrival_time, message, addr, is_mem_read, !is_mem_read);
+  enqueueMemRef(thisReq);
+}
+
+// Alternate entry point used when we already have a MemoryNode structure built.
+
+void MemoryControl::enqueueMemRef (MemoryNode& memRef) {
+  // Stamp the request with a unique sequence number (used for debug
+  // tracing only).
+  m_msg_counter++;
+  memRef.m_msg_counter = m_msg_counter;
+  Time arrival_time = memRef.m_time;
+  uint64 at = arrival_time;
+  bool is_mem_read = memRef.m_is_mem_read;
+  bool dirtyWB = memRef.m_is_dirty_wb;  // NOTE(review): currently unused here
+  physical_address_t addr = memRef.m_addr;
+  int bank = getBank(addr);
+  if (m_debug) {
+    printf("New memory request%7d: 0x%08llx %c arrived at %10lld ", m_msg_counter, addr, is_mem_read? 'R':'W', at);
+    printf("bank =%3x\n", bank);
+  }
+  g_system_ptr->getProfiler()->profileMemReq(bank);
+  // Requests first land in the (unbounded) input queue; executeCycle()
+  // moves at most one per cycle into the per-bank queues.
+  m_input_queue.push_back(memRef);
+  // Restart our clock if the idle watchdog had shut it down.
+  if (!m_awakened) {
+    g_eventQueue_ptr->scheduleEvent(this, 1);
+    m_awakened = 1;
+  }
+}
+
+
+
+// dequeue, peek, and isReady are used to transfer completed requests
+// back to the directory
+
+void MemoryControl::dequeue () {
+  // Pop the head response; callers must check isReady() first.
+  assert(isReady());
+  m_response_queue.pop_front();
+}
+
+
+// Return the message at the head of the response queue without
+// dequeueing it.  The pointer must be non-NULL: placeholder entries
+// (L3 writebacks) never reach the response queue.
+const Message* MemoryControl::peek () {
+  MemoryNode head = peekNode();
+  Message* msg_ptr = head.m_msgptr.ref();
+  assert(msg_ptr != NULL);
+  return msg_ptr;
+}
+
+
+MemoryNode MemoryControl::peekNode () {
+  assert(isReady());
+  // Return a copy of the head response without dequeueing it.
+  MemoryNode req = m_response_queue.front();
+  uint64 returnTime = req.m_time;
+  if (m_debug) {
+    printf("Old memory request%7d: 0x%08llx %c peeked at %10lld\n",
+        req.m_msg_counter, req.m_addr, req.m_is_mem_read? 'R':'W', returnTime);
+  }
+  return req;
+}
+
+
+// A response can be handed back to the directory once its scheduled
+// completion time has been reached.
+bool MemoryControl::isReady () {
+  if (m_response_queue.empty()) {
+    return false;
+  }
+  return m_response_queue.front().m_time <= g_eventQueue_ptr->getTime();
+}
+
+// Register the consumer (the directory) to be signaled when a
+// response becomes ready; see enqueueToDirectory().
+void MemoryControl::setConsumer (Consumer* consumer_ptr) {
+  m_consumer_ptr = consumer_ptr;
+}
+
+// Object printer used by operator<<; intentionally prints nothing.
+void MemoryControl::print (ostream& out) const {
+}
+
+
+// Dump this controller's configuration parameters for the simulation's
+// config printout.
+void MemoryControl::printConfig (ostream& out) {
+  out << "Memory Control " << m_version << ":" << endl;
+  out << "  Ruby cycles per memory cycle: " << m_mem_bus_cycle_multiplier << endl;
+  out << "  Basic read latency: " << m_mem_ctl_latency << endl;
+  if (m_memFixedDelay) {
+    out << "  Fixed Latency mode:  Added cycles = " << m_memFixedDelay << endl;
+  } else {
+    // Fix: report the m_bank_busy_time member actually used by the
+    // timing model, instead of re-reading the BANK_BUSY_TIME config
+    // macro -- every other line here prints the snapshotted member, and
+    // the macro could diverge from the value captured at construction.
+    out << "  Bank busy time: " << m_bank_busy_time << " memory cycles" << endl;
+    out << "  Memory channel busy time: " << m_basic_bus_busy_time << endl;
+    out << "  Dead cycles between reads to different ranks: " << m_rank_rank_delay << endl;
+    out << "  Dead cycle between a read and a write: " << m_read_write_delay << endl;
+    out << "  tFaw (four-activate) window: " << m_tFaw << endl;
+  }
+  out << "  Banks per rank: " << m_banks_per_rank << endl;
+  out << "  Ranks per DIMM: " << m_ranks_per_dimm << endl;
+  out << "  DIMMs per channel:  " << m_dimms_per_channel << endl;
+  out << "  LSB of bank field in address: " << m_bank_bit_0 << endl;
+  out << "  LSB of rank field in address: " << m_rank_bit_0 << endl;
+  out << "  LSB of DIMM field in address: " << m_dimm_bit_0 << endl;
+  out << "  Max size of each bank queue: " << m_bank_queue_size << endl;
+  out << "  Refresh period (within one bank): " << m_refresh_period << endl;
+  out << "  Arbitration randomness: " << m_memRandomArbitrate << endl;
+}
+
+
+// Enable (non-zero) or disable (zero) the debug printf tracing.
+void MemoryControl::setDebug (int debugFlag) {
+  m_debug = debugFlag;
+}
+
+
+// ****************************************************************
+
+// PRIVATE METHODS
+
+// Queue up a completed request to send back to directory
+
+void MemoryControl::enqueueToDirectory (MemoryNode req, int latency) {
+  // Convert the latency from memory-bus cycles to ruby cycles and
+  // compute the absolute completion time.
+  Time arrival_time = g_eventQueue_ptr->getTime()
+    + (latency * m_mem_bus_cycle_multiplier);
+  req.m_time = arrival_time;
+  m_response_queue.push_back(req);
+
+  // schedule the wake up
+  g_eventQueue_ptr->scheduleEventAbsolute(m_consumer_ptr, arrival_time);
+}
+
+
+
+// getBank returns an integer that is unique for each
+// bank across this memory controller.
+
+// Decode the DIMM, rank, and bank fields out of the physical address
+// and flatten them into a single index in [0, m_total_banks), with the
+// DIMM as the most significant digit, then rank, then bank.
+int MemoryControl::getBank (physical_address_t addr) {
+  int dimm = (addr >> m_dimm_bit_0) & (m_dimms_per_channel - 1);
+  int rank = (addr >> m_rank_bit_0) & (m_ranks_per_dimm - 1);
+  int bank = (addr >> m_bank_bit_0) & (m_banks_per_rank - 1);
+  // Horner-form flattening; identical to
+  // dimm*ranks*banks + rank*banks + bank.
+  int flat = dimm;
+  flat = (flat * m_ranks_per_dimm) + rank;
+  flat = (flat * m_banks_per_rank) + bank;
+  return flat;
+}
+
+// getRank returns an integer that is unique for each rank
+// and independent of individual bank.
+
+// Map a flattened bank index back to its rank.  Each rank owns
+// m_banks_per_rank consecutive bank indices, so this is just a divide.
+int MemoryControl::getRank (int bank) {
+  int total_ranks = m_ranks_per_dimm * m_dimms_per_channel;
+  int rank = bank / m_banks_per_rank;
+  assert (rank < total_ranks);
+  return rank;
+}
+
+
+// queueReady determines if the head item in a bank queue
+// can be issued this cycle
+
+bool MemoryControl::queueReady (int bank) {
+  // The bank itself must be idle (ignored in fixed-delay mode, which
+  // models no contention at all).
+  if ((m_bankBusyCounter[bank] > 0) && !m_memFixedDelay) {
+    g_system_ptr->getProfiler()->profileMemBankBusy();
+    //if (m_debug) printf("  bank %x busy %d\n", bank, m_bankBusyCounter[bank]);
+    return false;
+  }
+  // Optional randomized arbitration: refuse to issue this cycle with
+  // probability m_memRandomArbitrate percent.
+  if (m_memRandomArbitrate >= 2) {
+    if ((random() % 100) < m_memRandomArbitrate) {
+      g_system_ptr->getProfiler()->profileMemRandBusy();
+      return false;
+    }
+  }
+  // In fixed-delay mode no further resource checks apply.
+  if (m_memFixedDelay) return true;
+  // Anti-starvation: once the current "old" batch has waited too long,
+  // only requests marked old may issue (see executeCycle).
+  if ((m_ageCounter > (2 * m_bank_busy_time)) && !m_oldRequest[bank]) {
+    g_system_ptr->getProfiler()->profileMemNotOld();
+    return false;
+  }
+  if (m_busBusyCounter_Basic == m_basic_bus_busy_time) {
+    // Another bank must have issued this same cycle.
+    // For profiling, we count this as an arb wait rather than
+    // a bus wait. This is a little inaccurate since it MIGHT
+    // have also been blocked waiting for a read-write or a
+    // read-read instead, but it's pretty close.
+    g_system_ptr->getProfiler()->profileMemArbWait(1);
+    return false;
+  }
+  // Address/command bus still occupied by an earlier issue.
+  if (m_busBusyCounter_Basic > 0) {
+    g_system_ptr->getProfiler()->profileMemBusBusy();
+    return false;
+  }
+  // tFAW: at most ACTIVATE_PER_TFAW activates per rank in the window.
+  int rank = getRank(bank);
+  if (m_tfaw_count[rank] >= ACTIVATE_PER_TFAW) {
+    g_system_ptr->getProfiler()->profileMemTfawBusy();
+    return false;
+  }
+  bool write = !m_bankQueues[bank].front().m_is_mem_read;
+  // A write must wait out the read-to-write bus turnaround bubble.
+  if (write && (m_busBusyCounter_Write > 0)) {
+    g_system_ptr->getProfiler()->profileMemReadWriteBusy();
+    return false;
+  }
+  // A read from a different rank than the last grant must wait out the
+  // rank-to-rank skew bubble.
+  if (!write && (rank != m_busBusy_WhichRank)
+      && (m_busBusyCounter_ReadNewRank > 0)) {
+    g_system_ptr->getProfiler()->profileMemDataBusBusy();
+    return false;
+  }
+  return true;
+}
+
+
+// issueRefresh checks to see if this bank has a refresh scheduled
+// and, if so, does the refresh and returns true
+
+bool MemoryControl::issueRefresh (int bank) {
+  // Only issue if a refresh is pending and this bank is next in the
+  // round-robin refresh order.
+  if (!m_need_refresh || (m_refresh_bank != bank)) return false;
+  if (m_bankBusyCounter[bank] > 0) return false;
+  // Note that m_busBusyCounter will prevent multiple issues during
+  // the same cycle, as well as on different but close cycles:
+  if (m_busBusyCounter_Basic > 0) return false;
+  // Refreshes count as activates for tFAW purposes (see file header).
+  int rank = getRank(bank);
+  if (m_tfaw_count[rank] >= ACTIVATE_PER_TFAW) return false;
+
+  // Issue it:
+
+  //if (m_debug) {
+  //uint64 current_time = g_eventQueue_ptr->getTime();
+  //printf("    Refresh bank %3x at %lld\n", bank, current_time);
+  //}
+  g_system_ptr->getProfiler()->profileMemRefresh();
+  // Advance the refresh round-robin and occupy the bank and all bus
+  // resources exactly as a normal request would.
+  m_need_refresh--;
+  m_refresh_bank++;
+  if (m_refresh_bank >= m_total_banks) m_refresh_bank = 0;
+  m_bankBusyCounter[bank] = m_bank_busy_time;
+  m_busBusyCounter_Basic = m_basic_bus_busy_time;
+  m_busBusyCounter_Write = m_basic_bus_busy_time;
+  m_busBusyCounter_ReadNewRank = m_basic_bus_busy_time;
+  markTfaw(rank);
+  return true;
+}
+
+
+// Mark the activate in the tFaw shift register
+// Record an activate (or refresh) for this rank in the sliding tFAW
+// window.  Bit (m_tFaw-1) represents "m_tFaw cycles from now";
+// executeCycle() shifts the register right once per memory cycle and
+// decrements the count when a 1 bit falls off the low end.
+void MemoryControl::markTfaw (int rank) {
+  if (m_tFaw) {
+    // Bug fix: the shift count can be up to 61 (the constructor asserts
+    // m_tFaw <= 62, noting it "must fit in a uint64 shift register"),
+    // but the original shifted the plain int literal 1, which is
+    // undefined behavior for shift counts >= 32.  Widen to uint64
+    // before shifting.
+    m_tfaw_shift[rank] |= (((uint64) 1) << (m_tFaw-1));
+    m_tfaw_count[rank]++;
+  }
+}
+
+
+// Issue a memory request: Activate the bank,
+// reserve the address and data buses, and queue
+// the request for return to the requesting
+// processor after a fixed latency.
+
+void MemoryControl::issueRequest (int bank) {
+  int rank = getRank(bank);
+  // Remove the request from its bank queue.
+  MemoryNode req = m_bankQueues[bank].front();
+  m_bankQueues[bank].pop_front();
+  if (m_debug) {
+    uint64 current_time = g_eventQueue_ptr->getTime();
+    printf("  Mem issue request%7d: 0x%08llx %c at %10lld bank =%3x\n",
+        req.m_msg_counter, req.m_addr, req.m_is_mem_read? 'R':'W', current_time, bank);
+  }
+  if (req.m_msgptr.ref() != NULL) { // don't enqueue L3 writebacks
+    enqueueToDirectory(req, m_mem_ctl_latency + m_memFixedDelay);
+  }
+  m_oldRequest[bank] = 0;
+  markTfaw(rank);
+  // Occupy the bank for its full busy time.  The data-bus counters
+  // differ for reads and writes: after a read, a following write (or a
+  // read to a different rank) must also wait out the corresponding
+  // turnaround bubble (see file header comments).
+  m_bankBusyCounter[bank] = m_bank_busy_time;
+  m_busBusy_WhichRank = rank;
+  if (req.m_is_mem_read) {
+    g_system_ptr->getProfiler()->profileMemRead();
+    m_busBusyCounter_Basic = m_basic_bus_busy_time;
+    m_busBusyCounter_Write = m_basic_bus_busy_time + m_read_write_delay;
+    m_busBusyCounter_ReadNewRank = m_basic_bus_busy_time + m_rank_rank_delay;
+  } else {
+    g_system_ptr->getProfiler()->profileMemWrite();
+    m_busBusyCounter_Basic = m_basic_bus_busy_time;
+    m_busBusyCounter_Write = m_basic_bus_busy_time;
+    m_busBusyCounter_ReadNewRank = m_basic_bus_busy_time;
+  }
+}
+
+
+// executeCycle: This function is called once per memory clock cycle
+// to simulate all the periodic hardware.
+
+void MemoryControl::executeCycle () {
+  // Keep track of time by counting down the busy counters:
+  for (int bank=0; bank < m_total_banks; bank++) {
+    if (m_bankBusyCounter[bank] > 0) m_bankBusyCounter[bank]--;
+  }
+  if (m_busBusyCounter_Write > 0) m_busBusyCounter_Write--;
+  if (m_busBusyCounter_ReadNewRank > 0) m_busBusyCounter_ReadNewRank--;
+  if (m_busBusyCounter_Basic > 0) m_busBusyCounter_Basic--;
+
+  // Count down the tFAW shift registers:
+  // (a 1 bit reaching the low end means that activate has aged out of
+  // the window, so the rank's activate count drops)
+  for (int rank=0; rank < m_total_ranks; rank++) {
+    if (m_tfaw_shift[rank] & 1) m_tfaw_count[rank]--;
+    m_tfaw_shift[rank] >>= 1;
+  }
+
+  // After time period expires, latch an indication that we need a refresh.
+  // Disable refresh if in memFixedDelay mode.
+  if (!m_memFixedDelay) m_refresh_count--;
+  if (m_refresh_count == 0) {
+    m_refresh_count = m_refresh_period_system;
+    assert (m_need_refresh < 10); // Are we overrunning our ability to refresh?
+    m_need_refresh++;
+  }
+
+  // If this batch of requests is all done, make a new batch:
+  // (this is the anti-starvation mechanism checked by queueReady)
+  m_ageCounter++;
+  int anyOld = 0;
+  for (int bank=0; bank < m_total_banks; bank++) {
+    anyOld |= m_oldRequest[bank];
+  }
+  if (!anyOld) {
+    for (int bank=0; bank < m_total_banks; bank++) {
+      if (!m_bankQueues[bank].empty()) m_oldRequest[bank] = 1;
+    }
+    m_ageCounter = 0;
+  }
+
+  // If randomness desired, re-randomize round-robin position each cycle
+  if (m_memRandomArbitrate) {
+    m_roundRobin = random() % m_total_banks;
+  }
+
+
+  // For each channel, scan round-robin, and pick an old, ready
+  // request and issue it. Treat a refresh request as if it
+  // were at the head of its bank queue. After we issue something,
+  // keep scanning the queues just to gather statistics about
+  // how many are waiting. If in memFixedDelay mode, we can issue
+  // more than one request per cycle.
+
+  int queueHeads = 0;
+  int banksIssued = 0;
+  for (int i = 0; i < m_total_banks; i++) {
+    m_roundRobin++;
+    if (m_roundRobin >= m_total_banks) m_roundRobin = 0;
+    issueRefresh(m_roundRobin);
+    int qs = m_bankQueues[m_roundRobin].size();
+    if (qs > 1) {
+      g_system_ptr->getProfiler()->profileMemBankQ(qs-1);
+    }
+    if (qs > 0) {
+      m_idleCount = IDLECOUNT_MAX_VALUE; // we're not idle if anything is queued
+      queueHeads++;
+      if (queueReady(m_roundRobin)) {
+        issueRequest(m_roundRobin);
+        banksIssued++;
+        if (m_memFixedDelay) {
+          g_system_ptr->getProfiler()->profileMemWaitCycles(m_memFixedDelay);
+        }
+      }
+    }
+  }
+
+  // memWaitCycles is a redundant catch-all for the specific counters in queueReady
+  g_system_ptr->getProfiler()->profileMemWaitCycles(queueHeads - banksIssued);
+
+  // Check input queue and move anything to bank queues if not full.
+  // Since this is done here at the end of the cycle, there will always
+  // be at least one cycle of latency in the bank queue.
+  // We deliberately move at most one request per cycle (to simulate
+  // typical hardware). Note that if one bank queue fills up, other
+  // requests can get stuck behind it here.
+
+  if (!m_input_queue.empty()) {
+    m_idleCount = IDLECOUNT_MAX_VALUE; // we're not idle if anything is pending
+    MemoryNode req = m_input_queue.front();
+    int bank = getBank(req.m_addr);
+    if (m_bankQueues[bank].size() < m_bank_queue_size) {
+      m_input_queue.pop_front();
+      m_bankQueues[bank].push_back(req);
+    }
+    g_system_ptr->getProfiler()->profileMemInputQ(m_input_queue.size());
+  }
+}
+
+
+// wakeup: This function is called once per memory controller clock cycle.
+
+void MemoryControl::wakeup () {
+
+  // execute everything
+  executeCycle();
+
+  // Watchdog: after IDLECOUNT_MAX_VALUE cycles with nothing queued or
+  // pending, stop rescheduling ourselves so the event queue can drain
+  // (see the comment above IDLECOUNT_MAX_VALUE).
+  m_idleCount--;
+  if (m_idleCount <= 0) {
+    m_awakened = 0;
+  } else {
+    // Reschedule ourselves so that we run every memory cycle:
+    g_eventQueue_ptr->scheduleEvent(this, m_mem_bus_cycle_multiplier);
+  }
+}
+
+
diff --git a/src/mem/ruby/system/MemoryControl.hh b/src/mem/ruby/system/MemoryControl.hh
new file mode 100644
index 000000000..ee71b8f51
--- /dev/null
+++ b/src/mem/ruby/system/MemoryControl.hh
@@ -0,0 +1,176 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * MemoryControl.h
+ *
+ * Description: See MemoryControl.C
+ *
+ * $Id: $
+ *
+ */
+
+#ifndef MEMORY_CONTROL_H
+#define MEMORY_CONTROL_H
+
+#include "Global.hh"
+#include "Map.hh"
+#include "Address.hh"
+#include "Profiler.hh"
+#include "AbstractChip.hh"
+#include "System.hh"
+#include "Message.hh"
+#include "util.hh"
+#include "MemoryNode.hh"
+// Note that "MemoryMsg" is in the "generated" directory:
+#include "MemoryMsg.hh"
+#include "Consumer.hh"
+#include "AbstractMemOrCache.hh"
+
+#include <list>
+
+// This constant is part of the definition of tFAW; see
+// the comments in header to MemoryControl.C
+#define ACTIVATE_PER_TFAW 4
+
+//////////////////////////////////////////////////////////////////////////////
+
+class Consumer;
+
+// MemoryControl: single-channel DDR-style memory controller model.
+// It is a Consumer (the event queue clocks it via wakeup()) and an
+// AbstractMemOrCache (the directory enqueues requests into it).  See
+// the header comments in MemoryControl.C for the full model
+// description (bank queues, bus bubbles, anti-starvation, tFAW).
+class MemoryControl : public Consumer, public AbstractMemOrCache {
+public:
+
+  // Constructors
+  MemoryControl (AbstractChip* chip_ptr, int version);
+
+  // Destructor
+  ~MemoryControl ();
+
+  // Public Methods
+
+  // Called by the event queue once per scheduled memory cycle.
+  void wakeup() ;
+
+  void setConsumer (Consumer* consumer_ptr);
+  Consumer* getConsumer () { return m_consumer_ptr; };
+  void setDescription (const string& name) { m_name = name; };
+  string getDescription () { return m_name; };
+
+  // Called from the directory:
+  void enqueue (const MsgPtr& message, int latency );
+  void enqueueMemRef (MemoryNode& memRef);
+  void dequeue ();
+  const Message* peek ();
+  MemoryNode peekNode ();
+  bool isReady();
+  bool areNSlotsAvailable (int n) { return true; };  // infinite queue length
+
+  //// Called from L3 cache:
+  //void writeBack(physical_address_t addr);
+
+  void printConfig (ostream& out);
+  void print (ostream& out) const;
+  void setDebug (int debugFlag);
+
+private:
+
+  // Private Methods (documented in MemoryControl.C)
+  void enqueueToDirectory (MemoryNode req, int latency);
+  int getBank (physical_address_t addr);
+  int getRank (int bank);
+  bool queueReady (int bank);
+  void issueRequest (int bank);
+  bool issueRefresh (int bank);
+  void markTfaw (int rank);
+  void executeCycle ();
+
+  // Private copy constructor and assignment operator
+  MemoryControl (const MemoryControl& obj);
+  MemoryControl& operator=(const MemoryControl& obj);
+
+  // data members
+  AbstractChip* m_chip_ptr;
+  Consumer* m_consumer_ptr;  // Consumer to signal a wakeup()
+  string m_name;
+  int m_version;
+  int m_msg_counter;
+  int m_awakened;
+
+  // Configuration parameters, snapshotted from RubyConfig at
+  // construction time:
+  int m_mem_bus_cycle_multiplier;
+  int m_banks_per_rank;
+  int m_ranks_per_dimm;
+  int m_dimms_per_channel;
+  int m_bank_bit_0;
+  int m_rank_bit_0;
+  int m_dimm_bit_0;
+  unsigned int m_bank_queue_size;
+  int m_bank_busy_time;
+  int m_rank_rank_delay;
+  int m_read_write_delay;
+  int m_basic_bus_busy_time;
+  int m_mem_ctl_latency;
+  int m_refresh_period;
+  int m_memRandomArbitrate;
+  int m_tFaw;
+  int m_memFixedDelay;
+
+  // Totals derived from the geometry parameters above:
+  int m_total_banks;
+  int m_total_ranks;
+  int m_refresh_period_system;
+
+  // queues where memory requests live
+
+  list<MemoryNode> m_response_queue;
+  list<MemoryNode> m_input_queue;
+  list<MemoryNode>* m_bankQueues;
+
+  // Each entry indicates number of address-bus cycles until bank
+  // is reschedulable:
+  int* m_bankBusyCounter;
+  int* m_oldRequest;
+
+  // Per-rank tFAW window tracking (see markTfaw/executeCycle):
+  uint64* m_tfaw_shift;
+  int* m_tfaw_count;
+
+  // Each of these indicates number of address-bus cycles until
+  // we can issue a new request of the corresponding type:
+  int m_busBusyCounter_Write;
+  int m_busBusyCounter_ReadNewRank;
+  int m_busBusyCounter_Basic;
+
+  int m_busBusy_WhichRank; // which rank last granted
+  int m_roundRobin; // which bank queue was last granted
+  int m_refresh_count; // cycles until next refresh
+  int m_need_refresh; // set whenever m_refresh_count goes to zero
+  int m_refresh_bank; // which bank to refresh next
+  int m_ageCounter; // age of old requests; to detect starvation
+  int m_idleCount; // watchdog timer for shutting down
+  int m_debug; // turn on printf's
+};
+
+#endif // MEMORY_CONTROL_H
+
diff --git a/src/mem/ruby/system/MemoryNode.cc b/src/mem/ruby/system/MemoryNode.cc
new file mode 100644
index 000000000..5cba14eff
--- /dev/null
+++ b/src/mem/ruby/system/MemoryNode.cc
@@ -0,0 +1,37 @@
+/*
+ * Copyright (c) 1999 by Mark Hill and David Wood for the Wisconsin
+ * Multifacet Project. ALL RIGHTS RESERVED.
+ *
+ * ##HEADER##
+ *
+ * This software is furnished under a license and may be used and
+ * copied only in accordance with the terms of such license and the
+ * inclusion of the above copyright notice. This software or any
+ * other copies thereof or any derivative works may not be provided or
+ * otherwise made available to any other persons. Title to and
+ * ownership of the software is retained by Mark Hill and David Wood.
+ * Any use of this software must include the above copyright notice.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS". THE LICENSOR MAKES NO
+ * WARRANTIES ABOUT ITS CORRECTNESS OR PERFORMANCE.
+ * */
+
+/*
+ * MemoryNode.C
+ *
+ * Description: See MemoryNode.h
+ *
+ * $Id: MemoryNode.C 1.3 04/08/04 14:15:38-05:00 beckmann@c2-141.cs.wisc.edu $
+ *
+ */
+
+#include "MemoryNode.hh"
+
+void MemoryNode::print(ostream& out) const
+{
+ out << "[";
+ out << m_time << ", ";
+ out << m_msg_counter << ", ";
+ out << m_msgptr << "; ";
+ out << "]";
+}
diff --git a/src/mem/ruby/system/MemoryNode.hh b/src/mem/ruby/system/MemoryNode.hh
new file mode 100644
index 000000000..1ed3968bb
--- /dev/null
+++ b/src/mem/ruby/system/MemoryNode.hh
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 1999 by Mark Hill and David Wood for the Wisconsin
+ * Multifacet Project. ALL RIGHTS RESERVED.
+ *
+ * ##HEADER##
+ *
+ * This software is furnished under a license and may be used and
+ * copied only in accordance with the terms of such license and the
+ * inclusion of the above copyright notice. This software or any
+ * other copies thereof or any derivative works may not be provided or
+ * otherwise made available to any other persons. Title to and
+ * ownership of the software is retained by Mark Hill and David Wood.
+ * Any use of this software must include the above copyright notice.
+ *
+ * THIS SOFTWARE IS PROVIDED "AS IS". THE LICENSOR MAKES NO
+ * WARRANTIES ABOUT ITS CORRECTNESS OR PERFORMANCE.
+ * */
+
+/*
+ * EventQueueNode.h
+ *
+ * Description:
+ * This structure records everything known about a single
+ * memory request that is queued in the memory controller.
+ * It is created when the memory request first arrives
+ * at a memory controller and is deleted when the underlying
+ * message is enqueued to be sent back to the directory.
+ *
+ * $Id: MemoryNode.h,v 3.3 2003/12/04 15:01:34 xu Exp $
+ *
+ */
+
+#ifndef MEMORYNODE_H
+#define MEMORYNODE_H
+
+#include "Global.hh"
+#include "Message.hh"
+#include "MemoryRequestType.hh"
+
+class MemoryNode {
+
+public:
+ // Constructors
+
+// old one:
+ MemoryNode(const Time& time, int counter, const MsgPtr& msgptr, const physical_address_t addr, const bool is_mem_read) {
+ m_time = time;
+ m_msg_counter = counter;
+ m_msgptr = msgptr;
+ m_addr = addr;
+ m_is_mem_read = is_mem_read;
+ m_is_dirty_wb = !is_mem_read;
+ }
+
+// new one:
+ MemoryNode(const Time& time, const MsgPtr& msgptr, const physical_address_t addr, const bool is_mem_read, const bool is_dirty_wb) {
+ m_time = time;
+ m_msg_counter = 0;
+ m_msgptr = msgptr;
+ m_addr = addr;
+ m_is_mem_read = is_mem_read;
+ m_is_dirty_wb = is_dirty_wb;
+ }
+
+ // Destructor
+ ~MemoryNode() {};
+
+ // Public Methods
+ void print(ostream& out) const;
+
+ // Data Members (m_ prefix) (all public -- this is really more a struct)
+
+ Time m_time;
+ int m_msg_counter;
+ MsgPtr m_msgptr;
+ physical_address_t m_addr;
+ bool m_is_mem_read;
+ bool m_is_dirty_wb;
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const MemoryNode& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const MemoryNode& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //MEMORYNODE_H
diff --git a/src/mem/ruby/system/MultiBitSelBloomFilter.cc b/src/mem/ruby/system/MultiBitSelBloomFilter.cc
new file mode 100644
index 000000000..a42463d1e
--- /dev/null
+++ b/src/mem/ruby/system/MultiBitSelBloomFilter.cc
@@ -0,0 +1,191 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * NonCountingBloomFilter.C
+ *
+ * Description:
+ *
+ *
+ */
+
+#include "MultiBitSelBloomFilter.hh"
+#include "Map.hh"
+#include "Address.hh"
+
+MultiBitSelBloomFilter::MultiBitSelBloomFilter(string str)
+{
+
+ string tail(str);
+ string head = string_split(tail, '_');
+
+ // head contains filter size, tail contains bit offset from block number
+ m_filter_size = atoi(head.c_str());
+
+ head = string_split(tail, '_');
+ m_num_hashes = atoi(head.c_str());
+
+ head = string_split(tail, '_');
+ m_skip_bits = atoi(head.c_str());
+
+ if(tail == "Regular") {
+ isParallel = false;
+ } else if (tail == "Parallel") {
+ isParallel = true;
+ } else {
+ cout << "ERROR: Incorrect config string for MultiBitSel Bloom! :" << str << endl;
+ assert(0);
+ }
+
+ m_filter_size_bits = log_int(m_filter_size);
+
+ m_par_filter_size = m_filter_size/m_num_hashes;
+ m_par_filter_size_bits = log_int(m_par_filter_size);
+
+ m_filter.setSize(m_filter_size);
+ clear();
+}
+
+MultiBitSelBloomFilter::~MultiBitSelBloomFilter(){
+}
+
+void MultiBitSelBloomFilter::clear()
+{
+ for (int i = 0; i < m_filter_size; i++) {
+ m_filter[i] = 0;
+ }
+}
+
+void MultiBitSelBloomFilter::increment(const Address& addr)
+{
+ // Not used
+}
+
+
+void MultiBitSelBloomFilter::decrement(const Address& addr)
+{
+ // Not used
+}
+
+void MultiBitSelBloomFilter::merge(AbstractBloomFilter * other_filter){
+ // assumes both filters are the same size!
+ MultiBitSelBloomFilter * temp = (MultiBitSelBloomFilter*) other_filter;
+ for(int i=0; i < m_filter_size; ++i){
+ m_filter[i] |= (*temp)[i];
+ }
+
+}
+
+void MultiBitSelBloomFilter::set(const Address& addr)
+{
+ for (int i = 0; i < m_num_hashes; i++) {
+ int idx = get_index(addr, i);
+ m_filter[idx] = 1;
+
+ //Profile hash value distribution
+ //g_system_ptr->getProfiler()->getXactProfiler()->profileHashValue(i, idx); //gem5:Arka for decomissioning of log_tm
+ }
+}
+
+void MultiBitSelBloomFilter::unset(const Address& addr)
+{
+ cout << "ERROR: Unset should never be called in a Bloom filter";
+ assert(0);
+}
+
+bool MultiBitSelBloomFilter::isSet(const Address& addr)
+{
+ bool res = true;
+
+ for (int i=0; i < m_num_hashes; i++) {
+ int idx = get_index(addr, i);
+ res = res && m_filter[idx];
+ }
+ return res;
+}
+
+
+int MultiBitSelBloomFilter::getCount(const Address& addr)
+{
+ return isSet(addr)? 1: 0;
+}
+
+int MultiBitSelBloomFilter::getIndex(const Address& addr)
+{
+ return 0;
+}
+
+int MultiBitSelBloomFilter::readBit(const int index) {
+ return 0;
+}
+
+void MultiBitSelBloomFilter::writeBit(const int index, const int value) {
+
+}
+
+int MultiBitSelBloomFilter::getTotalCount()
+{
+ int count = 0;
+
+ for (int i = 0; i < m_filter_size; i++) {
+ count += m_filter[i];
+ }
+ return count;
+}
+
+void MultiBitSelBloomFilter::print(ostream& out) const
+{
+}
+
+int MultiBitSelBloomFilter::get_index(const Address& addr, int i)
+{
+ // m_skip_bits is used to perform BitSelect after skipping some bits. Used to simulate BitSel hashing on larger than cache-line granularities
+ uint64 x = (addr.getLineAddress()) >> m_skip_bits;
+ int y = hash_bitsel(x, i, m_num_hashes, 30, m_filter_size_bits);
+ //36-bit addresses, 6-bit cache lines
+
+ if(isParallel) {
+ return (y % m_par_filter_size) + i*m_par_filter_size;
+ } else {
+ return y % m_filter_size;
+ }
+}
+
+
+int MultiBitSelBloomFilter::hash_bitsel(uint64 value, int index, int jump, int maxBits, int numBits) {
+ uint64 mask = 1;
+ int result = 0;
+ int bit, i;
+
+ for(i = 0; i < numBits; i++) {
+ bit = (index + jump*i) % maxBits;
+ if (value & (mask << bit)) result += mask << i;
+ }
+ return result;
+}
diff --git a/src/mem/ruby/system/MultiBitSelBloomFilter.hh b/src/mem/ruby/system/MultiBitSelBloomFilter.hh
new file mode 100644
index 000000000..eaf4ff943
--- /dev/null
+++ b/src/mem/ruby/system/MultiBitSelBloomFilter.hh
@@ -0,0 +1,98 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * MultiBitSelBloomFilter.h
+ *
+ * Description:
+ *
+ *
+ */
+
+#ifndef MULTIBITSEL_BLOOM_FILTER_H
+#define MULTIBITSEL_BLOOM_FILTER_H
+
+#include "Map.hh"
+#include "Global.hh"
+#include "AbstractChip.hh"
+#include "System.hh"
+#include "Profiler.hh"
+#include "RubyConfig.hh"
+#include "Address.hh"
+#include "AbstractBloomFilter.hh"
+
+class MultiBitSelBloomFilter : public AbstractBloomFilter {
+public:
+
+ ~MultiBitSelBloomFilter();
+ MultiBitSelBloomFilter(string config);
+
+ void clear();
+ void increment(const Address& addr);
+ void decrement(const Address& addr);
+ void merge(AbstractBloomFilter * other_filter);
+ void set(const Address& addr);
+ void unset(const Address& addr);
+
+ bool isSet(const Address& addr);
+ int getCount(const Address& addr);
+ int getTotalCount();
+ void print(ostream& out) const;
+
+ int getIndex(const Address& addr);
+ int readBit(const int index);
+ void writeBit(const int index, const int value);
+
+ int operator[](const int index) const{
+ return this->m_filter[index];
+ }
+
+private:
+
+ int get_index(const Address& addr, int hashNumber);
+
+ int hash_bitsel(uint64 value, int index, int jump, int maxBits, int numBits);
+
+ Vector<int> m_filter;
+ int m_filter_size;
+ int m_num_hashes;
+ int m_filter_size_bits;
+ int m_skip_bits;
+
+ int m_par_filter_size;
+ int m_par_filter_size_bits;
+
+ int m_count_bits;
+ int m_count;
+
+ bool isParallel;
+
+};
+
+#endif
diff --git a/src/mem/ruby/system/MultiGrainBloomFilter.cc b/src/mem/ruby/system/MultiGrainBloomFilter.cc
new file mode 100644
index 000000000..f1e110b12
--- /dev/null
+++ b/src/mem/ruby/system/MultiGrainBloomFilter.cc
@@ -0,0 +1,172 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * MultiGrainBloomFilter.C
+ *
+ * Description:
+ *
+ *
+ */
+
+#include "MultiGrainBloomFilter.hh"
+#include "Map.hh"
+#include "Address.hh"
+
+MultiGrainBloomFilter::MultiGrainBloomFilter(string str)
+{
+ string tail(str);
+
+ // split into the 2 filter sizes
+ string head = string_split(tail, '_');
+
+ // head contains size of 1st bloom filter, tail contains size of 2nd bloom filter
+
+ m_filter_size = atoi(head.c_str());
+ m_filter_size_bits = log_int(m_filter_size);
+
+ m_page_filter_size = atoi(tail.c_str());
+ m_page_filter_size_bits = log_int(m_page_filter_size);
+
+ m_filter.setSize(m_filter_size);
+ m_page_filter.setSize(m_page_filter_size);
+ clear();
+}
+
+MultiGrainBloomFilter::~MultiGrainBloomFilter(){
+}
+
+void MultiGrainBloomFilter::clear()
+{
+ for (int i = 0; i < m_filter_size; i++) {
+ m_filter[i] = 0;
+ }
+ for(int i=0; i < m_page_filter_size; ++i){
+ m_page_filter[i] = 0;
+ }
+}
+
+void MultiGrainBloomFilter::increment(const Address& addr)
+{
+ // Not used
+}
+
+
+void MultiGrainBloomFilter::decrement(const Address& addr)
+{
+ // Not used
+}
+
+void MultiGrainBloomFilter::merge(AbstractBloomFilter * other_filter)
+{
+ // TODO
+}
+
+void MultiGrainBloomFilter::set(const Address& addr)
+{
+  int i = get_block_index(addr);
+  int j = get_page_index(addr);
+  assert(i < m_filter_size);
+  assert(j < m_page_filter_size);
+  m_filter[i] = 1;
+  m_page_filter[j] = 1;  // was m_page_filter[i]: the page filter must be indexed by the page index j
+
+}
+
+void MultiGrainBloomFilter::unset(const Address& addr)
+{
+ // not used
+}
+
+bool MultiGrainBloomFilter::isSet(const Address& addr)
+{
+  int i = get_block_index(addr);
+  int j = get_page_index(addr);
+  assert(i < m_filter_size);
+  assert(j < m_page_filter_size);
+  // we have to have both indices set
+  return (m_filter[i] && m_page_filter[j]);  // was m_page_filter[i]: must test the page-granularity bit at j
+}
+
+int MultiGrainBloomFilter::getCount(const Address& addr)
+{
+ // not used
+ return 0;
+}
+
+int MultiGrainBloomFilter::getTotalCount()
+{
+  int count = 0;
+
+  for (int i = 0; i < m_filter_size; i++) {
+    count += m_filter[i];
+  }
+
+  for(int i=0; i < m_page_filter_size; ++i){
+    count += m_page_filter[i];  // was "+= m_page_filter[i] = 0": the assignment zeroed the filter and always added 0
+  }
+
+  return count;
+}
+
+int MultiGrainBloomFilter::getIndex(const Address& addr)
+{
+ return 0;
+ // TODO
+}
+
+int MultiGrainBloomFilter::readBit(const int index) {
+ return 0;
+ // TODO
+}
+
+void MultiGrainBloomFilter::writeBit(const int index, const int value) {
+ // TODO
+}
+
+void MultiGrainBloomFilter::print(ostream& out) const
+{
+}
+
+int MultiGrainBloomFilter::get_block_index(const Address& addr)
+{
+  // grab a chunk of bits just above the byte offset: the block-granularity filter index
+  return addr.bitSelect( RubyConfig::dataBlockBits(), RubyConfig::dataBlockBits() + m_filter_size_bits - 1);
+}
+
+int MultiGrainBloomFilter::get_page_index(const Address & addr)
+{
+  // grab a chunk of bits above the block chunk; NOTE(review): the "-1" on the range start overlaps get_block_index's top bit -- looks like an off-by-one, confirm intended bit layout
+  return addr.bitSelect( RubyConfig::dataBlockBits() + m_filter_size_bits - 1,
+                         RubyConfig::dataBlockBits() + m_filter_size_bits - 1 + m_page_filter_size_bits - 1);
+}
+
+
+
+
diff --git a/src/mem/ruby/system/MultiGrainBloomFilter.hh b/src/mem/ruby/system/MultiGrainBloomFilter.hh
new file mode 100644
index 000000000..692960853
--- /dev/null
+++ b/src/mem/ruby/system/MultiGrainBloomFilter.hh
@@ -0,0 +1,89 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * MultiGrainBloomFilter.h
+ *
+ * Description:
+ *
+ *
+ */
+
+#ifndef MULTIGRAIN_BLOOM_FILTER_H
+#define MULTIGRAIN_BLOOM_FILTER_H
+
+#include "Map.hh"
+#include "Global.hh"
+#include "AbstractChip.hh"
+#include "RubyConfig.hh"
+#include "Address.hh"
+#include "AbstractBloomFilter.hh"
+
+class MultiGrainBloomFilter : public AbstractBloomFilter {
+public:
+
+ ~MultiGrainBloomFilter();
+ MultiGrainBloomFilter(string str);
+
+ void clear();
+ void increment(const Address& addr);
+ void decrement(const Address& addr);
+ void merge(AbstractBloomFilter * other_filter);
+ void set(const Address& addr);
+ void unset(const Address& addr);
+
+ bool isSet(const Address& addr);
+ int getCount(const Address& addr);
+ int getTotalCount();
+ int getIndex(const Address& addr);
+ int readBit(const int index);
+ void writeBit(const int index, const int value);
+
+ void print(ostream& out) const;
+
+private:
+
+ int get_block_index(const Address& addr);
+ int get_page_index(const Address & addr);
+
+ // The block filter
+ Vector<int> m_filter;
+ int m_filter_size;
+ int m_filter_size_bits;
+ // The page number filter
+ Vector<int> m_page_filter;
+ int m_page_filter_size;
+ int m_page_filter_size_bits;
+
+ int m_count_bits;
+ int m_count;
+};
+
+
+#endif
diff --git a/src/mem/ruby/system/NodeID.hh b/src/mem/ruby/system/NodeID.hh
new file mode 100644
index 000000000..23df8bb46
--- /dev/null
+++ b/src/mem/ruby/system/NodeID.hh
@@ -0,0 +1,50 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * NodeID.h
+ *
+ * Description:
+ *
+ * $Id: NodeID.h,v 3.3 2003/12/04 15:01:39 xu Exp $
+ *
+ */
+
+#ifndef NODEID_H
+#define NODEID_H
+
+#include "Global.hh"
+#include "util.hh"
+
+typedef int NodeID;
+
+extern inline
+string NodeIDToString (NodeID node) { return int_to_string(node); }
+
+#endif //NODEID_H
diff --git a/src/mem/ruby/system/NodePersistentTable.cc b/src/mem/ruby/system/NodePersistentTable.cc
new file mode 100644
index 000000000..df2076c1e
--- /dev/null
+++ b/src/mem/ruby/system/NodePersistentTable.cc
@@ -0,0 +1,194 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id: NodePersistentTable.C 1.3 04/08/16 14:12:33-05:00 beckmann@c2-143.cs.wisc.edu $
+ *
+ */
+
+#include "NodePersistentTable.hh"
+#include "Set.hh"
+#include "Map.hh"
+#include "Address.hh"
+#include "AbstractChip.hh"
+#include "util.hh"
+
+// randomize so that handoffs are not locality-aware
+// int persistent_randomize[] = {0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15};
+int persistent_randomize[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
+
+
+class NodePersistentTableEntry {
+public:
+ Set m_starving;
+ Set m_marked;
+ Set m_request_to_write;
+};
+
+NodePersistentTable::NodePersistentTable(AbstractChip* chip_ptr, int version)
+{
+ m_chip_ptr = chip_ptr;
+ m_map_ptr = new Map<Address, NodePersistentTableEntry>;
+ m_version = version;
+}
+
+NodePersistentTable::~NodePersistentTable()
+{
+ delete m_map_ptr;
+ m_map_ptr = NULL;
+ m_chip_ptr = NULL;
+}
+
+void NodePersistentTable::persistentRequestLock(const Address& address, NodeID llocker, AccessType type)
+{
+
+ // if (locker == m_chip_ptr->getID() )
+ // cout << "Chip " << m_chip_ptr->getID() << ": " << llocker << " requesting lock for " << address << endl;
+
+ NodeID locker = (NodeID) persistent_randomize[llocker];
+
+ assert(address == line_address(address));
+ if (!m_map_ptr->exist(address)) {
+ // Allocate if not present
+ NodePersistentTableEntry entry;
+ entry.m_starving.add(locker);
+ if (type == AccessType_Write) {
+ entry.m_request_to_write.add(locker);
+ }
+ m_map_ptr->add(address, entry);
+ } else {
+ NodePersistentTableEntry& entry = m_map_ptr->lookup(address);
+ assert(!(entry.m_starving.isElement(locker))); // Make sure we're not already in the locked set
+
+ entry.m_starving.add(locker);
+ if (type == AccessType_Write) {
+ entry.m_request_to_write.add(locker);
+ }
+ assert(entry.m_marked.isSubset(entry.m_starving));
+ }
+}
+
+void NodePersistentTable::persistentRequestUnlock(const Address& address, NodeID uunlocker)
+{
+ // if (unlocker == m_chip_ptr->getID() )
+ // cout << "Chip " << m_chip_ptr->getID() << ": " << uunlocker << " requesting unlock for " << address << endl;
+
+ NodeID unlocker = (NodeID) persistent_randomize[uunlocker];
+
+ assert(address == line_address(address));
+ assert(m_map_ptr->exist(address));
+ NodePersistentTableEntry& entry = m_map_ptr->lookup(address);
+ assert(entry.m_starving.isElement(unlocker)); // Make sure we're in the locked set
+ assert(entry.m_marked.isSubset(entry.m_starving));
+ entry.m_starving.remove(unlocker);
+ entry.m_marked.remove(unlocker);
+ entry.m_request_to_write.remove(unlocker);
+ assert(entry.m_marked.isSubset(entry.m_starving));
+
+ // Deallocate if empty
+ if (entry.m_starving.isEmpty()) {
+ assert(entry.m_marked.isEmpty());
+ m_map_ptr->erase(address);
+ }
+}
+
+bool NodePersistentTable::okToIssueStarving(const Address& address) const
+{
+  assert(address == line_address(address));
+  if (!m_map_ptr->exist(address)) {
+    return true; // No entry present
+  } else if (m_map_ptr->lookup(address).m_starving.isElement(m_chip_ptr->getID())) {
+    return false; // We can't issue another lockdown until our previous unlock has occurred
+  } else {
+    return (m_map_ptr->lookup(address).m_marked.isEmpty());
+  }
+}
+
+NodeID NodePersistentTable::findSmallest(const Address& address) const
+{
+ assert(address == line_address(address));
+ assert(m_map_ptr->exist(address));
+ const NodePersistentTableEntry& entry = m_map_ptr->lookup(address);
+ // cout << "Node " << m_chip_ptr->getID() << " returning " << persistent_randomize[entry.m_starving.smallestElement()] << " for findSmallest(" << address << ")" << endl;
+ return (NodeID) persistent_randomize[entry.m_starving.smallestElement()];
+}
+
+AccessType NodePersistentTable::typeOfSmallest(const Address& address) const
+{
+ assert(address == line_address(address));
+ assert(m_map_ptr->exist(address));
+ const NodePersistentTableEntry& entry = m_map_ptr->lookup(address);
+ if (entry.m_request_to_write.isElement(entry.m_starving.smallestElement())) {
+ return AccessType_Write;
+ } else {
+ return AccessType_Read;
+ }
+}
+
+void NodePersistentTable::markEntries(const Address& address)
+{
+ assert(address == line_address(address));
+ if (m_map_ptr->exist(address)) {
+ NodePersistentTableEntry& entry = m_map_ptr->lookup(address);
+ assert(entry.m_marked.isEmpty()); // None should be marked
+ entry.m_marked = entry.m_starving; // Mark all the nodes currently in the table
+ }
+}
+
+bool NodePersistentTable::isLocked(const Address& address) const
+{
+ assert(address == line_address(address));
+ // If an entry is present, it must be locked
+ return (m_map_ptr->exist(address));
+}
+
+int NodePersistentTable::countStarvingForAddress(const Address& address) const
+{
+ if (m_map_ptr->exist(address)) {
+ NodePersistentTableEntry& entry = m_map_ptr->lookup(address);
+ return (entry.m_starving.count());
+ }
+ else {
+ return 0;
+ }
+}
+
+int NodePersistentTable::countReadStarvingForAddress(const Address& address) const
+{
+  // read starvers = total starvers minus those that requested write access (dead "int count" removed)
+  if (m_map_ptr->exist(address)) {
+    NodePersistentTableEntry& entry = m_map_ptr->lookup(address);
+    return (entry.m_starving.count() - entry.m_request_to_write.count());
+  }
+  else {
+    return 0;
+  }
+}
+
+
diff --git a/src/mem/ruby/system/NodePersistentTable.hh b/src/mem/ruby/system/NodePersistentTable.hh
new file mode 100644
index 000000000..ac25552b8
--- /dev/null
+++ b/src/mem/ruby/system/NodePersistentTable.hh
@@ -0,0 +1,99 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id: NodePersistentTable.h 1.3 04/08/16 14:12:33-05:00 beckmann@c2-143.cs.wisc.edu $
+ *
+ * Description:
+ *
+ */
+
+#ifndef NodePersistentTable_H
+#define NodePersistentTable_H
+
+#include "Global.hh"
+#include "NodeID.hh"
+#include "AccessType.hh"
+
+class AbstractChip;
+
+template <class KEY_TYPE, class VALUE_TYPE> class Map;
+class Address;
+class NodePersistentTableEntry;
+
+class NodePersistentTable {
+public:
+ // Constructors
+ NodePersistentTable(AbstractChip* chip_ptr, int version);
+
+ // Destructor
+ ~NodePersistentTable();
+
+ // Public Methods
+ void persistentRequestLock(const Address& address, NodeID locker, AccessType type);
+ void persistentRequestUnlock(const Address& address, NodeID unlocker);
+ bool okToIssueStarving(const Address& address) const;
+ NodeID findSmallest(const Address& address) const;
+ AccessType typeOfSmallest(const Address& address) const;
+ void markEntries(const Address& address);
+ bool isLocked(const Address& addr) const;
+ int countStarvingForAddress(const Address& addr) const;
+ int countReadStarvingForAddress(const Address& addr) const;
+
+ static void printConfig(ostream& out) {}
+
+ void print(ostream& out) const;
+private:
+ // Private Methods
+
+ // Private copy constructor and assignment operator
+ NodePersistentTable(const NodePersistentTable& obj);
+ NodePersistentTable& operator=(const NodePersistentTable& obj);
+
+ // Data Members (m_prefix)
+ Map<Address, NodePersistentTableEntry>* m_map_ptr;
+ AbstractChip* m_chip_ptr;
+ int m_version;
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const NodePersistentTable& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const NodePersistentTable& obj)
+{
+ obj.print(out);
+ out << flush;
+ return out;
+}
+
+#endif //NodePersistentTable_H
diff --git a/src/mem/ruby/system/NonCountingBloomFilter.cc b/src/mem/ruby/system/NonCountingBloomFilter.cc
new file mode 100644
index 000000000..81e4adbcd
--- /dev/null
+++ b/src/mem/ruby/system/NonCountingBloomFilter.cc
@@ -0,0 +1,145 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * NonCountingBloomFilter.C
+ *
+ * Description:
+ *
+ *
+ */
+
+#include "NonCountingBloomFilter.hh"
+#include "Map.hh"
+#include "Address.hh"
+
+// Construct from a config string of the form "<size>_<offset>": head is
+// the number of filter bits, tail is the bit offset (above the data-block
+// bits) used by get_index() to select the hash bits.
+NonCountingBloomFilter::NonCountingBloomFilter(string str)
+{
+  string tail(str);
+  string head = string_split(tail, '_');
+
+  // head contains filter size, tail contains bit offset from block number
+  m_filter_size = atoi(head.c_str());
+  m_offset = atoi(tail.c_str());
+  m_filter_size_bits = log_int(m_filter_size);
+
+  m_filter.setSize(m_filter_size);
+  clear();
+}
+
+NonCountingBloomFilter::~NonCountingBloomFilter(){
+}
+
+// Reset every bit in the filter to 0.
+void NonCountingBloomFilter::clear()
+{
+  for (int i = 0; i < m_filter_size; i++) {
+    m_filter[i] = 0;
+  }
+}
+
+// No-op: a non-counting filter has no per-entry counts to increment.
+void NonCountingBloomFilter::increment(const Address& addr)
+{
+  // Not used
+}
+
+
+// No-op: a non-counting filter has no per-entry counts to decrement.
+void NonCountingBloomFilter::decrement(const Address& addr)
+{
+  // Not used
+}
+
+// OR another filter's bits into this one.
+// NOTE(review): assumes (unchecked) that other_filter really is a
+// NonCountingBloomFilter of identical size -- confirm at call sites.
+void NonCountingBloomFilter::merge(AbstractBloomFilter * other_filter){
+  // assumes both filters are the same size!
+  NonCountingBloomFilter * temp = static_cast<NonCountingBloomFilter*>(other_filter);
+  for(int i=0; i < m_filter_size; ++i){
+    m_filter[i] |= (*temp)[i];
+  }
+}
+
+// Set the single filter bit selected by addr.
+void NonCountingBloomFilter::set(const Address& addr)
+{
+  int i = get_index(addr);
+  m_filter[i] = 1;
+}
+
+// Clear the single filter bit selected by addr.  Because the filter is
+// non-counting, this can also clear membership of other addresses that
+// hashed to the same bit.
+void NonCountingBloomFilter::unset(const Address& addr)
+{
+  int i = get_index(addr);
+  m_filter[i] = 0;
+}
+
+// Membership test: true if the bit for addr is set (may be a false
+// positive, as with any Bloom filter).
+bool NonCountingBloomFilter::isSet(const Address& addr)
+{
+  int i = get_index(addr);
+  return (m_filter[i]);
+}
+
+
+// Count for addr is just the bit value (0 or 1).
+int NonCountingBloomFilter::getCount(const Address& addr)
+{
+  return m_filter[get_index(addr)];
+}
+
+// Total number of set bits in the filter.
+int NonCountingBloomFilter::getTotalCount()
+{
+  int count = 0;
+
+  for (int i = 0; i < m_filter_size; i++) {
+    count += m_filter[i];
+  }
+  return count;
+}
+
+// Intentionally empty.
+void NonCountingBloomFilter::print(ostream& out) const
+{
+}
+
+int NonCountingBloomFilter::getIndex(const Address& addr)
+{
+  return get_index(addr);
+}
+
+int NonCountingBloomFilter::readBit(const int index) {
+  return m_filter[index];
+}
+
+void NonCountingBloomFilter::writeBit(const int index, const int value) {
+  m_filter[index] = value;
+}
+
+// Hash: select m_filter_size_bits bits of the address, starting m_offset
+// bits above the data-block offset bits.
+int NonCountingBloomFilter::get_index(const Address& addr)
+{
+  return addr.bitSelect( RubyConfig::dataBlockBits() + m_offset,
+                         RubyConfig::dataBlockBits() + m_offset + m_filter_size_bits - 1);
+}
+
+
diff --git a/src/mem/ruby/system/NonCountingBloomFilter.hh b/src/mem/ruby/system/NonCountingBloomFilter.hh
new file mode 100644
index 000000000..f2912c08c
--- /dev/null
+++ b/src/mem/ruby/system/NonCountingBloomFilter.hh
@@ -0,0 +1,89 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * NonCountingBloomFilter.h
+ *
+ * Description:
+ *
+ *
+ */
+
+#ifndef NONCOUNTING_BLOOM_FILTER_H
+#define NONCOUNTING_BLOOM_FILTER_H
+
+#include "Map.hh"
+#include "Global.hh"
+#include "AbstractChip.hh"
+#include "RubyConfig.hh"
+#include "Address.hh"
+#include "AbstractBloomFilter.hh"
+
+// Bloom filter with one bit per slot (no counters): unset() clears the
+// bit directly, so removals can cause false negatives for other addresses
+// that hashed to the same bit.
+class NonCountingBloomFilter : public AbstractBloomFilter {
+public:
+
+  ~NonCountingBloomFilter();
+  NonCountingBloomFilter(string config);
+
+  void clear();
+  void increment(const Address& addr);  // no-op for this filter
+  void decrement(const Address& addr);  // no-op for this filter
+  void merge(AbstractBloomFilter * other_filter);
+  void set(const Address& addr);
+  void unset(const Address& addr);
+
+  bool isSet(const Address& addr);
+  int getCount(const Address& addr);
+  int getTotalCount();
+
+  int getIndex(const Address& addr);
+  int readBit(const int index);
+  void writeBit(const int index, const int value);
+
+  void print(ostream& out) const;
+
+  // Raw read of one filter bit.
+  int operator[](const int index) const{
+    return this->m_filter[index];
+  }
+
+private:
+
+  // Map an address to a filter index (bit selection; see .cc).
+  int get_index(const Address& addr);
+
+  Vector<int> m_filter;      // one 0/1 slot per filter entry
+  int m_filter_size;
+  int m_offset;              // bit offset above the data-block bits
+  int m_filter_size_bits;    // log2(m_filter_size)
+
+  // NOTE(review): m_count_bits and m_count appear unused in this class.
+  int m_count_bits;
+  int m_count;
+};
+
+
+#endif
diff --git a/src/mem/ruby/system/PerfectCacheMemory.hh b/src/mem/ruby/system/PerfectCacheMemory.hh
new file mode 100644
index 000000000..590b265c4
--- /dev/null
+++ b/src/mem/ruby/system/PerfectCacheMemory.hh
@@ -0,0 +1,239 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * PerfectCacheMemory.h
+ *
+ * Description:
+ *
+ * $Id$
+ *
+ */
+
+#ifndef PERFECTCACHEMEMORY_H
+#define PERFECTCACHEMEMORY_H
+
+#include "Global.hh"
+#include "Map.hh"
+#include "AccessPermission.hh"
+#include "RubyConfig.hh"
+#include "Address.hh"
+#include "interface.hh"
+#include "AbstractChip.hh"
+
+// One line of the perfect cache: an access permission plus the
+// protocol-defined entry.  Permission starts at AccessPermission_NUM
+// (i.e. unset) until the line is allocated.
+template<class ENTRY>
+class PerfectCacheLineState {
+public:
+  PerfectCacheLineState() { m_permission = AccessPermission_NUM; }
+  AccessPermission m_permission;
+  ENTRY m_entry;
+};
+
+// An infinite-capacity, fully-associative "perfect" cache: every
+// allocated line stays resident, so cacheAvail() is always true and
+// cacheProbe() (eviction) is an error.  Backed by a Map from line
+// address to PerfectCacheLineState<ENTRY>.
+template<class ENTRY>
+class PerfectCacheMemory {
+public:
+
+  // Constructors
+  PerfectCacheMemory(AbstractChip* chip_ptr);
+
+  // Destructor
+  //~PerfectCacheMemory();
+
+  // Public Methods
+
+  static void printConfig(ostream& out);
+
+  // perform a cache access and see if we hit or not.  Return true on
+  // a hit.  (Not implemented for the perfect cache; aborts.)
+  bool tryCacheAccess(const CacheMsg& msg, bool& block_stc, ENTRY*& entry);
+
+  // tests to see if an address is present in the cache
+  bool isTagPresent(const Address& address) const;
+
+  // Returns true if there is:
+  //   a) a tag match on this address or there is
+  //   b) an Invalid line in the same cache "way"
+  // (always true for the perfect cache)
+  bool cacheAvail(const Address& address) const;
+
+  // find an Invalid entry and sets the tag appropriate for the address
+  void allocate(const Address& address);
+
+  void deallocate(const Address& address);
+
+  // Returns with the physical address of the conflicting cache line
+  // (never valid here; aborts)
+  Address cacheProbe(const Address& newAddress) const;
+
+  // looks an address up in the cache
+  ENTRY& lookup(const Address& address);
+  const ENTRY& lookup(const Address& address) const;
+
+  // Get/Set permission of cache block
+  AccessPermission getPermission(const Address& address) const;
+  void changePermission(const Address& address, AccessPermission new_perm);
+
+  // Print cache contents
+  void print(ostream& out) const;
+private:
+  // Private Methods
+
+  // Private copy constructor and assignment operator
+  PerfectCacheMemory(const PerfectCacheMemory& obj);
+  PerfectCacheMemory& operator=(const PerfectCacheMemory& obj);
+
+  // Data Members (m_prefix)
+  Map<Address, PerfectCacheLineState<ENTRY> > m_map;
+  AbstractChip* m_chip_ptr;
+};
+
+// Output operator declaration
+//ostream& operator<<(ostream& out, const PerfectCacheMemory<ENTRY>& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+template<class ENTRY>
+extern inline
+ostream& operator<<(ostream& out, const PerfectCacheMemory<ENTRY>& obj)
+{
+  obj.print(out);
+  out << flush;
+  return out;
+}
+
+
+// ****************************************************************
+
+template<class ENTRY>
+extern inline
+PerfectCacheMemory<ENTRY>::PerfectCacheMemory(AbstractChip* chip_ptr)
+{
+  m_chip_ptr = chip_ptr;
+}
+
+// STATIC METHODS
+
+template<class ENTRY>
+extern inline
+void PerfectCacheMemory<ENTRY>::printConfig(ostream& out)
+{
+}
+
+// PUBLIC METHODS
+
+// NOTE(review): not implemented -- ERROR_MSG is expected to abort, so
+// control never reaches the end of this non-void function.
+template<class ENTRY>
+extern inline
+bool PerfectCacheMemory<ENTRY>::tryCacheAccess(const CacheMsg& msg, bool& block_stc, ENTRY*& entry)
+{
+  ERROR_MSG("not implemented");
+}
+
+// tests to see if an address is present in the cache
+template<class ENTRY>
+extern inline
+bool PerfectCacheMemory<ENTRY>::isTagPresent(const Address& address) const
+{
+  return m_map.exist(line_address(address));
+}
+
+// A perfect cache never conflicts, so space is always available.
+template<class ENTRY>
+extern inline
+bool PerfectCacheMemory<ENTRY>::cacheAvail(const Address& address) const
+{
+  return true;
+}
+
+// find an Invalid or already allocated entry and sets the tag
+// appropriate for the address; the new line starts Busy with a
+// default-constructed ENTRY.
+template<class ENTRY>
+extern inline
+void PerfectCacheMemory<ENTRY>::allocate(const Address& address)
+{
+  PerfectCacheLineState<ENTRY> line_state;
+  line_state.m_permission = AccessPermission_Busy;
+  line_state.m_entry = ENTRY();
+  m_map.add(line_address(address), line_state);
+}
+
+// deallocate entry
+template<class ENTRY>
+extern inline
+void PerfectCacheMemory<ENTRY>::deallocate(const Address& address)
+{
+  m_map.erase(line_address(address));
+}
+
+// Returns with the physical address of the conflicting cache line.
+// NOTE(review): never valid for a perfect cache -- ERROR_MSG aborts.
+template<class ENTRY>
+extern inline
+Address PerfectCacheMemory<ENTRY>::cacheProbe(const Address& newAddress) const
+{
+  ERROR_MSG("cacheProbe called in perfect cache");
+}
+
+// looks an address up in the cache
+template<class ENTRY>
+extern inline
+ENTRY& PerfectCacheMemory<ENTRY>::lookup(const Address& address)
+{
+  return m_map.lookup(line_address(address)).m_entry;
+}
+
+// looks an address up in the cache (const version)
+template<class ENTRY>
+extern inline
+const ENTRY& PerfectCacheMemory<ENTRY>::lookup(const Address& address) const
+{
+  return m_map.lookup(line_address(address)).m_permission;
+}
+
+// Set the access permission of the (already allocated) line holding
+// 'address'.  The line must be present in the map.
+template<class ENTRY>
+extern inline
+void PerfectCacheMemory<ENTRY>::changePermission(const Address& address, AccessPermission new_perm)
+{
+  Address line_address = address;
+  line_address.makeLineAddress();
+  // Note: the previous value was read into an unused local; just assign.
+  m_map.lookup(line_address).m_permission = new_perm;
+}
+
+// Print cache contents (intentionally empty).
+template<class ENTRY>
+extern inline
+void PerfectCacheMemory<ENTRY>::print(ostream& out) const
+{
+}
+
+#endif //PERFECTCACHEMEMORY_H
diff --git a/src/mem/ruby/system/PersistentArbiter.cc b/src/mem/ruby/system/PersistentArbiter.cc
new file mode 100644
index 000000000..a0bbf6979
--- /dev/null
+++ b/src/mem/ruby/system/PersistentArbiter.cc
@@ -0,0 +1,165 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "PersistentArbiter.hh"
+#include "Address.hh"
+#include "AbstractChip.hh"
+#include "util.hh"
+
+// Build an arbiter with one (initially invalid) slot per processor in
+// the system -- more than this chip strictly needs, but simple.
+PersistentArbiter::PersistentArbiter(AbstractChip* chip_ptr)
+{
+  m_chip_ptr = chip_ptr;
+
+  // wastes entries, but who cares
+  m_entries.setSize(RubyConfig::numberOfProcessors());
+
+  for (int i = 0; i < m_entries.size(); i++) {
+    m_entries[i].valid = false;
+  }
+
+  m_busy = false;
+  m_locker = -1;
+
+}
+
+PersistentArbiter::~PersistentArbiter()
+{
+  m_chip_ptr = NULL;
+}
+
+
+// Register a persistent request from local node 'id' for 'addr'.
+// The slot for 'id' must currently be free.
+void PersistentArbiter::addLocker(NodeID id, Address addr, AccessType type) {
+  //cout << "Arbiter " << getArbiterId() << " adding locker " << id << " " << addr << endl;
+  assert(m_entries[id].valid == false);
+  m_entries[id].valid = true;
+  m_entries[id].address = addr;
+  m_entries[id].type = type;
+  m_entries[id].localId = id;
+
+}
+
+// Drop node 'id's request; the arbiter goes idle when no valid entries
+// remain.
+void PersistentArbiter::removeLocker(NodeID id) {
+  //cout << "Arbiter " << getArbiterId() << " removing locker " << id << " " << m_entries[id].address << endl;
+  assert(m_entries[id].valid == true);
+  m_entries[id].valid = false;
+
+  if (!lockersExist()) {
+    m_busy = false;
+  }
+}
+
+// True if any node numbered above 'id' also holds a valid request for
+// the same address.
+bool PersistentArbiter::successorRequestPresent(Address addr, NodeID id) {
+  for (int i = (id + 1); i < m_entries.size(); i++) {
+    if (m_entries[i].address == addr && m_entries[i].valid) {
+      //cout << "m_entries[" << id << ", address " << m_entries[id].address << " is equal to " << addr << endl;
+      return true;
+    }
+  }
+  return false;
+}
+
+// True if at least one slot holds a valid request.
+bool PersistentArbiter::lockersExist() {
+  for (int i = 0; i < m_entries.size(); i++) {
+    if (m_entries[i].valid == true) {
+      return true;
+    }
+  }
+  //cout << "no lockers found" << endl;
+  return false;
+}
+
+// Round-robin handoff: advance m_locker to the next valid entry after
+// the current one, wrapping to the start if necessary.  Requires at
+// least one locker to be present.
+void PersistentArbiter::advanceActiveLock() {
+  assert(lockersExist());
+
+  //cout << "arbiter advancing lock from " << m_locker;
+  m_busy = false;
+
+  // Scan forward from the slot after the current locker...
+  if (m_locker < (m_entries.size() - 1)) {
+    for (int i = (m_locker+1); i < m_entries.size(); i++) {
+      if (m_entries[i].valid == true) {
+        m_locker = i;
+        m_busy = true;
+        //cout << " to " << m_locker << endl;
+        return;
+      }
+    }
+  }
+
+  // ...then wrap around to the beginning.
+  if (!m_busy) {
+    for (int i = 0; i < m_entries.size(); i++) {
+      if (m_entries[i].valid == true) {
+        m_locker = i;
+        m_busy = true;
+        //cout << " to " << m_locker << endl;
+        return;
+      }
+    }
+
+    // lockersExist() above guarantees the wrap-around scan finds one.
+    // (Fixed: original read 'assert(m_busy)' with a missing semicolon,
+    // which does not compile.)
+    assert(m_busy);
+  }
+}
+
+// Address currently holding the persistent lock; the locker slot must
+// be valid.  (Fixed: the assert used '=' (assignment) instead of '==',
+// silently setting valid=true instead of checking it.)
+Address PersistentArbiter::getActiveLockAddress() {
+  assert( m_entries[m_locker].valid == true );
+  return m_entries[m_locker].address;
+}
+
+
+// Global ID of this arbiter: the first processor on this chip.
+NodeID PersistentArbiter::getArbiterId() {
+  return m_chip_ptr->getID()*RubyConfig::numberOfProcsPerChip();
+}
+
+// True while some entry holds the lock.
+bool PersistentArbiter::isBusy() {
+  return m_busy;
+}
+
+// Local node ID of the current lock holder; the locker slot must be
+// valid.  (Fixed: the assert used '=' (assignment) instead of '==',
+// silently setting valid=true instead of checking it.)
+NodeID PersistentArbiter::getActiveLocalId() {
+  assert( m_entries[m_locker].valid == true );
+  return m_entries[m_locker].localId;
+}
+
+// Remember the address of the most recently issued persistent request.
+void PersistentArbiter::setIssuedAddress(Address addr) {
+  m_issued_address = addr;
+}
+
+bool PersistentArbiter::isIssuedAddress(Address addr) {
+  return (m_issued_address == addr);
+}
+
+// Print all valid (localId, address) pairs.
+void PersistentArbiter::print(ostream& out) const {
+
+  out << "[";
+  for (int i = 0; i < m_entries.size(); i++) {
+    if (m_entries[i].valid == true) {
+      out << "( " << m_entries[i].localId << ", " << m_entries[i].address << ") ";
+    }
+  }
+  out << "]" << endl;
+
+}
diff --git a/src/mem/ruby/system/PersistentArbiter.hh b/src/mem/ruby/system/PersistentArbiter.hh
new file mode 100644
index 000000000..0654e3a9e
--- /dev/null
+++ b/src/mem/ruby/system/PersistentArbiter.hh
@@ -0,0 +1,108 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * PersistentArbiter.h
+ *
+ * Description:
+ *
+ * Used for hierarchical distributed persistent request scheme
+ *
+ */
+
+#ifndef PERSISTENTARBITER_H
+#define PERSISTENTARBITER_H
+
+#include "Global.hh"
+#include "Vector.hh"
+#include "AbstractChip.hh"
+#include "AccessPermission.hh"
+#include "AccessType.hh"
+#include "RubyConfig.hh"
+#include "Address.hh"
+#include "interface.hh"
+
+// One arbitration slot: the requesting local node, the line it wants,
+// and whether the request is for read or write access.
+struct ArbiterEntry {
+  bool valid;
+  Address address;
+  AccessType type;
+  NodeID localId;
+};
+
+// Round-robin arbiter for hierarchical distributed persistent requests:
+// one slot per processor, with the current winner tracked by m_locker
+// while m_busy is set.
+class PersistentArbiter {
+public:
+
+  // Constructors
+  PersistentArbiter(AbstractChip* chip_ptr);
+
+  // Destructor
+  ~PersistentArbiter();
+
+  // Public Methods
+
+  void addLocker(NodeID id, Address addr, AccessType type);
+  void removeLocker(NodeID id);
+  bool successorRequestPresent(Address addr, NodeID id);
+  bool lockersExist();
+  void advanceActiveLock();
+  Address getActiveLockAddress();
+  NodeID getArbiterId();
+  bool isBusy();
+
+  void setIssuedAddress(Address addr);
+  bool isIssuedAddress(Address addr);
+
+
+  Address getIssuedAddress() { return m_issued_address; }
+
+  static void printConfig(ostream& out) {}
+  void print(ostream& out) const;
+
+  NodeID getActiveLocalId();
+
+private:
+
+  Address m_issued_address;        // last issued persistent-request address
+  AbstractChip* m_chip_ptr;
+  int m_locker;                    // slot index of the current holder (-1 if none yet)
+  bool m_busy;                     // true while some entry holds the lock
+  Vector<ArbiterEntry> m_entries;  // one slot per processor
+};
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const PersistentArbiter& obj)
+{
+  obj.print(out);
+  out << flush;
+  return out;
+}
+
+
+#endif //PERSISTENTARBITER_H
diff --git a/src/mem/ruby/system/PersistentTable.cc b/src/mem/ruby/system/PersistentTable.cc
new file mode 100644
index 000000000..18c8b5736
--- /dev/null
+++ b/src/mem/ruby/system/PersistentTable.cc
@@ -0,0 +1,195 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ */
+
+#include "PersistentTable.hh"
+#include "NetDest.hh"
+#include "Map.hh"
+#include "Address.hh"
+#include "AbstractChip.hh"
+#include "util.hh"
+
+// randomize so that handoffs are not locality-aware
+// int persistent_randomize[] = {0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15};
+// int persistent_randomize[] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15};
+
+
+// Table entry: which machines are starving for the line, which were
+// marked (snapshotted) by markEntries(), and which requested write
+// access.
+class PersistentTableEntry {
+public:
+  NetDest m_starving;
+  NetDest m_marked;
+  NetDest m_request_to_write;
+};
+
+PersistentTable::PersistentTable(AbstractChip* chip_ptr, int version)
+{
+  m_chip_ptr = chip_ptr;
+  m_map_ptr = new Map<Address, PersistentTableEntry>;
+  m_version = version;
+}
+
+PersistentTable::~PersistentTable()
+{
+  delete m_map_ptr;
+  m_map_ptr = NULL;
+  m_chip_ptr = NULL;
+}
+
+// Record that 'locker' has a persistent request outstanding for
+// 'address': allocate the entry on first use and add the locker to the
+// starving set (and the write set for write requests).
+void PersistentTable::persistentRequestLock(const Address& address, MachineID locker, AccessType type)
+{
+
+  // if (locker == m_chip_ptr->getID() )
+  // cout << "Chip " << m_chip_ptr->getID() << ": " << llocker << " requesting lock for " << address << endl;
+
+  // MachineID locker = (MachineID) persistent_randomize[llocker];
+
+  assert(address == line_address(address));
+  if (!m_map_ptr->exist(address)) {
+    // Allocate if not present
+    PersistentTableEntry entry;
+    entry.m_starving.add(locker);
+    if (type == AccessType_Write) {
+      entry.m_request_to_write.add(locker);
+    }
+    m_map_ptr->add(address, entry);
+  } else {
+    PersistentTableEntry& entry = m_map_ptr->lookup(address);
+    assert(!(entry.m_starving.isElement(locker))); // Make sure we're not already in the locked set
+
+    entry.m_starving.add(locker);
+    if (type == AccessType_Write) {
+      entry.m_request_to_write.add(locker);
+    }
+    assert(entry.m_marked.isSubset(entry.m_starving));
+  }
+}
+
+// Remove 'unlocker' from all sets for 'address'; the entry is freed
+// once no machine remains starving for the line.
+void PersistentTable::persistentRequestUnlock(const Address& address, MachineID unlocker)
+{
+  // if (unlocker == m_chip_ptr->getID() )
+  // cout << "Chip " << m_chip_ptr->getID() << ": " << uunlocker << " requesting unlock for " << address << endl;
+
+  // MachineID unlocker = (MachineID) persistent_randomize[uunlocker];
+
+  assert(address == line_address(address));
+  assert(m_map_ptr->exist(address));
+  PersistentTableEntry& entry = m_map_ptr->lookup(address);
+  assert(entry.m_starving.isElement(unlocker)); // Make sure we're in the locked set
+  assert(entry.m_marked.isSubset(entry.m_starving));
+  entry.m_starving.remove(unlocker);
+  entry.m_marked.remove(unlocker);
+  entry.m_request_to_write.remove(unlocker);
+  assert(entry.m_marked.isSubset(entry.m_starving));
+
+  // Deallocate if empty
+  if (entry.m_starving.isEmpty()) {
+    assert(entry.m_marked.isEmpty());
+    m_map_ptr->erase(address);
+  }
+}
+
+// May this L1 (m_version) issue a new persistent request for 'address'?
+// Not while our own previous request is still outstanding, nor while
+// any entry for the line is marked.
+bool PersistentTable::okToIssueStarving(const Address& address) const
+{
+  assert(address == line_address(address));
+  if (!m_map_ptr->exist(address)) {
+    return true; // No entry present
+  } else if (m_map_ptr->lookup(address).m_starving.isElement( (MachineID) {MachineType_L1Cache, m_version})) {
+    return false; // We can't issue another lockdown until our previous unlock has occurred
+  } else {
+    return (m_map_ptr->lookup(address).m_marked.isEmpty());
+  }
+}
+
+// Winner of the persistent arbitration: the lowest-numbered starving L1.
+MachineID PersistentTable::findSmallest(const Address& address) const
+{
+  assert(address == line_address(address));
+  assert(m_map_ptr->exist(address));
+  const PersistentTableEntry& entry = m_map_ptr->lookup(address);
+  // cout << "Node " << m_chip_ptr->getID() << " returning " << persistent_randomize[entry.m_starving.smallestElement()] << " for findSmallest(" << address << ")" << endl;
+  // return (MachineID) persistent_randomize[entry.m_starving.smallestElement()];
+  return (MachineID) { MachineType_L1Cache, entry.m_starving.smallestElement() };
+}
+
+// Access type (read/write) requested by the arbitration winner.
+AccessType PersistentTable::typeOfSmallest(const Address& address) const
+{
+  assert(address == line_address(address));
+  assert(m_map_ptr->exist(address));
+  const PersistentTableEntry& entry = m_map_ptr->lookup(address);
+  if (entry.m_request_to_write.isElement((MachineID) {MachineType_L1Cache, entry.m_starving.smallestElement()})) {
+    return AccessType_Write;
+  } else {
+    return AccessType_Read;
+  }
+}
+
+// Snapshot the current starving set into the marked set.
+void PersistentTable::markEntries(const Address& address)
+{
+  assert(address == line_address(address));
+  if (m_map_ptr->exist(address)) {
+    PersistentTableEntry& entry = m_map_ptr->lookup(address);
+    assert(entry.m_marked.isEmpty()); // None should be marked
+    entry.m_marked = entry.m_starving; // Mark all the nodes currently in the table
+  }
+}
+
+bool PersistentTable::isLocked(const Address& address) const
+{
+  assert(address == line_address(address));
+  // If an entry is present, it must be locked
+  return (m_map_ptr->exist(address));
+}
+
+// Number of machines currently starving for 'address' (0 if no entry).
+// Uses a const reference: this is a const member function and the entry
+// is only read.
+int PersistentTable::countStarvingForAddress(const Address& address) const
+{
+  if (m_map_ptr->exist(address)) {
+    const PersistentTableEntry& entry = m_map_ptr->lookup(address);
+    return (entry.m_starving.count());
+  }
+  else {
+    return 0;
+  }
+}
+
+// Number of machines starving for 'address' with read (non-write)
+// requests (0 if no entry).  Removed an unused local accumulator and
+// made the entry reference const for const-correctness.
+int PersistentTable::countReadStarvingForAddress(const Address& address) const
+{
+  if (m_map_ptr->exist(address)) {
+    const PersistentTableEntry& entry = m_map_ptr->lookup(address);
+    return (entry.m_starving.count() - entry.m_request_to_write.count());
+  }
+  else {
+    return 0;
+  }
+}
+
+
diff --git a/src/mem/ruby/system/PersistentTable.hh b/src/mem/ruby/system/PersistentTable.hh
new file mode 100644
index 000000000..306f66e1d
--- /dev/null
+++ b/src/mem/ruby/system/PersistentTable.hh
@@ -0,0 +1,99 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ * Description:
+ *
+ */
+
+#ifndef PersistentTable_H
+#define PersistentTable_H
+
+#include "Global.hh"
+#include "MachineID.hh"
+#include "AccessType.hh"
+
+class AbstractChip;
+
+template <class KEY_TYPE, class VALUE_TYPE> class Map;
+class Address;
+class PersistentTableEntry;
+
+// Persistent-request table: maps a cache-line Address to the set of
+// MachineIDs starving for it.  Entry type and Map are forward declared;
+// the implementation lives in PersistentTable.cc.
+class PersistentTable {
+public:
+  // Constructors
+  PersistentTable(AbstractChip* chip_ptr, int version);
+
+  // Destructor
+  ~PersistentTable();
+
+  // Public Methods
+  // Record that 'locker' has a persistent request outstanding.
+  void persistentRequestLock(const Address& address, MachineID locker, AccessType type);
+  // Drop 'unlocker' from the entry, freeing it when empty.
+  void persistentRequestUnlock(const Address& address, MachineID unlocker);
+  bool okToIssueStarving(const Address& address) const;
+  // Lowest-numbered starving L1 wins the arbitration.
+  MachineID findSmallest(const Address& address) const;
+  AccessType typeOfSmallest(const Address& address) const;
+  void markEntries(const Address& address);
+  bool isLocked(const Address& addr) const;
+  int countStarvingForAddress(const Address& addr) const;
+  int countReadStarvingForAddress(const Address& addr) const;
+
+  static void printConfig(ostream& out) {}
+
+  void print(ostream& out) const;
+private:
+  // Private Methods
+
+  // Private copy constructor and assignment operator
+  PersistentTable(const PersistentTable& obj);
+  PersistentTable& operator=(const PersistentTable& obj);
+
+  // Data Members (m_prefix)
+  Map<Address, PersistentTableEntry>* m_map_ptr;
+  AbstractChip* m_chip_ptr;
+  int m_version;  // this L1's version (used as our MachineID number)
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const PersistentTable& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const PersistentTable& obj)
+{
+  obj.print(out);
+  out << flush;
+  return out;
+}
+
+#endif //PersistentTable_H
diff --git a/src/mem/ruby/system/PseudoLRUPolicy.hh b/src/mem/ruby/system/PseudoLRUPolicy.hh
new file mode 100644
index 000000000..9d4d13a95
--- /dev/null
+++ b/src/mem/ruby/system/PseudoLRUPolicy.hh
@@ -0,0 +1,110 @@
+
+#ifndef PSEUDOLRUPOLICY_H
+#define PSEUDOLRUPOLICY_H
+
+#include "AbstractReplacementPolicy.hh"
+
+/**
+ * Implementation of tree-based pseudo-LRU replacement
+ *
+ * Works for any associativity between 1 and 128.
+ *
+ * Also implements associativities that are not a power of 2 by
+ * ignoring paths that lead to a larger index (i.e. truncating the
+ * tree). Note that when this occurs, the algorithm becomes less
+ * fair, as it will favor indicies in the larger (by index) half of
+ * the associative set. This is most unfair when the nearest power of
+ * 2 is one below the associativy, and most fair when it is one above.
+ */
+
+class PseudoLRUPolicy : public AbstractReplacementPolicy {
+ public:
+
+  PseudoLRUPolicy(Index num_sets, Index assoc);
+  ~PseudoLRUPolicy();
+
+  // On a reference to (set, way): point every tree node on the root-to-leaf
+  // path away from this way and record the reference time.
+  void touch(Index set, Index way, Time time);
+  // Follow the tree pointers from the root to pick the pseudo-LRU way.
+  Index getVictim(Index set) const;
+
+ private:
+  unsigned int m_effective_assoc; /** nearest (to ceiling) power of 2 */
+  unsigned int m_num_levels; /** number of levels in the tree */
+  uint64* m_trees; /** bit representation of the trees, one for each set */
+};
+
+// Round the associativity up to a power of 2, compute the tree depth, and
+// zero-initialize one 64-bit tree per set.
+inline
+PseudoLRUPolicy::PseudoLRUPolicy(Index num_sets, Index assoc)
+  : AbstractReplacementPolicy(num_sets, assoc)
+{
+  int num_tree_nodes;  // NOTE(review): assigned below but never used
+
+  // associativity cannot exceed capacity of tree representation
+  // NOTE(review): sizeof(uint64)*4 == 32, yet the class comment above claims
+  // associativities up to 128 work; one of the two needs correcting
+  // (a 64-bit tree holds effective_assoc-1 bits, i.e. assoc <= 64).
+  assert(num_sets > 0 && assoc > 1 && assoc <= (Index) sizeof(uint64)*4);
+
+  m_trees = NULL;
+  m_num_levels = 0;
+
+  m_effective_assoc = 1;
+  while(m_effective_assoc < assoc){
+    m_effective_assoc <<= 1; // effective associativity is ceiling power of 2
+  }
+  assoc = m_effective_assoc;
+  // m_num_levels = log2(effective associativity)
+  while(true){
+    assoc /= 2;
+    if(!assoc) break;
+    m_num_levels++;
+  }
+  assert(m_num_levels < sizeof(unsigned int)*4);
+  num_tree_nodes = ((int)pow(2, m_num_levels))-1;
+  m_trees = new uint64[m_num_sets];
+  for(unsigned int i=0; i< m_num_sets; i++){
+    m_trees[i] = 0;
+  }
+}
+
+// Release the per-set tree array.  (delete[] NULL is safe, so the guard is
+// redundant but harmless.)
+inline
+PseudoLRUPolicy::~PseudoLRUPolicy()
+{
+  if(m_trees != NULL)
+    delete[] m_trees;
+}
+
+// Record a reference to (set, index): each bit of 'index', MSB first, picks
+// the child at each tree level; the node bit is set/cleared so the tree
+// points away from the referenced way.  Also stamps the last-reference time.
+// NOTE(review): the declaration names this parameter 'way'; unify the names.
+inline
+void PseudoLRUPolicy::touch(Index set, Index index, Time time){
+  assert(index >= 0 && index < m_assoc);
+  assert(set >= 0 && set < m_num_sets);
+
+  int tree_index = 0;
+  int node_val;
+  for(int i=m_num_levels -1; i>=0; i--){
+    node_val = (index >> i)&1;
+    if(node_val)
+      // NOTE(review): node_val and 1 are int; for tree_index >= 31 these
+      // shifts are undefined before widening to uint64 -- should be
+      // ((uint64)1 << tree_index) here and in the mask below.
+      m_trees[set] |= node_val << tree_index;
+    else
+      m_trees[set] &= ~(1 << tree_index);
+    // left child = 2i+1, right child = 2i+2 (implicit array-heap layout)
+    tree_index = node_val ? (tree_index*2)+2 : (tree_index*2)+1;
+  }
+  m_last_ref_ptr[set][index] = time;
+}
+
+// Walk the tree from the root following the direction each node points
+// (away from the recently used side), accumulating the victim way index.
+inline
+Index PseudoLRUPolicy::getVictim(Index set) const {
+  //  assert(m_assoc != 0);
+
+  Index index = 0;
+
+  int tree_index = 0;
+  int node_val;
+  for(unsigned int i=0;i<m_num_levels;i++){
+    node_val = (m_trees[set]>>tree_index)&1;
+    // a 0 bit means "victim is in the upper half" at this level
+    index += node_val?0:(m_effective_assoc >> (i+1));
+    tree_index = node_val? (tree_index*2)+1 : (tree_index*2)+2;
+  }
+  assert(index >= 0 && index < m_effective_assoc);
+
+  /* return either the found index or the max possible index */
+  /* NOTE: this is not a fair replacement when assoc is not a power of 2 */
+  return (index > (m_assoc-1)) ? m_assoc-1:index;
+}
+
+#endif // PSEUDOLRUPOLICY_H
diff --git a/src/mem/ruby/system/Sequencer.cc b/src/mem/ruby/system/Sequencer.cc
new file mode 100644
index 000000000..59441ff81
--- /dev/null
+++ b/src/mem/ruby/system/Sequencer.cc
@@ -0,0 +1,1161 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id: Sequencer.C 1.131 2006/11/06 17:41:01-06:00 bobba@gratiano.cs.wisc.edu $
+ *
+ */
+
+#include "Global.hh"
+#include "Sequencer.hh"
+#include "System.hh"
+#include "Protocol.hh"
+#include "Profiler.hh"
+#include "CacheMemory.hh"
+#include "RubyConfig.hh"
+//#include "Tracer.hh"
+#include "AbstractChip.hh"
+#include "Chip.hh"
+#include "Tester.hh"
+#include "SubBlock.hh"
+#include "Protocol.hh"
+#include "Map.hh"
+#include "interface.hh"
+//#include "XactCommitArbiter.hh"
+// #include "TransactionInterfaceManager.hh"
+//#include "TransactionVersionManager.hh"
+//#include "LazyTransactionVersionManager.hh"
+
+//#define XACT_MGR g_system_ptr->getChip(m_chip_ptr->getID())->getTransactionInterfaceManager(m_version)
+
+// Build one read-request table and one write-request table per SMT thread.
+// 'chip_ptr' is a non-owned back-pointer; 'version' selects the per-chip
+// sequencer instance.
+Sequencer::Sequencer(AbstractChip* chip_ptr, int version) {
+  m_chip_ptr = chip_ptr;
+  m_version = version;
+
+  m_deadlock_check_scheduled = false;
+  m_outstanding_count = 0;
+
+  int smt_threads = RubyConfig::numberofSMTThreads();
+  m_writeRequestTable_ptr = new Map<Address, CacheMsg>*[smt_threads];
+  m_readRequestTable_ptr = new Map<Address, CacheMsg>*[smt_threads];
+
+  for(int p=0; p < smt_threads; ++p){
+    m_writeRequestTable_ptr[p] = new Map<Address, CacheMsg>;
+    m_readRequestTable_ptr[p] = new Map<Address, CacheMsg>;
+  }
+
+}
+
+// Tear down the per-thread request tables and the arrays that hold them.
+// (The null checks are redundant -- delete on NULL is safe -- but harmless.)
+Sequencer::~Sequencer() {
+  int smt_threads = RubyConfig::numberofSMTThreads();
+  for(int i=0; i < smt_threads; ++i){
+    if(m_writeRequestTable_ptr[i]){
+      delete m_writeRequestTable_ptr[i];
+    }
+    if(m_readRequestTable_ptr[i]){
+      delete m_readRequestTable_ptr[i];
+    }
+  }
+  if(m_writeRequestTable_ptr){
+    delete [] m_writeRequestTable_ptr;
+  }
+  if(m_readRequestTable_ptr){
+    delete [] m_readRequestTable_ptr;
+  }
+}
+
+// Periodic deadlock sweep: warn/abort on any request outstanding longer than
+// g_DEADLOCK_THRESHOLD, verify the cached outstanding count, and reschedule
+// ourselves while requests remain.
+void Sequencer::wakeup() {
+  // Check for deadlock of any of the requests
+  Time current_time = g_eventQueue_ptr->getTime();
+  bool deadlock = false;  // NOTE(review): set but never read; ERROR_MSG
+                          // presumably aborts first -- confirm, else dead code
+
+  // Check across all outstanding requests
+  int smt_threads = RubyConfig::numberofSMTThreads();
+  int total_outstanding = 0;
+  for(int p=0; p < smt_threads; ++p){
+    Vector<Address> keys = m_readRequestTable_ptr[p]->keys();
+    for (int i=0; i<keys.size(); i++) {
+      CacheMsg& request = m_readRequestTable_ptr[p]->lookup(keys[i]);
+      if (current_time - request.getTime() >= g_DEADLOCK_THRESHOLD) {
+        WARN_MSG("Possible Deadlock detected");
+        WARN_EXPR(request);
+        WARN_EXPR(m_chip_ptr->getID());
+        WARN_EXPR(m_version);
+        WARN_EXPR(keys.size());
+        WARN_EXPR(current_time);
+        WARN_EXPR(request.getTime());
+        WARN_EXPR(current_time - request.getTime());
+        WARN_EXPR(*m_readRequestTable_ptr[p]);
+        ERROR_MSG("Aborting");
+        deadlock = true;
+      }
+    }
+
+    keys = m_writeRequestTable_ptr[p]->keys();
+    for (int i=0; i<keys.size(); i++) {
+      CacheMsg& request = m_writeRequestTable_ptr[p]->lookup(keys[i]);
+      if (current_time - request.getTime() >= g_DEADLOCK_THRESHOLD) {
+        WARN_MSG("Possible Deadlock detected");
+        WARN_EXPR(request);
+        WARN_EXPR(m_chip_ptr->getID());
+        WARN_EXPR(m_version);
+        WARN_EXPR(current_time);
+        WARN_EXPR(request.getTime());
+        WARN_EXPR(current_time - request.getTime());
+        WARN_EXPR(keys.size());
+        WARN_EXPR(*m_writeRequestTable_ptr[p]);
+        ERROR_MSG("Aborting");
+        deadlock = true;
+      }
+    }
+    total_outstanding += m_writeRequestTable_ptr[p]->size() + m_readRequestTable_ptr[p]->size();
+  } // across all request tables
+  // cross-check the incrementally maintained counter against the tables
+  assert(m_outstanding_count == total_outstanding);
+
+  if (m_outstanding_count > 0) { // If there are still outstanding requests, keep checking
+    g_eventQueue_ptr->scheduleEvent(this, g_DEADLOCK_THRESHOLD);
+  } else {
+    m_deadlock_check_scheduled = false;
+  }
+}
+
+//returns the total number of requests
+int Sequencer::getNumberOutstanding(){
+  return m_outstanding_count;
+}
+
+// returns the total number of demand requests
+// Walks every per-thread table.  For reads, transactional begin/commit
+// markers are excluded as well as prefetches; for writes only prefetches
+// are excluded.
+int Sequencer::getNumberOutstandingDemand(){
+  int smt_threads = RubyConfig::numberofSMTThreads();
+  int total_demand = 0;
+  for(int p=0; p < smt_threads; ++p){
+    Vector<Address> keys = m_readRequestTable_ptr[p]->keys();
+    for (int i=0; i< keys.size(); i++) {
+      CacheMsg& request = m_readRequestTable_ptr[p]->lookup(keys[i]);
+      // don't count transactional begin/commit requests
+      if(request.getType() != CacheRequestType_BEGIN_XACT && request.getType() != CacheRequestType_COMMIT_XACT){
+        if(request.getPrefetch() == PrefetchBit_No){
+          total_demand++;
+        }
+      }
+    }
+
+    keys = m_writeRequestTable_ptr[p]->keys();
+    for (int i=0; i< keys.size(); i++) {
+      CacheMsg& request = m_writeRequestTable_ptr[p]->lookup(keys[i]);
+      if(request.getPrefetch() == PrefetchBit_No){
+        total_demand++;
+      }
+    }
+  }
+
+  return total_demand;
+}
+
+// returns the number of outstanding requests marked as prefetches,
+// counting both read and write tables across all SMT threads
+int Sequencer::getNumberOutstandingPrefetch(){
+  int smt_threads = RubyConfig::numberofSMTThreads();
+  int total_prefetch = 0;
+  for(int p=0; p < smt_threads; ++p){
+    Vector<Address> keys = m_readRequestTable_ptr[p]->keys();
+    for (int i=0; i< keys.size(); i++) {
+      CacheMsg& request = m_readRequestTable_ptr[p]->lookup(keys[i]);
+      if(request.getPrefetch() == PrefetchBit_Yes){
+        total_prefetch++;
+      }
+    }
+
+    keys = m_writeRequestTable_ptr[p]->keys();
+    for (int i=0; i< keys.size(); i++) {
+      CacheMsg& request = m_writeRequestTable_ptr[p]->lookup(keys[i]);
+      if(request.getPrefetch() == PrefetchBit_Yes){
+        total_prefetch++;
+      }
+    }
+  }
+
+  return total_prefetch;
+}
+
+// True iff the outstanding request on 'lineaddr' carries the prefetch bit.
+// Asserts if no request matches.
+// NOTE(review): with NDEBUG the assert(0) compiles out and control falls off
+// the end of a bool function (undefined behavior) -- add a return or abort.
+bool Sequencer::isPrefetchRequest(const Address & lineaddr){
+  int smt_threads = RubyConfig::numberofSMTThreads();
+  for(int p=0; p < smt_threads; ++p){
+    // check load requests
+    Vector<Address> keys = m_readRequestTable_ptr[p]->keys();
+    for (int i=0; i< keys.size(); i++) {
+      CacheMsg& request = m_readRequestTable_ptr[p]->lookup(keys[i]);
+      if(line_address(request.getAddress()) == lineaddr){
+        if(request.getPrefetch() == PrefetchBit_Yes){
+          return true;
+        }
+        else{
+          return false;
+        }
+      }
+    }
+
+    // check store requests
+    keys = m_writeRequestTable_ptr[p]->keys();
+    for (int i=0; i< keys.size(); i++) {
+      CacheMsg& request = m_writeRequestTable_ptr[p]->lookup(keys[i]);
+      if(line_address(request.getAddress()) == lineaddr){
+        if(request.getPrefetch() == PrefetchBit_Yes){
+          return true;
+        }
+        else{
+          return false;
+        }
+      }
+    }
+  }
+  // we should've found a matching request
+  cout << "isRequestPrefetch() ERROR request NOT FOUND : " << lineaddr << endl;
+  printProgress(cout);
+  assert(0);
+}
+
+// Return the access mode of the outstanding request on this line for the
+// given thread.
+// NOTE(review): exist() is tested with line_address(addr) but lookup() uses
+// the raw addr -- if a caller ever passes a non-line address the lookup can
+// miss.  Same pattern in getLogicalAddressOfRequest below.
+// NOTE(review): the error path falls off the end of a value-returning
+// function; relies on ERROR_MSG aborting -- confirm.
+AccessModeType Sequencer::getAccessModeOfRequest(Address addr, int thread){
+  if(m_readRequestTable_ptr[thread]->exist(line_address(addr))){
+    CacheMsg& request = m_readRequestTable_ptr[thread]->lookup(addr);
+    return request.getAccessMode();
+  } else if(m_writeRequestTable_ptr[thread]->exist(line_address(addr))){
+    CacheMsg& request = m_writeRequestTable_ptr[thread]->lookup(addr);
+    return request.getAccessMode();
+  } else {
+    printProgress(cout);
+    ERROR_MSG("Request not found in RequestTables");
+  }
+}
+
+// Return the logical (virtual) address recorded with the outstanding
+// request on this line for the given thread; asserts if none exists.
+Address Sequencer::getLogicalAddressOfRequest(Address addr, int thread){
+  assert(thread >= 0);
+  if(m_readRequestTable_ptr[thread]->exist(line_address(addr))){
+    CacheMsg& request = m_readRequestTable_ptr[thread]->lookup(addr);
+    return request.getLogicalAddress();
+  } else if(m_writeRequestTable_ptr[thread]->exist(line_address(addr))){
+    CacheMsg& request = m_writeRequestTable_ptr[thread]->lookup(addr);
+    return request.getLogicalAddress();
+  } else {
+    printProgress(cout);
+    WARN_MSG("Request not found in RequestTables");
+    WARN_MSG(addr);
+    WARN_MSG(thread);
+    ASSERT(0);
+  }
+}
+
+// returns the ThreadID of the request
+// Scans all per-thread tables; asserts exactly one table holds 'addr'.
+int Sequencer::getRequestThreadID(const Address & addr){
+  int smt_threads = RubyConfig::numberofSMTThreads();
+  int thread = -1;
+  int num_found = 0;
+  for(int p=0; p < smt_threads; ++p){
+    if(m_readRequestTable_ptr[p]->exist(addr)){
+      num_found++;
+      thread = p;
+    }
+    if(m_writeRequestTable_ptr[p]->exist(addr)){
+      num_found++;
+      thread = p;
+    }
+  }
+  if(num_found != 1){
+    cout << "getRequestThreadID ERROR too many matching requests addr = " << addr << endl;
+    printProgress(cout);
+  }
+  ASSERT(num_found == 1);
+  ASSERT(thread != -1);
+
+  return thread;
+}
+
+// given a line address, return the request's physical address
+// Asserts exactly one outstanding request matches the line address.
+Address Sequencer::getRequestPhysicalAddress(const Address & lineaddr){
+  int smt_threads = RubyConfig::numberofSMTThreads();
+  Address physaddr;
+  int num_found = 0;
+  for(int p=0; p < smt_threads; ++p){
+    if(m_readRequestTable_ptr[p]->exist(lineaddr)){
+      num_found++;
+      physaddr = (m_readRequestTable_ptr[p]->lookup(lineaddr)).getAddress();
+    }
+    if(m_writeRequestTable_ptr[p]->exist(lineaddr)){
+      num_found++;
+      physaddr = (m_writeRequestTable_ptr[p]->lookup(lineaddr)).getAddress();
+    }
+  }
+  if(num_found != 1){
+    cout << "getRequestPhysicalAddress ERROR too many matching requests addr = " << lineaddr << endl;
+    printProgress(cout);
+  }
+  ASSERT(num_found == 1);
+
+  return physaddr;
+}
+
+// Dump every outstanding request (per thread, reads then writes) plus
+// demand/prefetch totals -- used by the deadlock/error paths above.
+void Sequencer::printProgress(ostream& out) const{
+
+  int total_demand = 0;
+  out << "Sequencer Stats Version " << m_version << endl;
+  out << "Current time = " << g_eventQueue_ptr->getTime() << endl;
+  out << "---------------" << endl;
+  out << "outstanding requests" << endl;
+
+  int smt_threads = RubyConfig::numberofSMTThreads();
+  for(int p=0; p < smt_threads; ++p){
+    Vector<Address> rkeys = m_readRequestTable_ptr[p]->keys();
+    int read_size = rkeys.size();
+    out << "proc " << m_chip_ptr->getID() << " thread " << p << " Read Requests = " << read_size << endl;
+    // print the request table
+    for(int i=0; i < read_size; ++i){
+      CacheMsg & request = m_readRequestTable_ptr[p]->lookup(rkeys[i]);
+      out << "\tRequest[ " << i << " ] = " << request.getType() << " Address " << rkeys[i] << " Posted " << request.getTime() << " PF " << request.getPrefetch() << endl;
+      if( request.getPrefetch() == PrefetchBit_No ){
+        total_demand++;
+      }
+    }
+
+    Vector<Address> wkeys = m_writeRequestTable_ptr[p]->keys();
+    int write_size = wkeys.size();
+    out << "proc " << m_chip_ptr->getID() << " thread " << p << " Write Requests = " << write_size << endl;
+    // print the request table
+    for(int i=0; i < write_size; ++i){
+      CacheMsg & request = m_writeRequestTable_ptr[p]->lookup(wkeys[i]);
+      out << "\tRequest[ " << i << " ] = " << request.getType() << " Address " << wkeys[i] << " Posted " << request.getTime() << " PF " << request.getPrefetch() << endl;
+      if( request.getPrefetch() == PrefetchBit_No ){
+        total_demand++;
+      }
+    }
+
+    out << endl;
+  }
+  out << "Total Number Outstanding: " << m_outstanding_count << endl;
+  out << "Total Number Demand     : " << total_demand << endl;
+  out << "Total Number Prefetches : " << m_outstanding_count - total_demand << endl;
+  out << endl;
+  out << endl;
+
+}
+
+// Report the consistency model (TSO vs SC) and the outstanding-request cap.
+void Sequencer::printConfig(ostream& out) {
+  if (TSO) {
+    out << "sequencer: Sequencer - TSO" << endl;
+  } else {
+    out << "sequencer: Sequencer - SC" << endl;
+  }
+  out << "  max_outstanding_requests: " << g_SEQUENCER_OUTSTANDING_REQUESTS << endl;
+}
+
+// True when no requests are outstanding in any table.
+bool Sequencer::empty() const {
+  return m_outstanding_count == 0;
+}
+
+// Insert the request on the correct request table. Return true if
+// the entry was already present.
+// Stores/atomics (and the transactional ST/LDX variants) go to the write
+// table; everything else to the read table.  Also kicks off the periodic
+// deadlock check the first time a request is inserted.
+bool Sequencer::insertRequest(const CacheMsg& request) {
+  int thread = request.getThreadID();
+  assert(thread >= 0);
+  int total_outstanding = 0;
+  int smt_threads = RubyConfig::numberofSMTThreads();
+  for(int p=0; p < smt_threads; ++p){
+    total_outstanding += m_writeRequestTable_ptr[p]->size() + m_readRequestTable_ptr[p]->size();
+  }
+  // cross-check the incrementally maintained counter on entry
+  assert(m_outstanding_count == total_outstanding);
+
+  // See if we should schedule a deadlock check
+  if (m_deadlock_check_scheduled == false) {
+    g_eventQueue_ptr->scheduleEvent(this, g_DEADLOCK_THRESHOLD);
+    m_deadlock_check_scheduled = true;
+  }
+
+  if ((request.getType() == CacheRequestType_ST) ||
+      (request.getType() == CacheRequestType_ST_XACT) ||
+      (request.getType() == CacheRequestType_LDX_XACT) ||
+      (request.getType() == CacheRequestType_ATOMIC)) {
+    // overwrite-in-place if a request for this line already exists
+    if (m_writeRequestTable_ptr[thread]->exist(line_address(request.getAddress()))) {
+      m_writeRequestTable_ptr[thread]->lookup(line_address(request.getAddress())) = request;
+      return true;
+    }
+    m_writeRequestTable_ptr[thread]->allocate(line_address(request.getAddress()));
+    m_writeRequestTable_ptr[thread]->lookup(line_address(request.getAddress())) = request;
+    m_outstanding_count++;
+  } else {
+    if (m_readRequestTable_ptr[thread]->exist(line_address(request.getAddress()))) {
+      m_readRequestTable_ptr[thread]->lookup(line_address(request.getAddress())) = request;
+      return true;
+    }
+    m_readRequestTable_ptr[thread]->allocate(line_address(request.getAddress()));
+    m_readRequestTable_ptr[thread]->lookup(line_address(request.getAddress())) = request;
+    m_outstanding_count++;
+  }
+
+  g_system_ptr->getProfiler()->sequencerRequests(m_outstanding_count);
+
+  total_outstanding = 0;
+  for(int p=0; p < smt_threads; ++p){
+    total_outstanding += m_writeRequestTable_ptr[p]->size() + m_readRequestTable_ptr[p]->size();
+  }
+
+  assert(m_outstanding_count == total_outstanding);
+  return false;
+}
+
+// Remove a completed request from the table it was inserted into (the
+// read/write classification must match insertRequest's) and decrement the
+// outstanding counter, cross-checking it against the tables on both sides.
+void Sequencer::removeRequest(const CacheMsg& request) {
+  int thread = request.getThreadID();
+  assert(thread >= 0);
+  int total_outstanding = 0;
+  int smt_threads = RubyConfig::numberofSMTThreads();
+  for(int p=0; p < smt_threads; ++p){
+    total_outstanding += m_writeRequestTable_ptr[p]->size() + m_readRequestTable_ptr[p]->size();
+  }
+  assert(m_outstanding_count == total_outstanding);
+
+  if ((request.getType() == CacheRequestType_ST) ||
+      (request.getType() == CacheRequestType_ST_XACT) ||
+      (request.getType() == CacheRequestType_LDX_XACT) ||
+      (request.getType() == CacheRequestType_ATOMIC)) {
+    m_writeRequestTable_ptr[thread]->deallocate(line_address(request.getAddress()));
+  } else {
+    m_readRequestTable_ptr[thread]->deallocate(line_address(request.getAddress()));
+  }
+  m_outstanding_count--;
+
+  total_outstanding = 0;
+  for(int p=0; p < smt_threads; ++p){
+    total_outstanding += m_writeRequestTable_ptr[p]->size() + m_readRequestTable_ptr[p]->size();
+  }
+  assert(m_outstanding_count == total_outstanding);
+}
+
+// writeCallback overload chain: the short forms locate the oldest waiting
+// SMT thread for the address and funnel into the 4-arg form, which removes
+// the request and invokes hitCallback.
+void Sequencer::writeCallback(const Address& address) {
+  DataBlock data;
+  writeCallback(address, data);
+}
+
+void Sequencer::writeCallback(const Address& address, DataBlock& data) {
+  // process oldest thread first
+  int thread = -1;
+  Time oldest_time = 0;
+  int smt_threads = RubyConfig::numberofSMTThreads();
+  for(int t=0; t < smt_threads; ++t){
+    if(m_writeRequestTable_ptr[t]->exist(address)){
+      CacheMsg & request = m_writeRequestTable_ptr[t]->lookup(address);
+      if(thread == -1 || (request.getTime() < oldest_time) ){
+        thread = t;
+        oldest_time = request.getTime();
+      }
+    }
+  }
+  // make sure we found an oldest thread
+  ASSERT(thread != -1);
+
+  // NOTE(review): this lookup result is never used
+  CacheMsg & request = m_writeRequestTable_ptr[thread]->lookup(address);
+
+  writeCallback(address, data, GenericMachineType_NULL, PrefetchBit_No, thread);
+}
+
+// NOTE(review): the 'pf' parameter is accepted but never used here.
+void Sequencer::writeCallback(const Address& address, DataBlock& data, GenericMachineType respondingMach, PrefetchBit pf, int thread) {
+
+  assert(address == line_address(address));
+  assert(thread >= 0);
+  assert(m_writeRequestTable_ptr[thread]->exist(line_address(address)));
+
+  writeCallback(address, data, respondingMach, thread);
+
+}
+
+// Final stage: pop the request off the write table (verifying it is a
+// store-class request) and hand it to hitCallback.
+void Sequencer::writeCallback(const Address& address, DataBlock& data, GenericMachineType respondingMach, int thread) {
+  assert(address == line_address(address));
+  assert(m_writeRequestTable_ptr[thread]->exist(line_address(address)));
+  CacheMsg request = m_writeRequestTable_ptr[thread]->lookup(address);
+  assert( request.getThreadID() == thread);
+  removeRequest(request);
+
+  assert((request.getType() == CacheRequestType_ST) ||
+         (request.getType() == CacheRequestType_ST_XACT) ||
+         (request.getType() == CacheRequestType_LDX_XACT) ||
+         (request.getType() == CacheRequestType_ATOMIC));
+
+  hitCallback(request, data, respondingMach, thread);
+
+}
+
+// readCallback overload chain -- mirror image of writeCallback above: the
+// short forms pick the oldest waiting thread, the 4-arg form removes the
+// request (verifying it is a load-class request) and calls hitCallback.
+void Sequencer::readCallback(const Address& address) {
+  DataBlock data;
+  readCallback(address, data);
+}
+
+void Sequencer::readCallback(const Address& address, DataBlock& data) {
+  // process oldest thread first
+  int thread = -1;
+  Time oldest_time = 0;
+  int smt_threads = RubyConfig::numberofSMTThreads();
+  for(int t=0; t < smt_threads; ++t){
+    if(m_readRequestTable_ptr[t]->exist(address)){
+      CacheMsg & request = m_readRequestTable_ptr[t]->lookup(address);
+      if(thread == -1 || (request.getTime() < oldest_time) ){
+        thread = t;
+        oldest_time = request.getTime();
+      }
+    }
+  }
+  // make sure we found an oldest thread
+  ASSERT(thread != -1);
+
+  // NOTE(review): this lookup result is never used
+  CacheMsg & request = m_readRequestTable_ptr[thread]->lookup(address);
+
+  readCallback(address, data, GenericMachineType_NULL, PrefetchBit_No, thread);
+}
+
+// NOTE(review): the 'pf' parameter is accepted but never used here.
+void Sequencer::readCallback(const Address& address, DataBlock& data, GenericMachineType respondingMach, PrefetchBit pf, int thread) {
+
+  assert(address == line_address(address));
+  assert(m_readRequestTable_ptr[thread]->exist(line_address(address)));
+
+  readCallback(address, data, respondingMach, thread);
+}
+
+void Sequencer::readCallback(const Address& address, DataBlock& data, GenericMachineType respondingMach, int thread) {
+  assert(address == line_address(address));
+  assert(m_readRequestTable_ptr[thread]->exist(line_address(address)));
+
+  CacheMsg request = m_readRequestTable_ptr[thread]->lookup(address);
+  assert( request.getThreadID() == thread );
+  removeRequest(request);
+
+  assert((request.getType() == CacheRequestType_LD) ||
+         (request.getType() == CacheRequestType_LD_XACT) ||
+         (request.getType() == CacheRequestType_IFETCH)
+         );
+
+  hitCallback(request, data, respondingMach, thread);
+}
+
+// Completion path shared by read/write callbacks: touch the L1 MRU state,
+// profile the miss latency, and deliver the data to the driver via a
+// SubBlock (for TSO stores, via the store buffer instead).
+void Sequencer::hitCallback(const CacheMsg& request, DataBlock& data, GenericMachineType respondingMach, int thread) {
+  int size = request.getSize();
+  Address request_address = request.getAddress();
+  Address request_logical_address = request.getLogicalAddress();
+  Address request_line_address = line_address(request_address);
+  CacheRequestType type = request.getType();
+  int threadID = request.getThreadID();
+  Time issued_time = request.getTime();
+  // NOTE(review): logical_proc_no is computed but never used below
+  int logical_proc_no = ((m_chip_ptr->getID() * RubyConfig::numberOfProcsPerChip()) + m_version) * RubyConfig::numberofSMTThreads() + threadID;
+
+  DEBUG_MSG(SEQUENCER_COMP, MedPrio, size);
+
+  // Set this cache entry to the most recently used
+  // (I-cache vs D-cache, and two-level vs unified, chosen by request type
+  // and protocol configuration)
+  if (type == CacheRequestType_IFETCH) {
+    if (Protocol::m_TwoLevelCache) {
+      if (m_chip_ptr->m_L1Cache_L1IcacheMemory_vec[m_version]->isTagPresent(request_line_address)) {
+        m_chip_ptr->m_L1Cache_L1IcacheMemory_vec[m_version]->setMRU(request_line_address);
+      }
+    }
+    else {
+      if (m_chip_ptr->m_L1Cache_cacheMemory_vec[m_version]->isTagPresent(request_line_address)) {
+        m_chip_ptr->m_L1Cache_cacheMemory_vec[m_version]->setMRU(request_line_address);
+      }
+    }
+  } else {
+    if (Protocol::m_TwoLevelCache) {
+      if (m_chip_ptr->m_L1Cache_L1DcacheMemory_vec[m_version]->isTagPresent(request_line_address)) {
+        m_chip_ptr->m_L1Cache_L1DcacheMemory_vec[m_version]->setMRU(request_line_address);
+      }
+    }
+    else {
+      if (m_chip_ptr->m_L1Cache_cacheMemory_vec[m_version]->isTagPresent(request_line_address)) {
+        m_chip_ptr->m_L1Cache_cacheMemory_vec[m_version]->setMRU(request_line_address);
+      }
+    }
+  }
+
+  assert(g_eventQueue_ptr->getTime() >= issued_time);
+  Time miss_latency = g_eventQueue_ptr->getTime() - issued_time;
+
+  if (PROTOCOL_DEBUG_TRACE) {
+    g_system_ptr->getProfiler()->profileTransition("Seq", (m_chip_ptr->getID()*RubyConfig::numberOfProcsPerChip()+m_version), -1, request.getAddress(), "", "Done", "",
+                                                   int_to_string(miss_latency)+" cycles "+GenericMachineType_to_string(respondingMach)+" "+CacheRequestType_to_string(request.getType())+" "+PrefetchBit_to_string(request.getPrefetch()));
+  }
+
+  DEBUG_MSG(SEQUENCER_COMP, MedPrio, request_address);
+  DEBUG_MSG(SEQUENCER_COMP, MedPrio, request.getPrefetch());
+  if (request.getPrefetch() == PrefetchBit_Yes) {
+    DEBUG_MSG(SEQUENCER_COMP, MedPrio, "return");
+    g_system_ptr->getProfiler()->swPrefetchLatency(miss_latency, type, respondingMach);
+    return; // Ignore the software prefetch, don't callback the driver
+  }
+
+  // Profile the miss latency for all non-zero demand misses
+  if (miss_latency != 0) {
+    g_system_ptr->getProfiler()->missLatency(miss_latency, type, respondingMach);
+
+    #if 0
+    uinteger_t tick = SIMICS_read_control_register(m_version, SIMICS_get_register_number(m_version, "tick"));
+    uinteger_t tick_cmpr = SIMICS_read_control_register(m_version, SIMICS_get_register_number(m_version, "tick_cmpr"));
+    uinteger_t stick = SIMICS_read_control_register(m_version, SIMICS_get_register_number(m_version, "stick"));
+    uinteger_t stick_cmpr = SIMICS_read_control_register(m_version, SIMICS_get_register_number(m_version, "stick_cmpr"));
+    cout << "END PROC " << m_version << hex << " tick = " << tick << " tick_cmpr = " << tick_cmpr << " stick = " << stick << " stick_cmpr = " << stick_cmpr << " cycle = "<< g_eventQueue_ptr->getTime() << dec << endl;
+    #endif
+
+  }
+
+  bool write =
+    (type == CacheRequestType_ST) ||
+    (type == CacheRequestType_ST_XACT) ||
+    (type == CacheRequestType_LDX_XACT) ||
+    (type == CacheRequestType_ATOMIC);
+
+  if (TSO && write) {
+    // under TSO, completed stores drain through the store buffer
+    m_chip_ptr->m_L1Cache_storeBuffer_vec[m_version]->callBack(line_address(request.getAddress()), data);
+  } else {
+
+    // Copy the correct bytes out of the cache line into the subblock
+    SubBlock subblock(request_address, request_logical_address, size);
+    subblock.mergeFrom(data); // copy the correct bytes from DataBlock in the SubBlock
+
+    // Scan the store buffer to see if there are any outstanding stores we need to collect
+    if (TSO) {
+      m_chip_ptr->m_L1Cache_storeBuffer_vec[m_version]->updateSubBlock(subblock);
+    }
+
+    // Call into the Driver (Tester or Simics) and let it read and/or modify the sub-block
+    g_system_ptr->getDriver()->hitCallback(m_chip_ptr->getID()*RubyConfig::numberOfProcsPerChip()+m_version, subblock, type, threadID);
+
+    // If the request was a Store or Atomic, apply the changes in the SubBlock to the DataBlock
+    // (This is only triggered for the non-TSO case)
+    if (write) {
+      assert(!TSO);
+      subblock.mergeTo(data); // copy the correct bytes from SubBlock into the DataBlock
+    }
+  }
+}
+
+// Transactional-memory conflict path for loads: pick the oldest waiting
+// thread for the address, then remove the request and notify the driver via
+// conflictCallback (no data is returned on a conflict).
+void Sequencer::readConflictCallback(const Address& address) {
+  // process oldest thread first
+  int thread = -1;
+  Time oldest_time = 0;
+  int smt_threads = RubyConfig::numberofSMTThreads();
+  for(int t=0; t < smt_threads; ++t){
+    if(m_readRequestTable_ptr[t]->exist(address)){
+      CacheMsg & request = m_readRequestTable_ptr[t]->lookup(address);
+      if(thread == -1 || (request.getTime() < oldest_time) ){
+        thread = t;
+        oldest_time = request.getTime();
+      }
+    }
+  }
+  // make sure we found an oldest thread
+  ASSERT(thread != -1);
+
+  // NOTE(review): this lookup result is never used
+  CacheMsg & request = m_readRequestTable_ptr[thread]->lookup(address);
+
+  readConflictCallback(address, GenericMachineType_NULL, thread);
+}
+
+void Sequencer::readConflictCallback(const Address& address, GenericMachineType respondingMach, int thread) {
+  assert(address == line_address(address));
+  assert(m_readRequestTable_ptr[thread]->exist(line_address(address)));
+
+  CacheMsg request = m_readRequestTable_ptr[thread]->lookup(address);
+  assert( request.getThreadID() == thread );
+  removeRequest(request);
+
+  assert((request.getType() == CacheRequestType_LD) ||
+         (request.getType() == CacheRequestType_LD_XACT) ||
+         (request.getType() == CacheRequestType_IFETCH)
+         );
+
+  conflictCallback(request, respondingMach, thread);
+}
+
+// Transactional-memory conflict path for stores -- mirror of
+// readConflictCallback above, operating on the write request tables.
+void Sequencer::writeConflictCallback(const Address& address) {
+  // process oldest thread first
+  int thread = -1;
+  Time oldest_time = 0;
+  int smt_threads = RubyConfig::numberofSMTThreads();
+  for(int t=0; t < smt_threads; ++t){
+    if(m_writeRequestTable_ptr[t]->exist(address)){
+      CacheMsg & request = m_writeRequestTable_ptr[t]->lookup(address);
+      if(thread == -1 || (request.getTime() < oldest_time) ){
+        thread = t;
+        oldest_time = request.getTime();
+      }
+    }
+  }
+  // make sure we found an oldest thread
+  ASSERT(thread != -1);
+
+  // NOTE(review): this lookup result is never used
+  CacheMsg & request = m_writeRequestTable_ptr[thread]->lookup(address);
+
+  writeConflictCallback(address, GenericMachineType_NULL, thread);
+}
+
+void Sequencer::writeConflictCallback(const Address& address, GenericMachineType respondingMach, int thread) {
+  assert(address == line_address(address));
+  assert(m_writeRequestTable_ptr[thread]->exist(line_address(address)));
+  CacheMsg request = m_writeRequestTable_ptr[thread]->lookup(address);
+  assert( request.getThreadID() == thread);
+  removeRequest(request);
+
+  assert((request.getType() == CacheRequestType_ST) ||
+         (request.getType() == CacheRequestType_ST_XACT) ||
+         (request.getType() == CacheRequestType_LDX_XACT) ||
+         (request.getType() == CacheRequestType_ATOMIC));
+
+  conflictCallback(request, respondingMach, thread);
+
+}
+
+// Notify the driver that a transactional request conflicted.  Like
+// hitCallback but with no data transfer and no MRU/miss-latency accounting
+// beyond the trace hook.  Only valid when XACT_MEMORY is enabled.
+void Sequencer::conflictCallback(const CacheMsg& request, GenericMachineType respondingMach, int thread) {
+  assert(XACT_MEMORY);
+  int size = request.getSize();
+  Address request_address = request.getAddress();
+  Address request_logical_address = request.getLogicalAddress();
+  Address request_line_address = line_address(request_address);
+  CacheRequestType type = request.getType();
+  int threadID = request.getThreadID();
+  Time issued_time = request.getTime();
+  // NOTE(review): logical_proc_no (and request_line_address above) are
+  // computed but never used in this function
+  int logical_proc_no = ((m_chip_ptr->getID() * RubyConfig::numberOfProcsPerChip()) + m_version) * RubyConfig::numberofSMTThreads() + threadID;
+
+  DEBUG_MSG(SEQUENCER_COMP, MedPrio, size);
+
+  assert(g_eventQueue_ptr->getTime() >= issued_time);
+  Time miss_latency = g_eventQueue_ptr->getTime() - issued_time;
+
+  if (PROTOCOL_DEBUG_TRACE) {
+    g_system_ptr->getProfiler()->profileTransition("Seq", (m_chip_ptr->getID()*RubyConfig::numberOfProcsPerChip()+m_version), -1, request.getAddress(), "", "Conflict", "",
+                                                   int_to_string(miss_latency)+" cycles "+GenericMachineType_to_string(respondingMach)+" "+CacheRequestType_to_string(request.getType())+" "+PrefetchBit_to_string(request.getPrefetch()));
+  }
+
+  DEBUG_MSG(SEQUENCER_COMP, MedPrio, request_address);
+  DEBUG_MSG(SEQUENCER_COMP, MedPrio, request.getPrefetch());
+  if (request.getPrefetch() == PrefetchBit_Yes) {
+    DEBUG_MSG(SEQUENCER_COMP, MedPrio, "return");
+    g_system_ptr->getProfiler()->swPrefetchLatency(miss_latency, type, respondingMach);
+    return; // Ignore the software prefetch, don't callback the driver
+  }
+
+  bool write =
+    (type == CacheRequestType_ST) ||
+    (type == CacheRequestType_ST_XACT) ||
+    (type == CacheRequestType_LDX_XACT) ||
+    (type == CacheRequestType_ATOMIC);
+
+  // Copy the correct bytes out of the cache line into the subblock
+  SubBlock subblock(request_address, request_logical_address, size);
+
+  // Call into the Driver (Tester or Simics)
+  g_system_ptr->getDriver()->conflictCallback(m_chip_ptr->getID()*RubyConfig::numberOfProcsPerChip()+m_version, subblock, type, threadID);
+
+  // If the request was a Store or Atomic, apply the changes in the SubBlock to the DataBlock
+  // (This is only triggered for the non-TSO case)
+  // NOTE(review): unlike hitCallback, no mergeTo happens here -- the write
+  // branch only asserts; presumably intentional on a conflict, but confirm.
+  if (write) {
+    assert(!TSO);
+  }
+}
+
+// Forward a debug-dump request to the driver (Tester or Simics).
+void Sequencer::printDebug(){
+  //notify driver of debug
+  g_system_ptr->getDriver()->printDebug();
+}
+
+// Returns true if the sequencer already has a load or store outstanding
+// Gatekeeper for makeRequest: rejects when the outstanding cap is reached,
+// when any thread already has a request on this line, or (under TSO) when
+// the store buffer is full.
+bool Sequencer::isReady(const CacheMsg& request) const {
+
+  if (m_outstanding_count >= g_SEQUENCER_OUTSTANDING_REQUESTS) {
+    //cout << "TOO MANY OUTSTANDING: " << m_outstanding_count << " " << g_SEQUENCER_OUTSTANDING_REQUESTS << " VER " << m_version << endl;
+    //printProgress(cout);
+    return false;
+  }
+  // NOTE(review): 'thread' and 'write' below are computed but never used
+  int thread = request.getThreadID();
+
+  // This code allows reads to be performed even when we have a write
+  // request outstanding for the line
+  bool write =
+    (request.getType() == CacheRequestType_ST) ||
+    (request.getType() == CacheRequestType_ST_XACT) ||
+    (request.getType() == CacheRequestType_LDX_XACT) ||
+    (request.getType() == CacheRequestType_ATOMIC);
+
+  // LUKE - disallow more than one request type per address
+  //        INVARIANT: at most one request type per address, per processor
+  int smt_threads = RubyConfig::numberofSMTThreads();
+  for(int p=0; p < smt_threads; ++p){
+    if( m_writeRequestTable_ptr[p]->exist(line_address(request.getAddress())) ||
+        m_readRequestTable_ptr[p]->exist(line_address(request.getAddress())) ){
+      //cout << "OUTSTANDING REQUEST EXISTS " << p << " VER " << m_version << endl;
+      //printProgress(cout);
+      return false;
+    }
+  }
+
+  if (TSO) {
+    return m_chip_ptr->m_L1Cache_storeBuffer_vec[m_version]->isReady();
+  }
+  return true;
+}
+
+// Called by Driver (Simics or Tester).
+// Under TSO, non-prefetch stores/atomics are absorbed by the store
+// buffer; everything else goes through doRequest() (fast path or issue).
+void Sequencer::makeRequest(const CacheMsg& request) {
+  //assert(isReady(request));
+  bool write = (request.getType() == CacheRequestType_ST) ||
+    (request.getType() == CacheRequestType_ST_XACT) ||
+    (request.getType() == CacheRequestType_LDX_XACT) ||
+    (request.getType() == CacheRequestType_ATOMIC);
+
+  if (TSO && (request.getPrefetch() == PrefetchBit_No) && write) {
+    assert(m_chip_ptr->m_L1Cache_storeBuffer_vec[m_version]->isReady());
+    m_chip_ptr->m_L1Cache_storeBuffer_vec[m_version]->insertStore(request);
+    return;
+  }
+
+  // The fast-path hit/miss result is intentionally ignored here.
+  doRequest(request);
+}
+
+// Try to satisfy the request without entering the protocol: first the
+// single-cycle L1 fast path, then (under TSO) the store buffer; on a
+// miss the request is issued to the cache controller.
+// Returns true on a fast-path hit serviced immediately, false otherwise.
+bool Sequencer::doRequest(const CacheMsg& request) {
+  bool hit = false;
+  // Check the fast path
+  DataBlock* data_ptr;
+
+  int thread = request.getThreadID();
+
+  hit = tryCacheAccess(line_address(request.getAddress()),
+                       request.getType(),
+                       request.getProgramCounter(),
+                       request.getAccessMode(),
+                       request.getSize(),
+                       data_ptr);
+
+  // Instruction fetches always take the fast path; data accesses only
+  // when the single-cycle D-cache fast path has not been disabled.
+  if (hit && (request.getType() == CacheRequestType_IFETCH || !REMOVE_SINGLE_CYCLE_DCACHE_FAST_PATH) ) {
+    DEBUG_MSG(SEQUENCER_COMP, MedPrio, "Fast path hit");
+    hitCallback(request, *data_ptr, GenericMachineType_L1Cache, thread);
+    return true;
+  }
+
+  // Disabled Simics tick-register diagnostics; kept for reference.
+  #if 0
+  uinteger_t tick = SIMICS_read_control_register(m_version, SIMICS_get_register_number(m_version, "tick"));
+  uinteger_t tick_cmpr = SIMICS_read_control_register(m_version, SIMICS_get_register_number(m_version, "tick_cmpr"));
+  uinteger_t stick = SIMICS_read_control_register(m_version, SIMICS_get_register_number(m_version, "stick"));
+  uinteger_t stick_cmpr = SIMICS_read_control_register(m_version, SIMICS_get_register_number(m_version, "stick_cmpr"));
+  cout << "START PROC " << m_version << hex << " tick = " << tick << " tick_cmpr = " << tick_cmpr << " stick = " << stick << " stick_cmpr = " << stick_cmpr << " cycle = "<< g_eventQueue_ptr->getTime() << dec << endl;;
+  #endif
+
+  if (TSO && (request.getType() == CacheRequestType_LD || request.getType() == CacheRequestType_IFETCH)) {
+
+    // See if we can satisfy the load entirely from the store buffer
+    SubBlock subblock(line_address(request.getAddress()), request.getSize());
+    if (m_chip_ptr->m_L1Cache_storeBuffer_vec[m_version]->trySubBlock(subblock)) {
+      DataBlock dummy;
+      hitCallback(request, dummy, GenericMachineType_NULL, thread);  // Call with an 'empty' datablock, since the data is in the store buffer
+      return true;
+    }
+  }
+
+  DEBUG_MSG(SEQUENCER_COMP, MedPrio, "Fast path miss");
+  issueRequest(request);
+  return hit;
+}
+
+// Record the request in the request tables and, if it was not already
+// outstanding (i.e. not coalesced with an in-flight request for the same
+// line), enqueue a line-aligned copy on the L1 mandatory queue after the
+// sequencer-to-controller latency.
+void Sequencer::issueRequest(const CacheMsg& request) {
+  // insertRequest returns true when a request for this line was already
+  // outstanding, in which case no new protocol message is needed.
+  bool found = insertRequest(request);
+
+  if (!found) {
+    CacheMsg msg = request;
+    msg.getAddress() = line_address(request.getAddress()); // Make line address
+
+    // Fast Path L1 misses are profiled here - all non-fast path misses are profiled within the generated protocol code
+    if (!REMOVE_SINGLE_CYCLE_DCACHE_FAST_PATH) {
+      g_system_ptr->getProfiler()->addPrimaryStatSample(msg, m_chip_ptr->getID());
+    }
+
+    if (PROTOCOL_DEBUG_TRACE) {
+      g_system_ptr->getProfiler()->profileTransition("Seq", (m_chip_ptr->getID()*RubyConfig::numberOfProcsPerChip() + m_version), -1, msg.getAddress(),"", "Begin", "", CacheRequestType_to_string(request.getType()));
+    }
+
+#if 0
+    // Commented out by nate binkert because I removed the trace stuff
+    if (g_system_ptr->getTracer()->traceEnabled()) {
+      g_system_ptr->getTracer()->traceRequest((m_chip_ptr->getID()*RubyConfig::numberOfProcsPerChip()+m_version), msg.getAddress(), msg.getProgramCounter(),
+                                              msg.getType(), g_eventQueue_ptr->getTime());
+    }
+#endif
+
+    Time latency = 0;  // initialized to a null value
+
+    latency = SEQUENCER_TO_CONTROLLER_LATENCY;
+
+    // Send the message to the cache controller
+    assert(latency > 0);
+    m_chip_ptr->m_L1Cache_mandatoryQueue_vec[m_version]->enqueue(msg, latency);
+
+  }  // !found
+}
+
+// Probe the appropriate L1 cache (I-cache for IFETCH, D-cache otherwise;
+// the unified cache when the protocol is single-level) for the line.
+// On a hit, data_ptr is set to point at the cached DataBlock.
+// NOTE(review): pc, access_mode and size are currently unused here;
+// they are part of the signature for symmetry with the request path.
+bool Sequencer::tryCacheAccess(const Address& addr, CacheRequestType type,
+                               const Address& pc, AccessModeType access_mode,
+                               int size, DataBlock*& data_ptr) {
+  if (type == CacheRequestType_IFETCH) {
+    if (Protocol::m_TwoLevelCache) {
+      return m_chip_ptr->m_L1Cache_L1IcacheMemory_vec[m_version]->tryCacheAccess(line_address(addr), type, data_ptr);
+    }
+    else {
+      return m_chip_ptr->m_L1Cache_cacheMemory_vec[m_version]->tryCacheAccess(line_address(addr), type, data_ptr);
+    }
+  } else {
+    if (Protocol::m_TwoLevelCache) {
+      return m_chip_ptr->m_L1Cache_L1DcacheMemory_vec[m_version]->tryCacheAccess(line_address(addr), type, data_ptr);
+    }
+    else {
+      return m_chip_ptr->m_L1Cache_cacheMemory_vec[m_version]->tryCacheAccess(line_address(addr), type, data_ptr);
+    }
+  }
+}
+
+// Reset the issue time of any outstanding load/store request for this
+// line so deadlock detection and latency profiling restart from now.
+// Supervisor-mode requests are reported but still reset.
+void Sequencer::resetRequestTime(const Address& addr, int thread){
+  assert(thread >= 0);
+  //reset both load and store requests, if they exist
+  if(m_readRequestTable_ptr[thread]->exist(line_address(addr))){
+    // Look up with the same line address used for the existence check;
+    // the request tables are keyed by line address (see getReadRequest).
+    CacheMsg& request = m_readRequestTable_ptr[thread]->lookup(line_address(addr));
+    if( request.m_AccessMode != AccessModeType_UserMode){
+      cout << "resetRequestType ERROR read request addr = " << addr << " thread = "<< thread << " is SUPERVISOR MODE" << endl;
+      printProgress(cout);
+    }
+    //ASSERT(request.m_AccessMode == AccessModeType_UserMode);
+    request.setTime(g_eventQueue_ptr->getTime());
+  }
+  if(m_writeRequestTable_ptr[thread]->exist(line_address(addr))){
+    CacheMsg& request = m_writeRequestTable_ptr[thread]->lookup(line_address(addr));
+    if( request.m_AccessMode != AccessModeType_UserMode){
+      cout << "resetRequestType ERROR write request addr = " << addr << " thread = "<< thread << " is SUPERVISOR MODE" << endl;
+      printProgress(cout);
+    }
+    //ASSERT(request.m_AccessMode == AccessModeType_UserMode);
+    request.setTime(g_eventQueue_ptr->getTime());
+  }
+}
+
+// Remove an outstanding load request for this line/thread from the read
+// request table (asserts if no such request exists).
+void Sequencer::removeLoadRequest(const Address & addr, int thread){
+  removeRequest(getReadRequest(addr, thread));
+}
+
+// Remove an outstanding store request for this line/thread from the
+// write request table (asserts if no such request exists).
+void Sequencer::removeStoreRequest(const Address & addr, int thread){
+  removeRequest(getWriteRequest(addr, thread));
+}
+
+// returns the read CacheMsg
+// addr must already be line-aligned; the read table is keyed by line
+// address and the entry must exist.
+CacheMsg & Sequencer::getReadRequest( const Address & addr, int thread ){
+  Address temp = addr;
+  assert(thread >= 0);
+  assert(temp == line_address(temp));
+  assert(m_readRequestTable_ptr[thread]->exist(addr));
+  return m_readRequestTable_ptr[thread]->lookup(addr);
+}
+
+// Returns the outstanding write CacheMsg for a line-aligned address;
+// mirrors getReadRequest for the write request table.
+CacheMsg & Sequencer::getWriteRequest( const Address & addr, int thread){
+  Address temp = addr;
+  assert(thread >= 0);
+  assert(temp == line_address(temp));
+  assert(m_writeRequestTable_ptr[thread]->exist(addr));
+  return m_writeRequestTable_ptr[thread]->lookup(addr);
+}
+
+// Debug dump: chip ID, outstanding-request count, and the per-SMT-thread
+// read/write request tables.
+void Sequencer::print(ostream& out) const {
+  out << "[Sequencer: " << m_chip_ptr->getID()
+      << ", outstanding requests: " << m_outstanding_count;
+
+  int smt_threads = RubyConfig::numberofSMTThreads();
+  for(int p=0; p < smt_threads; ++p){
+    out << ", read request table[ " << p << " ]: " << *m_readRequestTable_ptr[p]
+        << ", write request table[ " << p << " ]: " << *m_writeRequestTable_ptr[p];
+  }
+  out << "]";
+}
+
+// this can be called from setState whenever coherence permissions are upgraded
+// when invoked, coherence violations will be checked for the given block
+// Compiles to a no-op unless CHECK_COHERENCE is defined at build time.
+void Sequencer::checkCoherence(const Address& addr) {
+#ifdef CHECK_COHERENCE
+  g_system_ptr->checkGlobalCoherenceInvariant(addr);
+#endif
+}
+
+// Read size_in_bytes at addr into value on behalf of the driver/debugger.
+// Search order: Simics physical memory (when g_SIMICS), otherwise L1I,
+// L1D, the shared L2 (CMP only), and finally the directory's data block.
+// NOTE(review): the return value does not reflect whether the line was
+// found in a cache - the Simics path returns false (after filling value)
+// and the Ruby path always returns true; the local 'found' flag is
+// computed but never returned.  setRubyMemoryValue asserts on this
+// return, so changing it would require a caller audit.
+bool Sequencer::getRubyMemoryValue(const Address& addr, char* value,
+                                   unsigned int size_in_bytes ) {
+  if(g_SIMICS){
+    for(unsigned int i=0; i < size_in_bytes; i++) {
+      value[i] = SIMICS_read_physical_memory( m_chip_ptr->getID()*RubyConfig::numberOfProcsPerChip()+m_version,
+                                              addr.getAddress() + i, 1 );
+    }
+    return false; // Do nothing?
+  } else {
+    bool found = false;
+    const Address lineAddr = line_address(addr);
+    DataBlock data;
+    PhysAddress paddr(addr);
+    DataBlock* dataPtr = &data;
+    Chip* n = dynamic_cast<Chip*>(m_chip_ptr);
+    // LUKE - use variable names instead of macros
+    assert(n->m_L1Cache_L1IcacheMemory_vec[m_version] != NULL);
+    assert(n->m_L1Cache_L1DcacheMemory_vec[m_version] != NULL);
+
+    // Locate the L2 bank responsible for this address on this chip.
+    MachineID l2_mach = map_L2ChipId_to_L2Cache(addr, m_chip_ptr->getID() );
+    int l2_ver = l2_mach.num%RubyConfig::numberOfL2CachePerChip();
+
+    if (Protocol::m_TwoLevelCache) {
+      if(Protocol::m_CMP){
+        assert(n->m_L2Cache_L2cacheMemory_vec[l2_ver] != NULL);
+      }
+      else{
+        assert(n->m_L1Cache_cacheMemory_vec[m_version] != NULL);
+      }
+    }
+
+    if (n->m_L1Cache_L1IcacheMemory_vec[m_version]->tryCacheAccess(lineAddr, CacheRequestType_IFETCH, dataPtr)){
+      n->m_L1Cache_L1IcacheMemory_vec[m_version]->getMemoryValue(addr, value, size_in_bytes);
+      found = true;
+    } else if (n->m_L1Cache_L1DcacheMemory_vec[m_version]->tryCacheAccess(lineAddr, CacheRequestType_LD, dataPtr)){
+      n->m_L1Cache_L1DcacheMemory_vec[m_version]->getMemoryValue(addr, value, size_in_bytes);
+      found = true;
+    } else if (Protocol::m_CMP && n->m_L2Cache_L2cacheMemory_vec[l2_ver]->tryCacheAccess(lineAddr, CacheRequestType_LD, dataPtr)){
+      n->m_L2Cache_L2cacheMemory_vec[l2_ver]->getMemoryValue(addr, value, size_in_bytes);
+      found = true;
+    // } else if (n->TBE_TABLE_MEMBER_VARIABLE->isPresent(lineAddr)){
+//       ASSERT(n->TBE_TABLE_MEMBER_VARIABLE->isPresent(lineAddr));
+//       L1Cache_TBE tbeEntry = n->TBE_TABLE_MEMBER_VARIABLE->lookup(lineAddr);
+
+//       int offset = addr.getOffset();
+//       for(int i=0; i<size_in_bytes; ++i){
+//         value[i] = tbeEntry.getDataBlk().getByte(offset + i);
+//       }
+
+//       found = true;
+    } else {
+      // Address not found in any cache: fall back to the owning
+      // directory's copy of the data block.
+      //cout << "  " << m_chip_ptr->getID() << " NOT IN CACHE, Value at Directory is: " << (int) value[0] << endl;
+      n = dynamic_cast<Chip*>(g_system_ptr->getChip(map_Address_to_DirectoryNode(addr)/RubyConfig::numberOfDirectoryPerChip()));
+      int dir_version = map_Address_to_DirectoryNode(addr)%RubyConfig::numberOfDirectoryPerChip();
+      for(unsigned int i=0; i<size_in_bytes; ++i){
+        int offset = addr.getOffset();
+        value[i] = n->m_Directory_directory_vec[dir_version]->lookup(lineAddr).m_DataBlk.getByte(offset + i);
+      }
+      // Address not found
+      //WARN_MSG("Couldn't find address");
+      //WARN_EXPR(addr);
+      found = false;
+    }
+    return true;
+  }
+}
+
+// Write size_in_bytes at addr into the coherent memory image: update the
+// first cache level that holds the line (L1I, L1D, shared L2 in CMP),
+// falling back to the owning directory's data block.  After a cache
+// update the value is read back via getRubyMemoryValue as a sanity
+// check.  Returns true on the Ruby path; under Simics this is a no-op
+// that returns false.
+bool Sequencer::setRubyMemoryValue(const Address& addr, char *value,
+                                   unsigned int size_in_bytes) {
+  char test_buffer[64];
+
+  if(g_SIMICS){
+    return false; // Do nothing?
+  } else {
+    // idea here is that coherent cache should find the
+    // latest data, the update it
+    bool found = false;
+    const Address lineAddr = line_address(addr);
+    PhysAddress paddr(addr);
+    DataBlock data;
+    DataBlock* dataPtr = &data;
+    Chip* n = dynamic_cast<Chip*>(m_chip_ptr);
+
+    MachineID l2_mach = map_L2ChipId_to_L2Cache(addr, m_chip_ptr->getID() );
+    int l2_ver = l2_mach.num%RubyConfig::numberOfL2CachePerChip();
+    // LUKE - use variable names instead of macros
+    //cout << "number of L2caches per chip = " << RubyConfig::numberOfL2CachePerChip(m_version) << endl;
+    //cout << "L1I cache vec size = " << n->m_L1Cache_L1IcacheMemory_vec.size() << endl;
+    //cout << "L1D cache vec size = " << n->m_L1Cache_L1DcacheMemory_vec.size() << endl;
+    //cout << "L1cache_cachememory size = " << n->m_L1Cache_cacheMemory_vec.size() << endl;
+    //cout << "L1cache_l2cachememory size = " << n->m_L1Cache_L2cacheMemory_vec.size() << endl;
+    //  if (Protocol::m_TwoLevelCache) {
+//       if(Protocol::m_CMP){
+//         cout << "CMP L2 cache vec size = " << n->m_L2Cache_L2cacheMemory_vec.size() << endl;
+//       }
+//       else{
+//        cout << "L2 cache vec size = " << n->m_L1Cache_cacheMemory_vec.size() << endl;
+//       }
+//     }
+
+    assert(n->m_L1Cache_L1IcacheMemory_vec[m_version] != NULL);
+    assert(n->m_L1Cache_L1DcacheMemory_vec[m_version] != NULL);
+    if (Protocol::m_TwoLevelCache) {
+      if(Protocol::m_CMP){
+        assert(n->m_L2Cache_L2cacheMemory_vec[l2_ver] != NULL);
+      }
+      else{
+        assert(n->m_L1Cache_cacheMemory_vec[m_version] != NULL);
+      }
+    }
+
+    if (n->m_L1Cache_L1IcacheMemory_vec[m_version]->tryCacheAccess(lineAddr, CacheRequestType_IFETCH, dataPtr)){
+      n->m_L1Cache_L1IcacheMemory_vec[m_version]->setMemoryValue(addr, value, size_in_bytes);
+      found = true;
+    } else if (n->m_L1Cache_L1DcacheMemory_vec[m_version]->tryCacheAccess(lineAddr, CacheRequestType_LD, dataPtr)){
+      n->m_L1Cache_L1DcacheMemory_vec[m_version]->setMemoryValue(addr, value, size_in_bytes);
+      found = true;
+    } else if (Protocol::m_CMP && n->m_L2Cache_L2cacheMemory_vec[l2_ver]->tryCacheAccess(lineAddr, CacheRequestType_LD, dataPtr)){
+      n->m_L2Cache_L2cacheMemory_vec[l2_ver]->setMemoryValue(addr, value, size_in_bytes);
+      found = true;
+    //   } else if (n->TBE_TABLE_MEMBER_VARIABLE->isTagPresent(lineAddr)){
+//       L1Cache_TBE& tbeEntry = n->TBE_TABLE_MEMBER_VARIABLE->lookup(lineAddr);
+//       DataBlock tmpData;
+//       int offset = addr.getOffset();
+//       for(int i=0; i<size_in_bytes; ++i){
+//         tmpData.setByte(offset + i, value[i]);
+//       }
+//       tbeEntry.setDataBlk(tmpData);
+//       tbeEntry.setDirty(true);
+    } else {
+      // Address not found in any cache: write the owning directory's copy.
+      n = dynamic_cast<Chip*>(g_system_ptr->getChip(map_Address_to_DirectoryNode(addr)/RubyConfig::numberOfDirectoryPerChip()));
+      int dir_version = map_Address_to_DirectoryNode(addr)%RubyConfig::numberOfDirectoryPerChip();
+      for(unsigned int i=0; i<size_in_bytes; ++i){
+        int offset = addr.getOffset();
+        n->m_Directory_directory_vec[dir_version]->lookup(lineAddr).m_DataBlk.setByte(offset + i, value[i]);
+      }
+      found = false;
+    }
+
+    // Read-back check: the write must be visible through the read path.
+    // (Only compares the first byte; test_buffer is capped at 64 bytes.)
+    if (found){
+      found = getRubyMemoryValue(addr, test_buffer, size_in_bytes);
+      assert(found);
+      if(value[0] != test_buffer[0]){
+        WARN_EXPR((int) value[0]);
+        WARN_EXPR((int) test_buffer[0]);
+        ERROR_MSG("setRubyMemoryValue failed to set value.");
+      }
+    }
+
+    return true;
+  }
+}
diff --git a/src/mem/ruby/system/Sequencer.hh b/src/mem/ruby/system/Sequencer.hh
new file mode 100644
index 000000000..5dd674655
--- /dev/null
+++ b/src/mem/ruby/system/Sequencer.hh
@@ -0,0 +1,170 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id: Sequencer.h 1.70 2006/09/27 14:56:41-05:00 bobba@s1-01.cs.wisc.edu $
+ *
+ * Description:
+ *
+ */
+
+#ifndef SEQUENCER_H
+#define SEQUENCER_H
+
+#include "Global.hh"
+#include "RubyConfig.hh"
+#include "Consumer.hh"
+#include "CacheRequestType.hh"
+#include "AccessModeType.hh"
+#include "GenericMachineType.hh"
+#include "PrefetchBit.hh"
+#include "Map.hh"
+
+class DataBlock;
+class AbstractChip;
+class CacheMsg;
+class Address;
+class MachineID;
+
+// The Sequencer is the interface between the driver (Simics or the
+// Tester) and the Ruby cache hierarchy: it accepts load/store requests,
+// tracks them in per-SMT-thread read/write request tables, services
+// fast-path L1 hits, and fields the callbacks issued when the protocol
+// completes a transaction.  It derives from Consumer so it can be
+// scheduled on the event queue for deadlock detection (wakeup()).
+class Sequencer : public Consumer {
+public:
+  // Constructors
+  Sequencer(AbstractChip* chip_ptr, int version);
+
+  // Destructor
+  ~Sequencer();
+
+  // Public Methods
+  void wakeup(); // Used only for deadlock detection
+
+  static void printConfig(ostream& out);
+
+  // returns total number of outstanding request (includes prefetches)
+  int getNumberOutstanding();
+  // return only total number of outstanding demand requests
+  int getNumberOutstandingDemand();
+  // return only total number of outstanding prefetch requests
+  int getNumberOutstandingPrefetch();
+
+  // remove load/store request from queue
+  void removeLoadRequest(const Address & addr, int thread);
+  void removeStoreRequest(const Address & addr, int thread);
+
+  void printProgress(ostream& out) const;
+
+  // returns a pointer to the request in the request tables
+  CacheMsg & getReadRequest( const Address & addr, int thread );
+  CacheMsg & getWriteRequest( const Address & addr, int thread );
+
+  // called by Ruby when transaction completes
+  void writeConflictCallback(const Address& address);
+  void readConflictCallback(const Address& address);
+  void writeConflictCallback(const Address& address, GenericMachineType respondingMach, int thread);
+  void readConflictCallback(const Address& address, GenericMachineType respondingMach, int thread);
+
+  // Completion callbacks; overloads differ in how much profiling
+  // information (responding machine, prefetch bit, thread) is supplied.
+  void writeCallback(const Address& address, DataBlock& data);
+  void readCallback(const Address& address, DataBlock& data);
+  void writeCallback(const Address& address);
+  void readCallback(const Address& address);
+  void writeCallback(const Address& address, DataBlock& data, GenericMachineType respondingMach, PrefetchBit pf, int thread);
+  void readCallback(const Address& address, DataBlock& data, GenericMachineType respondingMach, PrefetchBit pf, int thread);
+  void writeCallback(const Address& address, DataBlock& data, GenericMachineType respondingMach, int thread);
+  void readCallback(const Address& address, DataBlock& data, GenericMachineType respondingMach, int thread);
+
+  // returns the thread ID of the request
+  int getRequestThreadID(const Address & addr);
+  // returns the physical address of the request
+  Address getRequestPhysicalAddress(const Address & lineaddr);
+  // returns whether a request is a prefetch request
+  bool isPrefetchRequest(const Address & lineaddr);
+
+  //notifies driver of debug print
+  void printDebug();
+
+  // called by Tester or Simics
+  void makeRequest(const CacheMsg& request);
+  bool doRequest(const CacheMsg& request);
+  void issueRequest(const CacheMsg& request);
+  bool isReady(const CacheMsg& request) const;
+  bool empty() const;
+  void resetRequestTime(const Address& addr, int thread);
+  Address getLogicalAddressOfRequest(Address address, int thread);
+  AccessModeType getAccessModeOfRequest(Address address, int thread);
+  //uint64 getSequenceNumberOfRequest(Address addr, int thread);
+
+  void print(ostream& out) const;
+  void checkCoherence(const Address& address);
+
+  // Debugger/driver memory peek and poke into the coherent image.
+  bool getRubyMemoryValue(const Address& addr, char* value, unsigned int size_in_bytes);
+  bool setRubyMemoryValue(const Address& addr, char *value, unsigned int size_in_bytes);
+
+  void removeRequest(const CacheMsg& request);
+private:
+  // Private Methods
+  bool tryCacheAccess(const Address& addr, CacheRequestType type, const Address& pc, AccessModeType access_mode, int size, DataBlock*& data_ptr);
+  void conflictCallback(const CacheMsg& request, GenericMachineType respondingMach, int thread);
+  void hitCallback(const CacheMsg& request, DataBlock& data, GenericMachineType respondingMach, int thread);
+  bool insertRequest(const CacheMsg& request);
+
+
+  // Private copy constructor and assignment operator
+  Sequencer(const Sequencer& obj);
+  Sequencer& operator=(const Sequencer& obj);
+
+  // Data Members (m_ prefix)
+  AbstractChip* m_chip_ptr;
+
+  // indicates what processor on the chip this sequencer is associated with
+  int m_version;
+
+  // One request table per SMT thread
+  Map<Address, CacheMsg>** m_writeRequestTable_ptr;
+  Map<Address, CacheMsg>** m_readRequestTable_ptr;
+  // Global outstanding request count, across all request tables
+  int m_outstanding_count;
+  // True while a deadlock-check wakeup() is pending on the event queue.
+  bool m_deadlock_check_scheduled;
+
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const Sequencer& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+// Stream a Sequencer via its print() method; defined in the header, so
+// extern inline keeps a single definition across translation units.
+extern inline
+ostream& operator<<(ostream& out, const Sequencer& obj)
+{
+  obj.print(out);
+  out << flush;
+  return out;
+}
+
+#endif //SEQUENCER_H
+
diff --git a/src/mem/ruby/system/StoreBuffer.cc b/src/mem/ruby/system/StoreBuffer.cc
new file mode 100644
index 000000000..c6880bdd1
--- /dev/null
+++ b/src/mem/ruby/system/StoreBuffer.cc
@@ -0,0 +1,300 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ */
+
+#include "Global.hh"
+#include "RubyConfig.hh"
+#include "StoreBuffer.hh"
+#include "AbstractChip.hh"
+#include "System.hh"
+#include "Driver.hh"
+#include "Vector.hh"
+#include "EventQueue.hh"
+#include "AddressProfiler.hh"
+#include "Sequencer.hh"
+#include "SubBlock.hh"
+#include "Profiler.hh"
+
+// *** Begin Helper class ***
+// One pending store (or atomic) held in the StoreBuffer's circular
+// queue, together with the request metadata needed to replay it.
+struct StoreBufferEntry {
+  StoreBufferEntry() {} // So we can allocate a vector of StoreBufferEntries
+  StoreBufferEntry(const SubBlock& block, CacheRequestType type, const Address& pc, AccessModeType access_mode, int size, int thread) : m_subblock(block) {
+    m_type = type;
+    m_pc = pc;
+    m_access_mode = access_mode;
+    m_size = size;
+    m_thread = thread;
+    // Timestamped at creation so wakeup() can detect stuck entries.
+    m_time = g_eventQueue_ptr->getTime();
+  }
+
+  void print(ostream& out) const
+  {
+    out << "[StoreBufferEntry: "
+        << "SubBlock: " << m_subblock
+        << ", Type: " << m_type
+        << ", PC: " << m_pc
+        << ", AccessMode: " << m_access_mode
+        << ", Size: " << m_size
+        << ", Thread: " << m_thread
+        << ", Time: " << m_time
+        << "]";
+  }
+
+  SubBlock m_subblock;        // bytes being written and their address
+  CacheRequestType m_type;    // ST or ATOMIC
+  Address m_pc;               // program counter of the store
+  AccessModeType m_access_mode;
+  int m_size;
+  int m_thread;               // SMT thread that issued the store
+  Time m_time;                // enqueue time (for deadlock detection)
+};
+
+// Stream a StoreBufferEntry via its print() method.
+extern inline
+ostream& operator<<(ostream& out, const StoreBufferEntry& obj)
+{
+  obj.print(out);
+  out << flush;
+  return out;
+}
+
+// *** End Helper class ***
+
+// Capacity of the circular store-buffer queue.
+const int MAX_ENTRIES = 128;
+
+// Advance a circular-queue index by one position, wrapping back to the
+// start of the buffer when it reaches MAX_ENTRIES.
+static void inc_index(int& index)
+{
+  index = (index + 1) % MAX_ENTRIES;
+}
+
+// Construct an empty store buffer for one processor (chip, version),
+// with a fixed-capacity circular queue (head == tail, size == 0 means
+// empty) and no pending request or deadlock check scheduled.
+StoreBuffer::StoreBuffer(AbstractChip* chip_ptr, int version) :
+  m_store_cache()
+{
+  m_chip_ptr = chip_ptr;
+  m_version = version;
+  m_queue_ptr = new Vector<StoreBufferEntry>(MAX_ENTRIES);
+  m_queue_ptr->setSize(MAX_ENTRIES);
+  m_pending = false;
+  m_seen_atomic = false;
+  m_head = 0;
+  m_tail = 0;
+  m_size = 0;
+  m_deadlock_check_scheduled = false;
+}
+
+// Release the heap-allocated circular queue; all other members are
+// owned elsewhere or are values.
+StoreBuffer::~StoreBuffer()
+{
+  delete m_queue_ptr;
+}
+
+// Used only to check for deadlock
+// Walks every queued entry from head to tail and aborts the simulation
+// if any has been waiting longer than g_DEADLOCK_THRESHOLD; reschedules
+// itself while entries remain outstanding.
+void StoreBuffer::wakeup()
+{
+  // Check for deadlock of any of the requests
+  Time current_time = g_eventQueue_ptr->getTime();
+
+  int queue_pointer = m_head;
+  for (int i=0; i<m_size; i++) {
+    if (current_time - (getEntry(queue_pointer).m_time) >= g_DEADLOCK_THRESHOLD) {
+      WARN_EXPR(getEntry(queue_pointer));
+      WARN_EXPR(m_chip_ptr->getID());
+      WARN_EXPR(current_time);
+      ERROR_MSG("Possible Deadlock detected");
+    }
+    inc_index(queue_pointer);
+  }
+
+  if (m_size > 0) { // If there are still outstanding requests, keep checking
+    g_eventQueue_ptr->scheduleEvent(this, g_DEADLOCK_THRESHOLD);
+  } else {
+    m_deadlock_check_scheduled = false;
+  }
+}
+
+// Dump the (compile-time) store buffer configuration; the buffer is
+// only exercised when TSO is enabled.
+void StoreBuffer::printConfig(ostream& out)
+{
+  out << "Store buffer entries: " << MAX_ENTRIES << " (Only valid if TSO is enabled)" << endl;
+}
+
+// Handle an incoming store request, this method is responsible for
+// calling hitCallback as needed
+// Stores are acknowledged to the driver immediately; atomics are
+// acknowledged later in callBack() once their load half has data.  The
+// entry is recorded in both the circular queue and the StoreCache so
+// later loads can be satisfied from the buffer (trySubBlock).
+void StoreBuffer::insertStore(const CacheMsg& request)
+{
+  Address addr = request.getAddress();
+  CacheRequestType type = request.getType();
+  Address pc = request.getProgramCounter();
+  AccessModeType access_mode = request.getAccessMode();
+  int size = request.getSize();
+  int threadID = request.getThreadID();
+
+  DEBUG_MSG(STOREBUFFER_COMP, MedPrio, "insertStore");
+  DEBUG_EXPR(STOREBUFFER_COMP, MedPrio, g_eventQueue_ptr->getTime());
+  assert((type == CacheRequestType_ST) || (type == CacheRequestType_ATOMIC));
+  assert(isReady());
+
+  // See if we should schedule a deadlock check
+  if (m_deadlock_check_scheduled == false) {
+    g_eventQueue_ptr->scheduleEvent(this, g_DEADLOCK_THRESHOLD);
+    m_deadlock_check_scheduled = true;
+  }
+
+  // Perform the hit-callback for the store
+  // NOTE(review): this passes the raw chip ID, whereas Sequencer
+  // callbacks use chipID*numberOfProcsPerChip()+version - confirm which
+  // identifier the driver expects here.
+  SubBlock subblock(addr, size);
+  if(type == CacheRequestType_ST) {
+    g_system_ptr->getDriver()->hitCallback(m_chip_ptr->getID(), subblock, type, threadID);
+    assert(subblock.getSize() != 0);
+  } else {
+    // wait to perform the hitCallback until later for Atomics
+  }
+
+  // Perform possible pre-fetch
+  if(!isEmpty()) {
+    CacheMsg new_request = request;
+    new_request.getPrefetch() = PrefetchBit_Yes;
+    m_chip_ptr->getSequencer(m_version)->makeRequest(new_request);
+  }
+
+  // Update the StoreCache
+  m_store_cache.add(subblock);
+
+  // Enqueue the entry
+  StoreBufferEntry entry(subblock, type, pc, access_mode, size, threadID); // FIXME
+  enqueue(entry);
+
+  // An atomic blocks further inserts until it drains (see isReady()).
+  if(type == CacheRequestType_ATOMIC) {
+    m_seen_atomic = true;
+  }
+
+  processHeadOfQueue();
+}
+
+// Completion callback for the head entry's line: perform the deferred
+// hitCallback for atomics, merge the entry's bytes into the returned
+// DataBlock, drop the entry from the queue and StoreCache, then try to
+// issue the next head entry.
+void StoreBuffer::callBack(const Address& addr, DataBlock& data)
+{
+  DEBUG_MSG(STOREBUFFER_COMP, MedPrio, "callBack");
+  DEBUG_EXPR(STOREBUFFER_COMP, MedPrio, g_eventQueue_ptr->getTime());
+  assert(!isEmpty());
+  assert(m_pending == true);
+  assert(line_address(addr) == addr);
+  assert(line_address(m_pending_address) == addr);
+  assert(line_address(peek().m_subblock.getAddress()) == addr);
+  CacheRequestType type = peek().m_type;
+  int threadID = peek().m_thread;
+  assert((type == CacheRequestType_ST) || (type == CacheRequestType_ATOMIC));
+  m_pending = false;
+
+  // If oldest entry was ATOMIC, perform the callback
+  if(type == CacheRequestType_ST) {
+    // We already performed the call back for the store at insert time
+  } else {
+    // We waited to perform the hitCallback until now for Atomics
+    peek().m_subblock.mergeFrom(data);  // copy the correct bytes from DataBlock into the SubBlock for the Load part of the atomic Load/Store
+    g_system_ptr->getDriver()->hitCallback(m_chip_ptr->getID(), peek().m_subblock, type, threadID);
+    m_seen_atomic = false;
+
+    /// FIXME - record the time spent in the store buffer - split out ST vs ATOMIC
+  }
+  assert(peek().m_subblock.getSize() != 0);
+
+  // Apply the head entry to the datablock
+  peek().m_subblock.mergeTo(data);  // For both the Store and Atomic cases
+
+  // Update the StoreCache
+  m_store_cache.remove(peek().m_subblock);
+
+  // Dequeue the entry from the store buffer
+  dequeue();
+
+  // Invariant checks: an empty queue implies an empty StoreCache, and
+  // an atomic always drains the buffer (it was the only entry allowed).
+  if (isEmpty()) {
+    assert(m_store_cache.isEmpty());
+  }
+
+  if(type == CacheRequestType_ATOMIC) {
+    assert(isEmpty());
+  }
+
+  // See if we can remove any more entries
+  processHeadOfQueue();
+}
+
+// If no request is in flight and the queue is non-empty, re-issue the
+// head entry to the Sequencer; callBack() clears m_pending when the
+// protocol responds, allowing the next entry to issue.
+void StoreBuffer::processHeadOfQueue()
+{
+  if(!isEmpty() && !m_pending) {
+    StoreBufferEntry& entry = peek();
+    assert(m_pending == false);
+    m_pending = true;
+    m_pending_address = entry.m_subblock.getAddress();
+    CacheMsg request(entry.m_subblock.getAddress(), entry.m_subblock.getAddress(), entry.m_type, entry.m_pc, entry.m_access_mode, entry.m_size, PrefetchBit_No, 0, Address(0), entry.m_thread, 0, false);
+    m_chip_ptr->getSequencer(m_version)->doRequest(request);
+  }
+}
+
+// A new store can be accepted only while there is free space and no
+// atomic is parked in the buffer (atomics must drain first).
+bool StoreBuffer::isReady() const
+{
+  if (m_seen_atomic) {
+    return false;
+  }
+  return m_size < MAX_ENTRIES;
+}
+
+// Queue implementation methods
+
+// Oldest entry in the circular queue (the one at m_head).
+StoreBufferEntry& StoreBuffer::peek()
+{
+  return (*m_queue_ptr)[m_head];
+}
+
+// Remove the oldest entry: advance the head index and shrink the count.
+void StoreBuffer::dequeue()
+{
+  assert(m_size > 0);
+  inc_index(m_head);
+  m_size--;
+}
+
+// Append an entry at the tail of the circular queue and report the new
+// occupancy to the profiler.  Capacity is guaranteed by the caller via
+// isReady(); the local assert remains disabled.
+void StoreBuffer::enqueue(const StoreBufferEntry& entry)
+{
+  //  assert(isReady());
+  (*m_queue_ptr)[m_tail] = entry;
+  m_size++;
+  g_system_ptr->getProfiler()->storeBuffer(m_size, m_store_cache.size());
+  inc_index(m_tail);
+}
+
+// Random access into the backing vector; index must lie in
+// [0, MAX_ENTRIES).
+StoreBufferEntry& StoreBuffer::getEntry(int index)
+{
+  Vector<StoreBufferEntry>& queue = *m_queue_ptr;
+  return queue[index];
+}
+
+// Minimal debug tag; individual entries print themselves via
+// StoreBufferEntry::print().
+void StoreBuffer::print(ostream& out) const
+{
+  out << "[StoreBuffer]";
+}
+
diff --git a/src/mem/ruby/system/StoreBuffer.hh b/src/mem/ruby/system/StoreBuffer.hh
new file mode 100644
index 000000000..832e4f0bb
--- /dev/null
+++ b/src/mem/ruby/system/StoreBuffer.hh
@@ -0,0 +1,120 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ * Description:
+ *
+ */
+
+#ifndef StoreBuffer_H
+#define StoreBuffer_H
+
+#include "Global.hh"
+#include "Consumer.hh"
+#include "Address.hh"
+#include "AccessModeType.hh"
+#include "CacheRequestType.hh"
+#include "StoreCache.hh"
+
+class CacheMsg;
+class DataBlock;
+class SubBlock;
+class StoreBufferEntry;
+class AbstractChip;
+
+template <class TYPE> class Vector;
+
+// Per-processor FIFO store buffer.  Stores are inserted by the
+// processor, issued in order to the cache through the Sequencer, and
+// mirrored in a StoreCache (m_store_cache) so buffered bytes can be
+// forwarded to younger loads via updateSubBlock/trySubBlock.
+class StoreBuffer : public Consumer {
+public:
+  // Constructors
+  StoreBuffer(AbstractChip* chip_ptr, int version);
+
+  // Destructor
+  ~StoreBuffer();
+
+  // Public Methods
+  void wakeup(); // Used only for deadlock detection
+  void callBack(const Address& addr, DataBlock& data);
+  void insertStore(const CacheMsg& request);
+  // Forward any buffered bytes covering sub_block into it.
+  void updateSubBlock(SubBlock& sub_block) const { m_store_cache.update(sub_block); }
+  // True if every byte of sub_block is covered by buffered stores.
+  bool trySubBlock(const SubBlock& sub_block) const { assert(isReady()); return m_store_cache.check(sub_block); }
+  void print(ostream& out) const;
+  bool isEmpty() const { return (m_size == 0); }
+  bool isReady() const;
+
+  // Class methods
+  static void printConfig(ostream& out);
+
+private:
+  // Private Methods
+  void processHeadOfQueue();
+
+  StoreBufferEntry& peek();
+  void dequeue();
+  void enqueue(const StoreBufferEntry& entry);
+  StoreBufferEntry& getEntry(int index);
+
+  // Private copy constructor and assignment operator
+  StoreBuffer(const StoreBuffer& obj);
+  StoreBuffer& operator=(const StoreBuffer& obj);
+
+  // Data Members (m_ prefix)
+  int m_version;                          // which sequencer on the chip this buffer feeds
+
+  Vector<StoreBufferEntry>* m_queue_ptr;  // circular buffer storage
+  int m_head;                             // index of oldest entry
+  int m_tail;                             // index of next free slot
+  int m_size;                             // current number of entries
+
+  StoreCache m_store_cache;               // byte-granularity shadow of buffered stores
+
+  AbstractChip* m_chip_ptr;
+  bool m_pending;                         // a request is outstanding at the sequencer
+  Address m_pending_address;              // address of that outstanding request
+  bool m_seen_atomic;                     // an atomic is buffered; block further inserts
+  bool m_deadlock_check_scheduled;
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const StoreBuffer& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const StoreBuffer& obj)
+{
+  obj.print(out);
+  out << flush;
+  return out;
+}
+
+#endif //StoreBuffer_H
diff --git a/src/mem/ruby/system/StoreCache.cc b/src/mem/ruby/system/StoreCache.cc
new file mode 100644
index 000000000..bc25c50d6
--- /dev/null
+++ b/src/mem/ruby/system/StoreCache.cc
@@ -0,0 +1,178 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ */
+
+#include "StoreCache.hh"
+#include "System.hh"
+#include "Driver.hh"
+#include "Vector.hh"
+#include "DataBlock.hh"
+#include "SubBlock.hh"
+#include "Map.hh"
+
+// Helper class: per-cache-line record kept by StoreCache.
+// m_byte_counters[i] counts how many buffered stores wrote byte i of
+// the line (bytes with counter > 0 are "valid" for forwarding);
+// m_line_counter counts buffered stores touching the line, so the
+// entry can be deallocated when it drops back to zero.
+struct StoreCacheEntry {
+  StoreCacheEntry() {
+    m_byte_counters.setSize(RubyConfig::dataBlockBytes());
+    for(int i=0; i<m_byte_counters.size(); i++) {
+      m_byte_counters[i] = 0;
+    }
+    m_line_counter = 0;
+
+  }
+  Address m_addr;
+  DataBlock m_datablock;       // most recent data written to each byte
+  Vector<int> m_byte_counters; // per-byte reference counts
+  int m_line_counter;          // number of buffered stores to this line
+};
+
+StoreCache::StoreCache()
+{
+  m_internal_cache_ptr = new Map<Address, StoreCacheEntry>;
+}
+
+StoreCache::~StoreCache()
+{
+  delete m_internal_cache_ptr;
+}
+
+// True when no line has any buffered store bytes.
+bool StoreCache::isEmpty() const
+{
+  return m_internal_cache_ptr->size() == 0;
+}
+
+// Number of distinct cache lines with buffered store data.
+int StoreCache::size() const { return m_internal_cache_ptr->size(); }
+
+// Record a buffered store: allocate the line entry on first touch,
+// bump the per-byte and per-line reference counts, and capture the
+// written bytes for later forwarding.
+void StoreCache::add(const SubBlock& block)
+{
+  if (m_internal_cache_ptr->exist(line_address(block.getAddress())) == false) {
+    m_internal_cache_ptr->allocate(line_address(block.getAddress()));
+  }
+
+  StoreCacheEntry& entry = m_internal_cache_ptr->lookup(line_address(block.getAddress()));
+
+  // For each byte in entry change the bytes and inc. the counters
+  int starting_offset = block.getAddress().getOffset();
+  int size = block.getSize();
+  for (int index=0; index < size; index++) {
+    // Update counter
+    entry.m_byte_counters[starting_offset+index]++;
+
+    // Record data
+    entry.m_datablock.setByte(starting_offset+index, block.getByte(index));
+
+    DEBUG_EXPR(SEQUENCER_COMP, LowPrio, block.getAddress());
+    DEBUG_EXPR(SEQUENCER_COMP, LowPrio, int(block.getByte(index)));
+    DEBUG_EXPR(SEQUENCER_COMP, LowPrio, starting_offset+index);
+  }
+
+  // Increment the counter
+  entry.m_line_counter++;
+}
+
+// Undo one add() for this sub-block: decrement the per-byte and
+// per-line counts, deallocating the line entry once no buffered
+// store references it.  The block must have been add()ed earlier.
+void StoreCache::remove(const SubBlock& block)
+{
+  assert(m_internal_cache_ptr->exist(line_address(block.getAddress())));
+
+  StoreCacheEntry& entry = m_internal_cache_ptr->lookup(line_address(block.getAddress()));
+
+  // Decrement the byte counters
+  int starting_offset = block.getAddress().getOffset();
+  int size = block.getSize();
+  for (int index=0; index < size; index++) {
+    // Update counter
+    entry.m_byte_counters[starting_offset+index]--;
+  }
+
+  // Decrement the line counter
+  entry.m_line_counter--;
+  assert(entry.m_line_counter >= 0);
+
+  // Check to see if we should de-allocate this entry
+  if (entry.m_line_counter == 0) {
+    m_internal_cache_ptr->deallocate(line_address(block.getAddress()));
+  }
+}
+
+// Return true only if EVERY byte of the sub-block is covered by at
+// least one buffered store (i.e. the whole read can be forwarded).
+bool StoreCache::check(const SubBlock& block) const
+{
+  if (m_internal_cache_ptr->exist(line_address(block.getAddress())) == false) {
+    return false;
+  } else {
+    // Lookup the entry
+    StoreCacheEntry& entry = m_internal_cache_ptr->lookup(line_address(block.getAddress()));
+
+    // See if all the bytes are valid
+    int starting_offset = block.getAddress().getOffset();
+    int size = block.getSize();
+    for (int index=0; index < size; index++) {
+      if (entry.m_byte_counters[starting_offset+index] > 0) {
+        // So far so good
+      } else {
+        // not all the bytes were valid
+        return false;
+      }
+    }
+  }
+  return true;
+}
+
+// Partial forwarding: copy each buffered (counter > 0) byte of the
+// line into the corresponding byte of the sub-block; untouched bytes
+// keep whatever value the sub-block already held.
+void StoreCache::update(SubBlock& block) const
+{
+  if (m_internal_cache_ptr->exist(line_address(block.getAddress()))) {
+    // Lookup the entry
+    StoreCacheEntry& entry = m_internal_cache_ptr->lookup(line_address(block.getAddress()));
+
+    // Copy all appropriate and valid bytes from the store cache to
+    // the SubBlock
+    int starting_offset = block.getAddress().getOffset();
+    int size = block.getSize();
+    for (int index=0; index < size; index++) {
+
+      DEBUG_EXPR(SEQUENCER_COMP, LowPrio, block.getAddress());
+      DEBUG_EXPR(SEQUENCER_COMP, LowPrio, int(entry.m_datablock.getByte(starting_offset+index)));
+      DEBUG_EXPR(SEQUENCER_COMP, LowPrio, starting_offset+index);
+
+      // If this byte is valid, copy the data into the sub-block
+      if (entry.m_byte_counters[starting_offset+index] > 0) {
+        block.setByte(index, entry.m_datablock.getByte(starting_offset+index));
+      }
+    }
+  }
+}
+
+// Minimal debug print; entries are not dumped.
+void StoreCache::print(ostream& out) const
+{
+  out << "[StoreCache]";
+}
+
diff --git a/src/mem/ruby/system/StoreCache.hh b/src/mem/ruby/system/StoreCache.hh
new file mode 100644
index 000000000..d92d39888
--- /dev/null
+++ b/src/mem/ruby/system/StoreCache.hh
@@ -0,0 +1,85 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ *
+ * Description:
+ *
+ */
+
+#ifndef StoreCache_H
+#define StoreCache_H
+
+#include "Global.hh"
+#include "Address.hh"
+
+
+class DataBlock;
+class SubBlock;
+class StoreCacheEntry;
+
+template <class KEY_TYPE, class VALUE_TYPE> class Map;
+
+// Byte-granularity shadow of the stores currently sitting in a
+// StoreBuffer, keyed by cache-line address.  Supports reference-
+// counted add/remove of store sub-blocks, full-coverage checks, and
+// partial forwarding of buffered bytes into a SubBlock.
+class StoreCache {
+public:
+  // Constructors
+  StoreCache();
+
+  // Destructor
+  ~StoreCache();
+
+  // Public Methods
+  void add(const SubBlock& block);           // record a buffered store
+  void remove(const SubBlock& block);        // retire a buffered store
+  bool check(const SubBlock& block) const;   // all bytes covered?
+  void update(SubBlock& block) const;        // forward covered bytes
+  bool isEmpty() const;
+  int size() const;                          // number of lines tracked
+  void print(ostream& out) const;
+
+private:
+  Map<Address, StoreCacheEntry>* m_internal_cache_ptr; // owned; line -> entry
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const StoreCache& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const StoreCache& obj)
+{
+  obj.print(out);
+  out << flush;
+  return out;
+}
+
+#endif //StoreCache_H
diff --git a/src/mem/ruby/system/System.cc b/src/mem/ruby/system/System.cc
new file mode 100644
index 000000000..6352d8a58
--- /dev/null
+++ b/src/mem/ruby/system/System.cc
@@ -0,0 +1,269 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * System.C
+ *
+ * Description: See System.h
+ *
+ * $Id$
+ *
+ */
+
+
+#include "System.hh"
+#include "Profiler.hh"
+#include "Network.hh"
+#include "Tester.hh"
+#include "SyntheticDriver.hh"
+#include "DeterministicDriver.hh"
+#include "OpalInterface.hh"
+#include "Chip.hh"
+//#include "Tracer.hh"
+#include "Protocol.hh"
+//#include "XactIsolationChecker.hh" // gem5:Arka for decomissioning of log_tm
+//#include "XactCommitArbiter.hh"
+//#include "XactVisualizer.hh"
+#include "M5Driver.hh"
+
+// Construct the whole simulated system: profiler, interconnect
+// network, one Chip per RubyConfig chip, and finally the driver
+// (synthetic, deterministic, or M5) chosen by global flags.
+// NOTE(review): m_tracer_ptr and the m_xact_* pointers are never
+// initialized here (their creation is commented out below), so they
+// hold indeterminate values; getTracer()/getXact*() must not be
+// called in this configuration -- verify before relying on them.
+System::System()
+{
+  DEBUG_MSG(SYSTEM_COMP, MedPrio,"initializing");
+
+  m_driver_ptr = NULL;
+  m_profiler_ptr = new Profiler;
+
+  // NETWORK INITIALIZATION
+  // create the network by calling a function that calls new
+  m_network_ptr = Network::createNetwork(RubyConfig::numberOfChips());
+
+  DEBUG_MSG(SYSTEM_COMP, MedPrio,"Constructed network");
+
+  // CHIP INITIALIZATION
+  m_chip_vector.setSize(RubyConfig::numberOfChips());// create the vector of pointers to processors
+  for(int i=0; i<RubyConfig::numberOfChips(); i++) { // for each chip
+    // create the chip
+    m_chip_vector[i] = new Chip(i, m_network_ptr);
+    DEBUG_MSG(SYSTEM_COMP, MedPrio,"Constructed a chip");
+  }
+
+  // These must be after the chips are constructed
+
+  // Old Simics/Opal driver-selection logic, disabled in the gem5 import.
+#if 0
+  if (!g_SIMICS) {
+    if (g_SYNTHETIC_DRIVER && !g_DETERMINISTIC_DRIVER) {
+      m_driver_ptr = new SyntheticDriver(this);
+    } else if (!g_SYNTHETIC_DRIVER && g_DETERMINISTIC_DRIVER) {
+      m_driver_ptr = new DeterministicDriver(this);
+    } else if (g_SYNTHETIC_DRIVER && g_DETERMINISTIC_DRIVER) {
+      ERROR_MSG("SYNTHETIC and DETERMINISTIC DRIVERS are exclusive and cannot be both enabled");
+    } else {
+      // normally make tester object, otherwise make an opal interface object.
+      if (!OpalInterface::isOpalLoaded()) {
+        m_driver_ptr = new Tester(this);
+      } else {
+        m_driver_ptr = new OpalInterface(this);
+      }
+    }
+  } else {
+    // detect if opal is loaded or not
+    if (OpalInterface::isOpalLoaded()) {
+      m_driver_ptr = new OpalInterface(this);
+    } else {
+      assert(0);
+      /* Need to allocate a driver here */
+      // m_driver_ptr = new SimicsDriver(this);
+    }
+  }
+#endif
+
+  // Driver selection: synthetic and deterministic are mutually
+  // exclusive flags; with neither set, the M5 driver is used.
+  if (g_SYNTHETIC_DRIVER && !g_DETERMINISTIC_DRIVER) {
+    cerr << "Creating Synthetic Driver" << endl;
+    m_driver_ptr = new SyntheticDriver(this);
+  } else if (!g_SYNTHETIC_DRIVER && g_DETERMINISTIC_DRIVER) {
+    cerr << "Creating Deterministic Driver" << endl;
+    m_driver_ptr = new DeterministicDriver(this);
+  } else {
+    cerr << "Creating M5 Driver" << endl;
+    m_driver_ptr = new M5Driver(this);
+  }
+  /* gem5:Binkert for decomissiong of tracer
+  m_tracer_ptr = new Tracer;
+  */
+
+  /* gem5:Arka for decomissiong of log_tm
+  if (XACT_MEMORY) {
+    m_xact_isolation_checker = new XactIsolationChecker;
+    m_xact_commit_arbiter = new XactCommitArbiter;
+    m_xact_visualizer = new XactVisualizer;
+  }
+*/
+  DEBUG_MSG(SYSTEM_COMP, MedPrio,"finished initializing");
+  DEBUG_NEWLINE(SYSTEM_COMP, MedPrio);
+
+}
+
+// Tear down everything the constructor allocated (chips, driver,
+// network, profiler).  The tracer delete stays commented out to
+// match the commented-out allocation in the constructor.
+System::~System()
+{
+  for (int i = 0; i < m_chip_vector.size(); i++) {
+    delete m_chip_vector[i];
+  }
+  delete m_driver_ptr;
+  delete m_network_ptr;
+  delete m_profiler_ptr;
+  /* gem5:Binkert for decomissiong of tracer
+  delete m_tracer_ptr;
+  */
+}
+
+// Dump the static configuration of every major component.
+void System::printConfig(ostream& out) const
+{
+  out << "\n================ Begin System Configuration Print ================\n\n";
+  RubyConfig::printConfiguration(out);
+  out << endl;
+  getChip(0)->printConfig(out);
+  m_network_ptr->printConfig(out);
+  m_driver_ptr->printConfig(out);
+  m_profiler_ptr->printConfig(out);
+  out << "\n================ End System Configuration Print ================\n\n";
+}
+
+// Dump run statistics from all components, stamped with wall-clock
+// time.  NOTE(review): localtime() returns a shared static buffer
+// and is not reentrant; fine for a single-threaded simulator, but
+// worth confirming if stats printing ever becomes concurrent.
+void System::printStats(ostream& out)
+{
+  const time_t T = time(NULL);
+  tm *localTime = localtime(&T);
+  char buf[100];
+  strftime(buf, 100, "%b/%d/%Y %H:%M:%S", localTime);
+
+  out << "Real time: " << buf << endl;
+
+  m_profiler_ptr->printStats(out);
+  for(int i=0; i<RubyConfig::numberOfChips(); i++) { // for each chip
+    for(int p=0; p<RubyConfig::numberOfProcsPerChip(); p++) {
+      m_chip_vector[i]->m_L1Cache_mandatoryQueue_vec[p]->printStats(out);
+    }
+  }
+  m_network_ptr->printStats(out);
+  m_driver_ptr->printStats(out);
+  Chip::printStats(out);
+}
+
+// Reset statistics in all components (const: mutates only objects
+// reached through pointer members, not System itself).
+void System::clearStats() const
+{
+  m_profiler_ptr->clearStats();
+  m_network_ptr->clearStats();
+  m_driver_ptr->clearStats();
+  Chip::clearStats();
+  for(int i=0; i<RubyConfig::numberOfChips(); i++) { // for each chip
+    for(int p=0; p<RubyConfig::numberOfProcsPerChip(); p++) {
+      m_chip_vector[i]->m_L1Cache_mandatoryQueue_vec[p]->clearStats();
+    }
+  }
+}
+
+// Mark each L1 as instruction or data cache, then let every chip
+// record its cache contents into the recorder.
+// NOTE(review): the loop variable "m_version" is a plain local
+// despite the m_ member-style prefix -- misleading name.
+void System::recordCacheContents(CacheRecorder& tr) const
+{
+  for (int i = 0; i < m_chip_vector.size(); i++) {
+    for (int m_version = 0; m_version < RubyConfig::numberOfProcsPerChip(); m_version++) {
+      if (Protocol::m_TwoLevelCache) {
+        m_chip_vector[i]->m_L1Cache_L1IcacheMemory_vec[m_version]->setAsInstructionCache(true);
+        m_chip_vector[i]->m_L1Cache_L1DcacheMemory_vec[m_version]->setAsInstructionCache(false);
+      } else {
+        m_chip_vector[i]->m_L1Cache_cacheMemory_vec[m_version]->setAsInstructionCache(false);
+      }
+    }
+    m_chip_vector[i]->recordCacheContents(tr);
+  }
+}
+
+// Swap the current driver for an OpalInterface when Opal loads.
+void System::opalLoadNotify()
+{
+  if (OpalInterface::isOpalLoaded()) {
+    // change the driver pointer to point to an opal driver
+    delete m_driver_ptr;
+    m_driver_ptr = new OpalInterface(this);
+  }
+}
+
+#ifdef CHECK_COHERENCE
+// This code will check for cases if the given cache block is exclusive in
+// one node and shared in another-- a coherence violation
+//
+// To use, the SLICC specification must call sequencer.checkCoherence(address)
+// when the controller changes to a state with new permissions. Do this
+// in setState. The SLICC spec must also define methods "isBlockShared"
+// and "isBlockExclusive" that are specific to that protocol
+//
+// Scans every chip once; -1 is used as the "no chip yet" sentinel for
+// the NodeID trackers.  Aborts via ERROR_MSG on the first violation.
+void System::checkGlobalCoherenceInvariant(const Address& addr ) {
+
+  NodeID exclusive = -1;           // chip holding the block exclusively, if any
+  bool sharedDetected = false;     // some chip holds the block shared
+  NodeID lastShared = -1;          // most recent sharer seen (for diagnostics)
+
+  for (int i = 0; i < m_chip_vector.size(); i++) {
+
+    if (m_chip_vector[i]->isBlockExclusive(addr)) {
+      if (exclusive != -1) {
+        // coherence violation
+        WARN_EXPR(exclusive);
+        WARN_EXPR(m_chip_vector[i]->getID());
+        WARN_EXPR(addr);
+        WARN_EXPR(g_eventQueue_ptr->getTime());
+        ERROR_MSG("Coherence Violation Detected -- 2 exclusive chips");
+      }
+      else if (sharedDetected) {
+        WARN_EXPR(lastShared);
+        WARN_EXPR(m_chip_vector[i]->getID());
+        WARN_EXPR(addr);
+        WARN_EXPR(g_eventQueue_ptr->getTime());
+        ERROR_MSG("Coherence Violation Detected -- exclusive chip with >=1 shared");
+      }
+      else {
+        exclusive = m_chip_vector[i]->getID();
+      }
+    }
+    else if (m_chip_vector[i]->isBlockShared(addr)) {
+      sharedDetected = true;
+      lastShared = m_chip_vector[i]->getID();
+
+      if (exclusive != -1) {
+        WARN_EXPR(lastShared);
+        WARN_EXPR(exclusive);
+        WARN_EXPR(addr);
+        WARN_EXPR(g_eventQueue_ptr->getTime());
+        ERROR_MSG("Coherence Violation Detected -- exclusive chip with >=1 shared");
+      }
+    }
+  }
+}
+#endif
+
+
+
+
diff --git a/src/mem/ruby/system/System.hh b/src/mem/ruby/system/System.hh
new file mode 100644
index 000000000..350f74468
--- /dev/null
+++ b/src/mem/ruby/system/System.hh
@@ -0,0 +1,137 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * System.h
+ *
+ * Description: Contains all of the various parts of the system we are
+ * simulating. Performs allocation, deallocation, and setup of all
+ * the major components of the system
+ *
+ * $Id$
+ *
+ */
+
+#ifndef SYSTEM_H
+#define SYSTEM_H
+
+#include "Global.hh"
+#include "Vector.hh"
+#include "Address.hh"
+#include "RubyConfig.hh"
+#include "MachineType.hh"
+#include "AbstractChip.hh"
+
+class Profiler;
+class Network;
+class Driver;
+class CacheRecorder;
+class Tracer;
+class Sequencer;
+class XactIsolationChecker;
+class XactCommitArbiter;
+class XactVisualizer;
+class TransactionInterfaceManager;
+
+// Top-level container for the simulated machine: owns the network,
+// the chips, the profiler, and the driver, and provides global
+// accessors (used via the g_system_ptr singleton elsewhere).
+class System {
+public:
+  // Constructors
+  System();
+
+  // Destructor
+  ~System();
+
+  // Public Methods
+  int getNumProcessors() { return RubyConfig::numberOfProcessors(); }
+  int getNumMemories() { return RubyConfig::numberOfMemories(); }
+  Profiler* getProfiler() { return m_profiler_ptr; }
+  Driver* getDriver() { assert(m_driver_ptr != NULL); return m_driver_ptr; }
+  // NOTE(review): the ctor never sets m_tracer_ptr or the m_xact_*
+  // members (allocation is commented out there), so the accessors
+  // below read indeterminate pointers -- confirm they are unused.
+  Tracer* getTracer() { assert(m_tracer_ptr != NULL); return m_tracer_ptr; }
+  Network* getNetwork() { assert(m_network_ptr != NULL); return m_network_ptr; }
+  XactIsolationChecker* getXactIsolationChecker() { assert(m_xact_isolation_checker!= NULL); return m_xact_isolation_checker;}
+  XactCommitArbiter* getXactCommitArbiter() { assert(m_xact_commit_arbiter!= NULL); return m_xact_commit_arbiter;}
+  XactVisualizer* getXactVisualizer() { assert(m_xact_visualizer!= NULL); return m_xact_visualizer;}
+
+  AbstractChip* getChip(int chipNumber) const { assert(m_chip_vector[chipNumber] != NULL); return m_chip_vector[chipNumber];}
+  // Map a global processor number to the sequencer on its chip.
+  Sequencer* getSequencer(int procNumber) const {
+    assert(procNumber < RubyConfig::numberOfProcessors());
+    return m_chip_vector[procNumber/RubyConfig::numberOfProcsPerChip()]->getSequencer(procNumber%RubyConfig::numberOfProcsPerChip());
+  }
+  TransactionInterfaceManager* getTransactionInterfaceManager(int procNumber) const {
+    return m_chip_vector[procNumber/RubyConfig::numberOfProcsPerChip()]->getTransactionInterfaceManager(procNumber%RubyConfig::numberOfProcsPerChip());
+  }
+  void recordCacheContents(CacheRecorder& tr) const;
+  void printConfig(ostream& out) const;
+  void printStats(ostream& out);
+  void clearStats() const;
+
+  // called to notify the system when opal is loaded
+  void opalLoadNotify();
+
+  void print(ostream& out) const;
+#ifdef CHECK_COHERENCE
+  void checkGlobalCoherenceInvariant(const Address& addr);
+#endif
+
+private:
+  // Private Methods
+
+  // Private copy constructor and assignment operator
+  System(const System& obj);
+  System& operator=(const System& obj);
+
+  // Data Members (m_ prefix)
+  Network* m_network_ptr;
+  Vector<AbstractChip*> m_chip_vector;
+  Profiler* m_profiler_ptr;
+  Driver* m_driver_ptr;
+  Tracer* m_tracer_ptr;
+  XactIsolationChecker *m_xact_isolation_checker;
+  XactCommitArbiter *m_xact_commit_arbiter;
+  XactVisualizer *m_xact_visualizer;
+};
+
+// Output operator declaration
+ostream& operator<<(ostream& out, const System& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+// (print() call is disabled; only flushes the stream.)
+inline
+ostream& operator<<(ostream& out, const System& obj)
+{
+// obj.print(out);
+  out << flush;
+  return out;
+}
+
+#endif //SYSTEM_H
+
+
+
diff --git a/src/mem/ruby/system/TBETable.hh b/src/mem/ruby/system/TBETable.hh
new file mode 100644
index 000000000..ad1674dca
--- /dev/null
+++ b/src/mem/ruby/system/TBETable.hh
@@ -0,0 +1,165 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * TBETable.h
+ *
+ * Description:
+ *
+ * $Id$
+ *
+ */
+
+#ifndef TBETABLE_H
+#define TBETABLE_H
+
+#include "Global.hh"
+#include "Map.hh"
+#include "Address.hh"
+#include "Profiler.hh"
+#include "AbstractChip.hh"
+#include "System.hh"
+
+// Table of Transaction Buffer Entries (TBEs): a fixed-capacity map
+// from line address to an in-flight-transaction record, bounded by
+// the global NUMBER_OF_TBES.  Addresses must be line-aligned.
+template<class ENTRY>
+class TBETable {
+public:
+
+  // Constructors
+  TBETable(AbstractChip* chip_ptr);
+
+  // Destructor
+  //~TBETable();
+
+  // Public Methods
+
+  static void printConfig(ostream& out) { out << "TBEs_per_TBETable: " << NUMBER_OF_TBES << endl; }
+
+  bool isPresent(const Address& address) const;
+  void allocate(const Address& address);    // add a default-constructed ENTRY
+  void deallocate(const Address& address);
+  // True if at least n TBEs are still free.
+  bool areNSlotsAvailable(int n) const { return (NUMBER_OF_TBES - m_map.size()) >= n; }
+
+  ENTRY& lookup(const Address& address);
+  const ENTRY& lookup(const Address& address) const;
+
+  // Print cache contents
+  void print(ostream& out) const;
+private:
+  // Private Methods
+
+  // Private copy constructor and assignment operator
+  TBETable(const TBETable& obj);
+  TBETable& operator=(const TBETable& obj);
+
+  // Data Members (m_prefix)
+  Map<Address, ENTRY> m_map;      // line address -> transaction entry
+  AbstractChip* m_chip_ptr;       // owning chip (not used by the table logic shown here)
+};
+
+// Output operator declaration
+//ostream& operator<<(ostream& out, const TBETable<ENTRY>& obj);
+
+// ******************* Definitions *******************
+
+// Output operator definition
+// NOTE(review): "extern inline" on a template is non-standard legacy
+// GEMS usage; plain "inline" is the conforming spelling.
+template<class ENTRY>
+extern inline
+ostream& operator<<(ostream& out, const TBETable<ENTRY>& obj)
+{
+  obj.print(out);
+  out << flush;
+  return out;
+}
+
+
+// ****************************************************************
+
+template<class ENTRY>
+extern inline
+TBETable<ENTRY>::TBETable(AbstractChip* chip_ptr)
+{
+  m_chip_ptr = chip_ptr;
+}
+
+// PUBLIC METHODS
+
+// tests to see if an address is present in the cache
+// (requires a line-aligned address; also sanity-checks capacity)
+template<class ENTRY>
+extern inline
+bool TBETable<ENTRY>::isPresent(const Address& address) const
+{
+  assert(address == line_address(address));
+  assert(m_map.size() <= NUMBER_OF_TBES);
+  return m_map.exist(address);
+}
+
+// Reserve a TBE for the address, recording the occupancy sample with
+// the global profiler.  The address must not already be present and
+// a free slot must exist.
+template<class ENTRY>
+extern inline
+void TBETable<ENTRY>::allocate(const Address& address)
+{
+  assert(isPresent(address) == false);
+  assert(m_map.size() < NUMBER_OF_TBES);
+  g_system_ptr->getProfiler()->L2tbeUsageSample(m_map.size());
+  m_map.add(address, ENTRY());
+}
+
+// Release the TBE held for this address (must be present).
+template<class ENTRY>
+extern inline
+void TBETable<ENTRY>::deallocate(const Address& address)
+{
+  assert(isPresent(address) == true);
+  assert(m_map.size() > 0);
+  m_map.erase(address);
+}
+
+// looks an address up in the cache
+template<class ENTRY>
+extern inline
+ENTRY& TBETable<ENTRY>::lookup(const Address& address)
+{
+  assert(isPresent(address) == true);
+  return m_map.lookup(address);
+}
+
+// looks an address up in the cache
+template<class ENTRY>
+extern inline
+const ENTRY& TBETable<ENTRY>::lookup(const Address& address) const
+{
+  assert(isPresent(address) == true);
+  return m_map.lookup(address);
+}
+
+// Intentionally prints nothing; kept to satisfy the interface.
+template<class ENTRY>
+extern inline
+void TBETable<ENTRY>::print(ostream& out) const
+{
+}
+
+#endif //TBETABLE_H
diff --git a/src/mem/ruby/system/TimerTable.cc b/src/mem/ruby/system/TimerTable.cc
new file mode 100644
index 000000000..a8453d4bb
--- /dev/null
+++ b/src/mem/ruby/system/TimerTable.cc
@@ -0,0 +1,129 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * $Id$
+ */
+
+#include "Global.hh"
+#include "TimerTable.hh"
+#include "EventQueue.hh"
+
+// Constructs an empty timer table.  No consumer is registered yet;
+// setConsumer() must be called before the first set().
+TimerTable::TimerTable(Chip* chip_ptr)
+{
+ assert(chip_ptr != NULL);
+ m_consumer_ptr = NULL;
+ m_chip_ptr = chip_ptr;
+ m_next_valid = false; // cached next-to-expire entry starts invalid
+ m_next_address = Address(0);
+ m_next_time = 0;
+}
+
+
+// Returns true if at least one timer has expired, i.e. its scheduled
+// time is <= the current event-queue time.  Lazily refreshes the
+// cached next-to-expire entry via updateNext(); the cache members are
+// mutable, which is why this works from a const method.
+bool TimerTable::isReady() const
+{
+ if (m_map.size() == 0) {
+ return false;
+ }
+
+ if (!m_next_valid) {
+ updateNext();
+ }
+ assert(m_next_valid);
+ return (g_eventQueue_ptr->getTime() >= m_next_time);
+}
+
+// Returns the address whose timer expires earliest.  Only valid when
+// isReady() is true.
+const Address& TimerTable::readyAddress() const
+{
+ assert(isReady());
+
+ // isReady() above already refreshes the cache, but that call sits
+ // inside an assert; keep this refresh so the method still works
+ // when asserts are compiled out (NDEBUG).
+ if (!m_next_valid) {
+ updateNext();
+ }
+ assert(m_next_valid);
+ return m_next_address;
+}
+
+// Arms a timer for 'address' to expire 'relative_latency' cycles from
+// now, and schedules a wakeup of the registered consumer at that time.
+// 'address' must be line-aligned and not already have a timer set.
+void TimerTable::set(const Address& address, Time relative_latency)
+{
+ assert(address == line_address(address));
+ assert(relative_latency > 0);
+ assert(m_map.exist(address) == false);
+ Time ready_time = g_eventQueue_ptr->getTime() + relative_latency;
+ m_map.add(address, ready_time);
+ assert(m_consumer_ptr != NULL);
+ g_eventQueue_ptr->scheduleEventAbsolute(m_consumer_ptr, ready_time);
+
+ // Don't always recalculate the next ready address: the cached
+ // (m_next_address, m_next_time) pair only needs invalidating when
+ // the new entry expires no later than the cached minimum.  (An
+ // unconditional "m_next_valid = false;" here would make this test
+ // dead code and force an O(n) rescan in updateNext() on every set.)
+ // If the cache is already invalid, skipping the clear is harmless.
+ if (ready_time <= m_next_time) {
+ m_next_valid = false;
+ }
+}
+
+// Cancels the timer for 'address'; it must currently be set.  Note
+// that the wakeup scheduled on the event queue by set() is NOT
+// cancelled here; the consumer simply finds no ready address when it
+// fires.
+void TimerTable::unset(const Address& address)
+{
+ assert(address == line_address(address));
+ assert(m_map.exist(address) == true);
+ m_map.remove(address);
+
+ // Don't always recalculate the next ready address: the cached
+ // minimum is only stale if the removed entry was that minimum.
+ if (address == m_next_address) {
+ m_next_valid = false;
+ }
+}
+
+// Intentionally empty: TimerTable has no printable state yet, but the
+// method must exist for the ostream operator<< in TimerTable.hh.
+void TimerTable::print(ostream& out) const
+{
+}
+
+
+// Recomputes the cached (m_next_address, m_next_time) pair with a
+// linear O(n) scan over all outstanding timers, then marks the cache
+// valid.  Called lazily from isReady()/readyAddress(); const because
+// it only touches the mutable cache members.
+void TimerTable::updateNext() const
+{
+ if (m_map.size() == 0) {
+ assert(m_next_valid == false);
+ return;
+ }
+
+ // Seed the minimum with the first key, then scan the rest.
+ Vector<Address> addresses = m_map.keys();
+ m_next_address = addresses[0];
+ m_next_time = m_map.lookup(m_next_address);
+
+ // Search for the minimum time
+ int size = addresses.size();
+ for (int i=1; i<size; i++) {
+ Address maybe_next_address = addresses[i];
+ Time maybe_next_time = m_map.lookup(maybe_next_address);
+ if (maybe_next_time < m_next_time) {
+ m_next_time = maybe_next_time;
+ m_next_address= maybe_next_address;
+ }
+ }
+ m_next_valid = true;
+}
diff --git a/src/mem/ruby/system/TimerTable.hh b/src/mem/ruby/system/TimerTable.hh
new file mode 100644
index 000000000..c7f77efb1
--- /dev/null
+++ b/src/mem/ruby/system/TimerTable.hh
@@ -0,0 +1,98 @@
+
+/*
+ * Copyright (c) 1999-2008 Mark D. Hill and David A. Wood
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/*
+ * TimerTable.hh
+ *
+ * Description:
+ *
+ * $Id$
+ *
+ */
+
+#ifndef TIMERTABLE_H
+#define TIMERTABLE_H
+
+#include "Global.hh"
+#include "Map.hh"
+#include "Address.hh"
+class Consumer;
+class Chip;
+
+// TimerTable: a table of per-line-address timeouts owned by a chip.
+// set() arms a timer for a line address and schedules a wakeup of the
+// registered Consumer; isReady()/readyAddress() report expired timers.
+// The minimum-time entry is cached in mutable members and recomputed
+// lazily by updateNext(), so const queries can refresh it.
+class TimerTable {
+public:
+
+ // Constructors
+ TimerTable(Chip* chip_ptr);
+
+ // Destructor
+ //~TimerTable();
+
+ // Class Methods
+ static void printConfig(ostream& out) {}
+
+ // Public Methods
+ // The consumer may be registered exactly once (ASSERT enforces it)
+ // and is the object whose wakeup() fires when a timer expires.
+ void setConsumer(Consumer* consumer_ptr) { ASSERT(m_consumer_ptr==NULL); m_consumer_ptr = consumer_ptr; }
+ void setDescription(const string& name) { m_name = name; }
+
+ bool isReady() const;
+ const Address& readyAddress() const;
+ bool isSet(const Address& address) const { return m_map.exist(address); }
+ void set(const Address& address, Time relative_latency);
+ void unset(const Address& address);
+ void print(ostream& out) const;
+private:
+ // Private Methods
+ void updateNext() const;
+
+ // Private copy constructor and assignment operator
+ TimerTable(const TimerTable& obj);
+ TimerTable& operator=(const TimerTable& obj);
+
+ // Data Members (m_prefix)
+ Map<Address, Time> m_map;
+ Chip* m_chip_ptr;
+ mutable bool m_next_valid;
+ mutable Time m_next_time; // Only valid if m_next_valid is true
+ mutable Address m_next_address; // Only valid if m_next_valid is true
+ Consumer* m_consumer_ptr; // Consumer to signal a wakeup()
+ string m_name;
+};
+
+// ******************* Definitions *******************
+
+// Output operator definition
+extern inline
+ostream& operator<<(ostream& out, const TimerTable& obj)
+{
+ obj.print(out); // currently a no-op (see TimerTable::print)
+ out << flush;
+ return out;
+}
+#endif //TIMERTABLE_H