diff options
author | Nilay Vaish <nilay@cs.wisc.edu> | 2014-09-01 16:55:40 -0500 |
---|---|---|
committer | Nilay Vaish <nilay@cs.wisc.edu> | 2014-09-01 16:55:40 -0500 |
commit | 82d136285dac52a97384961a814d5a0dda4a6482 (patch) | |
tree | 7e5a7cb87120591f8d87e73cfad4f9d5a300ee67 /src/mem/ruby/structures/SparseMemory.cc | |
parent | 01f792a3675983411ff77b54cbee7ffee2a3d5d5 (diff) | |
download | gem5-82d136285dac52a97384961a814d5a0dda4a6482.tar.xz |
ruby: move files from ruby/system to ruby/structures
The directory ruby/system is crowded and unorganized. Hence, the files that
hold actual physical structures are being moved to the directory
ruby/structures. This includes Cache Memory, Directory Memory,
Memory Controller, Wire Buffer, TBE Table, Perfect Cache Memory, Timer Table,
Bank Array.
The directory ruby/system has the glue code that holds these structures
together.
--HG--
rename : src/mem/ruby/system/MachineID.hh => src/mem/ruby/common/MachineID.hh
rename : src/mem/ruby/buffers/MessageBuffer.cc => src/mem/ruby/network/MessageBuffer.cc
rename : src/mem/ruby/buffers/MessageBuffer.hh => src/mem/ruby/network/MessageBuffer.hh
rename : src/mem/ruby/buffers/MessageBufferNode.cc => src/mem/ruby/network/MessageBufferNode.cc
rename : src/mem/ruby/buffers/MessageBufferNode.hh => src/mem/ruby/network/MessageBufferNode.hh
rename : src/mem/ruby/system/AbstractReplacementPolicy.hh => src/mem/ruby/structures/AbstractReplacementPolicy.hh
rename : src/mem/ruby/system/BankedArray.cc => src/mem/ruby/structures/BankedArray.cc
rename : src/mem/ruby/system/BankedArray.hh => src/mem/ruby/structures/BankedArray.hh
rename : src/mem/ruby/system/Cache.py => src/mem/ruby/structures/Cache.py
rename : src/mem/ruby/system/CacheMemory.cc => src/mem/ruby/structures/CacheMemory.cc
rename : src/mem/ruby/system/CacheMemory.hh => src/mem/ruby/structures/CacheMemory.hh
rename : src/mem/ruby/system/DirectoryMemory.cc => src/mem/ruby/structures/DirectoryMemory.cc
rename : src/mem/ruby/system/DirectoryMemory.hh => src/mem/ruby/structures/DirectoryMemory.hh
rename : src/mem/ruby/system/DirectoryMemory.py => src/mem/ruby/structures/DirectoryMemory.py
rename : src/mem/ruby/system/LRUPolicy.hh => src/mem/ruby/structures/LRUPolicy.hh
rename : src/mem/ruby/system/MemoryControl.cc => src/mem/ruby/structures/MemoryControl.cc
rename : src/mem/ruby/system/MemoryControl.hh => src/mem/ruby/structures/MemoryControl.hh
rename : src/mem/ruby/system/MemoryControl.py => src/mem/ruby/structures/MemoryControl.py
rename : src/mem/ruby/system/MemoryNode.cc => src/mem/ruby/structures/MemoryNode.cc
rename : src/mem/ruby/system/MemoryNode.hh => src/mem/ruby/structures/MemoryNode.hh
rename : src/mem/ruby/system/MemoryVector.hh => src/mem/ruby/structures/MemoryVector.hh
rename : src/mem/ruby/system/PerfectCacheMemory.hh => src/mem/ruby/structures/PerfectCacheMemory.hh
rename : src/mem/ruby/system/PersistentTable.cc => src/mem/ruby/structures/PersistentTable.cc
rename : src/mem/ruby/system/PersistentTable.hh => src/mem/ruby/structures/PersistentTable.hh
rename : src/mem/ruby/system/PseudoLRUPolicy.hh => src/mem/ruby/structures/PseudoLRUPolicy.hh
rename : src/mem/ruby/system/RubyMemoryControl.cc => src/mem/ruby/structures/RubyMemoryControl.cc
rename : src/mem/ruby/system/RubyMemoryControl.hh => src/mem/ruby/structures/RubyMemoryControl.hh
rename : src/mem/ruby/system/RubyMemoryControl.py => src/mem/ruby/structures/RubyMemoryControl.py
rename : src/mem/ruby/system/SparseMemory.cc => src/mem/ruby/structures/SparseMemory.cc
rename : src/mem/ruby/system/SparseMemory.hh => src/mem/ruby/structures/SparseMemory.hh
rename : src/mem/ruby/system/TBETable.hh => src/mem/ruby/structures/TBETable.hh
rename : src/mem/ruby/system/TimerTable.cc => src/mem/ruby/structures/TimerTable.cc
rename : src/mem/ruby/system/TimerTable.hh => src/mem/ruby/structures/TimerTable.hh
rename : src/mem/ruby/system/WireBuffer.cc => src/mem/ruby/structures/WireBuffer.cc
rename : src/mem/ruby/system/WireBuffer.hh => src/mem/ruby/structures/WireBuffer.hh
rename : src/mem/ruby/system/WireBuffer.py => src/mem/ruby/structures/WireBuffer.py
rename : src/mem/ruby/recorder/CacheRecorder.cc => src/mem/ruby/system/CacheRecorder.cc
rename : src/mem/ruby/recorder/CacheRecorder.hh => src/mem/ruby/system/CacheRecorder.hh
Diffstat (limited to 'src/mem/ruby/structures/SparseMemory.cc')
-rw-r--r-- | src/mem/ruby/structures/SparseMemory.cc | 417 |
1 files changed, 417 insertions, 0 deletions
diff --git a/src/mem/ruby/structures/SparseMemory.cc b/src/mem/ruby/structures/SparseMemory.cc new file mode 100644 index 000000000..a63790502 --- /dev/null +++ b/src/mem/ruby/structures/SparseMemory.cc @@ -0,0 +1,417 @@ +/* + * Copyright (c) 2009 Advanced Micro Devices, Inc. + * Copyright (c) 2012 Mark D. Hill and David A. Wood + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer; + * redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution; + * neither the name of the copyright holders nor the names of its + * contributors may be used to endorse or promote products derived from + * this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include <queue> + +#include "debug/RubyCache.hh" +#include "mem/ruby/structures/SparseMemory.hh" +#include "mem/ruby/system/System.hh" + +using namespace std; + +SparseMemory::SparseMemory(int number_of_levels) +{ + int even_level_bits; + int extra; + m_total_number_of_bits = RubySystem::getMemorySizeBits() + - RubySystem::getBlockSizeBits();; + + m_number_of_levels = number_of_levels; + + // + // Create the array that describes the bits per level + // + m_number_of_bits_per_level = new int[m_number_of_levels]; + even_level_bits = m_total_number_of_bits / m_number_of_levels; + extra = m_total_number_of_bits % m_number_of_levels; + for (int level = 0; level < m_number_of_levels; level++) { + if (level < extra) + m_number_of_bits_per_level[level] = even_level_bits + 1; + else + m_number_of_bits_per_level[level] = even_level_bits; + } + m_map_head = new SparseMapType; +} + +SparseMemory::~SparseMemory() +{ + recursivelyRemoveTables(m_map_head, 0); + delete m_map_head; + delete [] m_number_of_bits_per_level; +} + +// Recursively search table hierarchy for the lowest level table. 
+// Delete the lowest table first, the tables above +void +SparseMemory::recursivelyRemoveTables(SparseMapType* curTable, int curLevel) +{ + SparseMapType::iterator iter; + + for (iter = curTable->begin(); iter != curTable->end(); iter++) { + SparseMemEntry entry = (*iter).second; + + if (curLevel != (m_number_of_levels - 1)) { + // If the not at the last level, analyze those lower level + // tables first, then delete those next tables + SparseMapType* nextTable = (SparseMapType*)(entry); + recursivelyRemoveTables(nextTable, (curLevel + 1)); + delete nextTable; + } else { + // If at the last level, delete the directory entry + delete (AbstractEntry*)(entry); + } + entry = NULL; + } + + // Once all entries have been deleted, erase the entries + curTable->erase(curTable->begin(), curTable->end()); +} + +// tests to see if an address is present in the memory +bool +SparseMemory::exist(const Address& address) const +{ + SparseMapType* curTable = m_map_head; + Address curAddress; + + // Initiallize the high bit to be the total number of bits plus + // the block offset. However the highest bit index is one less + // than this value. + int highBit = m_total_number_of_bits + RubySystem::getBlockSizeBits(); + int lowBit; + assert(address == line_address(address)); + DPRINTF(RubyCache, "address: %s\n", address); + + for (int level = 0; level < m_number_of_levels; level++) { + // Create the appropriate sub address for this level + // Note: that set Address is inclusive of the specified range, + // thus the high bit is one less than the total number of bits + // used to create the address. 
+ lowBit = highBit - m_number_of_bits_per_level[level]; + curAddress.setAddress(address.bitSelect(lowBit, highBit - 1)); + + DPRINTF(RubyCache, "level: %d, lowBit: %d, highBit - 1: %d, " + "curAddress: %s\n", + level, lowBit, highBit - 1, curAddress); + + // Adjust the highBit value for the next level + highBit -= m_number_of_bits_per_level[level]; + + // If the address is found, move on to the next level. + // Otherwise, return not found + if (curTable->count(curAddress) != 0) { + curTable = (SparseMapType*)((*curTable)[curAddress]); + } else { + DPRINTF(RubyCache, "Not found\n"); + return false; + } + } + + DPRINTF(RubyCache, "Entry found\n"); + return true; +} + +// add an address to memory +void +SparseMemory::add(const Address& address, AbstractEntry* entry) +{ + assert(address == line_address(address)); + assert(!exist(address)); + + m_total_adds++; + + Address curAddress; + SparseMapType* curTable = m_map_head; + + // Initiallize the high bit to be the total number of bits plus + // the block offset. However the highest bit index is one less + // than this value. + int highBit = m_total_number_of_bits + RubySystem::getBlockSizeBits(); + int lowBit; + void* newEntry = NULL; + + for (int level = 0; level < m_number_of_levels; level++) { + // create the appropriate address for this level + // Note: that set Address is inclusive of the specified range, + // thus the high bit is one less than the total number of bits + // used to create the address. + lowBit = highBit - m_number_of_bits_per_level[level]; + curAddress.setAddress(address.bitSelect(lowBit, highBit - 1)); + + // Adjust the highBit value for the next level + highBit -= m_number_of_bits_per_level[level]; + + // if the address exists in the cur table, move on. Otherwise + // create a new table. + if (curTable->count(curAddress) != 0) { + curTable = (SparseMapType*)((*curTable)[curAddress]); + } else { + m_adds_per_level[level]++; + + // if the last level, add a directory entry. Otherwise add a map. 
+ if (level == (m_number_of_levels - 1)) { + entry->getDataBlk().clear(); + newEntry = (void*)entry; + } else { + SparseMapType* tempMap = new SparseMapType; + newEntry = (void*)(tempMap); + } + + // Create the pointer container SparseMemEntry and add it + // to the table. + (*curTable)[curAddress] = newEntry; + + // Move to the next level of the heirarchy + curTable = (SparseMapType*)newEntry; + } + } + + assert(exist(address)); + return; +} + +// recursively search table hierarchy for the lowest level table. +// remove the lowest entry and any empty tables above it. +int +SparseMemory::recursivelyRemoveLevels(const Address& address, + CurNextInfo& curInfo) +{ + Address curAddress; + CurNextInfo nextInfo; + SparseMemEntry entry; + + // create the appropriate address for this level + // Note: that set Address is inclusive of the specified range, + // thus the high bit is one less than the total number of bits + // used to create the address. + curAddress.setAddress(address.bitSelect(curInfo.lowBit, + curInfo.highBit - 1)); + + DPRINTF(RubyCache, "address: %s, curInfo.level: %d, curInfo.lowBit: %d, " + "curInfo.highBit - 1: %d, curAddress: %s\n", + address, curInfo.level, curInfo.lowBit, + curInfo.highBit - 1, curAddress); + + assert(curInfo.curTable->count(curAddress) != 0); + + entry = (*(curInfo.curTable))[curAddress]; + + if (curInfo.level < (m_number_of_levels - 1)) { + // set up next level's info + nextInfo.curTable = (SparseMapType*)(entry); + nextInfo.level = curInfo.level + 1; + + nextInfo.highBit = curInfo.highBit - + m_number_of_bits_per_level[curInfo.level]; + + nextInfo.lowBit = curInfo.lowBit - + m_number_of_bits_per_level[curInfo.level + 1]; + + // recursively search the table hierarchy + int tableSize = recursivelyRemoveLevels(address, nextInfo); + + // If this table below is now empty, we must delete it and + // erase it from our table. 
+ if (tableSize == 0) { + m_removes_per_level[curInfo.level]++; + delete nextInfo.curTable; + entry = NULL; + curInfo.curTable->erase(curAddress); + } + } else { + // if this is the last level, we have reached the Directory + // Entry and thus we should delete it including the + // SparseMemEntry container struct. + delete (AbstractEntry*)(entry); + entry = NULL; + curInfo.curTable->erase(curAddress); + m_removes_per_level[curInfo.level]++; + } + return curInfo.curTable->size(); +} + +// remove an entry from the table +void +SparseMemory::remove(const Address& address) +{ + assert(address == line_address(address)); + assert(exist(address)); + + m_total_removes++; + + CurNextInfo nextInfo; + + // Initialize table pointer and level value + nextInfo.curTable = m_map_head; + nextInfo.level = 0; + + // Initiallize the high bit to be the total number of bits plus + // the block offset. However the highest bit index is one less + // than this value. + nextInfo.highBit = m_total_number_of_bits + RubySystem::getBlockSizeBits(); + nextInfo.lowBit = nextInfo.highBit - m_number_of_bits_per_level[0];; + + // recursively search the table hierarchy for empty tables + // starting from the level 0. Note we do not check the return + // value because the head table is never deleted; + recursivelyRemoveLevels(address, nextInfo); + + assert(!exist(address)); + return; +} + +// looks an address up in memory +AbstractEntry* +SparseMemory::lookup(const Address& address) +{ + assert(address == line_address(address)); + + Address curAddress; + SparseMapType* curTable = m_map_head; + AbstractEntry* entry = NULL; + + // Initiallize the high bit to be the total number of bits plus + // the block offset. However the highest bit index is one less + // than this value. 
+ int highBit = m_total_number_of_bits + RubySystem::getBlockSizeBits(); + int lowBit; + + for (int level = 0; level < m_number_of_levels; level++) { + // create the appropriate address for this level + // Note: that set Address is inclusive of the specified range, + // thus the high bit is one less than the total number of bits + // used to create the address. + lowBit = highBit - m_number_of_bits_per_level[level]; + curAddress.setAddress(address.bitSelect(lowBit, highBit - 1)); + + DPRINTF(RubyCache, "level: %d, lowBit: %d, highBit - 1: %d, " + "curAddress: %s\n", + level, lowBit, highBit - 1, curAddress); + + // Adjust the highBit value for the next level + highBit -= m_number_of_bits_per_level[level]; + + // If the address is found, move on to the next level. + // Otherwise, return not found + if (curTable->count(curAddress) != 0) { + curTable = (SparseMapType*)((*curTable)[curAddress]); + } else { + DPRINTF(RubyCache, "Not found\n"); + return NULL; + } + } + + // The last entry actually points to the Directory entry not a table + entry = (AbstractEntry*)curTable; + + return entry; +} + +void +SparseMemory::recordBlocks(int cntrl_id, CacheRecorder* tr) const +{ + queue<SparseMapType*> unexplored_nodes[2]; + queue<physical_address_t> address_of_nodes[2]; + + unexplored_nodes[0].push(m_map_head); + address_of_nodes[0].push(0); + + int parity_of_level = 0; + physical_address_t address, temp_address; + Address curAddress; + + // Initiallize the high bit to be the total number of bits plus + // the block offset. However the highest bit index is one less + // than this value. + int highBit = m_total_number_of_bits + RubySystem::getBlockSizeBits(); + int lowBit; + + for (int cur_level = 0; cur_level < m_number_of_levels; cur_level++) { + + // create the appropriate address for this level + // Note: that set Address is inclusive of the specified range, + // thus the high bit is one less than the total number of bits + // used to create the address. 
+ lowBit = highBit - m_number_of_bits_per_level[cur_level]; + + while (!unexplored_nodes[parity_of_level].empty()) { + + SparseMapType* node = unexplored_nodes[parity_of_level].front(); + unexplored_nodes[parity_of_level].pop(); + + address = address_of_nodes[parity_of_level].front(); + address_of_nodes[parity_of_level].pop(); + + SparseMapType::iterator iter; + + for (iter = node->begin(); iter != node->end(); iter++) { + SparseMemEntry entry = (*iter).second; + curAddress = (*iter).first; + + if (cur_level != (m_number_of_levels - 1)) { + // If not at the last level, put this node in the queue + unexplored_nodes[1 - parity_of_level].push( + (SparseMapType*)(entry)); + address_of_nodes[1 - parity_of_level].push(address | + (curAddress.getAddress() << lowBit)); + } else { + // If at the last level, add a trace record + temp_address = address | (curAddress.getAddress() + << lowBit); + DataBlock block = ((AbstractEntry*)entry)->getDataBlk(); + tr->addRecord(cntrl_id, temp_address, 0, RubyRequestType_ST, 0, + block); + } + } + } + + // Adjust the highBit value for the next level + highBit -= m_number_of_bits_per_level[cur_level]; + parity_of_level = 1 - parity_of_level; + } +} + +void +SparseMemory::regStats(const string &name) +{ + m_total_adds.name(name + ".total_adds"); + + m_adds_per_level + .init(m_number_of_levels) + .name(name + ".adds_per_level") + .flags(Stats::pdf | Stats::total) + ; + + m_total_removes.name(name + ".total_removes"); + m_removes_per_level + .init(m_number_of_levels) + .name(name + ".removes_per_level") + .flags(Stats::pdf | Stats::total) + ; +} |