Diffstat (limited to 'src/mem')
-rw-r--r--  src/mem/AbstractMemory.py                                          61
-rw-r--r--  src/mem/SConscript                                                  5
-rw-r--r--  src/mem/SimpleMemory.py (renamed from src/mem/PhysicalMemory.py)   30
-rw-r--r--  src/mem/abstract_mem.cc                                           537
-rw-r--r--  src/mem/abstract_mem.hh                                           250
-rw-r--r--  src/mem/cache/mshr.cc                                               2
-rw-r--r--  src/mem/physical.cc                                               642
-rw-r--r--  src/mem/physical.hh                                               238
-rw-r--r--  src/mem/ruby/system/RubyPort.cc                                     6
-rw-r--r--  src/mem/simple_mem.cc                                             167
-rw-r--r--  src/mem/simple_mem.hh                                             114
11 files changed, 1283 insertions, 769 deletions
diff --git a/src/mem/AbstractMemory.py b/src/mem/AbstractMemory.py
new file mode 100644
index 000000000..bf1621f4d
--- /dev/null
+++ b/src/mem/AbstractMemory.py
@@ -0,0 +1,61 @@
+# Copyright (c) 2012 ARM Limited
+# All rights reserved.
+#
+# The license below extends only to copyright in the software and shall
+# not be construed as granting a license to any other intellectual
+# property including but not limited to intellectual property relating
+# to a hardware implementation of the functionality of the software
+# licensed hereunder. You may use the software subject to the license
+# terms below provided that you ensure that this notice is replicated
+# unmodified and in its entirety in all distributions of the software,
+# modified or unmodified, in source code or in binary form.
+#
+# Copyright (c) 2005-2008 The Regents of The University of Michigan
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met: redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer;
+# redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution;
+# neither the name of the copyright holders nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# Authors: Nathan Binkert
+# Andreas Hansson
+
+from m5.params import *
+from MemObject import MemObject
+
+class AbstractMemory(MemObject):
+ type = 'AbstractMemory'
+ abstract = True
+ range = Param.AddrRange(AddrRange('128MB'), "Address range")
+ file = Param.String('', "Memory-mapped file")
+ null = Param.Bool(False, "Do not store data, always return zero")
+ zero = Param.Bool(False, "Initialize memory with zeros")
+
+ # All memories are passed to the global physical memory, and
+ # certain memories may be excluded from the global address map,
+ # e.g. by the testers that use shadow memories as a reference
+ in_addr_map = Param.Bool(True, "Memory part of the global address map")
+
+ # Should the bootloader include this memory when passing
+ # configuration information about the physical memory layout to
+ # the kernel, e.g. using ATAG or ACPI
+ conf_table_reported = Param.Bool(False, "Report to configuration table")
diff --git a/src/mem/SConscript b/src/mem/SConscript
index d30fd7fcb..efb3c947a 100644
--- a/src/mem/SConscript
+++ b/src/mem/SConscript
@@ -47,7 +47,10 @@ Source('fs_translating_port_proxy.cc')
Source('se_translating_port_proxy.cc')
if env['TARGET_ISA'] != 'no':
- SimObject('PhysicalMemory.py')
+ SimObject('AbstractMemory.py')
+ SimObject('SimpleMemory.py')
+ Source('abstract_mem.cc')
+ Source('simple_mem.cc')
Source('page_table.cc')
Source('physical.cc')
diff --git a/src/mem/PhysicalMemory.py b/src/mem/SimpleMemory.py
index 756117972..51de3374d 100644
--- a/src/mem/PhysicalMemory.py
+++ b/src/mem/SimpleMemory.py
@@ -1,3 +1,15 @@
+# Copyright (c) 2012 ARM Limited
+# All rights reserved.
+#
+# The license below extends only to copyright in the software and shall
+# not be construed as granting a license to any other intellectual
+# property including but not limited to intellectual property relating
+# to a hardware implementation of the functionality of the software
+# licensed hereunder. You may use the software subject to the license
+# terms below provided that you ensure that this notice is replicated
+# unmodified and in its entirety in all distributions of the software,
+# modified or unmodified, in source code or in binary form.
+#
# Copyright (c) 2005-2008 The Regents of The University of Michigan
# All rights reserved.
#
@@ -25,17 +37,13 @@
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
+# Andreas Hansson
from m5.params import *
-from m5.proxy import *
-from MemObject import *
+from AbstractMemory import *
-class PhysicalMemory(MemObject):
- type = 'PhysicalMemory'
- port = VectorSlavePort("the access port")
- range = Param.AddrRange(AddrRange('128MB'), "Device Address")
- file = Param.String('', "memory mapped file")
- latency = Param.Latency('30ns', "latency of an access")
- latency_var = Param.Latency('0ns', "access variablity")
- zero = Param.Bool(False, "zero initialize memory")
- null = Param.Bool(False, "do not store data, always return zero")
+class SimpleMemory(AbstractMemory):
+ type = 'SimpleMemory'
+ port = VectorSlavePort("Slave ports")
+ latency = Param.Latency('30ns', "Request to response latency")
+ latency_var = Param.Latency('0ns', "Request to response latency variance")
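The latency and latency_var parameters above model a fixed request-to-response delay plus a uniformly drawn variance, the same calculation the removed PhysicalMemory::calculateLatency() performed. A minimal standalone C++ sketch of that draw (plain std::mt19937_64 instead of gem5's random_mt, and a stand-in Tick typedef; not gem5 code) could look like:

    #include <cstdint>
    #include <iostream>
    #include <random>

    using Tick = std::uint64_t; // stand-in for gem5's Tick type

    // Return the fixed latency plus a uniformly drawn variance, mirroring
    // the lat/lat_var parameters of SimpleMemory.
    Tick calculateLatency(Tick lat, Tick lat_var, std::mt19937_64 &rng)
    {
        Tick latency = lat;
        if (lat_var != 0) {
            std::uniform_int_distribution<Tick> dist(0, lat_var);
            latency += dist(rng);
        }
        return latency;
    }

    int main()
    {
        std::mt19937_64 rng(42);
        // e.g. 30ns base latency and 10ns variance, expressed in picosecond ticks
        for (int i = 0; i < 3; ++i)
            std::cout << calculateLatency(30000, 10000, rng) << " ticks\n";
        return 0;
    }
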
diff --git a/src/mem/abstract_mem.cc b/src/mem/abstract_mem.cc
new file mode 100644
index 000000000..13a1183a2
--- /dev/null
+++ b/src/mem/abstract_mem.cc
@@ -0,0 +1,537 @@
+/*
+ * Copyright (c) 2010-2012 ARM Limited
+ * All rights reserved
+ *
+ * The license below extends only to copyright in the software and shall
+ * not be construed as granting a license to any other intellectual
+ * property including but not limited to intellectual property relating
+ * to a hardware implementation of the functionality of the software
+ * licensed hereunder. You may use the software subject to the license
+ * terms below provided that you ensure that this notice is replicated
+ * unmodified and in its entirety in all distributions of the software,
+ * modified or unmodified, in source code or in binary form.
+ *
+ * Copyright (c) 2001-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Ron Dreslinski
+ * Ali Saidi
+ * Andreas Hansson
+ */
+
+#include <sys/mman.h>
+#include <sys/types.h>
+#include <sys/user.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <zlib.h>
+
+#include <cerrno>
+#include <cstdio>
+#include <iostream>
+#include <string>
+
+#include "arch/registers.hh"
+#include "config/the_isa.hh"
+#include "debug/LLSC.hh"
+#include "debug/MemoryAccess.hh"
+#include "mem/abstract_mem.hh"
+#include "mem/packet_access.hh"
+
+using namespace std;
+
+AbstractMemory::AbstractMemory(const Params *p) :
+ MemObject(p), range(params()->range), pmemAddr(NULL),
+ confTableReported(p->conf_table_reported), inAddrMap(p->in_addr_map)
+{
+ if (size() % TheISA::PageBytes != 0)
+ panic("Memory Size not divisible by page size\n");
+
+ if (params()->null)
+ return;
+
+ if (params()->file == "") {
+ int map_flags = MAP_ANON | MAP_PRIVATE;
+ pmemAddr = (uint8_t *)mmap(NULL, size(),
+ PROT_READ | PROT_WRITE, map_flags, -1, 0);
+ } else {
+ int map_flags = MAP_PRIVATE;
+ int fd = open(params()->file.c_str(), O_RDONLY);
+ long _size = lseek(fd, 0, SEEK_END);
+ if (_size != range.size()) {
+ warn("Specified size %d does not match file %s %d\n", range.size(),
+ params()->file, _size);
+ range = RangeSize(range.start, _size);
+ }
+ lseek(fd, 0, SEEK_SET);
+ pmemAddr = (uint8_t *)mmap(NULL, roundUp(_size, sysconf(_SC_PAGESIZE)),
+ PROT_READ | PROT_WRITE, map_flags, fd, 0);
+ }
+
+ if (pmemAddr == (void *)MAP_FAILED) {
+ perror("mmap");
+ if (params()->file == "")
+ fatal("Could not mmap!\n");
+ else
+ fatal("Could not find file: %s\n", params()->file);
+ }
+
+ //If requested, initialize all the memory to 0
+ if (p->zero)
+ memset(pmemAddr, 0, size());
+}
+
+
+AbstractMemory::~AbstractMemory()
+{
+ if (pmemAddr)
+ munmap((char*)pmemAddr, size());
+}
+
+void
+AbstractMemory::regStats()
+{
+ using namespace Stats;
+
+ bytesRead
+ .name(name() + ".bytes_read")
+ .desc("Number of bytes read from this memory")
+ ;
+ bytesInstRead
+ .name(name() + ".bytes_inst_read")
+ .desc("Number of instructions bytes read from this memory")
+ ;
+ bytesWritten
+ .name(name() + ".bytes_written")
+ .desc("Number of bytes written to this memory")
+ ;
+ numReads
+ .name(name() + ".num_reads")
+ .desc("Number of read requests responded to by this memory")
+ ;
+ numWrites
+ .name(name() + ".num_writes")
+ .desc("Number of write requests responded to by this memory")
+ ;
+ numOther
+ .name(name() + ".num_other")
+ .desc("Number of other requests responded to by this memory")
+ ;
+ bwRead
+ .name(name() + ".bw_read")
+ .desc("Total read bandwidth from this memory (bytes/s)")
+ .precision(0)
+ .prereq(bytesRead)
+ ;
+ bwInstRead
+ .name(name() + ".bw_inst_read")
+ .desc("Instruction read bandwidth from this memory (bytes/s)")
+ .precision(0)
+ .prereq(bytesInstRead)
+ ;
+ bwWrite
+ .name(name() + ".bw_write")
+ .desc("Write bandwidth from this memory (bytes/s)")
+ .precision(0)
+ .prereq(bytesWritten)
+ ;
+ bwTotal
+ .name(name() + ".bw_total")
+ .desc("Total bandwidth to/from this memory (bytes/s)")
+ .precision(0)
+ .prereq(bwTotal)
+ ;
+ bwRead = bytesRead / simSeconds;
+ bwInstRead = bytesInstRead / simSeconds;
+ bwWrite = bytesWritten / simSeconds;
+ bwTotal = (bytesRead + bytesWritten) / simSeconds;
+}
+
+Range<Addr>
+AbstractMemory::getAddrRange()
+{
+ return range;
+}
+
+// Add load-locked to tracking list. Should only be called if the
+// operation is a load and the LLSC flag is set.
+void
+AbstractMemory::trackLoadLocked(PacketPtr pkt)
+{
+ Request *req = pkt->req;
+ Addr paddr = LockedAddr::mask(req->getPaddr());
+
+ // first we check if we already have a locked addr for this
+ // xc. Since each xc only gets one, we just update the
+ // existing record with the new address.
+ list<LockedAddr>::iterator i;
+
+ for (i = lockedAddrList.begin(); i != lockedAddrList.end(); ++i) {
+ if (i->matchesContext(req)) {
+ DPRINTF(LLSC, "Modifying lock record: context %d addr %#x\n",
+ req->contextId(), paddr);
+ i->addr = paddr;
+ return;
+ }
+ }
+
+ // no record for this xc: need to allocate a new one
+ DPRINTF(LLSC, "Adding lock record: context %d addr %#x\n",
+ req->contextId(), paddr);
+ lockedAddrList.push_front(LockedAddr(req));
+}
+
+
+// Called on *writes* only... both regular stores and
+// store-conditional operations. Check for conventional stores which
+// conflict with locked addresses, and for success/failure of store
+// conditionals.
+bool
+AbstractMemory::checkLockedAddrList(PacketPtr pkt)
+{
+ Request *req = pkt->req;
+ Addr paddr = LockedAddr::mask(req->getPaddr());
+ bool isLLSC = pkt->isLLSC();
+
+ // Initialize return value. Non-conditional stores always
+ // succeed. Assume conditional stores will fail until proven
+ // otherwise.
+ bool success = !isLLSC;
+
+ // Iterate over list. Note that there could be multiple matching
+ // records, as more than one context could have done a load locked
+ // to this location.
+ list<LockedAddr>::iterator i = lockedAddrList.begin();
+
+ while (i != lockedAddrList.end()) {
+
+ if (i->addr == paddr) {
+ // we have a matching address
+
+ if (isLLSC && i->matchesContext(req)) {
+ // it's a store conditional, and as far as the memory
+ // system can tell, the requesting context's lock is
+ // still valid.
+ DPRINTF(LLSC, "StCond success: context %d addr %#x\n",
+ req->contextId(), paddr);
+ success = true;
+ }
+
+ // Get rid of our record of this lock and advance to next
+ DPRINTF(LLSC, "Erasing lock record: context %d addr %#x\n",
+ i->contextId, paddr);
+ i = lockedAddrList.erase(i);
+ }
+ else {
+ // no match: advance to next record
+ ++i;
+ }
+ }
+
+ if (isLLSC) {
+ req->setExtraData(success ? 1 : 0);
+ }
+
+ return success;
+}
+
+
+#if TRACING_ON
+
+#define CASE(A, T) \
+ case sizeof(T): \
+ DPRINTF(MemoryAccess,"%s of size %i on address 0x%x data 0x%x\n", \
+ A, pkt->getSize(), pkt->getAddr(), pkt->get<T>()); \
+ break
+
+
+#define TRACE_PACKET(A) \
+ do { \
+ switch (pkt->getSize()) { \
+ CASE(A, uint64_t); \
+ CASE(A, uint32_t); \
+ CASE(A, uint16_t); \
+ CASE(A, uint8_t); \
+ default: \
+ DPRINTF(MemoryAccess, "%s of size %i on address 0x%x\n", \
+ A, pkt->getSize(), pkt->getAddr()); \
+ DDUMP(MemoryAccess, pkt->getPtr<uint8_t>(), pkt->getSize());\
+ } \
+ } while (0)
+
+#else
+
+#define TRACE_PACKET(A)
+
+#endif
+
+void
+AbstractMemory::access(PacketPtr pkt)
+{
+ assert(pkt->getAddr() >= range.start &&
+ (pkt->getAddr() + pkt->getSize() - 1) <= range.end);
+
+ if (pkt->memInhibitAsserted()) {
+ DPRINTF(MemoryAccess, "mem inhibited on 0x%x: not responding\n",
+ pkt->getAddr());
+ return;
+ }
+
+ uint8_t *hostAddr = pmemAddr + pkt->getAddr() - range.start;
+
+ if (pkt->cmd == MemCmd::SwapReq) {
+ TheISA::IntReg overwrite_val;
+ bool overwrite_mem;
+ uint64_t condition_val64;
+ uint32_t condition_val32;
+
+ if (!pmemAddr)
+ panic("Swap only works if there is real memory (i.e. null=False)");
+ assert(sizeof(TheISA::IntReg) >= pkt->getSize());
+
+ overwrite_mem = true;
+ // keep a copy of our possible write value, and copy what is at the
+ // memory address into the packet
+ std::memcpy(&overwrite_val, pkt->getPtr<uint8_t>(), pkt->getSize());
+ std::memcpy(pkt->getPtr<uint8_t>(), hostAddr, pkt->getSize());
+
+ if (pkt->req->isCondSwap()) {
+ if (pkt->getSize() == sizeof(uint64_t)) {
+ condition_val64 = pkt->req->getExtraData();
+ overwrite_mem = !std::memcmp(&condition_val64, hostAddr,
+ sizeof(uint64_t));
+ } else if (pkt->getSize() == sizeof(uint32_t)) {
+ condition_val32 = (uint32_t)pkt->req->getExtraData();
+ overwrite_mem = !std::memcmp(&condition_val32, hostAddr,
+ sizeof(uint32_t));
+ } else
+ panic("Invalid size for conditional read/write\n");
+ }
+
+ if (overwrite_mem)
+ std::memcpy(hostAddr, &overwrite_val, pkt->getSize());
+
+ assert(!pkt->req->isInstFetch());
+ TRACE_PACKET("Read/Write");
+ numOther++;
+ } else if (pkt->isRead()) {
+ assert(!pkt->isWrite());
+ if (pkt->isLLSC()) {
+ trackLoadLocked(pkt);
+ }
+ if (pmemAddr)
+ memcpy(pkt->getPtr<uint8_t>(), hostAddr, pkt->getSize());
+ TRACE_PACKET(pkt->req->isInstFetch() ? "IFetch" : "Read");
+ numReads++;
+ bytesRead += pkt->getSize();
+ if (pkt->req->isInstFetch())
+ bytesInstRead += pkt->getSize();
+ } else if (pkt->isWrite()) {
+ if (writeOK(pkt)) {
+ if (pmemAddr)
+ memcpy(hostAddr, pkt->getPtr<uint8_t>(), pkt->getSize());
+ assert(!pkt->req->isInstFetch());
+ TRACE_PACKET("Write");
+ numWrites++;
+ bytesWritten += pkt->getSize();
+ }
+ } else if (pkt->isInvalidate()) {
+ // no need to do anything
+ } else {
+ panic("unimplemented");
+ }
+
+ if (pkt->needsResponse()) {
+ pkt->makeResponse();
+ }
+}
+
+void
+AbstractMemory::functionalAccess(PacketPtr pkt)
+{
+ assert(pkt->getAddr() >= range.start &&
+ (pkt->getAddr() + pkt->getSize() - 1) <= range.end);
+
+ uint8_t *hostAddr = pmemAddr + pkt->getAddr() - range.start;
+
+ if (pkt->isRead()) {
+ if (pmemAddr)
+ memcpy(pkt->getPtr<uint8_t>(), hostAddr, pkt->getSize());
+ TRACE_PACKET("Read");
+ pkt->makeResponse();
+ } else if (pkt->isWrite()) {
+ if (pmemAddr)
+ memcpy(hostAddr, pkt->getPtr<uint8_t>(), pkt->getSize());
+ TRACE_PACKET("Write");
+ pkt->makeResponse();
+ } else if (pkt->isPrint()) {
+ Packet::PrintReqState *prs =
+ dynamic_cast<Packet::PrintReqState*>(pkt->senderState);
+ // Need to call printLabels() explicitly since we're not going
+ // through printObj().
+ prs->printLabels();
+ // Right now we just print the single byte at the specified address.
+ ccprintf(prs->os, "%s%#x\n", prs->curPrefix(), *hostAddr);
+ } else {
+ panic("AbstractMemory: unimplemented functional command %s",
+ pkt->cmdString());
+ }
+}
+
+void
+AbstractMemory::serialize(ostream &os)
+{
+ if (!pmemAddr)
+ return;
+
+ gzFile compressedMem;
+ string filename = name() + ".physmem";
+ long _size = range.size();
+
+ SERIALIZE_SCALAR(filename);
+ SERIALIZE_SCALAR(_size);
+
+ // write memory file
+ string thefile = Checkpoint::dir() + "/" + filename.c_str();
+ int fd = creat(thefile.c_str(), 0664);
+ if (fd < 0) {
+ perror("creat");
+ fatal("Can't open physical memory checkpoint file '%s'\n", filename);
+ }
+
+ compressedMem = gzdopen(fd, "wb");
+ if (compressedMem == NULL)
+ fatal("Insufficient memory to allocate compression state for %s\n",
+ filename);
+
+ if (gzwrite(compressedMem, pmemAddr, size()) != (int)size()) {
+ fatal("Write failed on physical memory checkpoint file '%s'\n",
+ filename);
+ }
+
+ if (gzclose(compressedMem))
+ fatal("Close failed on physical memory checkpoint file '%s'\n",
+ filename);
+
+ list<LockedAddr>::iterator i = lockedAddrList.begin();
+
+ vector<Addr> lal_addr;
+ vector<int> lal_cid;
+ while (i != lockedAddrList.end()) {
+ lal_addr.push_back(i->addr);
+ lal_cid.push_back(i->contextId);
+ i++;
+ }
+ arrayParamOut(os, "lal_addr", lal_addr);
+ arrayParamOut(os, "lal_cid", lal_cid);
+}
+
+void
+AbstractMemory::unserialize(Checkpoint *cp, const string &section)
+{
+ if (!pmemAddr)
+ return;
+
+ gzFile compressedMem;
+ long *tempPage;
+ long *pmem_current;
+ uint64_t curSize;
+ uint32_t bytesRead;
+ const uint32_t chunkSize = 16384;
+
+ string filename;
+
+ UNSERIALIZE_SCALAR(filename);
+
+ filename = cp->cptDir + "/" + filename;
+
+ // mmap memoryfile
+ int fd = open(filename.c_str(), O_RDONLY);
+ if (fd < 0) {
+ perror("open");
+ fatal("Can't open physical memory checkpoint file '%s'", filename);
+ }
+
+ compressedMem = gzdopen(fd, "rb");
+ if (compressedMem == NULL)
+ fatal("Insufficient memory to allocate compression state for %s\n",
+ filename);
+
+ // unmap file that was mmapped in the constructor
+ // This is done here to make sure that gzip and open don't muck with our
+ // nice large space of memory before we reallocate it
+ munmap((char*)pmemAddr, size());
+
+ long _size;
+ UNSERIALIZE_SCALAR(_size);
+ if (_size > params()->range.size())
+ fatal("Memory size has changed! size %lld, param size %lld\n",
+ _size, params()->range.size());
+
+ pmemAddr = (uint8_t *)mmap(NULL, size(),
+ PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0);
+
+ if (pmemAddr == (void *)MAP_FAILED) {
+ perror("mmap");
+ fatal("Could not mmap physical memory!\n");
+ }
+
+ curSize = 0;
+ tempPage = (long*)malloc(chunkSize);
+ if (tempPage == NULL)
+ fatal("Unable to malloc memory to read file %s\n", filename);
+
+ /* Only copy bytes that are non-zero, so we don't give the VM system hell */
+ while (curSize < size()) {
+ bytesRead = gzread(compressedMem, tempPage, chunkSize);
+ if (bytesRead == 0)
+ break;
+
+ assert(bytesRead % sizeof(long) == 0);
+
+ for (uint32_t x = 0; x < bytesRead / sizeof(long); x++)
+ {
+ if (*(tempPage+x) != 0) {
+ pmem_current = (long*)(pmemAddr + curSize + x * sizeof(long));
+ *pmem_current = *(tempPage+x);
+ }
+ }
+ curSize += bytesRead;
+ }
+
+ free(tempPage);
+
+ if (gzclose(compressedMem))
+ fatal("Close failed on physical memory checkpoint file '%s'\n",
+ filename);
+
+ vector<Addr> lal_addr;
+ vector<int> lal_cid;
+ arrayParamIn(cp, section, "lal_addr", lal_addr);
+ arrayParamIn(cp, section, "lal_cid", lal_cid);
+ for(int i = 0; i < lal_addr.size(); i++)
+ lockedAddrList.push_front(LockedAddr(lal_addr[i], lal_cid[i]));
+}
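The unserialize() above restores a gzip-compressed memory image in 16 KiB chunks and only writes back non-zero words, so pages that were never touched stay untouched in the freshly mmapped region. A simplified standalone sketch of that loop (a hypothetical restoreImage() helper writing into a plain byte buffer instead of the mmapped backing store; not gem5 code) could be:

    #include <cassert>
    #include <cstring>
    #include <vector>
    #include <zlib.h>

    // Restore a gzip-compressed image into 'mem', copying only non-zero
    // words so the backing pages are touched as little as possible.
    bool restoreImage(const char *path, unsigned char *mem, size_t memSize)
    {
        gzFile in = gzopen(path, "rb");
        if (in == NULL)
            return false;

        const size_t chunkSize = 16384;
        std::vector<long> buf(chunkSize / sizeof(long));
        size_t curSize = 0;

        while (curSize < memSize) {
            int bytesRead = gzread(in, buf.data(), chunkSize);
            if (bytesRead <= 0)
                break;
            assert(bytesRead % sizeof(long) == 0);

            for (size_t x = 0; x < bytesRead / sizeof(long); x++) {
                if (buf[x] != 0)
                    std::memcpy(mem + curSize + x * sizeof(long), &buf[x],
                                sizeof(long));
            }
            curSize += bytesRead;
        }
        return gzclose(in) == Z_OK;
    }
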
diff --git a/src/mem/abstract_mem.hh b/src/mem/abstract_mem.hh
new file mode 100644
index 000000000..42ad08e5c
--- /dev/null
+++ b/src/mem/abstract_mem.hh
@@ -0,0 +1,250 @@
+/*
+ * Copyright (c) 2012 ARM Limited
+ * All rights reserved
+ *
+ * The license below extends only to copyright in the software and shall
+ * not be construed as granting a license to any other intellectual
+ * property including but not limited to intellectual property relating
+ * to a hardware implementation of the functionality of the software
+ * licensed hereunder. You may use the software subject to the license
+ * terms below provided that you ensure that this notice is replicated
+ * unmodified and in its entirety in all distributions of the software,
+ * modified or unmodified, in source code or in binary form.
+ *
+ * Copyright (c) 2001-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Ron Dreslinski
+ * Andreas Hansson
+ */
+
+/**
+ * @file
+ * AbstractMemory declaration
+ */
+
+#ifndef __ABSTRACT_MEMORY_HH__
+#define __ABSTRACT_MEMORY_HH__
+
+#include "mem/mem_object.hh"
+#include "params/AbstractMemory.hh"
+#include "sim/stats.hh"
+
+/**
+ * An abstract memory represents a contiguous block of physical
+ * memory, with an associated address range, and also provides basic
+ * functionality for reading and writing this memory without any
+ * timing information. It is a MemObject since any subclass must have
+ * at least one slave port.
+ */
+class AbstractMemory : public MemObject
+{
+ protected:
+
+ // Address range of this memory
+ Range<Addr> range;
+
+ // Pointer to host memory used to implement this memory
+ uint8_t* pmemAddr;
+
+ // Enable specific memories to be reported to the configuration table
+ bool confTableReported;
+
+ // Should the memory appear in the global address map
+ bool inAddrMap;
+
+ class LockedAddr {
+
+ public:
+ // on alpha, minimum LL/SC granularity is 16 bytes, so lower
+ // bits need to be masked off.
+ static const Addr Addr_Mask = 0xf;
+
+ static Addr mask(Addr paddr) { return (paddr & ~Addr_Mask); }
+
+ Addr addr; // locked address
+ int contextId; // locking hw context
+
+ // check for matching execution context
+ bool matchesContext(Request *req)
+ {
+ return (contextId == req->contextId());
+ }
+
+ LockedAddr(Request *req) : addr(mask(req->getPaddr())),
+ contextId(req->contextId())
+ {
+ }
+ // constructor for unserialization use
+ LockedAddr(Addr _addr, int _cid) : addr(_addr), contextId(_cid)
+ {
+ }
+ };
+
+ std::list<LockedAddr> lockedAddrList;
+
+ // helper function for checkLockedAddrs(): we really want to
+ // inline a quick check for an empty locked addr list (hopefully
+ // the common case), and do the full list search (if necessary) in
+ // this out-of-line function
+ bool checkLockedAddrList(PacketPtr pkt);
+
+ // Record the address of a load-locked operation so that we can
+ // clear the execution context's lock flag if a matching store is
+ // performed
+ void trackLoadLocked(PacketPtr pkt);
+
+ // Compare a store address with any locked addresses so we can
+ // clear the lock flag appropriately. Return value set to 'false'
+ // if store operation should be suppressed (because it was a
+ // conditional store and the address was no longer locked by the
+ // requesting execution context), 'true' otherwise. Note that
+ // this method must be called on *all* stores since even
+ // non-conditional stores must clear any matching lock addresses.
+ bool writeOK(PacketPtr pkt) {
+ Request *req = pkt->req;
+ if (lockedAddrList.empty()) {
+ // no locked addrs: nothing to check, store_conditional fails
+ bool isLLSC = pkt->isLLSC();
+ if (isLLSC) {
+ req->setExtraData(0);
+ }
+ return !isLLSC; // only do write if not an sc
+ } else {
+ // iterate over list...
+ return checkLockedAddrList(pkt);
+ }
+ }
+
+ /** Number of total bytes read from this memory */
+ Stats::Scalar bytesRead;
+ /** Number of instruction bytes read from this memory */
+ Stats::Scalar bytesInstRead;
+ /** Number of bytes written to this memory */
+ Stats::Scalar bytesWritten;
+ /** Number of read requests */
+ Stats::Scalar numReads;
+ /** Number of write requests */
+ Stats::Scalar numWrites;
+ /** Number of other requests */
+ Stats::Scalar numOther;
+ /** Read bandwidth from this memory */
+ Stats::Formula bwRead;
+ /** Instruction read bandwidth from this memory */
+ Stats::Formula bwInstRead;
+ /** Write bandwidth from this memory */
+ Stats::Formula bwWrite;
+ /** Total bandwidth from this memory */
+ Stats::Formula bwTotal;
+
+ private:
+
+ // Prevent copying
+ AbstractMemory(const AbstractMemory&);
+
+ // Prevent assignment
+ AbstractMemory& operator=(const AbstractMemory&);
+
+ public:
+
+ typedef AbstractMemoryParams Params;
+
+ AbstractMemory(const Params* p);
+ virtual ~AbstractMemory();
+
+ const Params *
+ params() const
+ {
+ return dynamic_cast<const Params *>(_params);
+ }
+
+ /**
+ * Get the address range
+ *
+ * @return a single contiguous address range
+ */
+ Range<Addr> getAddrRange();
+
+ /**
+ * Get the memory size.
+ *
+ * @return the size of the memory
+ */
+ uint64_t size() { return range.size(); }
+
+ /**
+ * Get the start address.
+ *
+ * @return the start address of the memory
+ */
+ Addr start() { return range.start; }
+
+ /**
+ * Should this memory be passed to the kernel and part of the OS
+ * physical memory layout.
+ *
+ * @return if this memory is reported
+ */
+ bool isConfReported() const { return confTableReported; }
+
+ /**
+ * Some memories are used as shadow memories or should for other
+ * reasons not be part of the global address map.
+ *
+ * @return if this memory is part of the address map
+ */
+ bool isInAddrMap() const { return inAddrMap; }
+
+ /**
+ * Perform an untimed memory access and update all the state
+ * (e.g. locked addresses) and statistics accordingly. The packet
+ * is turned into a response if required.
+ *
+ * @param pkt Packet performing the access
+ */
+ void access(PacketPtr pkt);
+
+ /**
+ * Perform an untimed memory read or write without changing
+ * anything but the memory itself. No stats are affected by this
+ * access. In addition to normal accesses this also facilitates
+ * print requests.
+ *
+ * @param pkt Packet performing the access
+ */
+ void functionalAccess(PacketPtr pkt);
+
+ /**
+ * Register Statistics
+ */
+ virtual void regStats();
+
+ virtual void serialize(std::ostream &os);
+ virtual void unserialize(Checkpoint *cp, const std::string &section);
+
+};
+
+#endif //__ABSTRACT_MEMORY_HH__
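The LockedAddr list together with writeOK()/checkLockedAddrList() above implements the load-locked/store-conditional bookkeeping: a load-locked records (context, address), every write to that address erases matching records, and a store-conditional succeeds only if its own context's record is still present. A small standalone model of that protocol (simplified types and no Request/Packet plumbing; not the gem5 classes) might be:

    #include <cassert>
    #include <cstdint>
    #include <list>

    using Addr = std::uint64_t;

    struct LockedAddr {
        Addr addr;      // locked (16-byte aligned) address
        int contextId;  // hardware context holding the lock
    };

    static const Addr kAddrMask = 0xf;
    static Addr mask(Addr paddr) { return paddr & ~kAddrMask; }

    std::list<LockedAddr> lockedAddrList;

    // Record (or update) the lock for this context on a load-locked.
    void trackLoadLocked(int contextId, Addr paddr)
    {
        paddr = mask(paddr);
        for (auto &l : lockedAddrList) {
            if (l.contextId == contextId) {
                l.addr = paddr;
                return;
            }
        }
        lockedAddrList.push_front({paddr, contextId});
    }

    // Called on every write. Returns true if the write should be performed;
    // a store-conditional only succeeds if this context still holds the lock.
    bool writeOK(int contextId, Addr paddr, bool isStoreConditional)
    {
        paddr = mask(paddr);
        bool success = !isStoreConditional;
        for (auto i = lockedAddrList.begin(); i != lockedAddrList.end();) {
            if (i->addr == paddr) {
                if (isStoreConditional && i->contextId == contextId)
                    success = true;
                i = lockedAddrList.erase(i); // any write clears matching locks
            } else {
                ++i;
            }
        }
        return success;
    }

    int main()
    {
        trackLoadLocked(0, 0x1000);
        assert(writeOK(1, 0x1000, false));  // plain store by another context...
        assert(!writeOK(0, 0x1000, true));  // ...breaks context 0's lock
        trackLoadLocked(0, 0x2000);
        assert(writeOK(0, 0x2000, true));   // undisturbed SC succeeds
        return 0;
    }
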
diff --git a/src/mem/cache/mshr.cc b/src/mem/cache/mshr.cc
index 821a3635b..6da779538 100644
--- a/src/mem/cache/mshr.cc
+++ b/src/mem/cache/mshr.cc
@@ -331,7 +331,7 @@ MSHR::handleSnoop(PacketPtr pkt, Counter _order)
// to forward the snoop up the hierarchy after the current
// transaction completes.
- // Actual target device (typ. PhysicalMemory) will delete the
+ // Actual target device (typ. a memory) will delete the
// packet on reception, so we need to save a copy here.
PacketPtr cp_pkt = new Packet(pkt, true);
targets->add(cp_pkt, curTick(), _order, Target::FromSnoop,
diff --git a/src/mem/physical.cc b/src/mem/physical.cc
index 78181b7df..5f92976f9 100644
--- a/src/mem/physical.cc
+++ b/src/mem/physical.cc
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2010-2011 ARM Limited
+ * Copyright (c) 2012 ARM Limited
* All rights reserved
*
* The license below extends only to copyright in the software and shall
@@ -11,9 +11,6 @@
* unmodified and in its entirety in all distributions of the software,
* modified or unmodified, in source code or in binary form.
*
- * Copyright (c) 2001-2005 The Regents of The University of Michigan
- * All rights reserved.
- *
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
@@ -37,618 +34,93 @@
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
- * Authors: Ron Dreslinski
- * Ali Saidi
+ * Authors: Andreas Hansson
*/
-#include <sys/mman.h>
-#include <sys/types.h>
-#include <sys/user.h>
-#include <fcntl.h>
-#include <unistd.h>
-#include <zlib.h>
-
-#include <cerrno>
-#include <cstdio>
-#include <iostream>
-#include <string>
-
-#include "arch/isa_traits.hh"
-#include "arch/registers.hh"
-#include "base/intmath.hh"
-#include "base/misc.hh"
-#include "base/random.hh"
-#include "base/types.hh"
-#include "config/the_isa.hh"
-#include "debug/LLSC.hh"
-#include "debug/MemoryAccess.hh"
-#include "mem/packet_access.hh"
+#include "debug/BusAddrRanges.hh"
#include "mem/physical.hh"
-#include "sim/eventq.hh"
using namespace std;
-using namespace TheISA;
-
-PhysicalMemory::PhysicalMemory(const Params *p)
- : MemObject(p), pmemAddr(NULL), lat(p->latency), lat_var(p->latency_var),
- _size(params()->range.size()), _start(params()->range.start)
-{
- if (size() % TheISA::PageBytes != 0)
- panic("Memory Size not divisible by page size\n");
-
- // create the appropriate number of ports
- for (int i = 0; i < p->port_port_connection_count; ++i) {
- ports.push_back(new MemoryPort(csprintf("%s-port%d", name(), i),
- this));
- }
-
- if (params()->null)
- return;
-
-
- if (params()->file == "") {
- int map_flags = MAP_ANON | MAP_PRIVATE;
- pmemAddr = (uint8_t *)mmap(NULL, size(),
- PROT_READ | PROT_WRITE, map_flags, -1, 0);
- } else {
- int map_flags = MAP_PRIVATE;
- int fd = open(params()->file.c_str(), O_RDONLY);
- _size = lseek(fd, 0, SEEK_END);
- lseek(fd, 0, SEEK_SET);
- pmemAddr = (uint8_t *)mmap(NULL, roundUp(size(), sysconf(_SC_PAGESIZE)),
- PROT_READ | PROT_WRITE, map_flags, fd, 0);
- }
-
- if (pmemAddr == (void *)MAP_FAILED) {
- perror("mmap");
- if (params()->file == "")
- fatal("Could not mmap!\n");
- else
- fatal("Could not find file: %s\n", params()->file);
- }
-
- //If requested, initialize all the memory to 0
- if (p->zero)
- memset(pmemAddr, 0, size());
-}
-
-void
-PhysicalMemory::init()
-{
- for (PortIterator p = ports.begin(); p != ports.end(); ++p) {
- if (!(*p)->isConnected()) {
- fatal("PhysicalMemory port %s is unconnected!\n", (*p)->name());
- } else {
- (*p)->sendRangeChange();
- }
- }
-}
-
-PhysicalMemory::~PhysicalMemory()
-{
- if (pmemAddr)
- munmap((char*)pmemAddr, size());
-}
-
-void
-PhysicalMemory::regStats()
-{
- using namespace Stats;
-
- bytesRead
- .name(name() + ".bytes_read")
- .desc("Number of bytes read from this memory")
- ;
- bytesInstRead
- .name(name() + ".bytes_inst_read")
- .desc("Number of instructions bytes read from this memory")
- ;
- bytesWritten
- .name(name() + ".bytes_written")
- .desc("Number of bytes written to this memory")
- ;
- numReads
- .name(name() + ".num_reads")
- .desc("Number of read requests responded to by this memory")
- ;
- numWrites
- .name(name() + ".num_writes")
- .desc("Number of write requests responded to by this memory")
- ;
- numOther
- .name(name() + ".num_other")
- .desc("Number of other requests responded to by this memory")
- ;
- bwRead
- .name(name() + ".bw_read")
- .desc("Total read bandwidth from this memory (bytes/s)")
- .precision(0)
- .prereq(bytesRead)
- ;
- bwInstRead
- .name(name() + ".bw_inst_read")
- .desc("Instruction read bandwidth from this memory (bytes/s)")
- .precision(0)
- .prereq(bytesInstRead)
- ;
- bwWrite
- .name(name() + ".bw_write")
- .desc("Write bandwidth from this memory (bytes/s)")
- .precision(0)
- .prereq(bytesWritten)
- ;
- bwTotal
- .name(name() + ".bw_total")
- .desc("Total bandwidth to/from this memory (bytes/s)")
- .precision(0)
- .prereq(bwTotal)
- ;
- bwRead = bytesRead / simSeconds;
- bwInstRead = bytesInstRead / simSeconds;
- bwWrite = bytesWritten / simSeconds;
- bwTotal = (bytesRead + bytesWritten) / simSeconds;
-}
-unsigned
-PhysicalMemory::deviceBlockSize() const
-{
- //Can accept anysize request
- return 0;
-}
-
-Tick
-PhysicalMemory::calculateLatency(PacketPtr pkt)
-{
- Tick latency = lat;
- if (lat_var != 0)
- latency += random_mt.random<Tick>(0, lat_var);
- return latency;
-}
-
-
-
-// Add load-locked to tracking list. Should only be called if the
-// operation is a load and the LLSC flag is set.
-void
-PhysicalMemory::trackLoadLocked(PacketPtr pkt)
+PhysicalMemory::PhysicalMemory(const vector<AbstractMemory*>& _memories) :
+ size(0)
{
- Request *req = pkt->req;
- Addr paddr = LockedAddr::mask(req->getPaddr());
+ for (vector<AbstractMemory*>::const_iterator m = _memories.begin();
+ m != _memories.end(); ++m) {
+ // only add the memory if it is part of the global address map
+ if ((*m)->isInAddrMap()) {
+ memories.push_back(*m);
- // first we check if we already have a locked addr for this
- // xc. Since each xc only gets one, we just update the
- // existing record with the new address.
- list<LockedAddr>::iterator i;
+ // calculate the total size once and for all
+ size += (*m)->size();
- for (i = lockedAddrList.begin(); i != lockedAddrList.end(); ++i) {
- if (i->matchesContext(req)) {
- DPRINTF(LLSC, "Modifying lock record: context %d addr %#x\n",
- req->contextId(), paddr);
- i->addr = paddr;
- return;
+ // add the range to our interval tree and make sure it does not
+ // intersect an existing range
+ if (addrMap.insert((*m)->getAddrRange(), *m) == addrMap.end())
+ fatal("Memory address range for %s is overlapping\n",
+ (*m)->name());
}
+ DPRINTF(BusAddrRanges,
+ "Skipping memory %s that is not in global address map\n",
+ (*m)->name());
}
-
- // no record for this xc: need to allocate a new one
- DPRINTF(LLSC, "Adding lock record: context %d addr %#x\n",
- req->contextId(), paddr);
- lockedAddrList.push_front(LockedAddr(req));
+ rangeCache.invalidate();
}
-
-// Called on *writes* only... both regular stores and
-// store-conditional operations. Check for conventional stores which
-// conflict with locked addresses, and for success/failure of store
-// conditionals.
bool
-PhysicalMemory::checkLockedAddrList(PacketPtr pkt)
-{
- Request *req = pkt->req;
- Addr paddr = LockedAddr::mask(req->getPaddr());
- bool isLLSC = pkt->isLLSC();
-
- // Initialize return value. Non-conditional stores always
- // succeed. Assume conditional stores will fail until proven
- // otherwise.
- bool success = !isLLSC;
-
- // Iterate over list. Note that there could be multiple matching
- // records, as more than one context could have done a load locked
- // to this location.
- list<LockedAddr>::iterator i = lockedAddrList.begin();
-
- while (i != lockedAddrList.end()) {
-
- if (i->addr == paddr) {
- // we have a matching address
-
- if (isLLSC && i->matchesContext(req)) {
- // it's a store conditional, and as far as the memory
- // system can tell, the requesting context's lock is
- // still valid.
- DPRINTF(LLSC, "StCond success: context %d addr %#x\n",
- req->contextId(), paddr);
- success = true;
- }
-
- // Get rid of our record of this lock and advance to next
- DPRINTF(LLSC, "Erasing lock record: context %d addr %#x\n",
- i->contextId, paddr);
- i = lockedAddrList.erase(i);
- }
- else {
- // no match: advance to next record
- ++i;
+PhysicalMemory::isMemAddr(Addr addr) const
+{
+ // see if the address is within the last matched range
+ if (addr != rangeCache) {
+ // lookup in the interval tree
+ range_map<Addr, AbstractMemory*>::const_iterator r =
+ addrMap.find(addr);
+ if (r == addrMap.end()) {
+ // not in the cache, and not in the tree
+ return false;
}
+ // the range is in the tree, update the cache
+ rangeCache = r->first;
}
- if (isLLSC) {
- req->setExtraData(success ? 1 : 0);
- }
-
- return success;
-}
-
-
-#if TRACING_ON
-
-#define CASE(A, T) \
- case sizeof(T): \
- DPRINTF(MemoryAccess,"%s of size %i on address 0x%x data 0x%x\n", \
- A, pkt->getSize(), pkt->getAddr(), pkt->get<T>()); \
- break
-
-
-#define TRACE_PACKET(A) \
- do { \
- switch (pkt->getSize()) { \
- CASE(A, uint64_t); \
- CASE(A, uint32_t); \
- CASE(A, uint16_t); \
- CASE(A, uint8_t); \
- default: \
- DPRINTF(MemoryAccess, "%s of size %i on address 0x%x\n", \
- A, pkt->getSize(), pkt->getAddr()); \
- DDUMP(MemoryAccess, pkt->getPtr<uint8_t>(), pkt->getSize());\
- } \
- } while (0)
-
-#else
-
-#define TRACE_PACKET(A)
-
-#endif
-
-Tick
-PhysicalMemory::doAtomicAccess(PacketPtr pkt)
-{
- assert(pkt->getAddr() >= start() &&
- pkt->getAddr() + pkt->getSize() <= start() + size());
-
- if (pkt->memInhibitAsserted()) {
- DPRINTF(MemoryAccess, "mem inhibited on 0x%x: not responding\n",
- pkt->getAddr());
- return 0;
- }
-
- uint8_t *hostAddr = pmemAddr + pkt->getAddr() - start();
-
- if (pkt->cmd == MemCmd::SwapReq) {
- IntReg overwrite_val;
- bool overwrite_mem;
- uint64_t condition_val64;
- uint32_t condition_val32;
-
- if (!pmemAddr)
- panic("Swap only works if there is real memory (i.e. null=False)");
- assert(sizeof(IntReg) >= pkt->getSize());
-
- overwrite_mem = true;
- // keep a copy of our possible write value, and copy what is at the
- // memory address into the packet
- std::memcpy(&overwrite_val, pkt->getPtr<uint8_t>(), pkt->getSize());
- std::memcpy(pkt->getPtr<uint8_t>(), hostAddr, pkt->getSize());
+ assert(addrMap.find(addr) != addrMap.end());
- if (pkt->req->isCondSwap()) {
- if (pkt->getSize() == sizeof(uint64_t)) {
- condition_val64 = pkt->req->getExtraData();
- overwrite_mem = !std::memcmp(&condition_val64, hostAddr,
- sizeof(uint64_t));
- } else if (pkt->getSize() == sizeof(uint32_t)) {
- condition_val32 = (uint32_t)pkt->req->getExtraData();
- overwrite_mem = !std::memcmp(&condition_val32, hostAddr,
- sizeof(uint32_t));
- } else
- panic("Invalid size for conditional read/write\n");
- }
-
- if (overwrite_mem)
- std::memcpy(hostAddr, &overwrite_val, pkt->getSize());
-
- assert(!pkt->req->isInstFetch());
- TRACE_PACKET("Read/Write");
- numOther++;
- } else if (pkt->isRead()) {
- assert(!pkt->isWrite());
- if (pkt->isLLSC()) {
- trackLoadLocked(pkt);
- }
- if (pmemAddr)
- memcpy(pkt->getPtr<uint8_t>(), hostAddr, pkt->getSize());
- TRACE_PACKET(pkt->req->isInstFetch() ? "IFetch" : "Read");
- numReads++;
- bytesRead += pkt->getSize();
- if (pkt->req->isInstFetch())
- bytesInstRead += pkt->getSize();
- } else if (pkt->isWrite()) {
- if (writeOK(pkt)) {
- if (pmemAddr)
- memcpy(hostAddr, pkt->getPtr<uint8_t>(), pkt->getSize());
- assert(!pkt->req->isInstFetch());
- TRACE_PACKET("Write");
- numWrites++;
- bytesWritten += pkt->getSize();
- }
- } else if (pkt->isInvalidate()) {
- //upgrade or invalidate
- if (pkt->needsResponse()) {
- pkt->makeAtomicResponse();
- }
- } else {
- panic("unimplemented");
- }
-
- if (pkt->needsResponse()) {
- pkt->makeAtomicResponse();
- }
- return calculateLatency(pkt);
-}
-
-
-void
-PhysicalMemory::doFunctionalAccess(PacketPtr pkt)
-{
- assert(pkt->getAddr() >= start() &&
- pkt->getAddr() + pkt->getSize() <= start() + size());
-
-
- uint8_t *hostAddr = pmemAddr + pkt->getAddr() - start();
-
- if (pkt->isRead()) {
- if (pmemAddr)
- memcpy(pkt->getPtr<uint8_t>(), hostAddr, pkt->getSize());
- TRACE_PACKET("Read");
- pkt->makeAtomicResponse();
- } else if (pkt->isWrite()) {
- if (pmemAddr)
- memcpy(hostAddr, pkt->getPtr<uint8_t>(), pkt->getSize());
- TRACE_PACKET("Write");
- pkt->makeAtomicResponse();
- } else if (pkt->isPrint()) {
- Packet::PrintReqState *prs =
- dynamic_cast<Packet::PrintReqState*>(pkt->senderState);
- // Need to call printLabels() explicitly since we're not going
- // through printObj().
- prs->printLabels();
- // Right now we just print the single byte at the specified address.
- ccprintf(prs->os, "%s%#x\n", prs->curPrefix(), *hostAddr);
- } else {
- panic("PhysicalMemory: unimplemented functional command %s",
- pkt->cmdString());
- }
-}
-
-
-SlavePort &
-PhysicalMemory::getSlavePort(const std::string &if_name, int idx)
-{
- if (if_name != "port") {
- return MemObject::getSlavePort(if_name, idx);
- } else {
- if (idx >= static_cast<int>(ports.size())) {
- fatal("PhysicalMemory::getSlavePort: unknown index %d\n", idx);
- }
-
- return *ports[idx];
- }
-}
-
-PhysicalMemory::MemoryPort::MemoryPort(const std::string &_name,
- PhysicalMemory *_memory)
- : SimpleTimingPort(_name, _memory), memory(_memory)
-{ }
-
-AddrRangeList
-PhysicalMemory::MemoryPort::getAddrRanges()
-{
- return memory->getAddrRanges();
+ // either matched the cache or found in the tree
+ return true;
}
AddrRangeList
-PhysicalMemory::getAddrRanges()
+PhysicalMemory::getConfAddrRanges() const
{
+ // this could be done once in the constructor, but since it is unlikely to
+ // be called more than once the iteration should not be a problem
AddrRangeList ranges;
- ranges.push_back(RangeSize(start(), size()));
- return ranges;
-}
-
-unsigned
-PhysicalMemory::MemoryPort::deviceBlockSize() const
-{
- return memory->deviceBlockSize();
-}
-
-Tick
-PhysicalMemory::MemoryPort::recvAtomic(PacketPtr pkt)
-{
- return memory->doAtomicAccess(pkt);
-}
-
-void
-PhysicalMemory::MemoryPort::recvFunctional(PacketPtr pkt)
-{
- pkt->pushLabel(memory->name());
-
- if (!queue.checkFunctional(pkt)) {
- // Default implementation of SimpleTimingPort::recvFunctional()
- // calls recvAtomic() and throws away the latency; we can save a
- // little here by just not calculating the latency.
- memory->doFunctionalAccess(pkt);
- }
-
- pkt->popLabel();
-}
-
-unsigned int
-PhysicalMemory::drain(Event *de)
-{
- int count = 0;
- for (PortIterator pi = ports.begin(); pi != ports.end(); ++pi) {
- count += (*pi)->drain(de);
+ for (vector<AbstractMemory*>::const_iterator m = memories.begin();
+ m != memories.end(); ++m) {
+ if ((*m)->isConfReported()) {
+ ranges.push_back((*m)->getAddrRange());
+ }
}
- if (count)
- changeState(Draining);
- else
- changeState(Drained);
- return count;
+ return ranges;
}
void
-PhysicalMemory::serialize(ostream &os)
+PhysicalMemory::access(PacketPtr pkt)
{
- if (!pmemAddr)
- return;
-
- gzFile compressedMem;
- string filename = name() + ".physmem";
-
- SERIALIZE_SCALAR(filename);
- SERIALIZE_SCALAR(_size);
-
- // write memory file
- string thefile = Checkpoint::dir() + "/" + filename.c_str();
- int fd = creat(thefile.c_str(), 0664);
- if (fd < 0) {
- perror("creat");
- fatal("Can't open physical memory checkpoint file '%s'\n", filename);
- }
-
- compressedMem = gzdopen(fd, "wb");
- if (compressedMem == NULL)
- fatal("Insufficient memory to allocate compression state for %s\n",
- filename);
-
- if (gzwrite(compressedMem, pmemAddr, size()) != (int)size()) {
- fatal("Write failed on physical memory checkpoint file '%s'\n",
- filename);
- }
-
- if (gzclose(compressedMem))
- fatal("Close failed on physical memory checkpoint file '%s'\n",
- filename);
-
- list<LockedAddr>::iterator i = lockedAddrList.begin();
-
- vector<Addr> lal_addr;
- vector<int> lal_cid;
- while (i != lockedAddrList.end()) {
- lal_addr.push_back(i->addr);
- lal_cid.push_back(i->contextId);
- i++;
- }
- arrayParamOut(os, "lal_addr", lal_addr);
- arrayParamOut(os, "lal_cid", lal_cid);
+ assert(pkt->isRequest());
+ Addr addr = pkt->getAddr();
+ range_map<Addr, AbstractMemory*>::const_iterator m = addrMap.find(addr);
+ assert(m != addrMap.end());
+ m->second->access(pkt);
}
void
-PhysicalMemory::unserialize(Checkpoint *cp, const string &section)
-{
- if (!pmemAddr)
- return;
-
- gzFile compressedMem;
- long *tempPage;
- long *pmem_current;
- uint64_t curSize;
- uint32_t bytesRead;
- const uint32_t chunkSize = 16384;
-
- string filename;
-
- UNSERIALIZE_SCALAR(filename);
-
- filename = cp->cptDir + "/" + filename;
-
- // mmap memoryfile
- int fd = open(filename.c_str(), O_RDONLY);
- if (fd < 0) {
- perror("open");
- fatal("Can't open physical memory checkpoint file '%s'", filename);
- }
-
- compressedMem = gzdopen(fd, "rb");
- if (compressedMem == NULL)
- fatal("Insufficient memory to allocate compression state for %s\n",
- filename);
-
- // unmap file that was mmapped in the constructor
- // This is done here to make sure that gzip and open don't muck with our
- // nice large space of memory before we reallocate it
- munmap((char*)pmemAddr, size());
-
- UNSERIALIZE_SCALAR(_size);
- if (size() > params()->range.size())
- fatal("Memory size has changed! size %lld, param size %lld\n",
- size(), params()->range.size());
-
- pmemAddr = (uint8_t *)mmap(NULL, size(),
- PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0);
-
- if (pmemAddr == (void *)MAP_FAILED) {
- perror("mmap");
- fatal("Could not mmap physical memory!\n");
- }
-
- curSize = 0;
- tempPage = (long*)malloc(chunkSize);
- if (tempPage == NULL)
- fatal("Unable to malloc memory to read file %s\n", filename);
-
- /* Only copy bytes that are non-zero, so we don't give the VM system hell */
- while (curSize < size()) {
- bytesRead = gzread(compressedMem, tempPage, chunkSize);
- if (bytesRead == 0)
- break;
-
- assert(bytesRead % sizeof(long) == 0);
-
- for (uint32_t x = 0; x < bytesRead / sizeof(long); x++)
- {
- if (*(tempPage+x) != 0) {
- pmem_current = (long*)(pmemAddr + curSize + x * sizeof(long));
- *pmem_current = *(tempPage+x);
- }
- }
- curSize += bytesRead;
- }
-
- free(tempPage);
-
- if (gzclose(compressedMem))
- fatal("Close failed on physical memory checkpoint file '%s'\n",
- filename);
-
- vector<Addr> lal_addr;
- vector<int> lal_cid;
- arrayParamIn(cp, section, "lal_addr", lal_addr);
- arrayParamIn(cp, section, "lal_cid", lal_cid);
- for(int i = 0; i < lal_addr.size(); i++)
- lockedAddrList.push_front(LockedAddr(lal_addr[i], lal_cid[i]));
-}
-
-PhysicalMemory *
-PhysicalMemoryParams::create()
+PhysicalMemory::functionalAccess(PacketPtr pkt)
{
- return new PhysicalMemory(this);
+ assert(pkt->isRequest());
+ Addr addr = pkt->getAddr();
+ range_map<Addr, AbstractMemory*>::const_iterator m = addrMap.find(addr);
+ assert(m != addrMap.end());
+ m->second->functionalAccess(pkt);
}
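The new PhysicalMemory above keeps the address-mapped memories in an interval tree (range_map) and caches the last range that matched, so repeated accesses to the same memory skip the tree lookup. A self-contained approximation using std::map keyed by range start (hypothetical Range/Memory types standing in for gem5's range_map and AbstractMemory) might be:

    #include <cassert>
    #include <cstdint>
    #include <map>
    #include <string>

    using Addr = std::uint64_t;

    struct Memory { std::string name; };

    // Half-open ranges [start, start + size) keyed by start address.
    struct Range { Addr start; Addr size; Memory *mem; };

    class PhysMemMap {
      public:
        void add(Addr start, Addr size, Memory *mem)
        {
            map_[start] = Range{start, size, mem};
        }

        // Return the memory covering addr, or nullptr. The last hit is
        // cached so back-to-back accesses to one memory avoid the lookup.
        Memory *find(Addr addr) const
        {
            if (cacheValid_ && addr >= cache_.start &&
                addr < cache_.start + cache_.size)
                return cache_.mem;

            auto it = map_.upper_bound(addr);
            if (it == map_.begin())
                return nullptr;
            --it; // candidate range starting at or below addr
            const Range &r = it->second;
            if (addr >= r.start + r.size)
                return nullptr;

            cache_ = r;
            cacheValid_ = true;
            return r.mem;
        }

      private:
        std::map<Addr, Range> map_;
        mutable Range cache_{};
        mutable bool cacheValid_ = false;
    };

    int main()
    {
        Memory dram{"dram"}, rom{"rom"};
        PhysMemMap pm;
        pm.add(0x00000000, 0x8000000, &dram);   // 128 MiB at 0
        pm.add(0x10000000, 0x10000, &rom);      // 64 KiB ROM
        assert(pm.find(0x100) == &dram);
        assert(pm.find(0x10000100) == &rom);
        assert(pm.find(0x20000000) == nullptr); // hole in the address map
        return 0;
    }
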
diff --git a/src/mem/physical.hh b/src/mem/physical.hh
index 23d4052fa..e78b1d2da 100644
--- a/src/mem/physical.hh
+++ b/src/mem/physical.hh
@@ -1,6 +1,15 @@
/*
- * Copyright (c) 2001-2005 The Regents of The University of Michigan
- * All rights reserved.
+ * Copyright (c) 2012 ARM Limited
+ * All rights reserved
+ *
+ * The license below extends only to copyright in the software and shall
+ * not be construed as granting a license to any other intellectual
+ * property including but not limited to intellectual property relating
+ * to a hardware implementation of the functionality of the software
+ * licensed hereunder. You may use the software subject to the license
+ * terms below provided that you ensure that this notice is replicated
+ * unmodified and in its entirety in all distributions of the software,
+ * modified or unmodified, in source code or in binary form.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
@@ -25,195 +34,88 @@
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
- * Authors: Ron Dreslinski
- */
-
-/* @file
+ * Authors: Andreas Hansson
*/
#ifndef __PHYSICAL_MEMORY_HH__
#define __PHYSICAL_MEMORY_HH__
-#include <map>
-#include <string>
-
-#include "base/range.hh"
-#include "base/statistics.hh"
-#include "mem/mem_object.hh"
+#include "base/range_map.hh"
+#include "mem/abstract_mem.hh"
#include "mem/packet.hh"
-#include "mem/tport.hh"
-#include "params/PhysicalMemory.hh"
-#include "sim/eventq.hh"
-#include "sim/stats.hh"
-
-//
-// Functional model for a contiguous block of physical memory. (i.e. RAM)
-//
-class PhysicalMemory : public MemObject
-{
- protected:
-
- class MemoryPort : public SimpleTimingPort
- {
- PhysicalMemory *memory;
-
- public:
- MemoryPort(const std::string &_name, PhysicalMemory *_memory);
-
- protected:
+/**
+ * The physical memory encapsulates all memories in the system and
+ * provides basic functionality for accessing those memories without
+ * going through the memory system and interconnect.
+ */
+class PhysicalMemory
+{
- virtual Tick recvAtomic(PacketPtr pkt);
+ private:
- virtual void recvFunctional(PacketPtr pkt);
+ // Global address map
+ range_map<Addr, AbstractMemory* > addrMap;
- virtual AddrRangeList getAddrRanges();
+ // a mutable cache for the last range that matched an address
+ mutable Range<Addr> rangeCache;
- virtual unsigned deviceBlockSize() const;
- };
+ // All address-mapped memories
+ std::vector<AbstractMemory*> memories;
- int numPorts;
+ // The total memory size
+ uint64_t size;
+ // Prevent copying
+ PhysicalMemory(const PhysicalMemory&);
- private:
- // prevent copying of a MainMemory object
- PhysicalMemory(const PhysicalMemory &specmem);
- const PhysicalMemory &operator=(const PhysicalMemory &specmem);
-
- protected:
-
- class LockedAddr {
- public:
- // on alpha, minimum LL/SC granularity is 16 bytes, so lower
- // bits need to masked off.
- static const Addr Addr_Mask = 0xf;
-
- static Addr mask(Addr paddr) { return (paddr & ~Addr_Mask); }
-
- Addr addr; // locked address
- int contextId; // locking hw context
-
- // check for matching execution context
- bool matchesContext(Request *req)
- {
- return (contextId == req->contextId());
- }
-
- LockedAddr(Request *req)
- : addr(mask(req->getPaddr())),
- contextId(req->contextId())
- {
- }
- // constructor for unserialization use
- LockedAddr(Addr _addr, int _cid)
- : addr(_addr), contextId(_cid)
- {
- }
- };
-
- std::list<LockedAddr> lockedAddrList;
-
- // helper function for checkLockedAddrs(): we really want to
- // inline a quick check for an empty locked addr list (hopefully
- // the common case), and do the full list search (if necessary) in
- // this out-of-line function
- bool checkLockedAddrList(PacketPtr pkt);
-
- // Record the address of a load-locked operation so that we can
- // clear the execution context's lock flag if a matching store is
- // performed
- void trackLoadLocked(PacketPtr pkt);
-
- // Compare a store address with any locked addresses so we can
- // clear the lock flag appropriately. Return value set to 'false'
- // if store operation should be suppressed (because it was a
- // conditional store and the address was no longer locked by the
- // requesting execution context), 'true' otherwise. Note that
- // this method must be called on *all* stores since even
- // non-conditional stores must clear any matching lock addresses.
- bool writeOK(PacketPtr pkt) {
- Request *req = pkt->req;
- if (lockedAddrList.empty()) {
- // no locked addrs: nothing to check, store_conditional fails
- bool isLLSC = pkt->isLLSC();
- if (isLLSC) {
- req->setExtraData(0);
- }
- return !isLLSC; // only do write if not an sc
- } else {
- // iterate over list...
- return checkLockedAddrList(pkt);
- }
- }
-
- uint8_t *pmemAddr;
- Tick lat;
- Tick lat_var;
- std::vector<MemoryPort*> ports;
- typedef std::vector<MemoryPort*>::iterator PortIterator;
-
- uint64_t _size;
- uint64_t _start;
-
- /** Number of total bytes read from this memory */
- Stats::Scalar bytesRead;
- /** Number of instruction bytes read from this memory */
- Stats::Scalar bytesInstRead;
- /** Number of bytes written to this memory */
- Stats::Scalar bytesWritten;
- /** Number of read requests */
- Stats::Scalar numReads;
- /** Number of write requests */
- Stats::Scalar numWrites;
- /** Number of other requests */
- Stats::Scalar numOther;
- /** Read bandwidth from this memory */
- Stats::Formula bwRead;
- /** Read bandwidth from this memory */
- Stats::Formula bwInstRead;
- /** Write bandwidth from this memory */
- Stats::Formula bwWrite;
- /** Total bandwidth from this memory */
- Stats::Formula bwTotal;
+ // Prevent assignment
+ PhysicalMemory& operator=(const PhysicalMemory&);
public:
- uint64_t size() { return _size; }
- uint64_t start() { return _start; }
- public:
- typedef PhysicalMemoryParams Params;
- PhysicalMemory(const Params *p);
- virtual ~PhysicalMemory();
-
- const Params *
- params() const
- {
- return dynamic_cast<const Params *>(_params);
- }
-
- public:
- unsigned deviceBlockSize() const;
- AddrRangeList getAddrRanges();
- virtual SlavePort &getSlavePort(const std::string &if_name, int idx = -1);
- void virtual init();
- unsigned int drain(Event *de);
+ /**
+ * Create a physical memory object, wrapping a number of memories.
+ */
+ PhysicalMemory(const std::vector<AbstractMemory*>& _memories);
- Tick doAtomicAccess(PacketPtr pkt);
- void doFunctionalAccess(PacketPtr pkt);
+ /**
+ * Nothing to destruct.
+ */
+ ~PhysicalMemory() { }
+
+ /**
+ * Check if a physical address is within a range of a memory that
+ * is part of the global address map.
+ *
+ * @param addr A physical address
+ * @return Whether the address corresponds to a memory
+ */
+ bool isMemAddr(Addr addr) const;
+ /**
+ * Get the memory ranges for all memories that are to be reported
+ * to the configuration table.
+ *
+ * @return All configuration table memory ranges
+ */
+ AddrRangeList getConfAddrRanges() const;
- protected:
- virtual Tick calculateLatency(PacketPtr pkt);
+ /**
+ * Get the total physical memory size.
+ *
+ * @return The sum of all memory sizes
+ */
+ uint64_t totalSize() const { return size; }
- public:
- /**
- * Register Statistics
+ /**
+     * Forward an access request to the memory responsible for the packet's address.
*/
- void regStats();
+ void access(PacketPtr pkt);
+ void functionalAccess(PacketPtr pkt);
+};
+
- virtual void serialize(std::ostream &os);
- virtual void unserialize(Checkpoint *cp, const std::string &section);
-};
#endif //__PHYSICAL_MEMORY_HH__
diff --git a/src/mem/ruby/system/RubyPort.cc b/src/mem/ruby/system/RubyPort.cc
index 8fd68be7c..0cdb919b1 100644
--- a/src/mem/ruby/system/RubyPort.cc
+++ b/src/mem/ruby/system/RubyPort.cc
@@ -467,7 +467,7 @@ RubyPort::M5Port::recvFunctional(PacketPtr pkt)
// The following command performs the real functional access.
// This line should be removed once Ruby supplies the official version
// of data.
- ruby_port->system->physmem->doFunctionalAccess(pkt);
+ ruby_port->system->getPhysMem().functionalAccess(pkt);
}
// turn packet around to go back to requester if response expected
@@ -646,7 +646,7 @@ RubyPort::M5Port::hitCallback(PacketPtr pkt)
DPRINTF(RubyPort, "Hit callback needs response %d\n", needsResponse);
if (accessPhysMem) {
- ruby_port->system->physmem->doAtomicAccess(pkt);
+ ruby_port->system->getPhysMem().access(pkt);
} else if (needsResponse) {
pkt->makeResponse();
}
@@ -688,7 +688,7 @@ RubyPort::M5Port::getAddrRanges()
bool
RubyPort::M5Port::isPhysMemAddress(Addr addr)
{
- return ruby_port->system->isMemory(addr);
+ return ruby_port->system->isMemAddr(addr);
}
unsigned
diff --git a/src/mem/simple_mem.cc b/src/mem/simple_mem.cc
new file mode 100644
index 000000000..10e809d0b
--- /dev/null
+++ b/src/mem/simple_mem.cc
@@ -0,0 +1,167 @@
+/*
+ * Copyright (c) 2010-2012 ARM Limited
+ * All rights reserved
+ *
+ * The license below extends only to copyright in the software and shall
+ * not be construed as granting a license to any other intellectual
+ * property including but not limited to intellectual property relating
+ * to a hardware implementation of the functionality of the software
+ * licensed hereunder. You may use the software subject to the license
+ * terms below provided that you ensure that this notice is replicated
+ * unmodified and in its entirety in all distributions of the software,
+ * modified or unmodified, in source code or in binary form.
+ *
+ * Copyright (c) 2001-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Ron Dreslinski
+ * Ali Saidi
+ * Andreas Hansson
+ */
+
+#include "base/random.hh"
+#include "mem/simple_mem.hh"
+
+using namespace std;
+
+SimpleMemory::SimpleMemory(const Params* p) :
+ AbstractMemory(p),
+ lat(p->latency), lat_var(p->latency_var)
+{
+ for (size_t i = 0; i < p->port_port_connection_count; ++i) {
+ ports.push_back(new MemoryPort(csprintf("%s-port-%d", name(), i),
+ *this));
+ }
+}
+
+void
+SimpleMemory::init()
+{
+ for (vector<MemoryPort*>::iterator p = ports.begin(); p != ports.end();
+ ++p) {
+ if (!(*p)->isConnected()) {
+ fatal("SimpleMemory port %s is unconnected!\n", (*p)->name());
+ } else {
+ (*p)->sendRangeChange();
+ }
+ }
+}
+
+Tick
+SimpleMemory::calculateLatency(PacketPtr pkt)
+{
+ if (pkt->memInhibitAsserted()) {
+ return 0;
+ } else {
+ Tick latency = lat;
+ if (lat_var != 0)
+ latency += random_mt.random<Tick>(0, lat_var);
+ return latency;
+ }
+}
+
+Tick
+SimpleMemory::doAtomicAccess(PacketPtr pkt)
+{
+ access(pkt);
+ return calculateLatency(pkt);
+}
+
+void
+SimpleMemory::doFunctionalAccess(PacketPtr pkt)
+{
+ functionalAccess(pkt);
+}
+
+SlavePort &
+SimpleMemory::getSlavePort(const std::string &if_name, int idx)
+{
+ if (if_name != "port") {
+ return MemObject::getSlavePort(if_name, idx);
+ } else {
+ if (idx >= static_cast<int>(ports.size())) {
+ fatal("SimpleMemory::getSlavePort: unknown index %d\n", idx);
+ }
+
+ return *ports[idx];
+ }
+}
+
+unsigned int
+SimpleMemory::drain(Event *de)
+{
+ int count = 0;
+ for (vector<MemoryPort*>::iterator p = ports.begin(); p != ports.end();
+ ++p) {
+ count += (*p)->drain(de);
+ }
+
+ if (count)
+ changeState(Draining);
+ else
+ changeState(Drained);
+ return count;
+}
+
+SimpleMemory::MemoryPort::MemoryPort(const std::string& _name,
+ SimpleMemory& _memory)
+ : SimpleTimingPort(_name, &_memory), memory(_memory)
+{ }
+
+AddrRangeList
+SimpleMemory::MemoryPort::getAddrRanges()
+{
+ AddrRangeList ranges;
+ ranges.push_back(memory.getAddrRange());
+ return ranges;
+}
+
+Tick
+SimpleMemory::MemoryPort::recvAtomic(PacketPtr pkt)
+{
+ return memory.doAtomicAccess(pkt);
+}
+
+void
+SimpleMemory::MemoryPort::recvFunctional(PacketPtr pkt)
+{
+ pkt->pushLabel(memory.name());
+
+ if (!queue.checkFunctional(pkt)) {
+ // Default implementation of SimpleTimingPort::recvFunctional()
+ // calls recvAtomic() and throws away the latency; we can save a
+ // little here by just not calculating the latency.
+ memory.doFunctionalAccess(pkt);
+ }
+
+ pkt->popLabel();
+}
+
+SimpleMemory*
+SimpleMemoryParams::create()
+{
+ return new SimpleMemory(this);
+}
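For clarity, a stand-alone sketch of the latency model implemented by calculateLatency() above. It substitutes the standard <random> facilities for gem5's random_mt purely so the example compiles on its own; the behaviour is the same: a fixed latency, plus a uniformly distributed extra delay in [0, lat_var] when the variance is non-zero, and zero latency for inhibited packets.

#include <cstdint>
#include <random>

using Tick = std::uint64_t;

// Stand-alone model of SimpleMemory's latency calculation.
Tick
modelLatency(Tick lat, Tick lat_var, bool inhibited, std::mt19937_64& rng)
{
    if (inhibited)
        return 0; // data is supplied by another memory or cache

    Tick latency = lat;
    if (lat_var != 0) {
        std::uniform_int_distribution<Tick> dist(0, lat_var);
        latency += dist(rng);
    }
    return latency;
}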
diff --git a/src/mem/simple_mem.hh b/src/mem/simple_mem.hh
new file mode 100644
index 000000000..b21b38fd8
--- /dev/null
+++ b/src/mem/simple_mem.hh
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2012 ARM Limited
+ * All rights reserved
+ *
+ * The license below extends only to copyright in the software and shall
+ * not be construed as granting a license to any other intellectual
+ * property including but not limited to intellectual property relating
+ * to a hardware implementation of the functionality of the software
+ * licensed hereunder. You may use the software subject to the license
+ * terms below provided that you ensure that this notice is replicated
+ * unmodified and in its entirety in all distributions of the software,
+ * modified or unmodified, in source code or in binary form.
+ *
+ * Copyright (c) 2001-2005 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Ron Dreslinski
+ * Andreas Hansson
+ */
+
+/**
+ * @file
+ * SimpleMemory declaration
+ */
+
+#ifndef __SIMPLE_MEMORY_HH__
+#define __SIMPLE_MEMORY_HH__
+
+#include "mem/abstract_mem.hh"
+#include "mem/tport.hh"
+#include "params/SimpleMemory.hh"
+
+/**
+ * The simple memory is a basic multi-ported memory with an infinite
+ * throughput and a fixed latency, potentially with a variance added
+ * to it. It uses a SimpleTimingPort to implement the timing accesses.
+ */
+class SimpleMemory : public AbstractMemory
+{
+
+ private:
+
+ class MemoryPort : public SimpleTimingPort
+ {
+ SimpleMemory& memory;
+
+ public:
+
+ MemoryPort(const std::string& _name, SimpleMemory& _memory);
+
+ protected:
+
+ virtual Tick recvAtomic(PacketPtr pkt);
+
+ virtual void recvFunctional(PacketPtr pkt);
+
+ virtual AddrRangeList getAddrRanges();
+
+ };
+
+ std::vector<MemoryPort*> ports;
+
+ Tick lat;
+ Tick lat_var;
+
+ public:
+
+ typedef SimpleMemoryParams Params;
+ SimpleMemory(const Params *p);
+ virtual ~SimpleMemory() { }
+
+ unsigned int drain(Event* de);
+
+ virtual SlavePort& getSlavePort(const std::string& if_name, int idx = -1);
+ virtual void init();
+
+ const Params *
+ params() const
+ {
+ return dynamic_cast<const Params *>(_params);
+ }
+
+ protected:
+
+ Tick doAtomicAccess(PacketPtr pkt);
+ void doFunctionalAccess(PacketPtr pkt);
+ virtual Tick calculateLatency(PacketPtr pkt);
+
+};
+
+#endif //__SIMPLE_MEMORY_HH__
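Because calculateLatency() is a protected virtual hook, a derived model can change the timing without touching the port plumbing. A hypothetical sketch follows, assuming only the SimpleMemoryParams fields already used in simple_mem.cc (latency, latency_var); FixedLatencyMemory is not part of this patch and would still need its own Python params class to be instantiable from a config.

#include "mem/simple_mem.hh"

// Hypothetical subclass, for illustration only: charge the base
// latency from the params and ignore the variance, except for
// inhibited packets that another memory or cache will satisfy.
class FixedLatencyMemory : public SimpleMemory
{
  public:
    FixedLatencyMemory(const Params* p) : SimpleMemory(p) { }

  protected:
    virtual Tick calculateLatency(PacketPtr pkt)
    {
        return pkt->memInhibitAsserted() ? 0 : params()->latency;
    }
};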