Diffstat (limited to 'src/mem')
 src/mem/abstract_mem.cc                            |  4
 src/mem/abstract_mem.hh                            |  8
 src/mem/cache/base.cc                              | 22
 src/mem/cache/blk.hh                               | 10
 src/mem/cache/cache.cc                             | 26
 src/mem/cache/mshr.cc                              |  3
 src/mem/cache/noncoherent_cache.cc                 |  1
 src/mem/cache/prefetch/queued.cc                   |  5
 src/mem/packet.hh                                  | 36
 src/mem/page_table.cc                              |  2
 src/mem/page_table.hh                              |  2
 src/mem/port.cc                                    |  6
 src/mem/port_proxy.cc                              | 14
 src/mem/request.hh                                 |  6
 src/mem/ruby/slicc_interface/AbstractController.cc | 11
 src/mem/ruby/system/CacheRecorder.cc               | 17
 src/mem/ruby/system/GPUCoalescer.cc                |  4
 src/mem/ruby/system/GPUCoalescer.hh                |  4
 src/mem/ruby/system/RubyPort.cc                    |  8
 src/mem/ruby/system/Sequencer.cc                   |  1
 20 files changed, 88 insertions(+), 102 deletions(-)
diff --git a/src/mem/abstract_mem.cc b/src/mem/abstract_mem.cc
index 1d112dc06..01817bbf9 100644
--- a/src/mem/abstract_mem.cc
+++ b/src/mem/abstract_mem.cc
@@ -199,7 +199,7 @@ AbstractMemory::getAddrRange() const
void
AbstractMemory::trackLoadLocked(PacketPtr pkt)
{
- RequestPtr req = pkt->req;
+ const RequestPtr &req = pkt->req;
Addr paddr = LockedAddr::mask(req->getPaddr());
// first we check if we already have a locked addr for this
@@ -230,7 +230,7 @@ AbstractMemory::trackLoadLocked(PacketPtr pkt)
bool
AbstractMemory::checkLockedAddrList(PacketPtr pkt)
{
- RequestPtr req = pkt->req;
+ const RequestPtr &req = pkt->req;
Addr paddr = LockedAddr::mask(req->getPaddr());
bool isLLSC = pkt->isLLSC();
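The hunks above swap by-value RequestPtr locals for const references. With RequestPtr now a std::shared_ptr (see the request.hh change below), a by-value copy bumps an atomic reference count on every lookup path. A minimal standalone sketch of the difference, using only standard C++ (MockRequest is illustrative, not gem5 code):

    #include <cassert>
    #include <memory>

    struct MockRequest { int paddr = 0x1000; };          // stand-in for gem5's Request
    using MockRequestPtr = std::shared_ptr<MockRequest>; // mirrors the new RequestPtr typedef

    int main()
    {
        MockRequestPtr owner = std::make_shared<MockRequest>();
        {
            MockRequestPtr copy = owner;       // by value: refcount 1 -> 2
            assert(owner.use_count() == 2);
        }                                      // copy destroyed: back to 1

        // Const reference, the style adopted above: no refcount traffic.
        const MockRequestPtr &ref = owner;
        assert(owner.use_count() == 1 && ref->paddr == 0x1000);
        return 0;
    }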
diff --git a/src/mem/abstract_mem.hh b/src/mem/abstract_mem.hh
index 29c8c3f3e..4dd255f5f 100644
--- a/src/mem/abstract_mem.hh
+++ b/src/mem/abstract_mem.hh
@@ -79,13 +79,13 @@ class LockedAddr {
static Addr mask(Addr paddr) { return (paddr & ~Addr_Mask); }
// check for matching execution context
- bool matchesContext(RequestPtr req) const
+ bool matchesContext(const RequestPtr &req) const
{
return (contextId == req->contextId());
}
- LockedAddr(RequestPtr req) : addr(mask(req->getPaddr())),
- contextId(req->contextId())
+ LockedAddr(const RequestPtr &req) : addr(mask(req->getPaddr())),
+ contextId(req->contextId())
{}
// constructor for unserialization use
@@ -140,7 +140,7 @@ class AbstractMemory : public MemObject
// this method must be called on *all* stores since even
// non-conditional stores must clear any matching lock addresses.
bool writeOK(PacketPtr pkt) {
- RequestPtr req = pkt->req;
+ const RequestPtr &req = pkt->req;
if (lockedAddrList.empty()) {
// no locked addrs: nothing to check, store_conditional fails
bool isLLSC = pkt->isLLSC();
diff --git a/src/mem/cache/base.cc b/src/mem/cache/base.cc
index f753cc315..a5ad07d6c 100644
--- a/src/mem/cache/base.cc
+++ b/src/mem/cache/base.cc
@@ -810,7 +810,6 @@ BaseCache::getNextQueueEntry()
return allocateMissBuffer(pkt, curTick(), false);
} else {
// free the request and packet
- delete pkt->req;
delete pkt;
}
}
@@ -1278,8 +1277,9 @@ BaseCache::writebackBlk(CacheBlk *blk)
writebacks[Request::wbMasterId]++;
- RequestPtr req = new Request(regenerateBlkAddr(blk), blkSize, 0,
- Request::wbMasterId);
+ RequestPtr req = std::make_shared<Request>(
+ regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId);
+
if (blk->isSecure())
req->setFlags(Request::SECURE);
@@ -1313,8 +1313,9 @@ BaseCache::writebackBlk(CacheBlk *blk)
PacketPtr
BaseCache::writecleanBlk(CacheBlk *blk, Request::Flags dest, PacketId id)
{
- RequestPtr req = new Request(regenerateBlkAddr(blk), blkSize, 0,
- Request::wbMasterId);
+ RequestPtr req = std::make_shared<Request>(
+ regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId);
+
if (blk->isSecure()) {
req->setFlags(Request::SECURE);
}
@@ -1373,14 +1374,15 @@ BaseCache::writebackVisitor(CacheBlk &blk)
if (blk.isDirty()) {
assert(blk.isValid());
- Request request(regenerateBlkAddr(&blk),
- blkSize, 0, Request::funcMasterId);
- request.taskId(blk.task_id);
+ RequestPtr request = std::make_shared<Request>(
+ regenerateBlkAddr(&blk), blkSize, 0, Request::funcMasterId);
+
+ request->taskId(blk.task_id);
if (blk.isSecure()) {
- request.setFlags(Request::SECURE);
+ request->setFlags(Request::SECURE);
}
- Packet packet(&request, MemCmd::WriteReq);
+ Packet packet(request, MemCmd::WriteReq);
packet.dataStatic(blk.data);
memSidePort.sendFunctional(&packet);
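writebackBlk(), writecleanBlk(), and writebackVisitor() now build requests with std::make_shared rather than bare new. A small sketch of the motivation, assuming a simplified stand-in type (MockRequest and makeWritebackReq are hypothetical, not gem5 code):

    #include <memory>

    // Constructor signature only mimics the (addr, size, flags, master_id)
    // calls in the hunks above.
    struct MockRequest
    {
        MockRequest(unsigned long addr, unsigned size, int flags, int master_id)
            : addr(addr), size(size), flags(flags), master_id(master_id) {}
        unsigned long addr;
        unsigned size;
        int flags, master_id;
    };
    using MockRequestPtr = std::shared_ptr<MockRequest>;

    MockRequestPtr makeWritebackReq(unsigned long blk_addr, unsigned blk_size,
                                    int master_id)
    {
        // std::make_shared puts the object and the shared_ptr control block
        // in a single allocation; shared_ptr<T>(new T(...)) needs two, and
        // in a multi-argument call site could leak pre-C++17 if a sibling
        // argument expression threw.
        return std::make_shared<MockRequest>(blk_addr, blk_size, 0, master_id);
    }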
diff --git a/src/mem/cache/blk.hh b/src/mem/cache/blk.hh
index c4ec12ff3..93189bd97 100644
--- a/src/mem/cache/blk.hh
+++ b/src/mem/cache/blk.hh
@@ -136,7 +136,7 @@ class CacheBlk : public ReplaceableEntry
// check for matching execution context, and an address that
// is within the lock
- bool matches(const RequestPtr req) const
+ bool matches(const RequestPtr &req) const
{
Addr req_low = req->getPaddr();
Addr req_high = req_low + req->getSize() -1;
@@ -145,7 +145,7 @@ class CacheBlk : public ReplaceableEntry
}
// check if a request is intersecting and thus invalidating the lock
- bool intersects(const RequestPtr req) const
+ bool intersects(const RequestPtr &req) const
{
Addr req_low = req->getPaddr();
Addr req_high = req_low + req->getSize() - 1;
@@ -153,7 +153,7 @@ class CacheBlk : public ReplaceableEntry
return (req_low <= highAddr) && (req_high >= lowAddr);
}
- Lock(const RequestPtr req)
+ Lock(const RequestPtr &req)
: contextId(req->contextId()),
lowAddr(req->getPaddr()),
highAddr(lowAddr + req->getSize() - 1)
@@ -285,7 +285,7 @@ class CacheBlk : public ReplaceableEntry
* Clear any load locks that intersect the request and are from
* a different context.
*/
- void clearLoadLocks(RequestPtr req)
+ void clearLoadLocks(const RequestPtr &req)
{
auto l = lockList.begin();
while (l != lockList.end()) {
@@ -357,7 +357,7 @@ class CacheBlk : public ReplaceableEntry
if (!pkt->isLLSC() && lockList.empty())
return true;
- RequestPtr req = pkt->req;
+ const RequestPtr &req = pkt->req;
if (pkt->isLLSC()) {
// it's a store conditional... have to check for matching
diff --git a/src/mem/cache/cache.cc b/src/mem/cache/cache.cc
index 86c1640e5..ffd60811e 100644
--- a/src/mem/cache/cache.cc
+++ b/src/mem/cache/cache.cc
@@ -377,10 +377,10 @@ Cache::handleTimingReqMiss(PacketPtr pkt, CacheBlk *blk, Tick forward_time,
if (!mshr) {
// copy the request and create a new SoftPFReq packet
- RequestPtr req = new Request(pkt->req->getPaddr(),
- pkt->req->getSize(),
- pkt->req->getFlags(),
- pkt->req->masterId());
+ RequestPtr req = std::make_shared<Request>(pkt->req->getPaddr(),
+ pkt->req->getSize(),
+ pkt->req->getFlags(),
+ pkt->req->masterId());
pf = new Packet(req, pkt->cmd);
pf->allocate();
assert(pf->getAddr() == pkt->getAddr());
@@ -696,7 +696,6 @@ Cache::serviceMSHRTargets(MSHR *mshr, const PacketPtr pkt, CacheBlk *blk,
// immediately with dummy data so the core would be able to
// retire it. This request completes right here, so we
// deallocate it.
- delete tgt_pkt->req;
delete tgt_pkt;
break; // skip response
}
@@ -803,7 +802,6 @@ Cache::serviceMSHRTargets(MSHR *mshr, const PacketPtr pkt, CacheBlk *blk,
assert(tgt_pkt->cmd == MemCmd::HardPFReq);
if (blk)
blk->status |= BlkHWPrefetched;
- delete tgt_pkt->req;
delete tgt_pkt;
break;
@@ -871,11 +869,11 @@ Cache::cleanEvictBlk(CacheBlk *blk)
{
assert(!writebackClean);
assert(blk && blk->isValid() && !blk->isDirty());
+
// Creating a zero sized write, a message to the snoop filter
- RequestPtr req =
- new Request(regenerateBlkAddr(blk), blkSize, 0,
- Request::wbMasterId);
+ RequestPtr req = std::make_shared<Request>(
+ regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId);
if (blk->isSecure())
req->setFlags(Request::SECURE);
@@ -1138,15 +1136,6 @@ Cache::handleSnoop(PacketPtr pkt, CacheBlk *blk, bool is_timing,
if (!respond && is_deferred) {
assert(pkt->needsResponse());
-
- // if we copied the deferred packet with the intention to
- // respond, but are not responding, then a cache above us must
- // be, and we can use this as the indication of whether this
- // is a packet where we created a copy of the request or not
- if (!pkt->cacheResponding()) {
- delete pkt->req;
- }
-
delete pkt;
}
@@ -1396,7 +1385,6 @@ Cache::sendMSHRQueuePacket(MSHR* mshr)
}
// given that no response is expected, delete Request and Packet
- delete tgt_pkt->req;
delete tgt_pkt;
return false;
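Every delete pkt->req removed in this file relies on the same invariant: each Packet now holds its own shared_ptr to the request, so whichever packet is destroyed last frees the request automatically. A sketch of that ownership model with hypothetical mock types (not gem5 code):

    #include <cassert>
    #include <memory>

    struct MockRequest { };
    using MockRequestPtr = std::shared_ptr<MockRequest>;

    struct MockPacket
    {
        MockRequestPtr req;                   // shared, like Packet::req
        explicit MockPacket(const MockRequestPtr &r) : req(r) {}
    };

    int main()
    {
        auto req = std::make_shared<MockRequest>();
        auto *pkt  = new MockPacket(req);     // e.g. the original miss packet
        auto *copy = new MockPacket(req);     // e.g. a deferred snoop copy

        delete pkt;                           // no manual 'delete pkt->req'
        assert(req.use_count() == 2);         // copy and local handle remain
        delete copy;
        assert(req.use_count() == 1);         // request freed with last owner
        return 0;
    }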
diff --git a/src/mem/cache/mshr.cc b/src/mem/cache/mshr.cc
index dc490790b..21ce8a36d 100644
--- a/src/mem/cache/mshr.cc
+++ b/src/mem/cache/mshr.cc
@@ -424,7 +424,8 @@ MSHR::handleSnoop(PacketPtr pkt, Counter _order)
// the packet and the request as part of handling the deferred
// snoop.
PacketPtr cp_pkt = will_respond ? new Packet(pkt, true, true) :
- new Packet(new Request(*pkt->req), pkt->cmd, blkSize, pkt->id);
+ new Packet(std::make_shared<Request>(*pkt->req), pkt->cmd,
+ blkSize, pkt->id);
if (will_respond) {
// we are the ordering point, and will consequently
diff --git a/src/mem/cache/noncoherent_cache.cc b/src/mem/cache/noncoherent_cache.cc
index bf75be4d6..50738375e 100644
--- a/src/mem/cache/noncoherent_cache.cc
+++ b/src/mem/cache/noncoherent_cache.cc
@@ -299,7 +299,6 @@ NoncoherentCache::serviceMSHRTargets(MSHR *mshr, const PacketPtr pkt,
// We have filled the block and the prefetcher does not
// require responses.
- delete tgt_pkt->req;
delete tgt_pkt;
break;
diff --git a/src/mem/cache/prefetch/queued.cc b/src/mem/cache/prefetch/queued.cc
index 00d62f17f..3c5647ae3 100644
--- a/src/mem/cache/prefetch/queued.cc
+++ b/src/mem/cache/prefetch/queued.cc
@@ -59,7 +59,6 @@ QueuedPrefetcher::~QueuedPrefetcher()
{
// Delete the queued prefetch packets
for (DeferredPacket &p : pfq) {
- delete p.pkt->req;
delete p.pkt;
}
}
@@ -78,7 +77,6 @@ QueuedPrefetcher::notify(const PacketPtr &pkt)
while (itr != pfq.end()) {
if (itr->pkt->getAddr() == blk_addr &&
itr->pkt->isSecure() == is_secure) {
- delete itr->pkt->req;
delete itr->pkt;
itr = pfq.erase(itr);
} else {
@@ -224,7 +222,7 @@ QueuedPrefetcher::insert(AddrPriority &pf_info, bool is_secure)
/* Create a prefetch memory request */
RequestPtr pf_req =
- new Request(pf_info.first, blkSize, 0, masterId);
+ std::make_shared<Request>(pf_info.first, blkSize, 0, masterId);
if (is_secure) {
pf_req->setFlags(Request::SECURE);
@@ -255,7 +253,6 @@ QueuedPrefetcher::insert(AddrPriority &pf_info, bool is_secure)
}
DPRINTF(HWPrefetch, "Prefetch queue full, removing lowest priority "
"oldest packet, addr: %#x", it->pkt->getAddr());
- delete it->pkt->req;
delete it->pkt;
pfq.erase(it);
}
diff --git a/src/mem/packet.hh b/src/mem/packet.hh
index a4eeabe29..5bc466bf1 100644
--- a/src/mem/packet.hh
+++ b/src/mem/packet.hh
@@ -320,7 +320,7 @@ class Packet : public Printable
const PacketId id;
/// A pointer to the original request.
- const RequestPtr req;
+ RequestPtr req;
private:
/**
@@ -745,9 +745,9 @@ class Packet : public Printable
* first, but the Request's physical address and size fields need
* not be valid. The command must be supplied.
*/
- Packet(const RequestPtr _req, MemCmd _cmd)
- : cmd(_cmd), id((PacketId)_req), req(_req), data(nullptr), addr(0),
- _isSecure(false), size(0), headerDelay(0), snoopDelay(0),
+ Packet(const RequestPtr &_req, MemCmd _cmd)
+ : cmd(_cmd), id((PacketId)_req.get()), req(_req), data(nullptr),
+ addr(0), _isSecure(false), size(0), headerDelay(0), snoopDelay(0),
payloadDelay(0), senderState(NULL)
{
if (req->hasPaddr()) {
@@ -766,10 +766,10 @@ class Packet : public Printable
* a request that is for a whole block, not the address from the
* req. This allows for overriding the size/addr of the req.
*/
- Packet(const RequestPtr _req, MemCmd _cmd, int _blkSize, PacketId _id = 0)
- : cmd(_cmd), id(_id ? _id : (PacketId)_req), req(_req), data(nullptr),
- addr(0), _isSecure(false), headerDelay(0), snoopDelay(0),
- payloadDelay(0), senderState(NULL)
+ Packet(const RequestPtr &_req, MemCmd _cmd, int _blkSize, PacketId _id = 0)
+ : cmd(_cmd), id(_id ? _id : (PacketId)_req.get()), req(_req),
+ data(nullptr), addr(0), _isSecure(false), headerDelay(0),
+ snoopDelay(0), payloadDelay(0), senderState(NULL)
{
if (req->hasPaddr()) {
addr = req->getPaddr() & ~(_blkSize - 1);
@@ -823,7 +823,7 @@ class Packet : public Printable
* Generate the appropriate read MemCmd based on the Request flags.
*/
static MemCmd
- makeReadCmd(const RequestPtr req)
+ makeReadCmd(const RequestPtr &req)
{
if (req->isLLSC())
return MemCmd::LoadLockedReq;
@@ -837,7 +837,7 @@ class Packet : public Printable
* Generate the appropriate write MemCmd based on the Request flags.
*/
static MemCmd
- makeWriteCmd(const RequestPtr req)
+ makeWriteCmd(const RequestPtr &req)
{
if (req->isLLSC())
return MemCmd::StoreCondReq;
@@ -857,13 +857,13 @@ class Packet : public Printable
* Fine-tune the MemCmd type if it's not a vanilla read or write.
*/
static PacketPtr
- createRead(const RequestPtr req)
+ createRead(const RequestPtr &req)
{
return new Packet(req, makeReadCmd(req));
}
static PacketPtr
- createWrite(const RequestPtr req)
+ createWrite(const RequestPtr &req)
{
return new Packet(req, makeWriteCmd(req));
}
@@ -873,18 +873,6 @@ class Packet : public Printable
*/
~Packet()
{
- // Delete the request object if this is a request packet which
- // does not need a response, because the requester will not get
- // a chance. If the request packet needs a response then the
- // request will be deleted on receipt of the response
- // packet. We also make sure to never delete the request for
- // express snoops, even for cases when responses are not
- // needed (CleanEvict and Writeback), since the snoop packet
- // re-uses the same request.
- if (req && isRequest() && !needsResponse() &&
- !isExpressSnoop()) {
- delete req;
- }
deleteData();
}
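With the typedef change, the destructor's conditional delete of req, and all its express-snoop / needsResponse bookkeeping, collapses to nothing: the req member's own destructor drops this packet's share. The packet id, previously the raw Request pointer, is now taken from _req.get(). A self-contained sketch with mock types standing in for Packet and Request (names are illustrative, not gem5 code):

    #include <cassert>
    #include <cstdint>
    #include <memory>

    struct MockRequest { };
    using MockRequestPtr = std::shared_ptr<MockRequest>;
    using PacketId = uint64_t;

    struct MockPacket
    {
        PacketId id;
        MockRequestPtr req;

        explicit MockPacket(const MockRequestPtr &r)
            : id((PacketId)r.get()),  // id is the raw pointer value, as above
              req(r) {}
        // No user-written destructor: the req member's ~shared_ptr releases
        // this packet's share, replacing the deleted ownership logic.
    };

    int main()
    {
        std::weak_ptr<MockRequest> watch;
        {
            auto req = std::make_shared<MockRequest>();
            watch = req;
            MockPacket pkt(req);
            assert(pkt.id == (PacketId)req.get());
        }                             // last owners go out of scope here
        assert(watch.expired());      // the request was freed automatically
        return 0;
    }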
diff --git a/src/mem/page_table.cc b/src/mem/page_table.cc
index 8abeb2984..35f14798d 100644
--- a/src/mem/page_table.cc
+++ b/src/mem/page_table.cc
@@ -154,7 +154,7 @@ EmulationPageTable::translate(Addr vaddr, Addr &paddr)
}
Fault
-EmulationPageTable::translate(RequestPtr req)
+EmulationPageTable::translate(const RequestPtr &req)
{
Addr paddr;
assert(pageAlign(req->getVaddr() + req->getSize() - 1) ==
diff --git a/src/mem/page_table.hh b/src/mem/page_table.hh
index fc0c0923e..447d3a50f 100644
--- a/src/mem/page_table.hh
+++ b/src/mem/page_table.hh
@@ -151,7 +151,7 @@ class EmulationPageTable : public Serializable
* field of req.
* @param req The memory request.
*/
- Fault translate(RequestPtr req);
+ Fault translate(const RequestPtr &req);
void getMappings(std::vector<std::pair<Addr, Addr>> *addr_mappings);
diff --git a/src/mem/port.cc b/src/mem/port.cc
index 756eb8bdd..47f56e633 100644
--- a/src/mem/port.cc
+++ b/src/mem/port.cc
@@ -206,8 +206,10 @@ MasterPort::sendRetryResp()
void
MasterPort::printAddr(Addr a)
{
- Request req(a, 1, 0, Request::funcMasterId);
- Packet pkt(&req, MemCmd::PrintReq);
+ auto req = std::make_shared<Request>(
+ a, 1, 0, Request::funcMasterId);
+
+ Packet pkt(req, MemCmd::PrintReq);
Packet::PrintReqState prs(std::cerr);
pkt.senderState = &prs;
diff --git a/src/mem/port_proxy.cc b/src/mem/port_proxy.cc
index d454ef78d..a36e66a99 100644
--- a/src/mem/port_proxy.cc
+++ b/src/mem/port_proxy.cc
@@ -47,8 +47,11 @@ PortProxy::readBlobPhys(Addr addr, Request::Flags flags,
{
for (ChunkGenerator gen(addr, size, _cacheLineSize); !gen.done();
gen.next()) {
- Request req(gen.addr(), gen.size(), flags, Request::funcMasterId);
- Packet pkt(&req, MemCmd::ReadReq);
+
+ auto req = std::make_shared<Request>(
+ gen.addr(), gen.size(), flags, Request::funcMasterId);
+
+ Packet pkt(req, MemCmd::ReadReq);
pkt.dataStatic(p);
_port.sendFunctional(&pkt);
p += gen.size();
@@ -61,8 +64,11 @@ PortProxy::writeBlobPhys(Addr addr, Request::Flags flags,
{
for (ChunkGenerator gen(addr, size, _cacheLineSize); !gen.done();
gen.next()) {
- Request req(gen.addr(), gen.size(), flags, Request::funcMasterId);
- Packet pkt(&req, MemCmd::WriteReq);
+
+ auto req = std::make_shared<Request>(
+ gen.addr(), gen.size(), flags, Request::funcMasterId);
+
+ Packet pkt(req, MemCmd::WriteReq);
pkt.dataStaticConst(p);
_port.sendFunctional(&pkt);
p += gen.size();
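readBlobPhys() and writeBlobPhys() split an arbitrary [addr, addr+size) range into cache-line-sized pieces via ChunkGenerator and issue one freshly allocated shared request per piece. A rough sketch of that chunking arithmetic, with forEachChunk as a hypothetical stand-in for the generator (a real proxy would build a Request and Packet per chunk):

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    // Walks [addr, addr + size) so that no chunk crosses a chunk_size
    // (cache line) boundary.
    void forEachChunk(uint64_t addr, uint64_t size, uint64_t chunk_size)
    {
        const uint64_t end = addr + size;
        while (addr < end) {
            uint64_t next = (addr / chunk_size + 1) * chunk_size;
            uint64_t len = std::min(end, next) - addr;
            std::printf("chunk at %#llx, %llu bytes\n",
                        (unsigned long long)addr, (unsigned long long)len);
            addr += len;
        }
    }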
diff --git a/src/mem/request.hh b/src/mem/request.hh
index 5cb08ca39..1615a644a 100644
--- a/src/mem/request.hh
+++ b/src/mem/request.hh
@@ -81,7 +81,7 @@ namespace ContextSwitchTaskId {
class Request;
-typedef Request* RequestPtr;
+typedef std::shared_ptr<Request> RequestPtr;
typedef uint16_t MasterID;
class Request
@@ -515,8 +515,8 @@ class Request
assert(privateFlags.isSet(VALID_VADDR));
assert(privateFlags.noneSet(VALID_PADDR));
assert(split_addr > _vaddr && split_addr < _vaddr + _size);
- req1 = new Request(*this);
- req2 = new Request(*this);
+ req1 = std::make_shared<Request>(*this);
+ req2 = std::make_shared<Request>(*this);
req1->_size = split_addr - _vaddr;
req2->_vaddr = split_addr;
req2->_size = _size - req1->_size;
diff --git a/src/mem/ruby/slicc_interface/AbstractController.cc b/src/mem/ruby/slicc_interface/AbstractController.cc
index de5e81057..5f7eb6558 100644
--- a/src/mem/ruby/slicc_interface/AbstractController.cc
+++ b/src/mem/ruby/slicc_interface/AbstractController.cc
@@ -240,8 +240,8 @@ void
AbstractController::queueMemoryRead(const MachineID &id, Addr addr,
Cycles latency)
{
- RequestPtr req = new Request(addr, RubySystem::getBlockSizeBytes(), 0,
- m_masterId);
+ RequestPtr req = std::make_shared<Request>(
+ addr, RubySystem::getBlockSizeBytes(), 0, m_masterId);
PacketPtr pkt = Packet::createRead(req);
uint8_t *newData = new uint8_t[RubySystem::getBlockSizeBytes()];
@@ -264,8 +264,8 @@ void
AbstractController::queueMemoryWrite(const MachineID &id, Addr addr,
Cycles latency, const DataBlock &block)
{
- RequestPtr req = new Request(addr, RubySystem::getBlockSizeBytes(), 0,
- m_masterId);
+ RequestPtr req = std::make_shared<Request>(
+ addr, RubySystem::getBlockSizeBytes(), 0, m_masterId);
PacketPtr pkt = Packet::createWrite(req);
uint8_t *newData = new uint8_t[RubySystem::getBlockSizeBytes()];
@@ -292,7 +292,7 @@ AbstractController::queueMemoryWritePartial(const MachineID &id, Addr addr,
Cycles latency,
const DataBlock &block, int size)
{
- RequestPtr req = new Request(addr, size, 0, m_masterId);
+ RequestPtr req = std::make_shared<Request>(addr, size, 0, m_masterId);
PacketPtr pkt = Packet::createWrite(req);
uint8_t *newData = new uint8_t[size];
@@ -356,7 +356,6 @@ AbstractController::recvTimingResp(PacketPtr pkt)
}
getMemoryQueue()->enqueue(msg, clockEdge(), cyclesToTicks(Cycles(1)));
- delete pkt->req;
delete pkt;
}
diff --git a/src/mem/ruby/system/CacheRecorder.cc b/src/mem/ruby/system/CacheRecorder.cc
index 83e8a70dd..1fc7bb8ef 100644
--- a/src/mem/ruby/system/CacheRecorder.cc
+++ b/src/mem/ruby/system/CacheRecorder.cc
@@ -85,9 +85,9 @@ CacheRecorder::enqueueNextFlushRequest()
if (m_records_flushed < m_records.size()) {
TraceRecord* rec = m_records[m_records_flushed];
m_records_flushed++;
- Request* req = new Request(rec->m_data_address,
- m_block_size_bytes, 0,
- Request::funcMasterId);
+ auto req = std::make_shared<Request>(rec->m_data_address,
+ m_block_size_bytes, 0,
+ Request::funcMasterId);
MemCmd::Command requestType = MemCmd::FlushReq;
Packet *pkt = new Packet(req, requestType);
@@ -112,21 +112,24 @@ CacheRecorder::enqueueNextFetchRequest()
for (int rec_bytes_read = 0; rec_bytes_read < m_block_size_bytes;
rec_bytes_read += RubySystem::getBlockSizeBytes()) {
- Request* req = nullptr;
+ RequestPtr req;
MemCmd::Command requestType;
if (traceRecord->m_type == RubyRequestType_LD) {
requestType = MemCmd::ReadReq;
- req = new Request(traceRecord->m_data_address + rec_bytes_read,
+ req = std::make_shared<Request>(
+ traceRecord->m_data_address + rec_bytes_read,
RubySystem::getBlockSizeBytes(), 0, Request::funcMasterId);
} else if (traceRecord->m_type == RubyRequestType_IFETCH) {
requestType = MemCmd::ReadReq;
- req = new Request(traceRecord->m_data_address + rec_bytes_read,
+ req = std::make_shared<Request>(
+ traceRecord->m_data_address + rec_bytes_read,
RubySystem::getBlockSizeBytes(),
Request::INST_FETCH, Request::funcMasterId);
} else {
requestType = MemCmd::WriteReq;
- req = new Request(traceRecord->m_data_address + rec_bytes_read,
+ req = std::make_shared<Request>(
+ traceRecord->m_data_address + rec_bytes_read,
RubySystem::getBlockSizeBytes(), 0, Request::funcMasterId);
}
diff --git a/src/mem/ruby/system/GPUCoalescer.cc b/src/mem/ruby/system/GPUCoalescer.cc
index ef726be3d..e17bf63fd 100644
--- a/src/mem/ruby/system/GPUCoalescer.cc
+++ b/src/mem/ruby/system/GPUCoalescer.cc
@@ -70,7 +70,7 @@ RubyGPUCoalescerParams::create()
}
HSAScope
-reqScopeToHSAScope(Request* req)
+reqScopeToHSAScope(const RequestPtr &req)
{
HSAScope accessScope = HSAScope_UNSPECIFIED;
if (req->isScoped()) {
@@ -90,7 +90,7 @@ reqScopeToHSAScope(Request* req)
}
HSASegment
-reqSegmentToHSASegment(Request* req)
+reqSegmentToHSASegment(const RequestPtr &req)
{
HSASegment accessSegment = HSASegment_GLOBAL;
diff --git a/src/mem/ruby/system/GPUCoalescer.hh b/src/mem/ruby/system/GPUCoalescer.hh
index cf2005046..6576ecb36 100644
--- a/src/mem/ruby/system/GPUCoalescer.hh
+++ b/src/mem/ruby/system/GPUCoalescer.hh
@@ -58,8 +58,8 @@ class CacheMemory;
class RubyGPUCoalescerParams;
-HSAScope reqScopeToHSAScope(Request* req);
-HSASegment reqSegmentToHSASegment(Request* req);
+HSAScope reqScopeToHSAScope(const RequestPtr &req);
+HSASegment reqSegmentToHSASegment(const RequestPtr &req);
struct GPUCoalescerRequest
{
diff --git a/src/mem/ruby/system/RubyPort.cc b/src/mem/ruby/system/RubyPort.cc
index 02d23790a..84a70c0f1 100644
--- a/src/mem/ruby/system/RubyPort.cc
+++ b/src/mem/ruby/system/RubyPort.cc
@@ -608,11 +608,13 @@ RubyPort::ruby_eviction_callback(Addr address)
// Allocate the invalidate request and packet on the stack, as it is
// assumed they will not be modified or deleted by receivers.
// TODO: should this really be using funcMasterId?
- Request request(address, RubySystem::getBlockSizeBytes(), 0,
- Request::funcMasterId);
+ auto request = std::make_shared<Request>(
+ address, RubySystem::getBlockSizeBytes(), 0,
+ Request::funcMasterId);
+
// Use a single packet to signal all snooping ports of the invalidation.
// This assumes that snooping ports do NOT modify the packet/request
- Packet pkt(&request, MemCmd::InvalidateReq);
+ Packet pkt(request, MemCmd::InvalidateReq);
for (CpuPortIter p = slave_ports.begin(); p != slave_ports.end(); ++p) {
// check if the connected master port is snooping
if ((*p)->isSnooping()) {
diff --git a/src/mem/ruby/system/Sequencer.cc b/src/mem/ruby/system/Sequencer.cc
index 4037fb8f1..f30369710 100644
--- a/src/mem/ruby/system/Sequencer.cc
+++ b/src/mem/ruby/system/Sequencer.cc
@@ -516,7 +516,6 @@ Sequencer::hitCallback(SequencerRequest* srequest, DataBlock& data,
RubySystem *rs = m_ruby_system;
if (RubySystem::getWarmupEnabled()) {
assert(pkt->req);
- delete pkt->req;
delete pkt;
rs->m_cache_recorder->enqueueNextFetchRequest();
} else if (RubySystem::getCooldownEnabled()) {