author     Giacomo Travaglini <giacomo.travaglini@arm.com>  2018-06-04 09:40:19 +0100
committer  Giacomo Travaglini <giacomo.travaglini@arm.com>  2018-06-11 16:55:30 +0000
commit     f54020eb8155371725ab75b0fc5c419287eca084 (patch)
tree       65d379f7603e689e083e9a58ff4c2e90abd19fbf /src/cpu
parent     2113b21996d086dab32b9fd388efe3df241bfbd2 (diff)
download   gem5-f54020eb8155371725ab75b0fc5c419287eca084.tar.xz
misc: Using smart pointers for memory Requests
This patch changes the underlying type of RequestPtr from Request* to
shared_ptr<Request>. Managing memory requests through smart pointers
simplifies the code and prevents memory leaks and dangling pointers.

Change-Id: I7749af38a11ac8eb4d53d8df1252951e0890fde3
Signed-off-by: Giacomo Travaglini <giacomo.travaglini@arm.com>
Reviewed-by: Andreas Sandberg <andreas.sandberg@arm.com>
Reviewed-on: https://gem5-review.googlesource.com/10996
Reviewed-by: Nikos Nikoleris <nikos.nikoleris@arm.com>
Maintainer: Nikos Nikoleris <nikos.nikoleris@arm.com>
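The pattern applied throughout the hunks below is mechanical: raw
"new Request(...)" / "delete req" pairs become std::make_shared<Request>(...),
requests are passed around as const RequestPtr &, and explicit deletes turn
into reset() or simply disappear once the last owner drops its reference.
The following is a minimal sketch of that ownership model, assuming a
stand-in Request class (not gem5's real one from src/mem/request.hh); it
illustrates the idiom only, not the actual constructor signatures.

#include <cstdint>
#include <memory>

// Stand-in for gem5's Request; the real class has many more fields.
struct Request {
    uint64_t paddr = 0;
    void setPaddr(uint64_t p) { paddr = p; }
};

// Before this patch: typedef Request* RequestPtr;   (manual new/delete)
// After this patch:  the pointer is reference-counted.
using RequestPtr = std::shared_ptr<Request>;

int main()
{
    // Old style (removed by the patch):
    //   RequestPtr req = new Request(...);  ...  delete req;
    RequestPtr req = std::make_shared<Request>();
    req->setPaddr(0x1000);

    RequestPtr alias = req;  // e.g. a Packet keeping the request alive
    req.reset();             // replaces "delete req; req = NULL;"
    // alias still owns the Request; it is freed automatically when the
    // last owner goes out of scope, so no path can leak or double-free.
    return 0;
}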
Diffstat (limited to 'src/cpu')
-rw-r--r--  src/cpu/base.cc  8
-rw-r--r--  src/cpu/base_dyn_inst.hh  34
-rw-r--r--  src/cpu/base_dyn_inst_impl.hh  3
-rw-r--r--  src/cpu/checker/cpu.cc  49
-rw-r--r--  src/cpu/checker/cpu.hh  5
-rw-r--r--  src/cpu/checker/cpu_impl.hh  22
-rw-r--r--  src/cpu/kvm/base.cc  8
-rw-r--r--  src/cpu/kvm/x86_cpu.cc  6
-rw-r--r--  src/cpu/minor/fetch1.cc  27
-rw-r--r--  src/cpu/minor/fetch1.hh  8
-rw-r--r--  src/cpu/minor/lsq.cc  92
-rw-r--r--  src/cpu/minor/lsq.hh  10
-rw-r--r--  src/cpu/o3/cpu.hh  6
-rw-r--r--  src/cpu/o3/fetch.hh  6
-rw-r--r--  src/cpu/o3/fetch_impl.hh  17
-rw-r--r--  src/cpu/o3/lsq.hh  12
-rw-r--r--  src/cpu/o3/lsq_impl.hh  1
-rw-r--r--  src/cpu/o3/lsq_unit.hh  49
-rw-r--r--  src/cpu/o3/lsq_unit_impl.hh  22
-rw-r--r--  src/cpu/simple/atomic.cc  27
-rw-r--r--  src/cpu/simple/atomic.hh  6
-rw-r--r--  src/cpu/simple/base.cc  2
-rw-r--r--  src/cpu/simple/base.hh  2
-rw-r--r--  src/cpu/simple/timing.cc  32
-rw-r--r--  src/cpu/simple/timing.hh  16
-rw-r--r--  src/cpu/testers/directedtest/InvalidateGenerator.cc  3
-rw-r--r--  src/cpu/testers/directedtest/RubyDirectedTester.cc  1
-rw-r--r--  src/cpu/testers/directedtest/SeriesRequestGenerator.cc  3
-rw-r--r--  src/cpu/testers/garnet_synthetic_traffic/GarnetSyntheticTraffic.cc  12
-rw-r--r--  src/cpu/testers/memtest/memtest.cc  6
-rw-r--r--  src/cpu/testers/rubytest/Check.cc  13
-rw-r--r--  src/cpu/testers/rubytest/RubyTester.cc  1
-rw-r--r--  src/cpu/testers/traffic_gen/base_gen.cc  2
-rw-r--r--  src/cpu/testers/traffic_gen/traffic_gen.cc  2
-rw-r--r--  src/cpu/trace/trace_cpu.cc  16
-rw-r--r--  src/cpu/translation.hh  18
36 files changed, 241 insertions, 306 deletions
diff --git a/src/cpu/base.cc b/src/cpu/base.cc
index c576f1def..1a497db9a 100644
--- a/src/cpu/base.cc
+++ b/src/cpu/base.cc
@@ -318,7 +318,7 @@ BaseCPU::mwaitAtomic(ThreadID tid, ThreadContext *tc, BaseTLB *dtb)
assert(tid < numThreads);
AddressMonitor &monitor = addressMonitor[tid];
- Request req;
+ RequestPtr req;
Addr addr = monitor.vAddr;
int block_size = cacheLineSize();
uint64_t mask = ~((uint64_t)(block_size - 1));
@@ -330,13 +330,13 @@ BaseCPU::mwaitAtomic(ThreadID tid, ThreadContext *tc, BaseTLB *dtb)
if (secondAddr > addr)
size = secondAddr - addr;
- req.setVirt(0, addr, size, 0x0, dataMasterId(), tc->instAddr());
+ req->setVirt(0, addr, size, 0x0, dataMasterId(), tc->instAddr());
// translate to physical address
- Fault fault = dtb->translateAtomic(&req, tc, BaseTLB::Read);
+ Fault fault = dtb->translateAtomic(req, tc, BaseTLB::Read);
assert(fault == NoFault);
- monitor.pAddr = req.getPaddr() & mask;
+ monitor.pAddr = req->getPaddr() & mask;
monitor.waiting = true;
DPRINTF(Mwait,"[tid:%d] mwait called (vAddr=0x%lx, line's paddr=0x%lx)\n",
diff --git a/src/cpu/base_dyn_inst.hh b/src/cpu/base_dyn_inst.hh
index e94f500ea..2c08a3c67 100644
--- a/src/cpu/base_dyn_inst.hh
+++ b/src/cpu/base_dyn_inst.hh
@@ -304,12 +304,12 @@ class BaseDynInst : public ExecContext, public RefCounted
Request::Flags flags, uint64_t *res);
/** Splits a request in two if it crosses a dcache block. */
- void splitRequest(RequestPtr req, RequestPtr &sreqLow,
+ void splitRequest(const RequestPtr &req, RequestPtr &sreqLow,
RequestPtr &sreqHigh);
/** Initiate a DTB address translation. */
- void initiateTranslation(RequestPtr req, RequestPtr sreqLow,
- RequestPtr sreqHigh, uint64_t *res,
+ void initiateTranslation(const RequestPtr &req, const RequestPtr &sreqLow,
+ const RequestPtr &sreqHigh, uint64_t *res,
BaseTLB::Mode mode);
/** Finish a DTB address translation. */
@@ -902,8 +902,9 @@ BaseDynInst<Impl>::initiateMemRead(Addr addr, unsigned size,
sreqLow = savedSreqLow;
sreqHigh = savedSreqHigh;
} else {
- req = new Request(asid, addr, size, flags, masterId(), this->pc.instAddr(),
- thread->contextId());
+ req = std::make_shared<Request>(
+ asid, addr, size, flags, masterId(),
+ this->pc.instAddr(), thread->contextId());
req->taskId(cpu->taskId());
@@ -921,10 +922,7 @@ BaseDynInst<Impl>::initiateMemRead(Addr addr, unsigned size,
instFlags[EffAddrValid] = true;
if (cpu->checker) {
- if (reqToVerify != NULL) {
- delete reqToVerify;
- }
- reqToVerify = new Request(*req);
+ reqToVerify = std::make_shared<Request>(*req);
}
fault = cpu->read(req, sreqLow, sreqHigh, lqIdx);
} else {
@@ -958,8 +956,9 @@ BaseDynInst<Impl>::writeMem(uint8_t *data, unsigned size, Addr addr,
sreqLow = savedSreqLow;
sreqHigh = savedSreqHigh;
} else {
- req = new Request(asid, addr, size, flags, masterId(), this->pc.instAddr(),
- thread->contextId());
+ req = std::make_shared<Request>(
+ asid, addr, size, flags, masterId(),
+ this->pc.instAddr(), thread->contextId());
req->taskId(cpu->taskId());
@@ -976,10 +975,7 @@ BaseDynInst<Impl>::writeMem(uint8_t *data, unsigned size, Addr addr,
instFlags[EffAddrValid] = true;
if (cpu->checker) {
- if (reqToVerify != NULL) {
- delete reqToVerify;
- }
- reqToVerify = new Request(*req);
+ reqToVerify = std::make_shared<Request>(*req);
}
fault = cpu->write(req, sreqLow, sreqHigh, data, sqIdx);
}
@@ -989,7 +985,7 @@ BaseDynInst<Impl>::writeMem(uint8_t *data, unsigned size, Addr addr,
template<class Impl>
inline void
-BaseDynInst<Impl>::splitRequest(RequestPtr req, RequestPtr &sreqLow,
+BaseDynInst<Impl>::splitRequest(const RequestPtr &req, RequestPtr &sreqLow,
RequestPtr &sreqHigh)
{
// Check to see if the request crosses the next level block boundary.
@@ -1006,8 +1002,10 @@ BaseDynInst<Impl>::splitRequest(RequestPtr req, RequestPtr &sreqLow,
template<class Impl>
inline void
-BaseDynInst<Impl>::initiateTranslation(RequestPtr req, RequestPtr sreqLow,
- RequestPtr sreqHigh, uint64_t *res,
+BaseDynInst<Impl>::initiateTranslation(const RequestPtr &req,
+ const RequestPtr &sreqLow,
+ const RequestPtr &sreqHigh,
+ uint64_t *res,
BaseTLB::Mode mode)
{
translationStarted(true);
diff --git a/src/cpu/base_dyn_inst_impl.hh b/src/cpu/base_dyn_inst_impl.hh
index f55bd8ed5..b499fe4e6 100644
--- a/src/cpu/base_dyn_inst_impl.hh
+++ b/src/cpu/base_dyn_inst_impl.hh
@@ -131,7 +131,6 @@ BaseDynInst<Impl>::initVars()
cpu->snList.insert(seqNum);
#endif
- reqToVerify = NULL;
}
template <class Impl>
@@ -158,8 +157,6 @@ BaseDynInst<Impl>::~BaseDynInst()
cpu->snList.erase(seqNum);
#endif
- if (reqToVerify)
- delete reqToVerify;
}
#ifdef DEBUG
diff --git a/src/cpu/checker/cpu.cc b/src/cpu/checker/cpu.cc
index 1533d7405..8329e3191 100644
--- a/src/cpu/checker/cpu.cc
+++ b/src/cpu/checker/cpu.cc
@@ -69,7 +69,6 @@ CheckerCPU::CheckerCPU(Params *p)
: BaseCPU(p, true), systemPtr(NULL), icachePort(NULL), dcachePort(NULL),
tc(NULL), thread(NULL)
{
- memReq = NULL;
curStaticInst = NULL;
curMacroStaticInst = NULL;
@@ -156,27 +155,28 @@ CheckerCPU::readMem(Addr addr, uint8_t *data, unsigned size,
// Need to account for multiple accesses like the Atomic and TimingSimple
while (1) {
- memReq = new Request(0, addr, size, flags, masterId,
- thread->pcState().instAddr(), tc->contextId());
+ auto mem_req = std::make_shared<Request>(
+ 0, addr, size, flags, masterId,
+ thread->pcState().instAddr(), tc->contextId());
// translate to physical address
- fault = dtb->translateFunctional(memReq, tc, BaseTLB::Read);
+ fault = dtb->translateFunctional(mem_req, tc, BaseTLB::Read);
if (!checked_flags && fault == NoFault && unverifiedReq) {
- flags_match = checkFlags(unverifiedReq, memReq->getVaddr(),
- memReq->getPaddr(), memReq->getFlags());
- pAddr = memReq->getPaddr();
+ flags_match = checkFlags(unverifiedReq, mem_req->getVaddr(),
+ mem_req->getPaddr(), mem_req->getFlags());
+ pAddr = mem_req->getPaddr();
checked_flags = true;
}
// Now do the access
if (fault == NoFault &&
- !memReq->getFlags().isSet(Request::NO_ACCESS)) {
- PacketPtr pkt = Packet::createRead(memReq);
+ !mem_req->getFlags().isSet(Request::NO_ACCESS)) {
+ PacketPtr pkt = Packet::createRead(mem_req);
pkt->dataStatic(data);
- if (!(memReq->isUncacheable() || memReq->isMmappedIpr())) {
+ if (!(mem_req->isUncacheable() || mem_req->isMmappedIpr())) {
// Access memory to see if we have the same data
dcachePort->sendFunctional(pkt);
} else {
@@ -184,24 +184,16 @@ CheckerCPU::readMem(Addr addr, uint8_t *data, unsigned size,
memcpy(data, unverifiedMemData, size);
}
- delete memReq;
- memReq = NULL;
delete pkt;
}
if (fault != NoFault) {
- if (memReq->isPrefetch()) {
+ if (mem_req->isPrefetch()) {
fault = NoFault;
}
- delete memReq;
- memReq = NULL;
break;
}
- if (memReq != NULL) {
- delete memReq;
- }
-
//If we don't need to access a second cache line, stop now.
if (secondAddr <= addr)
{
@@ -244,16 +236,17 @@ CheckerCPU::writeMem(uint8_t *data, unsigned size,
// Need to account for a multiple access like Atomic and Timing CPUs
while (1) {
- memReq = new Request(0, addr, size, flags, masterId,
- thread->pcState().instAddr(), tc->contextId());
+ auto mem_req = std::make_shared<Request>(
+ 0, addr, size, flags, masterId,
+ thread->pcState().instAddr(), tc->contextId());
// translate to physical address
- fault = dtb->translateFunctional(memReq, tc, BaseTLB::Write);
+ fault = dtb->translateFunctional(mem_req, tc, BaseTLB::Write);
if (!checked_flags && fault == NoFault && unverifiedReq) {
- flags_match = checkFlags(unverifiedReq, memReq->getVaddr(),
- memReq->getPaddr(), memReq->getFlags());
- pAddr = memReq->getPaddr();
+ flags_match = checkFlags(unverifiedReq, mem_req->getVaddr(),
+ mem_req->getPaddr(), mem_req->getFlags());
+ pAddr = mem_req->getPaddr();
checked_flags = true;
}
@@ -264,9 +257,7 @@ CheckerCPU::writeMem(uint8_t *data, unsigned size,
* enabled. This is left as future work for the Checker: LSQ snooping
* and memory validation after stores have committed.
*/
- bool was_prefetch = memReq->isPrefetch();
-
- delete memReq;
+ bool was_prefetch = mem_req->isPrefetch();
//If we don't need to access a second cache line, stop now.
if (fault != NoFault || secondAddr <= addr)
@@ -337,7 +328,7 @@ CheckerCPU::dbg_vtophys(Addr addr)
* Checks if the flags set by the Checker and Checkee match.
*/
bool
-CheckerCPU::checkFlags(RequestPtr unverified_req, Addr vAddr,
+CheckerCPU::checkFlags(const RequestPtr &unverified_req, Addr vAddr,
Addr pAddr, int flags)
{
Addr unverifiedVAddr = unverified_req->getVaddr();
diff --git a/src/cpu/checker/cpu.hh b/src/cpu/checker/cpu.hh
index 101a16be6..bee72253e 100644
--- a/src/cpu/checker/cpu.hh
+++ b/src/cpu/checker/cpu.hh
@@ -144,9 +144,6 @@ class CheckerCPU : public BaseCPU, public ExecContext
// keep them all in a std::queue
std::queue<InstResult> result;
- // Pointer to the one memory request.
- RequestPtr memReq;
-
StaticInstPtr curStaticInst;
StaticInstPtr curMacroStaticInst;
@@ -531,7 +528,7 @@ class CheckerCPU : public BaseCPU, public ExecContext
dumpAndExit();
}
- bool checkFlags(RequestPtr unverified_req, Addr vAddr,
+ bool checkFlags(const RequestPtr &unverified_req, Addr vAddr,
Addr pAddr, int flags);
void dumpAndExit();
diff --git a/src/cpu/checker/cpu_impl.hh b/src/cpu/checker/cpu_impl.hh
index d81858c14..57282cd13 100644
--- a/src/cpu/checker/cpu_impl.hh
+++ b/src/cpu/checker/cpu_impl.hh
@@ -244,16 +244,17 @@ Checker<Impl>::verify(DynInstPtr &completed_inst)
// If not in the middle of a macro instruction
if (!curMacroStaticInst) {
// set up memory request for instruction fetch
- memReq = new Request(unverifiedInst->threadNumber, fetch_PC,
- sizeof(MachInst),
- 0,
- masterId,
- fetch_PC, thread->contextId());
- memReq->setVirt(0, fetch_PC, sizeof(MachInst),
- Request::INST_FETCH, masterId, thread->instAddr());
+ auto mem_req = std::make_shared<Request>(
+ unverifiedInst->threadNumber, fetch_PC,
+ sizeof(MachInst), 0, masterId, fetch_PC,
+ thread->contextId());
+ mem_req->setVirt(0, fetch_PC, sizeof(MachInst),
+ Request::INST_FETCH, masterId,
+ thread->instAddr());
- fault = itb->translateFunctional(memReq, tc, BaseTLB::Execute);
+ fault = itb->translateFunctional(
+ mem_req, tc, BaseTLB::Execute);
if (fault != NoFault) {
if (unverifiedInst->getFault() == NoFault) {
@@ -270,7 +271,6 @@ Checker<Impl>::verify(DynInstPtr &completed_inst)
advancePC(NoFault);
// Give up on an ITB fault..
- delete memReq;
unverifiedInst = NULL;
return;
} else {
@@ -278,17 +278,15 @@ Checker<Impl>::verify(DynInstPtr &completed_inst)
// the fault and see if our results match the CPU on
// the next tick().
fault = unverifiedInst->getFault();
- delete memReq;
break;
}
} else {
- PacketPtr pkt = new Packet(memReq, MemCmd::ReadReq);
+ PacketPtr pkt = new Packet(mem_req, MemCmd::ReadReq);
pkt->dataStatic(&machInst);
icachePort->sendFunctional(pkt);
machInst = gtoh(machInst);
- delete memReq;
delete pkt;
}
}
diff --git a/src/cpu/kvm/base.cc b/src/cpu/kvm/base.cc
index 3df0fddda..77cf277a6 100644
--- a/src/cpu/kvm/base.cc
+++ b/src/cpu/kvm/base.cc
@@ -181,7 +181,6 @@ BaseKvmCPU::KVMCpuPort::submitIO(PacketPtr pkt)
{
if (cpu->system->isAtomicMode()) {
Tick delay = sendAtomic(pkt);
- delete pkt->req;
delete pkt;
return delay;
} else {
@@ -200,7 +199,6 @@ BaseKvmCPU::KVMCpuPort::recvTimingResp(PacketPtr pkt)
{
DPRINTF(KvmIO, "KVM: Finished timing request\n");
- delete pkt->req;
delete pkt;
activeMMIOReqs--;
@@ -1119,8 +1117,9 @@ BaseKvmCPU::doMMIOAccess(Addr paddr, void *data, int size, bool write)
ThreadContext *tc(thread->getTC());
syncThreadContext();
- RequestPtr mmio_req = new Request(paddr, size, Request::UNCACHEABLE,
- dataMasterId());
+ RequestPtr mmio_req = std::make_shared<Request>(
+ paddr, size, Request::UNCACHEABLE, dataMasterId());
+
mmio_req->setContext(tc->contextId());
// Some architectures do need to massage physical addresses a bit
// before they are inserted into the memory system. This enables
@@ -1144,7 +1143,6 @@ BaseKvmCPU::doMMIOAccess(Addr paddr, void *data, int size, bool write)
TheISA::handleIprWrite(tc, pkt) :
TheISA::handleIprRead(tc, pkt));
threadContextDirty = true;
- delete pkt->req;
delete pkt;
return clockPeriod() * ipr_delay;
} else {
diff --git a/src/cpu/kvm/x86_cpu.cc b/src/cpu/kvm/x86_cpu.cc
index 1a23b6717..012cccd20 100644
--- a/src/cpu/kvm/x86_cpu.cc
+++ b/src/cpu/kvm/x86_cpu.cc
@@ -1354,8 +1354,10 @@ X86KvmCPU::handleKvmExitIO()
// prevent races in multi-core mode.
EventQueue::ScopedMigration migrate(deviceEventQueue());
for (int i = 0; i < count; ++i) {
- RequestPtr io_req = new Request(pAddr, kvm_run.io.size,
- Request::UNCACHEABLE, dataMasterId());
+ RequestPtr io_req = std::make_shared<Request>(
+ pAddr, kvm_run.io.size,
+ Request::UNCACHEABLE, dataMasterId());
+
io_req->setContext(tc->contextId());
PacketPtr pkt = new Packet(io_req, cmd);
diff --git a/src/cpu/minor/fetch1.cc b/src/cpu/minor/fetch1.cc
index 0620fee1e..465372a08 100644
--- a/src/cpu/minor/fetch1.cc
+++ b/src/cpu/minor/fetch1.cc
@@ -168,8 +168,8 @@ Fetch1::fetchLine(ThreadID tid)
"%s addr: 0x%x pc: %s line_offset: %d request_size: %d\n",
request_id, aligned_pc, thread.pc, line_offset, request_size);
- request->request.setContext(cpu.threads[tid]->getTC()->contextId());
- request->request.setVirt(0 /* asid */,
+ request->request->setContext(cpu.threads[tid]->getTC()->contextId());
+ request->request->setVirt(0 /* asid */,
aligned_pc, request_size, Request::INST_FETCH, cpu.instMasterId(),
/* I've no idea why we need the PC, but give it */
thread.pc.instAddr());
@@ -187,7 +187,7 @@ Fetch1::fetchLine(ThreadID tid)
* through finish/markDelayed on this request as it bears
* the Translation interface */
cpu.threads[request->id.threadId]->itb->translateTiming(
- &request->request,
+ request->request,
cpu.getContext(request->id.threadId),
request, BaseTLB::Execute);
@@ -228,7 +228,7 @@ void
Fetch1::FetchRequest::makePacket()
{
/* Make the necessary packet for a memory transaction */
- packet = new Packet(&request, MemCmd::ReadReq);
+ packet = new Packet(request, MemCmd::ReadReq);
packet->allocate();
/* This FetchRequest becomes SenderState to allow the response to be
@@ -237,7 +237,7 @@ Fetch1::FetchRequest::makePacket()
}
void
-Fetch1::FetchRequest::finish(const Fault &fault_, RequestPtr request_,
+Fetch1::FetchRequest::finish(const Fault &fault_, const RequestPtr &request_,
ThreadContext *tc, BaseTLB::Mode mode)
{
fault = fault_;
@@ -258,8 +258,9 @@ Fetch1::handleTLBResponse(FetchRequestPtr response)
DPRINTF(Fetch, "Fault in address ITLB translation: %s, "
"paddr: 0x%x, vaddr: 0x%x\n",
response->fault->name(),
- (response->request.hasPaddr() ? response->request.getPaddr() : 0),
- response->request.getVaddr());
+ (response->request->hasPaddr() ?
+ response->request->getPaddr() : 0),
+ response->request->getVaddr());
if (DTRACE(MinorTrace))
minorTraceResponseLine(name(), response);
@@ -397,18 +398,18 @@ void
Fetch1::minorTraceResponseLine(const std::string &name,
Fetch1::FetchRequestPtr response) const
{
- Request &request M5_VAR_USED = response->request;
+ const RequestPtr &request M5_VAR_USED = response->request;
if (response->packet && response->packet->isError()) {
MINORLINE(this, "id=F;%s vaddr=0x%x fault=\"error packet\"\n",
- response->id, request.getVaddr());
+ response->id, request->getVaddr());
} else if (response->fault != NoFault) {
MINORLINE(this, "id=F;%s vaddr=0x%x fault=\"%s\"\n",
- response->id, request.getVaddr(), response->fault->name());
+ response->id, request->getVaddr(), response->fault->name());
} else {
MINORLINE(this, "id=%s size=%d vaddr=0x%x paddr=0x%x\n",
- response->id, request.getSize(),
- request.getVaddr(), request.getPaddr());
+ response->id, request->getSize(),
+ request->getVaddr(), request->getPaddr());
}
}
@@ -550,7 +551,7 @@ Fetch1::processResponse(Fetch1::FetchRequestPtr response,
line.pc = response->pc;
/* Set the lineBase, which is a sizeof(MachInst) aligned address <=
* pc.instAddr() */
- line.lineBaseAddr = response->request.getVaddr();
+ line.lineBaseAddr = response->request->getVaddr();
if (response->fault != NoFault) {
/* Stop fetching if there was a fault */
diff --git a/src/cpu/minor/fetch1.hh b/src/cpu/minor/fetch1.hh
index cf6c9d254..7b4c468ed 100644
--- a/src/cpu/minor/fetch1.hh
+++ b/src/cpu/minor/fetch1.hh
@@ -130,7 +130,7 @@ class Fetch1 : public Named
PacketPtr packet;
/** The underlying request that this fetch represents */
- Request request;
+ RequestPtr request;
/** PC to fixup with line address */
TheISA::PCState pc;
@@ -163,7 +163,7 @@ class Fetch1 : public Named
/** Interface for ITLB responses. Populates self and then passes
* the request on to the ports' handleTLBResponse member
* function */
- void finish(const Fault &fault_, RequestPtr request_,
+ void finish(const Fault &fault_, const RequestPtr &request_,
ThreadContext *tc, BaseTLB::Mode mode);
public:
@@ -176,7 +176,9 @@ class Fetch1 : public Named
request(),
pc(pc_),
fault(NoFault)
- { }
+ {
+ request = std::make_shared<Request>();
+ }
~FetchRequest();
};
diff --git a/src/cpu/minor/lsq.cc b/src/cpu/minor/lsq.cc
index 822df0294..ad103b001 100644
--- a/src/cpu/minor/lsq.cc
+++ b/src/cpu/minor/lsq.cc
@@ -83,7 +83,9 @@ LSQ::LSQRequest::LSQRequest(LSQ &port_, MinorDynInstPtr inst_, bool isLoad_,
skipped(false),
issuedToMemory(false),
state(NotIssued)
-{ }
+{
+ request = std::make_shared<Request>();
+}
LSQ::AddrRangeCoverage
LSQ::LSQRequest::containsAddrRangeOf(
@@ -110,8 +112,8 @@ LSQ::LSQRequest::containsAddrRangeOf(
LSQ::AddrRangeCoverage
LSQ::LSQRequest::containsAddrRangeOf(LSQRequestPtr other_request)
{
- return containsAddrRangeOf(request.getPaddr(), request.getSize(),
- other_request->request.getPaddr(), other_request->request.getSize());
+ return containsAddrRangeOf(request->getPaddr(), request->getSize(),
+ other_request->request->getPaddr(), other_request->request->getSize());
}
bool
@@ -228,7 +230,7 @@ LSQ::clearMemBarrier(MinorDynInstPtr inst)
}
void
-LSQ::SingleDataRequest::finish(const Fault &fault_, RequestPtr request_,
+LSQ::SingleDataRequest::finish(const Fault &fault_, const RequestPtr &request_,
ThreadContext *tc, BaseTLB::Mode mode)
{
fault = fault_;
@@ -262,7 +264,7 @@ LSQ::SingleDataRequest::startAddrTranslation()
* finish/markDelayed on the LSQRequest as it bears the Translation
* interface */
thread->getDTBPtr()->translateTiming(
- &request, thread, this, (isLoad ? BaseTLB::Read : BaseTLB::Write));
+ request, thread, this, (isLoad ? BaseTLB::Read : BaseTLB::Write));
}
void
@@ -275,7 +277,7 @@ LSQ::SingleDataRequest::retireResponse(PacketPtr packet_)
}
void
-LSQ::SplitDataRequest::finish(const Fault &fault_, RequestPtr request_,
+LSQ::SplitDataRequest::finish(const Fault &fault_, const RequestPtr &request_,
ThreadContext *tc, BaseTLB::Mode mode)
{
fault = fault_;
@@ -337,12 +339,6 @@ LSQ::SplitDataRequest::SplitDataRequest(LSQ &port_, MinorDynInstPtr inst_,
LSQ::SplitDataRequest::~SplitDataRequest()
{
- for (auto i = fragmentRequests.begin();
- i != fragmentRequests.end(); i++)
- {
- delete *i;
- }
-
for (auto i = fragmentPackets.begin();
i != fragmentPackets.end(); i++)
{
@@ -353,8 +349,8 @@ LSQ::SplitDataRequest::~SplitDataRequest()
void
LSQ::SplitDataRequest::makeFragmentRequests()
{
- Addr base_addr = request.getVaddr();
- unsigned int whole_size = request.getSize();
+ Addr base_addr = request->getVaddr();
+ unsigned int whole_size = request->getSize();
unsigned int line_width = port.lineWidth;
unsigned int fragment_size;
@@ -423,13 +419,13 @@ LSQ::SplitDataRequest::makeFragmentRequests()
}
}
- RequestPtr fragment = new Request();
+ RequestPtr fragment = std::make_shared<Request>();
- fragment->setContext(request.contextId());
+ fragment->setContext(request->contextId());
fragment->setVirt(0 /* asid */,
- fragment_addr, fragment_size, request.getFlags(),
- request.masterId(),
- request.getPC());
+ fragment_addr, fragment_size, request->getFlags(),
+ request->masterId(),
+ request->getPC());
DPRINTFS(MinorMem, (&port), "Generating fragment addr: 0x%x size: %d"
" (whole request addr: 0x%x size: %d) %s\n",
@@ -445,7 +441,7 @@ LSQ::SplitDataRequest::makeFragmentRequests()
void
LSQ::SplitDataRequest::makeFragmentPackets()
{
- Addr base_addr = request.getVaddr();
+ Addr base_addr = request->getVaddr();
DPRINTFS(MinorMem, (&port), "Making packets for request: %s\n", *inst);
@@ -476,17 +472,17 @@ LSQ::SplitDataRequest::makeFragmentPackets()
assert(fragment->hasPaddr());
PacketPtr fragment_packet =
- makePacketForRequest(*fragment, isLoad, this, request_data);
+ makePacketForRequest(fragment, isLoad, this, request_data);
fragmentPackets.push_back(fragment_packet);
/* Accumulate flags in parent request */
- request.setFlags(fragment->getFlags());
+ request->setFlags(fragment->getFlags());
}
/* Might as well make the overall/response packet here */
/* Get the physical address for the whole request/packet from the first
* fragment */
- request.setPaddr(fragmentRequests[0]->getPaddr());
+ request->setPaddr(fragmentRequests[0]->getPaddr());
makePacket();
}
@@ -535,7 +531,7 @@ LSQ::SplitDataRequest::retireResponse(PacketPtr response)
DPRINTFS(MinorMem, (&port), "Retiring fragment addr: 0x%x size: %d"
" offset: 0x%x (retired fragment num: %d) %s\n",
response->req->getVaddr(), response->req->getSize(),
- request.getVaddr() - response->req->getVaddr(),
+ request->getVaddr() - response->req->getVaddr(),
numRetiredFragments,
(fault == NoFault ? "" : fault->name()));
@@ -556,13 +552,13 @@ LSQ::SplitDataRequest::retireResponse(PacketPtr response)
/* For a split transfer, a Packet must be constructed
* to contain all returning data. This is that packet's
* data */
- data = new uint8_t[request.getSize()];
+ data = new uint8_t[request->getSize()];
}
/* Populate the portion of the overall response data represented
* by the response fragment */
std::memcpy(
- data + (response->req->getVaddr() - request.getVaddr()),
+ data + (response->req->getVaddr() - request->getVaddr()),
response->getConstPtr<uint8_t>(),
response->req->getSize());
}
@@ -585,18 +581,18 @@ LSQ::SplitDataRequest::retireResponse(PacketPtr response)
DPRINTFS(MinorMem, (&port), "Retired packet isRead: %d isWrite: %d"
" needsResponse: %d packetSize: %s requestSize: %s responseSize:"
" %s\n", packet->isRead(), packet->isWrite(),
- packet->needsResponse(), packet->getSize(), request.getSize(),
+ packet->needsResponse(), packet->getSize(), request->getSize(),
response->getSize());
/* A request can become complete by several paths, this is a sanity
* check to make sure the packet's data is created */
if (!data) {
- data = new uint8_t[request.getSize()];
+ data = new uint8_t[request->getSize()];
}
if (isLoad) {
DPRINTFS(MinorMem, (&port), "Copying read data\n");
- std::memcpy(packet->getPtr<uint8_t>(), data, request.getSize());
+ std::memcpy(packet->getPtr<uint8_t>(), data, request->getSize());
}
packet->makeResponse();
}
@@ -691,8 +687,8 @@ LSQ::StoreBuffer::canForwardDataToLoad(LSQRequestPtr request,
DPRINTF(MinorMem, "Forwarding: slot: %d result: %s thisAddr:"
" 0x%x thisSize: %d slotAddr: 0x%x slotSize: %d\n",
slot_index, coverage,
- request->request.getPaddr(), request->request.getSize(),
- slot->request.getPaddr(), slot->request.getSize());
+ request->request->getPaddr(), request->request->getSize(),
+ slot->request->getPaddr(), slot->request->getSize());
found_slot = slot_index;
ret = coverage;
@@ -720,11 +716,11 @@ LSQ::StoreBuffer::forwardStoreData(LSQRequestPtr load,
assert(store->packet);
assert(store->containsAddrRangeOf(load) == FullAddrRangeCoverage);
- Addr load_addr = load->request.getPaddr();
- Addr store_addr = store->request.getPaddr();
+ Addr load_addr = load->request->getPaddr();
+ Addr store_addr = store->request->getPaddr();
Addr addr_offset = load_addr - store_addr;
- unsigned int load_size = load->request.getSize();
+ unsigned int load_size = load->request->getSize();
DPRINTF(MinorMem, "Forwarding %d bytes for addr: 0x%x from store buffer"
" slot: %d addr: 0x%x addressOffset: 0x%x\n",
@@ -932,9 +928,9 @@ LSQ::tryToSendToTransfers(LSQRequestPtr request)
}
bool is_load = request->isLoad;
- bool is_llsc = request->request.isLLSC();
- bool is_swap = request->request.isSwap();
- bool bufferable = !(request->request.isStrictlyOrdered() ||
+ bool is_llsc = request->request->isLLSC();
+ bool is_swap = request->request->isSwap();
+ bool bufferable = !(request->request->isStrictlyOrdered() ||
is_llsc || is_swap);
if (is_load) {
@@ -945,7 +941,7 @@ LSQ::tryToSendToTransfers(LSQRequestPtr request)
}
} else {
/* Store. Can it be sent to the store buffer? */
- if (bufferable && !request->request.isMmappedIpr()) {
+ if (bufferable && !request->request->isMmappedIpr()) {
request->setState(LSQRequest::StoreToStoreBuffer);
moveFromRequestsToTransfers(request);
DPRINTF(MinorMem, "Moving store into transfers queue\n");
@@ -1023,10 +1019,10 @@ LSQ::tryToSendToTransfers(LSQRequestPtr request)
/* Handle LLSC requests and tests */
if (is_load) {
- TheISA::handleLockedRead(&context, &request->request);
+ TheISA::handleLockedRead(&context, request->request);
} else {
do_access = TheISA::handleLockedWrite(&context,
- &request->request, cacheBlockMask);
+ request->request, cacheBlockMask);
if (!do_access) {
DPRINTF(MinorMem, "Not perfoming a memory "
@@ -1077,10 +1073,10 @@ LSQ::tryToSend(LSQRequestPtr request)
* so the response can be correctly handled */
assert(packet->findNextSenderState<LSQRequest>());
- if (request->request.isMmappedIpr()) {
+ if (request->request->isMmappedIpr()) {
ThreadContext *thread =
cpu.getContext(cpu.contextToThread(
- request->request.contextId()));
+ request->request->contextId()));
if (request->isLoad) {
DPRINTF(MinorMem, "IPR read inst: %s\n", *(request->inst));
@@ -1516,8 +1512,8 @@ LSQ::pushRequest(MinorDynInstPtr inst, bool isLoad, uint8_t *data,
inst->traceData->setMem(addr, size, flags);
int cid = cpu.threads[inst->id.threadId]->getTC()->contextId();
- request->request.setContext(cid);
- request->request.setVirt(0 /* asid */,
+ request->request->setContext(cid);
+ request->request->setVirt(0 /* asid */,
addr, size, flags, cpu.dataMasterId(),
/* I've no idea why we need the PC, but give it */
inst->pc.instAddr());
@@ -1557,18 +1553,18 @@ LSQ::StoreBuffer::StoreBuffer(std::string name_, LSQ &lsq_,
}
PacketPtr
-makePacketForRequest(Request &request, bool isLoad,
+makePacketForRequest(const RequestPtr &request, bool isLoad,
Packet::SenderState *sender_state, PacketDataPtr data)
{
- PacketPtr ret = isLoad ? Packet::createRead(&request)
- : Packet::createWrite(&request);
+ PacketPtr ret = isLoad ? Packet::createRead(request)
+ : Packet::createWrite(request);
if (sender_state)
ret->pushSenderState(sender_state);
if (isLoad) {
ret->allocate();
- } else if (!request.isCacheMaintenance()) {
+ } else if (!request->isCacheMaintenance()) {
// CMOs are treated as stores but they don't have data. All
// stores otherwise need to allocate for data.
ret->dataDynamic(data);
diff --git a/src/cpu/minor/lsq.hh b/src/cpu/minor/lsq.hh
index 9ee40f5d3..da873b4ac 100644
--- a/src/cpu/minor/lsq.hh
+++ b/src/cpu/minor/lsq.hh
@@ -143,7 +143,7 @@ class LSQ : public Named
PacketPtr packet;
/** The underlying request of this LSQRequest */
- Request request;
+ RequestPtr request;
/** Fault generated performing this request */
Fault fault;
@@ -272,7 +272,7 @@ class LSQ : public Named
{
protected:
/** TLB interace */
- void finish(const Fault &fault_, RequestPtr request_,
+ void finish(const Fault &fault_, const RequestPtr &request_,
ThreadContext *tc, BaseTLB::Mode mode)
{ }
@@ -333,7 +333,7 @@ class LSQ : public Named
{
protected:
/** TLB interace */
- void finish(const Fault &fault_, RequestPtr request_,
+ void finish(const Fault &fault_, const RequestPtr &request_,
ThreadContext *tc, BaseTLB::Mode mode);
/** Has my only packet been sent to the memory system but has not
@@ -406,7 +406,7 @@ class LSQ : public Named
protected:
/** TLB response interface */
- void finish(const Fault &fault_, RequestPtr request_,
+ void finish(const Fault &fault_, const RequestPtr &request_,
ThreadContext *tc, BaseTLB::Mode mode);
public:
@@ -720,7 +720,7 @@ class LSQ : public Named
/** Make a suitable packet for the given request. If the request is a store,
* data will be the payload data. If sender_state is NULL, it won't be
* pushed into the packet as senderState */
-PacketPtr makePacketForRequest(Request &request, bool isLoad,
+PacketPtr makePacketForRequest(const RequestPtr &request, bool isLoad,
Packet::SenderState *sender_state = NULL, PacketDataPtr data = NULL);
}
diff --git a/src/cpu/o3/cpu.hh b/src/cpu/o3/cpu.hh
index 10af087d1..1589220a9 100644
--- a/src/cpu/o3/cpu.hh
+++ b/src/cpu/o3/cpu.hh
@@ -744,14 +744,16 @@ class FullO3CPU : public BaseO3CPU
std::vector<ThreadID> tids;
/** CPU read function, forwards read to LSQ. */
- Fault read(RequestPtr &req, RequestPtr &sreqLow, RequestPtr &sreqHigh,
+ Fault read(const RequestPtr &req,
+ RequestPtr &sreqLow, RequestPtr &sreqHigh,
int load_idx)
{
return this->iew.ldstQueue.read(req, sreqLow, sreqHigh, load_idx);
}
/** CPU write function, forwards write to LSQ. */
- Fault write(RequestPtr &req, RequestPtr &sreqLow, RequestPtr &sreqHigh,
+ Fault write(const RequestPtr &req,
+ const RequestPtr &sreqLow, const RequestPtr &sreqHigh,
uint8_t *data, int store_idx)
{
return this->iew.ldstQueue.write(req, sreqLow, sreqHigh,
diff --git a/src/cpu/o3/fetch.hh b/src/cpu/o3/fetch.hh
index 4382197f4..da7ba4bb3 100644
--- a/src/cpu/o3/fetch.hh
+++ b/src/cpu/o3/fetch.hh
@@ -99,7 +99,7 @@ class DefaultFetch
{}
void
- finish(const Fault &fault, RequestPtr req, ThreadContext *tc,
+ finish(const Fault &fault, const RequestPtr &req, ThreadContext *tc,
BaseTLB::Mode mode)
{
assert(mode == BaseTLB::Execute);
@@ -129,7 +129,7 @@ class DefaultFetch
fault = _fault;
}
- void setReq(RequestPtr _req)
+ void setReq(const RequestPtr &_req)
{
req = _req;
}
@@ -295,7 +295,7 @@ class DefaultFetch
* @return Any fault that occured.
*/
bool fetchCacheLine(Addr vaddr, ThreadID tid, Addr pc);
- void finishTranslation(const Fault &fault, RequestPtr mem_req);
+ void finishTranslation(const Fault &fault, const RequestPtr &mem_req);
/** Check if an interrupt is pending and that we need to handle
diff --git a/src/cpu/o3/fetch_impl.hh b/src/cpu/o3/fetch_impl.hh
index 2e8ec67ae..2df7b84ee 100644
--- a/src/cpu/o3/fetch_impl.hh
+++ b/src/cpu/o3/fetch_impl.hh
@@ -388,7 +388,6 @@ DefaultFetch<Impl>::processCacheCompletion(PacketPtr pkt)
if (fetchStatus[tid] != IcacheWaitResponse ||
pkt->req != memReq[tid]) {
++fetchIcacheSquashes;
- delete pkt->req;
delete pkt;
return;
}
@@ -415,7 +414,6 @@ DefaultFetch<Impl>::processCacheCompletion(PacketPtr pkt)
pkt->req->setAccessLatency();
cpu->ppInstAccessComplete->notify(pkt);
// Reset the mem req to NULL.
- delete pkt->req;
delete pkt;
memReq[tid] = NULL;
}
@@ -621,10 +619,10 @@ DefaultFetch<Impl>::fetchCacheLine(Addr vaddr, ThreadID tid, Addr pc)
// Setup the memReq to do a read of the first instruction's address.
// Set the appropriate read size and flags as well.
// Build request here.
- RequestPtr mem_req =
- new Request(tid, fetchBufferBlockPC, fetchBufferSize,
- Request::INST_FETCH, cpu->instMasterId(), pc,
- cpu->thread[tid]->contextId());
+ RequestPtr mem_req = std::make_shared<Request>(
+ tid, fetchBufferBlockPC, fetchBufferSize,
+ Request::INST_FETCH, cpu->instMasterId(), pc,
+ cpu->thread[tid]->contextId());
mem_req->taskId(cpu->taskId());
@@ -640,7 +638,8 @@ DefaultFetch<Impl>::fetchCacheLine(Addr vaddr, ThreadID tid, Addr pc)
template <class Impl>
void
-DefaultFetch<Impl>::finishTranslation(const Fault &fault, RequestPtr mem_req)
+DefaultFetch<Impl>::finishTranslation(const Fault &fault,
+ const RequestPtr &mem_req)
{
ThreadID tid = cpu->contextToThread(mem_req->contextId());
Addr fetchBufferBlockPC = mem_req->getVaddr();
@@ -655,7 +654,6 @@ DefaultFetch<Impl>::finishTranslation(const Fault &fault, RequestPtr mem_req)
DPRINTF(Fetch, "[tid:%i] Ignoring itlb completed after squash\n",
tid);
++fetchTlbSquashes;
- delete mem_req;
return;
}
@@ -669,7 +667,6 @@ DefaultFetch<Impl>::finishTranslation(const Fault &fault, RequestPtr mem_req)
warn("Address %#x is outside of physical memory, stopping fetch\n",
mem_req->getPaddr());
fetchStatus[tid] = NoGoodAddr;
- delete mem_req;
memReq[tid] = NULL;
return;
}
@@ -717,7 +714,6 @@ DefaultFetch<Impl>::finishTranslation(const Fault &fault, RequestPtr mem_req)
DPRINTF(Fetch, "[tid:%i] Got back req with addr %#x but expected %#x\n",
tid, mem_req->getVaddr(), memReq[tid]->getVaddr());
// Translation faulted, icache request won't be sent.
- delete mem_req;
memReq[tid] = NULL;
// Send the fault to commit. This thread will not do anything
@@ -778,7 +774,6 @@ DefaultFetch<Impl>::doSquash(const TheISA::PCState &newPC,
if (retryTid == tid) {
assert(cacheBlocked);
if (retryPkt) {
- delete retryPkt->req;
delete retryPkt;
}
retryPkt = NULL;
diff --git a/src/cpu/o3/lsq.hh b/src/cpu/o3/lsq.hh
index 6bc9b3d73..7c78156d5 100644
--- a/src/cpu/o3/lsq.hh
+++ b/src/cpu/o3/lsq.hh
@@ -274,13 +274,15 @@ class LSQ {
/** Executes a read operation, using the load specified at the load
* index.
*/
- Fault read(RequestPtr req, RequestPtr sreqLow, RequestPtr sreqHigh,
+ Fault read(const RequestPtr &req,
+ RequestPtr &sreqLow, RequestPtr &sreqHigh,
int load_idx);
/** Executes a store operation, using the store specified at the store
* index.
*/
- Fault write(RequestPtr req, RequestPtr sreqLow, RequestPtr sreqHigh,
+ Fault write(const RequestPtr &req,
+ const RequestPtr &sreqLow, const RequestPtr &sreqHigh,
uint8_t *data, int store_idx);
/**
@@ -331,7 +333,8 @@ class LSQ {
template <class Impl>
Fault
-LSQ<Impl>::read(RequestPtr req, RequestPtr sreqLow, RequestPtr sreqHigh,
+LSQ<Impl>::read(const RequestPtr &req,
+ RequestPtr &sreqLow, RequestPtr &sreqHigh,
int load_idx)
{
ThreadID tid = cpu->contextToThread(req->contextId());
@@ -341,7 +344,8 @@ LSQ<Impl>::read(RequestPtr req, RequestPtr sreqLow, RequestPtr sreqHigh,
template <class Impl>
Fault
-LSQ<Impl>::write(RequestPtr req, RequestPtr sreqLow, RequestPtr sreqHigh,
+LSQ<Impl>::write(const RequestPtr &req,
+ const RequestPtr &sreqLow, const RequestPtr &sreqHigh,
uint8_t *data, int store_idx)
{
ThreadID tid = cpu->contextToThread(req->contextId());
diff --git a/src/cpu/o3/lsq_impl.hh b/src/cpu/o3/lsq_impl.hh
index 9080907fe..56b95a5b6 100644
--- a/src/cpu/o3/lsq_impl.hh
+++ b/src/cpu/o3/lsq_impl.hh
@@ -370,7 +370,6 @@ LSQ<Impl>::recvTimingResp(PacketPtr pkt)
}
}
- delete pkt->req;
delete pkt;
return true;
}
diff --git a/src/cpu/o3/lsq_unit.hh b/src/cpu/o3/lsq_unit.hh
index a7a095c82..f5b60b2fc 100644
--- a/src/cpu/o3/lsq_unit.hh
+++ b/src/cpu/o3/lsq_unit.hh
@@ -510,11 +510,13 @@ class LSQUnit {
public:
/** Executes the load at the given index. */
- Fault read(RequestPtr req, RequestPtr sreqLow, RequestPtr sreqHigh,
+ Fault read(const RequestPtr &req,
+ RequestPtr &sreqLow, RequestPtr &sreqHigh,
int load_idx);
/** Executes the store at the given index. */
- Fault write(RequestPtr req, RequestPtr sreqLow, RequestPtr sreqHigh,
+ Fault write(const RequestPtr &req,
+ const RequestPtr &sreqLow, const RequestPtr &sreqHigh,
uint8_t *data, int store_idx);
/** Returns the index of the head load instruction. */
@@ -549,7 +551,8 @@ class LSQUnit {
template <class Impl>
Fault
-LSQUnit<Impl>::read(RequestPtr req, RequestPtr sreqLow, RequestPtr sreqHigh,
+LSQUnit<Impl>::read(const RequestPtr &req,
+ RequestPtr &sreqLow, RequestPtr &sreqHigh,
int load_idx)
{
DynInstPtr load_inst = loadQueue[load_idx];
@@ -569,14 +572,6 @@ LSQUnit<Impl>::read(RequestPtr req, RequestPtr sreqLow, RequestPtr sreqHigh,
DPRINTF(LSQUnit, "Strictly ordered load [sn:%lli] PC %s\n",
load_inst->seqNum, load_inst->pcState());
- // Must delete request now that it wasn't handed off to
- // memory. This is quite ugly. @todo: Figure out the proper
- // place to really handle request deletes.
- delete req;
- if (TheISA::HasUnalignedMemAcc && sreqLow) {
- delete sreqLow;
- delete sreqHigh;
- }
return std::make_shared<GenericISA::M5PanicFault>(
"Strictly ordered load [sn:%llx] PC %s\n",
load_inst->seqNum, load_inst->pcState());
@@ -626,8 +621,6 @@ LSQUnit<Impl>::read(RequestPtr req, RequestPtr sreqLow, RequestPtr sreqHigh,
if (delay2 > delay)
delay = delay2;
- delete sreqLow;
- delete sreqHigh;
delete fst_data_pkt;
delete snd_data_pkt;
}
@@ -704,12 +697,6 @@ LSQUnit<Impl>::read(RequestPtr req, RequestPtr sreqLow, RequestPtr sreqHigh,
// @todo: Need to make this a parameter.
cpu->schedule(wb, curTick());
- // Don't need to do anything special for split loads.
- if (TheISA::HasUnalignedMemAcc && sreqLow) {
- delete sreqLow;
- delete sreqHigh;
- }
-
++lsqForwLoads;
return NoFault;
} else if (
@@ -755,15 +742,6 @@ LSQUnit<Impl>::read(RequestPtr req, RequestPtr sreqLow, RequestPtr sreqHigh,
"Store idx %i to load addr %#x\n",
store_idx, req->getVaddr());
- // Must delete request now that it wasn't handed off to
- // memory. This is quite ugly. @todo: Figure out the
- // proper place to really handle request deletes.
- delete req;
- if (TheISA::HasUnalignedMemAcc && sreqLow) {
- delete sreqLow;
- delete sreqHigh;
- }
-
return NoFault;
}
}
@@ -843,7 +821,6 @@ LSQUnit<Impl>::read(RequestPtr req, RequestPtr sreqLow, RequestPtr sreqHigh,
if (!sreqLow) {
// Packet wasn't split, just delete main packet info
delete state;
- delete req;
delete data_pkt;
}
@@ -851,22 +828,17 @@ LSQUnit<Impl>::read(RequestPtr req, RequestPtr sreqLow, RequestPtr sreqHigh,
if (!completedFirst) {
// Split packet, but first failed. Delete all state.
delete state;
- delete req;
delete data_pkt;
delete fst_data_pkt;
delete snd_data_pkt;
- delete sreqLow;
- delete sreqHigh;
- sreqLow = NULL;
- sreqHigh = NULL;
+ sreqLow.reset();
+ sreqHigh.reset();
} else {
// Can't delete main packet data or state because first packet
// was sent to the memory system
delete data_pkt;
- delete req;
- delete sreqHigh;
delete snd_data_pkt;
- sreqHigh = NULL;
+ sreqHigh.reset();
}
}
@@ -883,7 +855,8 @@ LSQUnit<Impl>::read(RequestPtr req, RequestPtr sreqLow, RequestPtr sreqHigh,
template <class Impl>
Fault
-LSQUnit<Impl>::write(RequestPtr req, RequestPtr sreqLow, RequestPtr sreqHigh,
+LSQUnit<Impl>::write(const RequestPtr &req,
+ const RequestPtr &sreqLow, const RequestPtr &sreqHigh,
uint8_t *data, int store_idx)
{
assert(storeQueue[store_idx].inst);
diff --git a/src/cpu/o3/lsq_unit_impl.hh b/src/cpu/o3/lsq_unit_impl.hh
index e8e2c1853..c2750be7d 100644
--- a/src/cpu/o3/lsq_unit_impl.hh
+++ b/src/cpu/o3/lsq_unit_impl.hh
@@ -79,7 +79,6 @@ LSQUnit<Impl>::WritebackEvent::process()
if (pkt->senderState)
delete pkt->senderState;
- delete pkt->req;
delete pkt;
}
@@ -133,7 +132,6 @@ LSQUnit<Impl>::completeDataAccess(PacketPtr pkt)
}
if (TheISA::HasUnalignedMemAcc && state->isSplit && state->isLoad) {
- delete state->mainPkt->req;
delete state->mainPkt;
}
@@ -831,9 +829,9 @@ LSQUnit<Impl>::writebackStores()
DynInstPtr inst = storeQueue[storeWBIdx].inst;
- RequestPtr req = storeQueue[storeWBIdx].req;
- RequestPtr sreqLow = storeQueue[storeWBIdx].sreqLow;
- RequestPtr sreqHigh = storeQueue[storeWBIdx].sreqHigh;
+ RequestPtr &req = storeQueue[storeWBIdx].req;
+ const RequestPtr &sreqLow = storeQueue[storeWBIdx].sreqLow;
+ const RequestPtr &sreqHigh = storeQueue[storeWBIdx].sreqHigh;
storeQueue[storeWBIdx].committed = true;
@@ -874,7 +872,6 @@ LSQUnit<Impl>::writebackStores()
state->outstanding = 2;
// Can delete the main request now.
- delete req;
req = sreqLow;
}
@@ -923,11 +920,8 @@ LSQUnit<Impl>::writebackStores()
assert(snd_data_pkt->req->isMmappedIpr());
TheISA::handleIprWrite(thread, snd_data_pkt);
delete snd_data_pkt;
- delete sreqLow;
- delete sreqHigh;
}
delete state;
- delete req;
completeStore(storeWBIdx);
incrStIdx(storeWBIdx);
} else if (!sendStore(data_pkt)) {
@@ -1061,16 +1055,12 @@ LSQUnit<Impl>::squash(const InstSeqNum &squashed_num)
// Must delete request now that it wasn't handed off to
// memory. This is quite ugly. @todo: Figure out the proper
// place to really handle request deletes.
- delete storeQueue[store_idx].req;
+ storeQueue[store_idx].req.reset();
if (TheISA::HasUnalignedMemAcc && storeQueue[store_idx].isSplit) {
- delete storeQueue[store_idx].sreqLow;
- delete storeQueue[store_idx].sreqHigh;
-
- storeQueue[store_idx].sreqLow = NULL;
- storeQueue[store_idx].sreqHigh = NULL;
+ storeQueue[store_idx].sreqLow.reset();
+ storeQueue[store_idx].sreqHigh.reset();
}
- storeQueue[store_idx].req = NULL;
--stores;
// Inefficient!
diff --git a/src/cpu/simple/atomic.cc b/src/cpu/simple/atomic.cc
index 0e7c59f6a..040d1dbf9 100644
--- a/src/cpu/simple/atomic.cc
+++ b/src/cpu/simple/atomic.cc
@@ -69,9 +69,9 @@ AtomicSimpleCPU::init()
BaseSimpleCPU::init();
int cid = threadContexts[0]->contextId();
- ifetch_req.setContext(cid);
- data_read_req.setContext(cid);
- data_write_req.setContext(cid);
+ ifetch_req->setContext(cid);
+ data_read_req->setContext(cid);
+ data_write_req->setContext(cid);
}
AtomicSimpleCPU::AtomicSimpleCPU(AtomicSimpleCPUParams *p)
@@ -87,6 +87,9 @@ AtomicSimpleCPU::AtomicSimpleCPU(AtomicSimpleCPUParams *p)
ppCommit(nullptr)
{
_status = Idle;
+ ifetch_req = std::make_shared<Request>();
+ data_read_req = std::make_shared<Request>();
+ data_write_req = std::make_shared<Request>();
}
@@ -331,7 +334,7 @@ AtomicSimpleCPU::readMem(Addr addr, uint8_t * data, unsigned size,
SimpleThread* thread = t_info.thread;
// use the CPU's statically allocated read request and packet objects
- RequestPtr req = &data_read_req;
+ const RequestPtr &req = data_read_req;
if (traceData)
traceData->setMem(addr, size, flags);
@@ -435,7 +438,7 @@ AtomicSimpleCPU::writeMem(uint8_t *data, unsigned size, Addr addr,
}
// use the CPU's statically allocated write request and packet objects
- RequestPtr req = &data_write_req;
+ const RequestPtr &req = data_write_req;
if (traceData)
traceData->setMem(addr, size, flags);
@@ -545,9 +548,9 @@ AtomicSimpleCPU::tick()
if (numThreads > 1) {
ContextID cid = threadContexts[curThread]->contextId();
- ifetch_req.setContext(cid);
- data_read_req.setContext(cid);
- data_write_req.setContext(cid);
+ ifetch_req->setContext(cid);
+ data_read_req->setContext(cid);
+ data_write_req->setContext(cid);
}
SimpleExecContext& t_info = *threadInfo[curThread];
@@ -577,9 +580,9 @@ AtomicSimpleCPU::tick()
bool needToFetch = !isRomMicroPC(pcState.microPC()) &&
!curMacroStaticInst;
if (needToFetch) {
- ifetch_req.taskId(taskId());
- setupFetchRequest(&ifetch_req);
- fault = thread->itb->translateAtomic(&ifetch_req, thread->getTC(),
+ ifetch_req->taskId(taskId());
+ setupFetchRequest(ifetch_req);
+ fault = thread->itb->translateAtomic(ifetch_req, thread->getTC(),
BaseTLB::Execute);
}
@@ -597,7 +600,7 @@ AtomicSimpleCPU::tick()
//if (decoder.needMoreBytes())
//{
icache_access = true;
- Packet ifetch_pkt = Packet(&ifetch_req, MemCmd::ReadReq);
+ Packet ifetch_pkt = Packet(ifetch_req, MemCmd::ReadReq);
ifetch_pkt.dataStatic(&inst);
if (fastmem && system->isMemAddr(ifetch_pkt.getAddr()))
diff --git a/src/cpu/simple/atomic.hh b/src/cpu/simple/atomic.hh
index c9dd954bb..addbe234e 100644
--- a/src/cpu/simple/atomic.hh
+++ b/src/cpu/simple/atomic.hh
@@ -159,9 +159,9 @@ class AtomicSimpleCPU : public BaseSimpleCPU
AtomicCPUDPort dcachePort;
bool fastmem;
- Request ifetch_req;
- Request data_read_req;
- Request data_write_req;
+ RequestPtr ifetch_req;
+ RequestPtr data_read_req;
+ RequestPtr data_write_req;
bool dcache_access;
Tick dcache_latency;
diff --git a/src/cpu/simple/base.cc b/src/cpu/simple/base.cc
index 025c7a3ea..825d3103f 100644
--- a/src/cpu/simple/base.cc
+++ b/src/cpu/simple/base.cc
@@ -468,7 +468,7 @@ BaseSimpleCPU::checkForInterrupts()
void
-BaseSimpleCPU::setupFetchRequest(RequestPtr req)
+BaseSimpleCPU::setupFetchRequest(const RequestPtr &req)
{
SimpleExecContext &t_info = *threadInfo[curThread];
SimpleThread* thread = t_info.thread;
diff --git a/src/cpu/simple/base.hh b/src/cpu/simple/base.hh
index 64fa58d92..e62fcf4d1 100644
--- a/src/cpu/simple/base.hh
+++ b/src/cpu/simple/base.hh
@@ -129,7 +129,7 @@ class BaseSimpleCPU : public BaseCPU
void checkForInterrupts();
- void setupFetchRequest(RequestPtr req);
+ void setupFetchRequest(const RequestPtr &req);
void preExecute();
void postExecute();
void advancePC(const Fault &fault);
diff --git a/src/cpu/simple/timing.cc b/src/cpu/simple/timing.cc
index 657c2976f..14e760af9 100644
--- a/src/cpu/simple/timing.cc
+++ b/src/cpu/simple/timing.cc
@@ -261,7 +261,7 @@ TimingSimpleCPU::handleReadPacket(PacketPtr pkt)
SimpleExecContext &t_info = *threadInfo[curThread];
SimpleThread* thread = t_info.thread;
- RequestPtr req = pkt->req;
+ const RequestPtr &req = pkt->req;
// We're about the issues a locked load, so tell the monitor
// to start caring about this address
@@ -285,7 +285,7 @@ TimingSimpleCPU::handleReadPacket(PacketPtr pkt)
}
void
-TimingSimpleCPU::sendData(RequestPtr req, uint8_t *data, uint64_t *res,
+TimingSimpleCPU::sendData(const RequestPtr &req, uint8_t *data, uint64_t *res,
bool read)
{
SimpleExecContext &t_info = *threadInfo[curThread];
@@ -321,8 +321,8 @@ TimingSimpleCPU::sendData(RequestPtr req, uint8_t *data, uint64_t *res,
}
void
-TimingSimpleCPU::sendSplitData(RequestPtr req1, RequestPtr req2,
- RequestPtr req, uint8_t *data, bool read)
+TimingSimpleCPU::sendSplitData(const RequestPtr &req1, const RequestPtr &req2,
+ const RequestPtr &req, uint8_t *data, bool read)
{
PacketPtr pkt1, pkt2;
buildSplitPacket(pkt1, pkt2, req1, req2, req, data, read);
@@ -377,14 +377,14 @@ TimingSimpleCPU::translationFault(const Fault &fault)
}
PacketPtr
-TimingSimpleCPU::buildPacket(RequestPtr req, bool read)
+TimingSimpleCPU::buildPacket(const RequestPtr &req, bool read)
{
return read ? Packet::createRead(req) : Packet::createWrite(req);
}
void
TimingSimpleCPU::buildSplitPacket(PacketPtr &pkt1, PacketPtr &pkt2,
- RequestPtr req1, RequestPtr req2, RequestPtr req,
+ const RequestPtr &req1, const RequestPtr &req2, const RequestPtr &req,
uint8_t *data, bool read)
{
pkt1 = pkt2 = NULL;
@@ -438,8 +438,9 @@ TimingSimpleCPU::initiateMemRead(Addr addr, unsigned size,
if (traceData)
traceData->setMem(addr, size, flags);
- RequestPtr req = new Request(asid, addr, size, flags, dataMasterId(), pc,
- thread->contextId());
+ RequestPtr req = std::make_shared<Request>(
+ asid, addr, size, flags, dataMasterId(), pc,
+ thread->contextId());
req->taskId(taskId());
@@ -479,7 +480,7 @@ TimingSimpleCPU::handleWritePacket()
SimpleExecContext &t_info = *threadInfo[curThread];
SimpleThread* thread = t_info.thread;
- RequestPtr req = dcache_pkt->req;
+ const RequestPtr &req = dcache_pkt->req;
if (req->isMmappedIpr()) {
Cycles delay = TheISA::handleIprWrite(thread->getTC(), dcache_pkt);
new IprEvent(dcache_pkt, this, clockEdge(delay));
@@ -519,8 +520,9 @@ TimingSimpleCPU::writeMem(uint8_t *data, unsigned size,
if (traceData)
traceData->setMem(addr, size, flags);
- RequestPtr req = new Request(asid, addr, size, flags, dataMasterId(), pc,
- thread->contextId());
+ RequestPtr req = std::make_shared<Request>(
+ asid, addr, size, flags, dataMasterId(), pc,
+ thread->contextId());
req->taskId(taskId());
@@ -620,7 +622,7 @@ TimingSimpleCPU::fetch()
if (needToFetch) {
_status = BaseSimpleCPU::Running;
- RequestPtr ifetch_req = new Request();
+ RequestPtr ifetch_req = std::make_shared<Request>();
ifetch_req->taskId(taskId());
ifetch_req->setContext(thread->contextId());
setupFetchRequest(ifetch_req);
@@ -638,7 +640,7 @@ TimingSimpleCPU::fetch()
void
-TimingSimpleCPU::sendFetch(const Fault &fault, RequestPtr req,
+TimingSimpleCPU::sendFetch(const Fault &fault, const RequestPtr &req,
ThreadContext *tc)
{
if (fault == NoFault) {
@@ -659,7 +661,6 @@ TimingSimpleCPU::sendFetch(const Fault &fault, RequestPtr req,
}
} else {
DPRINTF(SimpleCPU, "Translation of addr %#x faulted\n", req->getVaddr());
- delete req;
// fetch fault: advance directly to next instruction (fault handler)
_status = BaseSimpleCPU::Running;
advanceInst(fault);
@@ -775,7 +776,6 @@ TimingSimpleCPU::completeIfetch(PacketPtr pkt)
}
if (pkt) {
- delete pkt->req;
delete pkt;
}
}
@@ -831,7 +831,6 @@ TimingSimpleCPU::completeDataAccess(PacketPtr pkt)
SplitFragmentSenderState * send_state =
dynamic_cast<SplitFragmentSenderState *>(pkt->senderState);
assert(send_state);
- delete pkt->req;
delete pkt;
PacketPtr big_pkt = send_state->bigPkt;
delete send_state;
@@ -866,7 +865,6 @@ TimingSimpleCPU::completeDataAccess(PacketPtr pkt)
traceData = NULL;
}
- delete pkt->req;
delete pkt;
postExecute();
diff --git a/src/cpu/simple/timing.hh b/src/cpu/simple/timing.hh
index 8498630b4..0300d38eb 100644
--- a/src/cpu/simple/timing.hh
+++ b/src/cpu/simple/timing.hh
@@ -124,7 +124,7 @@ class TimingSimpleCPU : public BaseSimpleCPU
}
void
- finish(const Fault &fault, RequestPtr req, ThreadContext *tc,
+ finish(const Fault &fault, const RequestPtr &req, ThreadContext *tc,
BaseTLB::Mode mode)
{
cpu->sendFetch(fault, req, tc);
@@ -133,15 +133,18 @@ class TimingSimpleCPU : public BaseSimpleCPU
FetchTranslation fetchTranslation;
void threadSnoop(PacketPtr pkt, ThreadID sender);
- void sendData(RequestPtr req, uint8_t *data, uint64_t *res, bool read);
- void sendSplitData(RequestPtr req1, RequestPtr req2, RequestPtr req,
+ void sendData(const RequestPtr &req,
+ uint8_t *data, uint64_t *res, bool read);
+ void sendSplitData(const RequestPtr &req1, const RequestPtr &req2,
+ const RequestPtr &req,
uint8_t *data, bool read);
void translationFault(const Fault &fault);
- PacketPtr buildPacket(RequestPtr req, bool read);
+ PacketPtr buildPacket(const RequestPtr &req, bool read);
void buildSplitPacket(PacketPtr &pkt1, PacketPtr &pkt2,
- RequestPtr req1, RequestPtr req2, RequestPtr req,
+ const RequestPtr &req1, const RequestPtr &req2,
+ const RequestPtr &req,
uint8_t *data, bool read);
bool handleReadPacket(PacketPtr pkt);
@@ -289,7 +292,8 @@ class TimingSimpleCPU : public BaseSimpleCPU
Addr addr, Request::Flags flags, uint64_t *res) override;
void fetch();
- void sendFetch(const Fault &fault, RequestPtr req, ThreadContext *tc);
+ void sendFetch(const Fault &fault,
+ const RequestPtr &req, ThreadContext *tc);
void completeIfetch(PacketPtr );
void completeDataAccess(PacketPtr pkt);
void advanceInst(const Fault &fault);
diff --git a/src/cpu/testers/directedtest/InvalidateGenerator.cc b/src/cpu/testers/directedtest/InvalidateGenerator.cc
index 3319e8400..9351d91c4 100644
--- a/src/cpu/testers/directedtest/InvalidateGenerator.cc
+++ b/src/cpu/testers/directedtest/InvalidateGenerator.cc
@@ -60,7 +60,7 @@ InvalidateGenerator::initiate()
Packet::Command cmd;
// For simplicity, requests are assumed to be 1 byte-sized
- RequestPtr req = new Request(m_address, 1, flags, masterId);
+ RequestPtr req = std::make_shared<Request>(m_address, 1, flags, masterId);
//
// Based on the current state, issue a load or a store
@@ -92,7 +92,6 @@ InvalidateGenerator::initiate()
// If the packet did not issue, must delete
// Note: No need to delete the data, the packet destructor
// will delete it
- delete pkt->req;
delete pkt;
DPRINTF(DirectedTest, "failed to issue request - sequencer not ready\n");
diff --git a/src/cpu/testers/directedtest/RubyDirectedTester.cc b/src/cpu/testers/directedtest/RubyDirectedTester.cc
index ef133379d..be7f3c256 100644
--- a/src/cpu/testers/directedtest/RubyDirectedTester.cc
+++ b/src/cpu/testers/directedtest/RubyDirectedTester.cc
@@ -101,7 +101,6 @@ RubyDirectedTester::CpuPort::recvTimingResp(PacketPtr pkt)
//
// Now that the tester has completed, delete the packet, then return
//
- delete pkt->req;
delete pkt;
return true;
}
diff --git a/src/cpu/testers/directedtest/SeriesRequestGenerator.cc b/src/cpu/testers/directedtest/SeriesRequestGenerator.cc
index 17ae04cdf..e5b7656d9 100644
--- a/src/cpu/testers/directedtest/SeriesRequestGenerator.cc
+++ b/src/cpu/testers/directedtest/SeriesRequestGenerator.cc
@@ -60,7 +60,7 @@ SeriesRequestGenerator::initiate()
Request::Flags flags;
// For simplicity, requests are assumed to be 1 byte-sized
- RequestPtr req = new Request(m_address, 1, flags, masterId);
+ RequestPtr req = std::make_shared<Request>(m_address, 1, flags, masterId);
Packet::Command cmd;
bool do_write = (random_mt.random(0, 100) < m_percent_writes);
@@ -81,7 +81,6 @@ SeriesRequestGenerator::initiate()
// If the packet did not issue, must delete
// Note: No need to delete the data, the packet destructor
// will delete it
- delete pkt->req;
delete pkt;
DPRINTF(DirectedTest, "failed to initiate request - sequencer not ready\n");
diff --git a/src/cpu/testers/garnet_synthetic_traffic/GarnetSyntheticTraffic.cc b/src/cpu/testers/garnet_synthetic_traffic/GarnetSyntheticTraffic.cc
index be1921aad..0ced9df84 100644
--- a/src/cpu/testers/garnet_synthetic_traffic/GarnetSyntheticTraffic.cc
+++ b/src/cpu/testers/garnet_synthetic_traffic/GarnetSyntheticTraffic.cc
@@ -129,16 +129,13 @@ GarnetSyntheticTraffic::init()
void
GarnetSyntheticTraffic::completeRequest(PacketPtr pkt)
{
- RequestPtr req = pkt->req;
-
DPRINTF(GarnetSyntheticTraffic,
"Completed injection of %s packet for address %x\n",
pkt->isWrite() ? "write" : "read\n",
- req->getPaddr());
+ pkt->req->getPaddr());
assert(pkt->isResponse());
noResponseCycles = 0;
- delete req;
delete pkt;
}
@@ -296,17 +293,18 @@ GarnetSyntheticTraffic::generatePkt()
if (injReqType == 0) {
// generate packet for virtual network 0
requestType = MemCmd::ReadReq;
- req = new Request(paddr, access_size, flags, masterId);
+ req = std::make_shared<Request>(paddr, access_size, flags, masterId);
} else if (injReqType == 1) {
// generate packet for virtual network 1
requestType = MemCmd::ReadReq;
flags.set(Request::INST_FETCH);
- req = new Request(0, 0x0, access_size, flags, masterId, 0x0, 0);
+ req = std::make_shared<Request>(
+ 0, 0x0, access_size, flags, masterId, 0x0, 0);
req->setPaddr(paddr);
} else { // if (injReqType == 2)
// generate packet for virtual network 2
requestType = MemCmd::WriteReq;
- req = new Request(paddr, access_size, flags, masterId);
+ req = std::make_shared<Request>(paddr, access_size, flags, masterId);
}
req->setContext(id);
diff --git a/src/cpu/testers/memtest/memtest.cc b/src/cpu/testers/memtest/memtest.cc
index 89b4d1159..09e7e88a1 100644
--- a/src/cpu/testers/memtest/memtest.cc
+++ b/src/cpu/testers/memtest/memtest.cc
@@ -136,7 +136,7 @@ MemTest::getMasterPort(const std::string &if_name, PortID idx)
void
MemTest::completeRequest(PacketPtr pkt, bool functional)
{
- RequestPtr req = pkt->req;
+ const RequestPtr &req = pkt->req;
assert(req->getSize() == 1);
// this address is no longer outstanding
@@ -187,8 +187,6 @@ MemTest::completeRequest(PacketPtr pkt, bool functional)
}
}
- delete pkt->req;
-
// the packet will delete the data
delete pkt;
@@ -246,7 +244,7 @@ MemTest::tick()
bool do_functional = (random_mt.random(0, 100) < percentFunctional) &&
!uncacheable;
- RequestPtr req = new Request(paddr, 1, flags, masterId);
+ RequestPtr req = std::make_shared<Request>(paddr, 1, flags, masterId);
req->setContext(id);
outstandingAddrs.insert(paddr);
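
The memtest change binds a const reference to the packet's request instead of copying the pointer; with shared_ptr a copy would bump the atomic reference count for a purely local alias. A small illustration of the same idea, with stub types standing in for the gem5 classes:

    #include <cassert>
    #include <memory>

    struct Request { unsigned getSize() const { return 1; } };   // stub
    using RequestPtr = std::shared_ptr<Request>;
    struct Packet { RequestPtr req; };                           // stub

    void
    completeRequest(Packet *pkt)
    {
        // The reference aliases pkt->req without touching the shared_ptr's
        // atomic reference count; a by-value copy would also be correct,
        // just more expensive.
        const RequestPtr &req = pkt->req;
        assert(req->getSize() == 1);
        delete pkt;   // drops the packet's reference; the Request dies with its last owner
    }
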
diff --git a/src/cpu/testers/rubytest/Check.cc b/src/cpu/testers/rubytest/Check.cc
index 776d711a2..49332ab01 100644
--- a/src/cpu/testers/rubytest/Check.cc
+++ b/src/cpu/testers/rubytest/Check.cc
@@ -107,7 +107,7 @@ Check::initiatePrefetch()
}
// Prefetches are assumed to be 0 sized
- RequestPtr req = new Request(m_address, 0, flags,
+ RequestPtr req = std::make_shared<Request>(m_address, 0, flags,
m_tester_ptr->masterId(), curTick(), m_pc);
req->setContext(index);
@@ -127,7 +127,6 @@ Check::initiatePrefetch()
} else {
// If the packet did not issue, must delete
delete pkt->senderState;
- delete pkt->req;
delete pkt;
DPRINTF(RubyTest,
@@ -146,7 +145,7 @@ Check::initiateFlush()
Request::Flags flags;
- RequestPtr req = new Request(m_address, CHECK_SIZE, flags,
+ RequestPtr req = std::make_shared<Request>(m_address, CHECK_SIZE, flags,
m_tester_ptr->masterId(), curTick(), m_pc);
Packet::Command cmd;
@@ -179,8 +178,8 @@ Check::initiateAction()
Addr writeAddr(m_address + m_store_count);
// Stores are assumed to be 1 byte-sized
- RequestPtr req = new Request(writeAddr, 1, flags, m_tester_ptr->masterId(),
- curTick(), m_pc);
+ RequestPtr req = std::make_shared<Request>(
+ writeAddr, 1, flags, m_tester_ptr->masterId(), curTick(), m_pc);
req->setContext(index);
Packet::Command cmd;
@@ -215,7 +214,6 @@ Check::initiateAction()
// Note: No need to delete the data, the packet destructor
// will delete it
delete pkt->senderState;
- delete pkt->req;
delete pkt;
DPRINTF(RubyTest, "failed to initiate action - sequencer not ready\n");
@@ -244,7 +242,7 @@ Check::initiateCheck()
}
// Checks are sized depending on the number of bytes written
- RequestPtr req = new Request(m_address, CHECK_SIZE, flags,
+ RequestPtr req = std::make_shared<Request>(m_address, CHECK_SIZE, flags,
m_tester_ptr->masterId(), curTick(), m_pc);
req->setContext(index);
@@ -269,7 +267,6 @@ Check::initiateCheck()
// Note: No need to delete the data, the packet destructor
// will delete it
delete pkt->senderState;
- delete pkt->req;
delete pkt;
DPRINTF(RubyTest, "failed to initiate check - cpu port not ready\n");
diff --git a/src/cpu/testers/rubytest/RubyTester.cc b/src/cpu/testers/rubytest/RubyTester.cc
index 67c824806..93754467d 100644
--- a/src/cpu/testers/rubytest/RubyTester.cc
+++ b/src/cpu/testers/rubytest/RubyTester.cc
@@ -186,7 +186,6 @@ RubyTester::CpuPort::recvTimingResp(PacketPtr pkt)
// Now that the tester has completed, delete the senderState
// (includes sublock) and the packet, then return
delete pkt->senderState;
- delete pkt->req;
delete pkt;
return true;
}
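
Note that the Ruby testers still delete pkt->senderState by hand; only the request side of the packet has moved to shared ownership. A stub illustration of that mixed ownership (SenderStateStub is invented for the example):

    #include <memory>

    struct Request {};
    using RequestPtr = std::shared_ptr<Request>;
    struct SenderStateStub {};            // invented for this example

    struct Packet
    {
        RequestPtr req;                   // released automatically with its last owner
        SenderStateStub *senderState;     // still raw, still needs an explicit delete
    };

    bool
    recvTimingResp(Packet *pkt)
    {
        delete pkt->senderState;          // manual cleanup, unchanged by this patch
        delete pkt;                       // drops the packet's reference to req
        return true;
    }
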
diff --git a/src/cpu/testers/traffic_gen/base_gen.cc b/src/cpu/testers/traffic_gen/base_gen.cc
index b5b4f5817..f25bfb08a 100644
--- a/src/cpu/testers/traffic_gen/base_gen.cc
+++ b/src/cpu/testers/traffic_gen/base_gen.cc
@@ -59,7 +59,7 @@ BaseGen::getPacket(Addr addr, unsigned size, const MemCmd& cmd,
Request::FlagsType flags)
{
// Create new request
- RequestPtr req = new Request(addr, size, flags, masterID);
+ RequestPtr req = std::make_shared<Request>(addr, size, flags, masterID);
// Dummy PC to have PC-based prefetchers latch on; get entropy into higher
// bits
req->setPC(((Addr)masterID) << 2);
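
As in getPacket() above, allocation now goes through std::make_shared rather than new, and no caller frees the request by hand. A standalone sketch; the constructor arguments mirror those in the diff, but the Request type here is only a stub:

    #include <memory>

    struct Request                        // stub with a matching constructor shape
    {
        Request(unsigned long addr, unsigned size, unsigned flags, int masterId) { }
        void setPC(unsigned long pc) { }
    };
    using RequestPtr = std::shared_ptr<Request>;

    RequestPtr
    makeRequest(unsigned long addr, unsigned size, unsigned flags,
                int masterId, unsigned long pc)
    {
        // One allocation covers the Request and its control block, and
        // ownership passes to the caller with no matching delete anywhere.
        RequestPtr req = std::make_shared<Request>(addr, size, flags, masterId);
        req->setPC(pc);
        return req;
    }
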
diff --git a/src/cpu/testers/traffic_gen/traffic_gen.cc b/src/cpu/testers/traffic_gen/traffic_gen.cc
index 2d4dd3752..d262fd9f7 100644
--- a/src/cpu/testers/traffic_gen/traffic_gen.cc
+++ b/src/cpu/testers/traffic_gen/traffic_gen.cc
@@ -213,7 +213,6 @@ TrafficGen::update()
warn("%s suppressed %d packets with non-memory addresses\n",
name(), numSuppressed);
- delete pkt->req;
delete pkt;
pkt = nullptr;
}
@@ -575,7 +574,6 @@ TrafficGen::regStats()
bool
TrafficGen::TrafficGenPort::recvTimingResp(PacketPtr pkt)
{
- delete pkt->req;
delete pkt;
return true;
diff --git a/src/cpu/trace/trace_cpu.cc b/src/cpu/trace/trace_cpu.cc
index 77755e888..2b198e966 100644
--- a/src/cpu/trace/trace_cpu.cc
+++ b/src/cpu/trace/trace_cpu.cc
@@ -662,9 +662,11 @@ TraceCPU::ElasticDataGen::executeMemReq(GraphNode* node_ptr)
}
// Create a request and the packet containing request
- Request* req = new Request(node_ptr->physAddr, node_ptr->size,
- node_ptr->flags, masterID, node_ptr->seqNum,
- ContextID(0));
+ auto req = std::make_shared<Request>(
+ node_ptr->physAddr, node_ptr->size,
+ node_ptr->flags, masterID, node_ptr->seqNum,
+ ContextID(0));
+
req->setPC(node_ptr->pc);
// If virtual address is valid, set the asid and virtual address fields
// of the request.
@@ -1158,7 +1160,7 @@ TraceCPU::FixedRetryGen::send(Addr addr, unsigned size, const MemCmd& cmd,
{
// Create new request
- Request* req = new Request(addr, size, flags, masterID);
+ auto req = std::make_shared<Request>(addr, size, flags, masterID);
req->setPC(pc);
// If this is not done it triggers assert in L1 cache for invalid contextId
@@ -1224,8 +1226,7 @@ bool
TraceCPU::IcachePort::recvTimingResp(PacketPtr pkt)
{
// All responses on the instruction fetch side are ignored. Simply delete
- // the request and packet to free allocated memory
- delete pkt->req;
+ // the packet to free allocated memory
delete pkt;
return true;
@@ -1250,9 +1251,8 @@ TraceCPU::DcachePort::recvTimingResp(PacketPtr pkt)
// Handle the responses for data memory requests which is done inside the
// elastic data generator
owner->dcacheRecvTimingResp(pkt);
- // After processing the response delete the request and packet to free
+ // After processing the response delete the packet to free
// memory
- delete pkt->req;
delete pkt;
return true;
diff --git a/src/cpu/translation.hh b/src/cpu/translation.hh
index a7372f3ee..601d24cd1 100644
--- a/src/cpu/translation.hh
+++ b/src/cpu/translation.hh
@@ -78,8 +78,8 @@ class WholeTranslationState
* Single translation state. We set the number of outstanding
* translations to one and indicate that it is not split.
*/
- WholeTranslationState(RequestPtr _req, uint8_t *_data, uint64_t *_res,
- BaseTLB::Mode _mode)
+ WholeTranslationState(const RequestPtr &_req, uint8_t *_data,
+ uint64_t *_res, BaseTLB::Mode _mode)
: outstanding(1), delay(false), isSplit(false), mainReq(_req),
sreqLow(NULL), sreqHigh(NULL), data(_data), res(_res), mode(_mode)
{
@@ -92,9 +92,9 @@ class WholeTranslationState
* number of outstanding translations to two and then mark this as a
* split translation.
*/
- WholeTranslationState(RequestPtr _req, RequestPtr _sreqLow,
- RequestPtr _sreqHigh, uint8_t *_data, uint64_t *_res,
- BaseTLB::Mode _mode)
+ WholeTranslationState(const RequestPtr &_req, const RequestPtr &_sreqLow,
+ const RequestPtr &_sreqHigh, uint8_t *_data,
+ uint64_t *_res, BaseTLB::Mode _mode)
: outstanding(2), delay(false), isSplit(true), mainReq(_req),
sreqLow(_sreqLow), sreqHigh(_sreqHigh), data(_data), res(_res),
mode(_mode)
@@ -196,10 +196,10 @@ class WholeTranslationState
void
deleteReqs()
{
- delete mainReq;
+ mainReq.reset();
if (isSplit) {
- delete sreqLow;
- delete sreqHigh;
+ sreqLow.reset();
+ sreqHigh.reset();
}
}
};
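
deleteReqs() above now resets the shared pointers instead of deleting raw ones, so a request that is still referenced elsewhere (for example by an in-flight packet) survives the call. The same pattern with a stub Request, not the full WholeTranslationState:

    #include <memory>

    struct Request {};
    using RequestPtr = std::shared_ptr<Request>;

    struct TranslationStateSketch             // stand-in for WholeTranslationState
    {
        RequestPtr mainReq, sreqLow, sreqHigh;
        bool isSplit = false;

        void
        deleteReqs()
        {
            mainReq.reset();                  // was: delete mainReq;
            if (isSplit) {
                sreqLow.reset();              // was: delete sreqLow;
                sreqHigh.reset();             // was: delete sreqHigh;
            }
        }
    };
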
@@ -249,7 +249,7 @@ class DataTranslation : public BaseTLB::Translation
* translation is complete if the state says so.
*/
void
- finish(const Fault &fault, RequestPtr req, ThreadContext *tc,
+ finish(const Fault &fault, const RequestPtr &req, ThreadContext *tc,
BaseTLB::Mode mode)
{
assert(state);
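
The new constructor and finish() signatures take const RequestPtr & rather than a RequestPtr by value, so the call itself carries no atomic reference-count traffic; the callee copies the pointer only where it actually keeps the request alive. A hypothetical, simplified illustration of that calling convention:

    #include <memory>

    struct Request {};
    using RequestPtr = std::shared_ptr<Request>;

    struct TranslationSketch      // hypothetical, only to show the convention
    {
        RequestPtr kept;

        // Pass by const reference: no reference-count update on the call itself,
        // and an explicit (counted) copy only where ownership is retained.
        void
        finish(const RequestPtr &req)
        {
            kept = req;
        }
    };
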