From 2113b21996d086dab32b9fd388efe3df241bfbd2 Mon Sep 17 00:00:00 2001
From: Giacomo Travaglini
Date: Sun, 3 Jun 2018 13:10:26 +0100
Subject: misc: Substitute pointer to Request with aliased RequestPtr

Every usage of Request* in the code has been replaced with the
RequestPtr alias. This is a preparatory patch for when RequestPtr will
be typedefed to a smart pointer to Request rather than a raw pointer to
Request.

Change-Id: I73cbaf2d96ea9313a590cdc731a25662950cd51a
Signed-off-by: Giacomo Travaglini
Reviewed-by: Nikos Nikoleris
Reviewed-on: https://gem5-review.googlesource.com/10995
Reviewed-by: Anthony Gutierrez
Reviewed-by: Daniel Carvalho
Maintainer: Anthony Gutierrez
---
 src/arch/alpha/locked_mem.hh                        |  4 ++--
 src/arch/arm/locked_mem.hh                          |  4 ++--
 src/arch/generic/locked_mem.hh                      |  4 ++--
 src/arch/hsail/insts/mem.hh                         | 19 ++++++++++---------
 src/arch/mips/locked_mem.hh                         |  4 ++--
 src/arch/riscv/locked_mem.hh                        |  4 ++--
 src/cpu/base_dyn_inst.hh                            | 12 ++++++------
 src/cpu/checker/cpu.cc                              |  2 +-
 src/cpu/checker/cpu.hh                              |  4 ++--
 src/cpu/minor/lsq.cc                                |  4 ++--
 src/cpu/minor/lsq.hh                                |  2 +-
 src/cpu/o3/lsq_unit.hh                              |  8 ++++----
 src/cpu/o3/lsq_unit_impl.hh                         |  2 +-
 src/cpu/simple/atomic.cc                            |  4 ++--
 src/cpu/simple/base.cc                              |  2 +-
 src/cpu/simple/base.hh                              |  2 +-
 src/cpu/simple/timing.cc                            |  2 +-
 src/cpu/testers/directedtest/InvalidateGenerator.cc |  2 +-
 .../testers/directedtest/SeriesRequestGenerator.cc  |  2 +-
 .../GarnetSyntheticTraffic.cc                       |  4 ++--
 src/cpu/testers/memtest/memtest.cc                  |  4 ++--
 src/cpu/testers/rubytest/Check.cc                   |  8 ++++----
 src/cpu/testers/traffic_gen/base_gen.cc             |  2 +-
 src/gpu-compute/compute_unit.cc                     |  4 ++--
 src/gpu-compute/fetch_unit.cc                       |  2 +-
 src/gpu-compute/gpu_dyn_inst.hh                     |  2 +-
 src/gpu-compute/shader.cc                           |  2 +-
 src/mem/abstract_mem.cc                             |  4 ++--
 src/mem/abstract_mem.hh                             |  6 +++---
 src/mem/cache/base.cc                               |  4 ++--
 src/mem/cache/cache.cc                              |  3 ++-
 src/mem/cache/prefetch/queued.cc                    |  2 +-
 32 files changed, 68 insertions(+), 66 deletions(-)
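Note on the direction this prepares: call sites now name the alias instead of the
underlying pointer type, so only the alias definition has to change when the
smart-pointer switch lands. The snippet below is a minimal illustrative sketch,
not code from this patch or from the gem5 tree; in particular, std::shared_ptr
and std::make_shared are assumptions, since the commit message only promises
"a smart pointer to Request".

    #include <memory>

    class Request;                 // stands in for gem5's Request class

    // Today: the alias is a plain raw pointer, so swapping "Request *" for
    // "RequestPtr" at call sites (what this patch does) changes no behaviour.
    typedef Request *RequestPtr;

    // Planned follow-up (assumed form, for illustration only):
    // typedef std::shared_ptr<Request> RequestPtr;

    // Either way, code written against the alias keeps the same shape:
    //     RequestPtr req = new Request(...);                // raw-pointer era
    //     RequestPtr req = std::make_shared<Request>(...);  // smart-pointer era
    //     req->getPaddr();                                  // unchanged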
diff --git a/src/arch/alpha/locked_mem.hh b/src/arch/alpha/locked_mem.hh
index 36a6a0333..a71a24cfb 100644
--- a/src/arch/alpha/locked_mem.hh
+++ b/src/arch/alpha/locked_mem.hh
@@ -85,7 +85,7 @@ handleLockedSnoop(XC *xc, PacketPtr pkt, Addr cacheBlockMask)
 template
 inline void
-handleLockedRead(XC *xc, Request *req)
+handleLockedRead(XC *xc, RequestPtr req)
 {
     xc->setMiscReg(MISCREG_LOCKADDR, req->getPaddr() & ~0xf);
     xc->setMiscReg(MISCREG_LOCKFLAG, true);
@@ -99,7 +99,7 @@ handleLockedSnoopHit(XC *xc)
 template
 inline bool
-handleLockedWrite(XC *xc, Request *req, Addr cacheBlockMask)
+handleLockedWrite(XC *xc, RequestPtr req, Addr cacheBlockMask)
 {
     if (req->isUncacheable()) {
         // Funky Turbolaser mailbox access...don't update
diff --git a/src/arch/arm/locked_mem.hh b/src/arch/arm/locked_mem.hh
index 2fcbc4a92..d33978522 100644
--- a/src/arch/arm/locked_mem.hh
+++ b/src/arch/arm/locked_mem.hh
@@ -91,7 +91,7 @@ handleLockedSnoop(XC *xc, PacketPtr pkt, Addr cacheBlockMask)
 template
 inline void
-handleLockedRead(XC *xc, Request *req)
+handleLockedRead(XC *xc, RequestPtr req)
 {
     xc->setMiscReg(MISCREG_LOCKADDR, req->getPaddr());
     xc->setMiscReg(MISCREG_LOCKFLAG, true);
@@ -111,7 +111,7 @@ handleLockedSnoopHit(XC *xc)
 template
 inline bool
-handleLockedWrite(XC *xc, Request *req, Addr cacheBlockMask)
+handleLockedWrite(XC *xc, RequestPtr req, Addr cacheBlockMask)
 {
     if (req->isSwap())
         return true;
diff --git a/src/arch/generic/locked_mem.hh b/src/arch/generic/locked_mem.hh
index 68a4ff540..f6537995b 100644
--- a/src/arch/generic/locked_mem.hh
+++ b/src/arch/generic/locked_mem.hh
@@ -63,7 +63,7 @@ handleLockedSnoop(XC *xc, PacketPtr pkt, Addr cacheBlockMask)
 template
 inline void
-handleLockedRead(XC *xc, Request *req)
+handleLockedRead(XC *xc, RequestPtr req)
 {
 }
@@ -76,7 +76,7 @@ handleLockedSnoopHit(XC *xc)
 template
 inline bool
-handleLockedWrite(XC *xc, Request *req, Addr cacheBlockMask)
+handleLockedWrite(XC *xc, RequestPtr req, Addr cacheBlockMask)
 {
     return true;
 }
diff --git a/src/arch/hsail/insts/mem.hh b/src/arch/hsail/insts/mem.hh
index 36a6cbc79..68a61feea 100644
--- a/src/arch/hsail/insts/mem.hh
+++ b/src/arch/hsail/insts/mem.hh
@@ -461,9 +461,10 @@ namespace HsailISA
                         *d = gpuDynInst->wavefront()->ldsChunk->
                             read(vaddr);
                     } else {
-                        Request *req = new Request(0, vaddr, sizeof(c0), 0,
-                                       gpuDynInst->computeUnit()->masterId(),
-                                       0, gpuDynInst->wfDynId);
+                        RequestPtr req = new Request(0,
+                                       vaddr, sizeof(c0), 0,
+                                       gpuDynInst->computeUnit()->masterId(),
+                                       0, gpuDynInst->wfDynId);
                         gpuDynInst->setRequestFlags(req);
                         PacketPtr pkt = new Packet(req, MemCmd::ReadReq);
@@ -588,7 +589,7 @@ namespace HsailISA
             gpuDynInst->statusBitVector = VectorMask(1);
             gpuDynInst->useContinuation = false;
             // create request
-            Request *req = new Request(0, 0, 0, 0,
+            RequestPtr req = new Request(0, 0, 0, 0,
                           gpuDynInst->computeUnit()->masterId(),
                           0, gpuDynInst->wfDynId);
             req->setFlags(Request::ACQUIRE);
@@ -1014,7 +1015,7 @@ namespace HsailISA
             gpuDynInst->execContinuation = &GPUStaticInst::execSt;
             gpuDynInst->useContinuation = true;
             // create request
-            Request *req = new Request(0, 0, 0, 0,
+            RequestPtr req = new Request(0, 0, 0, 0,
                           gpuDynInst->computeUnit()->masterId(),
                           0, gpuDynInst->wfDynId);
             req->setFlags(Request::RELEASE);
@@ -1065,7 +1066,7 @@ namespace HsailISA
                         gpuDynInst->wavefront()->ldsChunk->write(vaddr, *d);
                     } else {
-                        Request *req =
+                        RequestPtr req =
                             new Request(0, vaddr, sizeof(c0), 0,
                                         gpuDynInst->computeUnit()->masterId(),
                                         0, gpuDynInst->wfDynId);
@@ -1488,7 +1489,7 @@ namespace HsailISA
             gpuDynInst->useContinuation = true;
             // create request
-            Request *req = new Request(0, 0, 0, 0,
+            RequestPtr req = new Request(0, 0, 0, 0,
                           gpuDynInst->computeUnit()->masterId(),
                           0, gpuDynInst->wfDynId);
             req->setFlags(Request::RELEASE);
@@ -1620,7 +1621,7 @@ namespace HsailISA
                               "type.\n");
                 }
             } else {
-                Request *req =
+                RequestPtr req =
                     new Request(0, vaddr, sizeof(c0), 0,
                                 gpuDynInst->computeUnit()->masterId(),
                                 0, gpuDynInst->wfDynId,
@@ -1675,7 +1676,7 @@ namespace HsailISA
             // the acquire completes
             gpuDynInst->useContinuation = false;
             // create request
-            Request *req = new Request(0, 0, 0, 0,
+            RequestPtr req = new Request(0, 0, 0, 0,
                           gpuDynInst->computeUnit()->masterId(),
                           0, gpuDynInst->wfDynId);
             req->setFlags(Request::ACQUIRE);
diff --git a/src/arch/mips/locked_mem.hh b/src/arch/mips/locked_mem.hh
index 5c1e60aa1..7fa1642a8 100644
--- a/src/arch/mips/locked_mem.hh
+++ b/src/arch/mips/locked_mem.hh
@@ -75,7 +75,7 @@ handleLockedSnoop(XC *xc, PacketPtr pkt, Addr cacheBlockMask)
 template
 inline void
-handleLockedRead(XC *xc, Request *req)
+handleLockedRead(XC *xc, RequestPtr req)
 {
     xc->setMiscReg(MISCREG_LLADDR, req->getPaddr() & ~0xf);
     xc->setMiscReg(MISCREG_LLFLAG, true);
@@ -92,7 +92,7 @@ handleLockedSnoopHit(XC *xc)
 template
 inline bool
-handleLockedWrite(XC *xc, Request *req, Addr cacheBlockMask)
+handleLockedWrite(XC *xc, RequestPtr req, Addr cacheBlockMask)
 {
     if (req->isUncacheable()) {
         // Funky Turbolaser mailbox access...don't update
diff --git a/src/arch/riscv/locked_mem.hh b/src/arch/riscv/locked_mem.hh
index 61fbe0de1..1583258a8 100644
--- a/src/arch/riscv/locked_mem.hh
+++ b/src/arch/riscv/locked_mem.hh
@@ -82,7 +82,7 @@ handleLockedSnoop(XC *xc, PacketPtr pkt, Addr cacheBlockMask)
 template
 inline void
-handleLockedRead(XC *xc, Request *req)
+handleLockedRead(XC *xc, RequestPtr req)
 {
     locked_addrs.push(req->getPaddr() & ~0xF);
     DPRINTF(LLSC, "[cid:%d]: Reserved address %x.\n",
@@ -94,7 +94,7 @@ handleLockedSnoopHit(XC *xc) {}
 template
 inline bool
-handleLockedWrite(XC *xc, Request *req, Addr cacheBlockMask)
+handleLockedWrite(XC *xc, RequestPtr req, Addr cacheBlockMask)
 {
     // Normally RISC-V uses zero to indicate success and nonzero to indicate
     // failure (right now only 1 is reserved), but in gem5 zero indicates
diff --git a/src/cpu/base_dyn_inst.hh b/src/cpu/base_dyn_inst.hh
index ae408e3fb..e94f500ea 100644
--- a/src/cpu/base_dyn_inst.hh
+++ b/src/cpu/base_dyn_inst.hh
@@ -893,9 +893,9 @@ BaseDynInst::initiateMemRead(Addr addr, unsigned size,
                              Request::Flags flags)
 {
     instFlags[ReqMade] = true;
-    Request *req = NULL;
-    Request *sreqLow = NULL;
-    Request *sreqHigh = NULL;
+    RequestPtr req = NULL;
+    RequestPtr sreqLow = NULL;
+    RequestPtr sreqHigh = NULL;
     if (instFlags[ReqMade] && translationStarted()) {
         req = savedReq;
@@ -949,9 +949,9 @@ BaseDynInst::writeMem(uint8_t *data, unsigned size, Addr addr,
         traceData->setMem(addr, size, flags);
     instFlags[ReqMade] = true;
-    Request *req = NULL;
-    Request *sreqLow = NULL;
-    Request *sreqHigh = NULL;
+    RequestPtr req = NULL;
+    RequestPtr sreqLow = NULL;
+    RequestPtr sreqHigh = NULL;
     if (instFlags[ReqMade] && translationStarted()) {
         req = savedReq;
diff --git a/src/cpu/checker/cpu.cc b/src/cpu/checker/cpu.cc
index 07b655399..1533d7405 100644
--- a/src/cpu/checker/cpu.cc
+++ b/src/cpu/checker/cpu.cc
@@ -337,7 +337,7 @@ CheckerCPU::dbg_vtophys(Addr addr)
  * Checks if the flags set by the Checker and Checkee match.
  */
 bool
-CheckerCPU::checkFlags(Request *unverified_req, Addr vAddr,
+CheckerCPU::checkFlags(RequestPtr unverified_req, Addr vAddr,
                        Addr pAddr, int flags)
 {
     Addr unverifiedVAddr = unverified_req->getVaddr();
diff --git a/src/cpu/checker/cpu.hh b/src/cpu/checker/cpu.hh
index f79aa0864..101a16be6 100644
--- a/src/cpu/checker/cpu.hh
+++ b/src/cpu/checker/cpu.hh
@@ -531,7 +531,7 @@ class CheckerCPU : public BaseCPU, public ExecContext
         dumpAndExit();
     }
-    bool checkFlags(Request *unverified_req, Addr vAddr,
+    bool checkFlags(RequestPtr unverified_req, Addr vAddr,
                     Addr pAddr, int flags);
     void dumpAndExit();
@@ -540,7 +540,7 @@ class CheckerCPU : public BaseCPU, public ExecContext
     SimpleThread *threadBase() { return thread; }
     InstResult unverifiedResult;
-    Request *unverifiedReq;
+    RequestPtr unverifiedReq;
     uint8_t *unverifiedMemData;
     bool changedPC;
diff --git a/src/cpu/minor/lsq.cc b/src/cpu/minor/lsq.cc
index cb0611be3..822df0294 100644
--- a/src/cpu/minor/lsq.cc
+++ b/src/cpu/minor/lsq.cc
@@ -423,7 +423,7 @@ LSQ::SplitDataRequest::makeFragmentRequests()
             }
         }
-        Request *fragment = new Request();
+        RequestPtr fragment = new Request();
         fragment->setContext(request.contextId());
         fragment->setVirt(0 /* asid */,
@@ -452,7 +452,7 @@ LSQ::SplitDataRequest::makeFragmentPackets()
     for (unsigned int fragment_index = 0; fragment_index < numFragments;
          fragment_index++)
     {
-        Request *fragment = fragmentRequests[fragment_index];
+        RequestPtr fragment = fragmentRequests[fragment_index];
         DPRINTFS(MinorMem, (&port), "Making packet %d for request: %s"
             " (%d, 0x%x)\n",
diff --git a/src/cpu/minor/lsq.hh b/src/cpu/minor/lsq.hh
index d4973f5a3..9ee40f5d3 100644
--- a/src/cpu/minor/lsq.hh
+++ b/src/cpu/minor/lsq.hh
@@ -399,7 +399,7 @@ class LSQ : public Named
         /** Fragment Requests corresponding to the address ranges of
          *  each fragment */
-        std::vector<Request *> fragmentRequests;
+        std::vector<RequestPtr> fragmentRequests;
         /** Packets matching fragmentRequests to issue fragments to memory */
         std::vector fragmentPackets;
diff --git a/src/cpu/o3/lsq_unit.hh b/src/cpu/o3/lsq_unit.hh
index a2813b3dc..a7a095c82 100644
--- a/src/cpu/o3/lsq_unit.hh
+++ b/src/cpu/o3/lsq_unit.hh
@@ -510,11 +510,11 @@ class LSQUnit {
   public:
     /** Executes the load at the given index. */
-    Fault read(Request *req, Request *sreqLow, Request *sreqHigh,
+    Fault read(RequestPtr req, RequestPtr sreqLow, RequestPtr sreqHigh,
                int load_idx);
     /** Executes the store at the given index. */
-    Fault write(Request *req, Request *sreqLow, Request *sreqHigh,
+    Fault write(RequestPtr req, RequestPtr sreqLow, RequestPtr sreqHigh,
                 uint8_t *data, int store_idx);
     /** Returns the index of the head load instruction. */
@@ -549,7 +549,7 @@ class LSQUnit {
 template
 Fault
-LSQUnit::read(Request *req, Request *sreqLow, Request *sreqHigh,
+LSQUnit::read(RequestPtr req, RequestPtr sreqLow, RequestPtr sreqHigh,
               int load_idx)
 {
     DynInstPtr load_inst = loadQueue[load_idx];
@@ -883,7 +883,7 @@ LSQUnit::read(Request *req, Request *sreqLow, Request *sreqHigh,
 template
 Fault
-LSQUnit::write(Request *req, Request *sreqLow, Request *sreqHigh,
+LSQUnit::write(RequestPtr req, RequestPtr sreqLow, RequestPtr sreqHigh,
               uint8_t *data, int store_idx)
 {
     assert(storeQueue[store_idx].inst);
diff --git a/src/cpu/o3/lsq_unit_impl.hh b/src/cpu/o3/lsq_unit_impl.hh
index ca6a7f399..e8e2c1853 100644
--- a/src/cpu/o3/lsq_unit_impl.hh
+++ b/src/cpu/o3/lsq_unit_impl.hh
@@ -831,7 +831,7 @@ LSQUnit::writebackStores()
         DynInstPtr inst = storeQueue[storeWBIdx].inst;
-        Request *req = storeQueue[storeWBIdx].req;
+        RequestPtr req = storeQueue[storeWBIdx].req;
         RequestPtr sreqLow = storeQueue[storeWBIdx].sreqLow;
         RequestPtr sreqHigh = storeQueue[storeWBIdx].sreqHigh;
diff --git a/src/cpu/simple/atomic.cc b/src/cpu/simple/atomic.cc
index 7a368ab32..0e7c59f6a 100644
--- a/src/cpu/simple/atomic.cc
+++ b/src/cpu/simple/atomic.cc
@@ -331,7 +331,7 @@ AtomicSimpleCPU::readMem(Addr addr, uint8_t * data, unsigned size,
     SimpleThread* thread = t_info.thread;
     // use the CPU's statically allocated read request and packet objects
-    Request *req = &data_read_req;
+    RequestPtr req = &data_read_req;
     if (traceData)
         traceData->setMem(addr, size, flags);
@@ -435,7 +435,7 @@ AtomicSimpleCPU::writeMem(uint8_t *data, unsigned size, Addr addr,
     }
     // use the CPU's statically allocated write request and packet objects
-    Request *req = &data_write_req;
+    RequestPtr req = &data_write_req;
     if (traceData)
         traceData->setMem(addr, size, flags);
diff --git a/src/cpu/simple/base.cc b/src/cpu/simple/base.cc
index 36a2cb06c..025c7a3ea 100644
--- a/src/cpu/simple/base.cc
+++ b/src/cpu/simple/base.cc
@@ -468,7 +468,7 @@ BaseSimpleCPU::checkForInterrupts()
 void
-BaseSimpleCPU::setupFetchRequest(Request *req)
+BaseSimpleCPU::setupFetchRequest(RequestPtr req)
 {
     SimpleExecContext &t_info = *threadInfo[curThread];
     SimpleThread* thread = t_info.thread;
diff --git a/src/cpu/simple/base.hh b/src/cpu/simple/base.hh
index 15ab2aba4..64fa58d92 100644
--- a/src/cpu/simple/base.hh
+++ b/src/cpu/simple/base.hh
@@ -129,7 +129,7 @@ class BaseSimpleCPU : public BaseCPU
     void checkForInterrupts();
-    void setupFetchRequest(Request *req);
+    void setupFetchRequest(RequestPtr req);
     void preExecute();
     void postExecute();
     void advancePC(const Fault &fault);
diff --git a/src/cpu/simple/timing.cc b/src/cpu/simple/timing.cc
index 083de2b40..657c2976f 100644
--- a/src/cpu/simple/timing.cc
+++ b/src/cpu/simple/timing.cc
@@ -620,7 +620,7 @@ TimingSimpleCPU::fetch()
     if (needToFetch) {
         _status = BaseSimpleCPU::Running;
-        Request *ifetch_req = new Request();
+        RequestPtr ifetch_req = new Request();
         ifetch_req->taskId(taskId());
         ifetch_req->setContext(thread->contextId());
         setupFetchRequest(ifetch_req);
diff --git a/src/cpu/testers/directedtest/InvalidateGenerator.cc b/src/cpu/testers/directedtest/InvalidateGenerator.cc
index c5c48f1ad..3319e8400 100644
--- a/src/cpu/testers/directedtest/InvalidateGenerator.cc
+++ b/src/cpu/testers/directedtest/InvalidateGenerator.cc
@@ -60,7 +60,7 @@ InvalidateGenerator::initiate()
     Packet::Command cmd;
     // For simplicity, requests are assumed to be 1 byte-sized
-    Request *req = new Request(m_address, 1, flags, masterId);
+    RequestPtr req = new Request(m_address, 1, flags, masterId);
     //
     // Based on the current state, issue a load or a store
diff --git a/src/cpu/testers/directedtest/SeriesRequestGenerator.cc b/src/cpu/testers/directedtest/SeriesRequestGenerator.cc
index 386a49893..17ae04cdf 100644
--- a/src/cpu/testers/directedtest/SeriesRequestGenerator.cc
+++ b/src/cpu/testers/directedtest/SeriesRequestGenerator.cc
@@ -60,7 +60,7 @@ SeriesRequestGenerator::initiate()
     Request::Flags flags;
     // For simplicity, requests are assumed to be 1 byte-sized
-    Request *req = new Request(m_address, 1, flags, masterId);
+    RequestPtr req = new Request(m_address, 1, flags, masterId);
     Packet::Command cmd;
     bool do_write = (random_mt.random(0, 100) < m_percent_writes);
diff --git a/src/cpu/testers/garnet_synthetic_traffic/GarnetSyntheticTraffic.cc b/src/cpu/testers/garnet_synthetic_traffic/GarnetSyntheticTraffic.cc
index 56edd842b..be1921aad 100644
--- a/src/cpu/testers/garnet_synthetic_traffic/GarnetSyntheticTraffic.cc
+++ b/src/cpu/testers/garnet_synthetic_traffic/GarnetSyntheticTraffic.cc
@@ -129,7 +129,7 @@ GarnetSyntheticTraffic::init()
 void
 GarnetSyntheticTraffic::completeRequest(PacketPtr pkt)
 {
-    Request *req = pkt->req;
+    RequestPtr req = pkt->req;
     DPRINTF(GarnetSyntheticTraffic,
             "Completed injection of %s packet for address %x\n",
@@ -279,7 +279,7 @@ GarnetSyntheticTraffic::generatePkt()
     //
     MemCmd::Command requestType;
-    Request *req = nullptr;
+    RequestPtr req = nullptr;
     Request::Flags flags;
     // Inject in specific Vnet
diff --git a/src/cpu/testers/memtest/memtest.cc b/src/cpu/testers/memtest/memtest.cc
index ccd978c94..89b4d1159 100644
--- a/src/cpu/testers/memtest/memtest.cc
+++ b/src/cpu/testers/memtest/memtest.cc
@@ -136,7 +136,7 @@ MemTest::getMasterPort(const std::string &if_name, PortID idx)
 void
 MemTest::completeRequest(PacketPtr pkt, bool functional)
 {
-    Request *req = pkt->req;
+    RequestPtr req = pkt->req;
     assert(req->getSize() == 1);
     // this address is no longer outstanding
@@ -246,7 +246,7 @@ MemTest::tick()
     bool do_functional = (random_mt.random(0, 100) < percentFunctional) &&
         !uncacheable;
-    Request *req = new Request(paddr, 1, flags, masterId);
+    RequestPtr req = new Request(paddr, 1, flags, masterId);
     req->setContext(id);
     outstandingAddrs.insert(paddr);
diff --git a/src/cpu/testers/rubytest/Check.cc b/src/cpu/testers/rubytest/Check.cc
index 2ce79e72d..776d711a2 100644
--- a/src/cpu/testers/rubytest/Check.cc
+++ b/src/cpu/testers/rubytest/Check.cc
@@ -107,7 +107,7 @@ Check::initiatePrefetch()
     }
     // Prefetches are assumed to be 0 sized
-    Request *req = new Request(m_address, 0, flags,
+    RequestPtr req = new Request(m_address, 0, flags,
             m_tester_ptr->masterId(), curTick(), m_pc);
     req->setContext(index);
@@ -146,7 +146,7 @@ Check::initiateFlush()
     Request::Flags flags;
-    Request *req = new Request(m_address, CHECK_SIZE, flags,
+    RequestPtr req = new Request(m_address, CHECK_SIZE, flags,
             m_tester_ptr->masterId(), curTick(), m_pc);
     Packet::Command cmd;
@@ -179,7 +179,7 @@ Check::initiateAction()
     Addr writeAddr(m_address + m_store_count);
     // Stores are assumed to be 1 byte-sized
-    Request *req = new Request(writeAddr, 1, flags, m_tester_ptr->masterId(),
+    RequestPtr req = new Request(writeAddr, 1, flags, m_tester_ptr->masterId(),
                                curTick(), m_pc);
     req->setContext(index);
@@ -244,7 +244,7 @@ Check::initiateCheck()
     }
     // Checks are sized depending on the number of bytes written
-    Request *req = new Request(m_address, CHECK_SIZE, flags,
+    RequestPtr req = new Request(m_address, CHECK_SIZE, flags,
                                m_tester_ptr->masterId(), curTick(), m_pc);
     req->setContext(index);
diff --git a/src/cpu/testers/traffic_gen/base_gen.cc b/src/cpu/testers/traffic_gen/base_gen.cc
index cd568f151..b5b4f5817 100644
--- a/src/cpu/testers/traffic_gen/base_gen.cc
+++ b/src/cpu/testers/traffic_gen/base_gen.cc
@@ -59,7 +59,7 @@ BaseGen::getPacket(Addr addr, unsigned size, const MemCmd& cmd,
                    Request::FlagsType flags)
 {
     // Create new request
-    Request *req = new Request(addr, size, flags, masterID);
+    RequestPtr req = new Request(addr, size, flags, masterID);
     // Dummy PC to have PC-based prefetchers latch on; get entropy into higher
     // bits
     req->setPC(((Addr)masterID) << 2);
diff --git a/src/gpu-compute/compute_unit.cc b/src/gpu-compute/compute_unit.cc
index aa4f0a322..042347cf2 100644
--- a/src/gpu-compute/compute_unit.cc
+++ b/src/gpu-compute/compute_unit.cc
@@ -1178,7 +1178,7 @@ ComputeUnit::DTLBPort::recvTimingResp(PacketPtr pkt)
                 if (!stride)
                     break;
-                Request *prefetch_req = new Request(0, vaddr + stride * pf *
+                RequestPtr prefetch_req = new Request(0, vaddr + stride * pf *
                                                     TheISA::PageBytes,
                                                     sizeof(uint8_t), 0,
                                                     computeUnit->masterId(),
@@ -1801,7 +1801,7 @@ ComputeUnit::sendToLds(GPUDynInstPtr gpuDynInst)
 {
     // this is just a request to carry the GPUDynInstPtr
     // back and forth
-    Request *newRequest = new Request();
+    RequestPtr newRequest = new Request();
     newRequest->setPaddr(0x0);
     // ReadReq is not evaluted by the LDS but the Packet ctor requires this
diff --git a/src/gpu-compute/fetch_unit.cc b/src/gpu-compute/fetch_unit.cc
index c989d6748..36ef1e1e8 100644
--- a/src/gpu-compute/fetch_unit.cc
+++ b/src/gpu-compute/fetch_unit.cc
@@ -145,7 +145,7 @@ FetchUnit::initiateFetch(Wavefront *wavefront)
     }
     // set up virtual request
-    Request *req = new Request(0, vaddr, size, Request::INST_FETCH,
+    RequestPtr req = new Request(0, vaddr, size, Request::INST_FETCH,
                                computeUnit->masterId(), 0, 0, 0);
     PacketPtr pkt = new Packet(req, MemCmd::ReadReq);
diff --git a/src/gpu-compute/gpu_dyn_inst.hh b/src/gpu-compute/gpu_dyn_inst.hh
index 8d259cba0..4b1c9fde9 100644
--- a/src/gpu-compute/gpu_dyn_inst.hh
+++ b/src/gpu-compute/gpu_dyn_inst.hh
@@ -382,7 +382,7 @@ class GPUDynInst : public GPUExecContext
     }
     void
-    setRequestFlags(Request *req, bool setMemOrder=true)
+    setRequestFlags(RequestPtr req, bool setMemOrder=true)
     {
         // currently these are the easy scopes to deduce
         if (isPrivateSeg()) {
diff --git a/src/gpu-compute/shader.cc b/src/gpu-compute/shader.cc
index d3453c2f9..8e7ba9ad5 100644
--- a/src/gpu-compute/shader.cc
+++ b/src/gpu-compute/shader.cc
@@ -338,7 +338,7 @@ Shader::AccessMem(uint64_t address, void *ptr, uint32_t size, int cu_id,
     for (ChunkGenerator gen(address, size, cuList.at(cu_id)->cacheLineSize());
          !gen.done(); gen.next()) {
-        Request *req = new Request(0, gen.addr(), gen.size(), 0,
+        RequestPtr req = new Request(0, gen.addr(), gen.size(), 0,
                                    cuList[0]->masterId(), 0, 0, 0);
         doFunctionalAccess(req, cmd, data_buf, suppress_func_errors, cu_id);
diff --git a/src/mem/abstract_mem.cc b/src/mem/abstract_mem.cc
index b41c82b0d..1d112dc06 100644
--- a/src/mem/abstract_mem.cc
+++ b/src/mem/abstract_mem.cc
@@ -199,7 +199,7 @@ AbstractMemory::getAddrRange() const
 void
 AbstractMemory::trackLoadLocked(PacketPtr pkt)
 {
-    Request *req = pkt->req;
+    RequestPtr req = pkt->req;
     Addr paddr = LockedAddr::mask(req->getPaddr());
     // first we check if we already have a locked addr for this
@@ -230,7 +230,7 @@ AbstractMemory::trackLoadLocked(PacketPtr pkt)
 bool
 AbstractMemory::checkLockedAddrList(PacketPtr pkt)
 {
-    Request *req = pkt->req;
+    RequestPtr req = pkt->req;
     Addr paddr = LockedAddr::mask(req->getPaddr());
     bool isLLSC = pkt->isLLSC();
diff --git a/src/mem/abstract_mem.hh b/src/mem/abstract_mem.hh
index b57f73b4a..29c8c3f3e 100644
--- a/src/mem/abstract_mem.hh
+++ b/src/mem/abstract_mem.hh
@@ -79,12 +79,12 @@ class LockedAddr {
     static Addr mask(Addr paddr) { return (paddr & ~Addr_Mask); }
     // check for matching execution context
-    bool matchesContext(Request *req) const
+    bool matchesContext(RequestPtr req) const
     {
         return (contextId == req->contextId());
     }
-    LockedAddr(Request *req) : addr(mask(req->getPaddr())),
+    LockedAddr(RequestPtr req) : addr(mask(req->getPaddr())),
                                contextId(req->contextId())
     {}
@@ -140,7 +140,7 @@ class AbstractMemory : public MemObject
     // this method must be called on *all* stores since even
     // non-conditional stores must clear any matching lock addresses.
     bool writeOK(PacketPtr pkt) {
-        Request *req = pkt->req;
+        RequestPtr req = pkt->req;
         if (lockedAddrList.empty()) {
             // no locked addrs: nothing to check, store_conditional fails
             bool isLLSC = pkt->isLLSC();
diff --git a/src/mem/cache/base.cc b/src/mem/cache/base.cc
index a8f29e370..f753cc315 100644
--- a/src/mem/cache/base.cc
+++ b/src/mem/cache/base.cc
@@ -1278,7 +1278,7 @@ BaseCache::writebackBlk(CacheBlk *blk)
     writebacks[Request::wbMasterId]++;
-    Request *req = new Request(regenerateBlkAddr(blk), blkSize, 0,
+    RequestPtr req = new Request(regenerateBlkAddr(blk), blkSize, 0,
                                Request::wbMasterId);
     if (blk->isSecure())
         req->setFlags(Request::SECURE);
@@ -1313,7 +1313,7 @@ BaseCache::writebackBlk(CacheBlk *blk)
 PacketPtr
 BaseCache::writecleanBlk(CacheBlk *blk, Request::Flags dest, PacketId id)
 {
-    Request *req = new Request(regenerateBlkAddr(blk), blkSize, 0,
+    RequestPtr req = new Request(regenerateBlkAddr(blk), blkSize, 0,
                                Request::wbMasterId);
     if (blk->isSecure()) {
         req->setFlags(Request::SECURE);
diff --git a/src/mem/cache/cache.cc b/src/mem/cache/cache.cc
index 34f3dc5b9..86c1640e5 100644
--- a/src/mem/cache/cache.cc
+++ b/src/mem/cache/cache.cc
@@ -872,7 +872,8 @@ Cache::cleanEvictBlk(CacheBlk *blk)
     assert(!writebackClean);
     assert(blk && blk->isValid() && !blk->isDirty());
     // Creating a zero sized write, a message to the snoop filter
-    Request *req =
+
+    RequestPtr req =
         new Request(regenerateBlkAddr(blk), blkSize, 0, Request::wbMasterId);
     if (blk->isSecure())
diff --git a/src/mem/cache/prefetch/queued.cc b/src/mem/cache/prefetch/queued.cc
index bf3a384ea..00d62f17f 100644
--- a/src/mem/cache/prefetch/queued.cc
+++ b/src/mem/cache/prefetch/queued.cc
@@ -223,7 +223,7 @@ QueuedPrefetcher::insert(AddrPriority &pf_info, bool is_secure)
     }
     /* Create a prefetch memory request */
-    Request *pf_req =
+    RequestPtr pf_req =
         new Request(pf_info.first, blkSize, 0, masterId);
     if (is_secure) {
--
cgit v1.2.3
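One practical consequence of the planned switch, sketched below under the same
assumptions (std::shared_ptr as the eventual alias; the helper name is
hypothetical, not from gem5): the allocation sites touched above currently pair
new Request(...) with a manual delete somewhere downstream, whereas a
smart-pointer alias drops that obligation entirely.

    #include <memory>

    struct Request {                             // stand-in with a constructor
        Request(long /* addr */, int /* size */) {}   // shaped like the call sites above
    };

    using RequestPtr = std::shared_ptr<Request>;      // assumed future alias

    // Hypothetical helper: with a shared-pointer alias there is no matching
    // delete; the last owner of the returned RequestPtr frees the Request.
    RequestPtr makeOneByteRequest(long addr)
    {
        return std::make_shared<Request>(addr, 1);
    }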