author | Tony Gutierrez <anthony.gutierrez@amd.com> | 2016-10-26 22:47:11 -0400
---|---|---
committer | Tony Gutierrez <anthony.gutierrez@amd.com> | 2016-10-26 22:47:11 -0400
commit | 7ac38849abaf6aeccf39137bc8acb9e44d192e82 (patch) |
tree | 7658e9d741604b310f871756cf051558b30e115e /src/gpu-compute/global_memory_pipeline.cc |
parent | e1ad8035a379cea98ecef92e78d2894f60b2eedd (diff) |
download | gem5-7ac38849abaf6aeccf39137bc8acb9e44d192e82.tar.xz |
gpu-compute: remove inst enums and use bit flag for attributes
this patch removes the GPUStaticInst enums that were defined in GPU.py.
instead, a simple set of attribute flags that can be set in the base
instruction class is used. this will help unify the attributes of HSAIL
and machine ISA instructions within the model itself.

because the static instruction now carries the attributes, a GPUDynInst
must carry a pointer to a valid GPUStaticInst. a new static kernel launch
instruction is therefore added, which carries the attributes needed to
perform the kernel launch.
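For context, the sketch below illustrates the kind of flag-based attribute scheme the commit message describes: each attribute is one bit in the static instruction, queried through small predicates. The class and flag names here are illustrative assumptions, not the exact interface this patch adds to gem5.

```cpp
#include <bitset>

// Hypothetical sketch of attribute flags in a base static-instruction
// class; names are illustrative, not the real gem5 API.
class GPUStaticInstSketch
{
  public:
    // one bit per instruction attribute
    enum AttrFlag { Load, Store, AtomicReturn, AtomicNoReturn, NumAttrFlags };

    void setFlag(AttrFlag f) { attrs.set(f); }

    bool isLoad() const { return attrs[Load]; }
    bool isStore() const { return attrs[Store]; }
    bool isAtomicRet() const { return attrs[AtomicReturn]; }
    bool isAtomicNoRet() const { return attrs[AtomicNoReturn]; }
    // an op is atomic whether or not it returns the old value
    bool isAtomic() const { return isAtomicRet() || isAtomicNoRet(); }

  private:
    std::bitset<NumAttrFlags> attrs;
};
```

Because both HSAIL and machine-ISA instruction classes can derive from the same base and set the same bits, the timing model can query one interface instead of switching on per-ISA enums.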
Diffstat (limited to 'src/gpu-compute/global_memory_pipeline.cc')
-rw-r--r-- | src/gpu-compute/global_memory_pipeline.cc | 23 |
1 file changed, 9 insertions, 14 deletions
```diff
diff --git a/src/gpu-compute/global_memory_pipeline.cc b/src/gpu-compute/global_memory_pipeline.cc
index 102905ec8..ab3e8c47e 100644
--- a/src/gpu-compute/global_memory_pipeline.cc
+++ b/src/gpu-compute/global_memory_pipeline.cc
@@ -67,7 +67,7 @@ GlobalMemPipeline::exec()
     bool accessVrf = true;
     // check the VRF to see if the operands of a load (or load component
     // of an atomic) are accessible
-    if ((m) && (m->m_op==Enums::MO_LD || MO_A(m->m_op))) {
+    if ((m) && (m->isLoad() || m->isAtomicRet())) {
         Wavefront *w = computeUnit->wfList[m->simdId][m->wfSlotId];
 
         accessVrf =
@@ -127,10 +127,7 @@ GlobalMemPipeline::exec()
     // memory packets to DTLB
     if (!gmIssuedRequests.empty()) {
         GPUDynInstPtr mp = gmIssuedRequests.front();
-        if (mp->m_op == Enums::MO_LD ||
-            (mp->m_op >= Enums::MO_AAND && mp->m_op <= Enums::MO_AMIN) ||
-            (mp->m_op >= Enums::MO_ANRAND && mp->m_op <= Enums::MO_ANRMIN)) {
-
+        if (mp->isLoad() || mp->isAtomic()) {
             if (inflightLoads >= gmQueueSize) {
                 return;
             } else {
@@ -139,7 +136,7 @@ GlobalMemPipeline::exec()
         } else {
             if (inflightStores >= gmQueueSize) {
                 return;
-            } else if (mp->m_op == Enums::MO_ST) {
+            } else if (mp->isStore()) {
                 ++inflightStores;
             }
         }
@@ -147,9 +144,8 @@ GlobalMemPipeline::exec()
         mp->initiateAcc(mp);
         gmIssuedRequests.pop();
 
-        DPRINTF(GPUMem, "CU%d: WF[%d][%d] Popping 0 mem_op = %s\n",
-                computeUnit->cu_id, mp->simdId, mp->wfSlotId,
-                Enums::MemOpTypeStrings[mp->m_op]);
+        DPRINTF(GPUMem, "CU%d: WF[%d][%d] Popping 0 mem_op = \n",
+                computeUnit->cu_id, mp->simdId, mp->wfSlotId);
     }
 }
 
@@ -160,12 +156,12 @@ GlobalMemPipeline::doGmReturn(GPUDynInstPtr m)
     Wavefront *w = computeUnit->wfList[m->simdId][m->wfSlotId];
 
     // Return data to registers
-    if (m->m_op == Enums::MO_LD || MO_A(m->m_op) || MO_ANR(m->m_op)) {
+    if (m->isLoad() || m->isAtomic()) {
         gmReturnedLoads.pop();
         assert(inflightLoads > 0);
         --inflightLoads;
 
-        if (m->m_op == Enums::MO_LD || MO_A(m->m_op)) {
+        if (m->isLoad() || m->isAtomicRet()) {
             std::vector<uint32_t> regVec;
             // iterate over number of destination register operands since
             // this is a load or atomic operation
@@ -214,13 +210,12 @@ GlobalMemPipeline::doGmReturn(GPUDynInstPtr m)
     // Decrement outstanding register count
     computeUnit->shader->ScheduleAdd(&w->outstandingReqs, m->time, -1);
 
-    if (m->m_op == Enums::MO_ST || MO_A(m->m_op) || MO_ANR(m->m_op) ||
-        MO_H(m->m_op)) {
+    if (m->isStore() || m->isAtomic()) {
         computeUnit->shader->ScheduleAdd(&w->outstandingReqsWrGm, m->time, -1);
     }
 
-    if (m->m_op == Enums::MO_LD || MO_A(m->m_op) || MO_ANR(m->m_op)) {
+    if (m->isLoad() || m->isAtomic()) {
         computeUnit->shader->ScheduleAdd(&w->outstandingReqsRdGm, m->time, -1);
     }
```
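Each hunk follows the same pattern: explicit enum comparisons and enum range checks (e.g. `MO_AAND`..`MO_AMIN` for atomics that return the old value, `MO_ANRAND`..`MO_ANRMIN` for those that don't) become predicate calls on the dynamic instruction. Continuing the earlier sketch, the dynamic-instruction side might simply forward those queries to the static instruction it wraps, which per the commit message must now always be valid; again, the names here are hypothetical.

```cpp
#include <memory>

// Hypothetical sketch: the dynamic instruction drops its m_op enum and
// forwards attribute queries to its static instruction.
class GPUDynInstSketch
{
  public:
    explicit GPUDynInstSketch(std::shared_ptr<GPUStaticInstSketch> si)
        : staticInst(std::move(si)) {}

    bool isLoad() const { return staticInst->isLoad(); }
    bool isStore() const { return staticInst->isStore(); }
    bool isAtomic() const { return staticInst->isAtomic(); }
    bool isAtomicRet() const { return staticInst->isAtomicRet(); }

  private:
    // the commit message says this must point at a valid GPUStaticInst
    std::shared_ptr<GPUStaticInstSketch> staticInst;
};
```

One consequence visible in the diff: the `DPRINTF` that used to print `Enums::MemOpTypeStrings[mp->m_op]` loses its operand, since the string table goes away with the enum.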