author     Gabe Black <gabeblack@google.com>    2018-01-08 23:37:57 -0800
committer  Gabe Black <gabeblack@google.com>    2018-01-23 20:39:17 +0000
commit     a4e722725c90677d555675eca616c9d0990393f1 (patch)
tree       ed9a8268f73742fd4b4acbaf8e8434b67dc5fed7 /src/gpu-compute
parent     db8c55dede65e07cb9ea8e95c48badd2ea24462f (diff)
download   gem5-a4e722725c90677d555675eca616c9d0990393f1.tar.xz
arch, mem: Abstract the data stored in the SE page tables.
Rather than store the actual TLB entry that corresponds to a mapping, we can just store some abstracted information (address, a few flags) and then let the caller turn that into the appropriate entry. There could potentially be some small amount of overhead from creating entries vs. storing them and just installing them, but it's likely pretty minimal since that only happens on a TLB miss (ideally rare), and, if it is problematic, there could be some preallocated TLB entries which are just minimally filled in as necessary.

This has the nice effect of finally making the page tables ISA agnostic.

Change-Id: I11e630f60682f0a0029b0683eb8ff0135fbd4317
Reviewed-on: https://gem5-review.googlesource.com/7350
Reviewed-by: Gabe Black <gabeblack@google.com>
Maintainer: Gabe Black <gabeblack@google.com>
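To illustrate the idea outside the diff, below is a minimal C++ sketch of an ISA-agnostic page table whose lookup returns only an abstracted record, leaving the caller to build its own TLB entry. The names PageTableEntry, X86TlbEntry, EmulationPageTableSketch, and buildTlbEntry are illustrative, not gem5's; in the actual change the abstracted record is EmulationPageTable::Entry and the diff reads its paddr field.

    #include <cstdint>
    #include <unordered_map>

    using Addr = uint64_t;

    // Hypothetical abstracted record: a physical address plus a few flags.
    struct PageTableEntry {
        Addr paddr;
        bool writable;
        bool uncacheable;
    };

    // Hypothetical ISA-specific entry that a TLB would actually install.
    struct X86TlbEntry {
        Addr vaddr;
        Addr paddr;
        bool writable;
        bool valid;
    };

    class EmulationPageTableSketch {
        // Keyed by page-aligned virtual address; assumes 4 KiB pages.
        std::unordered_map<Addr, PageTableEntry> table;
        static constexpr Addr pageMask = ~Addr(0xfff);

      public:
        void map(Addr vaddr, PageTableEntry entry) {
            table[vaddr & pageMask] = entry;
        }

        // Lookup returns only the ISA-agnostic record; no ISA types here.
        const PageTableEntry *lookup(Addr vaddr) const {
            auto it = table.find(vaddr & pageMask);
            return it == table.end() ? nullptr : &it->second;
        }
    };

    // The caller (e.g. a TLB miss handler) converts the record into the
    // ISA-specific entry it wants to install.
    inline X86TlbEntry buildTlbEntry(Addr vaddr, const PageTableEntry &pte) {
        return {vaddr & ~Addr(0xfff), pte.paddr, pte.writable, true};
    }

A miss handler built on this sketch would do roughly: if (auto *pte = pageTable.lookup(vaddr)) install(buildTlbEntry(vaddr, *pte)); which mirrors how the hunks below replace the stored TlbEntry with a const EmulationPageTable::Entry pointer and construct a GpuTlbEntry from pte->paddr at the call site.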
Diffstat (limited to 'src/gpu-compute')
-rw-r--r--  src/gpu-compute/gpu_tlb.cc  |  50
1 file changed, 26 insertions(+), 24 deletions(-)
diff --git a/src/gpu-compute/gpu_tlb.cc b/src/gpu-compute/gpu_tlb.cc
index 9cbf3e8fa..05d22dad6 100644
--- a/src/gpu-compute/gpu_tlb.cc
+++ b/src/gpu-compute/gpu_tlb.cc
@@ -808,18 +808,19 @@ namespace X86ISA
"at pc %#x.\n", vaddr, tc->instAddr());
Process *p = tc->getProcessPtr();
- TlbEntry *newEntry = p->pTable->lookup(vaddr);
+ const EmulationPageTable::Entry *pte =
+ p->pTable->lookup(vaddr);
- if (!newEntry && mode != BaseTLB::Execute) {
+ if (!pte && mode != BaseTLB::Execute) {
// penalize a "page fault" more
if (timing)
latency += missLatency2;
if (p->fixupStackFault(vaddr))
- newEntry = p->pTable->lookup(vaddr);
+ pte = p->pTable->lookup(vaddr);
}
- if (!newEntry) {
+ if (!pte) {
return std::make_shared<PageFault>(vaddr, true,
mode, true,
false);
@@ -827,11 +828,11 @@ namespace X86ISA
Addr alignedVaddr = p->pTable->pageAlign(vaddr);
DPRINTF(GPUTLB, "Mapping %#x to %#x\n",
- alignedVaddr, newEntry->pageStart());
+ alignedVaddr, pte->paddr);
- GpuTlbEntry gpuEntry;
- *(TlbEntry *)&gpuEntry = *newEntry;
- gpuEntry.valid = true;
+ GpuTlbEntry gpuEntry(
+ p->pTable->pid(), alignedVaddr,
+ pte->paddr, true);
entry = insert(alignedVaddr, gpuEntry);
}
@@ -1335,18 +1336,18 @@ namespace X86ISA
Addr alignedVaddr = p->pTable->pageAlign(vaddr);
assert(alignedVaddr == virtPageAddr);
#endif
- TlbEntry *newEntry = p->pTable->lookup(vaddr);
- if (!newEntry && sender_state->tlbMode != BaseTLB::Execute &&
+ const EmulationPageTable::Entry *pte = p->pTable->lookup(vaddr);
+ if (!pte && sender_state->tlbMode != BaseTLB::Execute &&
p->fixupStackFault(vaddr)) {
- newEntry = p->pTable->lookup(vaddr);
+ pte = p->pTable->lookup(vaddr);
}
- if (newEntry) {
+ if (pte) {
DPRINTF(GPUTLB, "Mapping %#x to %#x\n", alignedVaddr,
- newEntry->pageStart());
+ pte->paddr);
sender_state->tlbEntry =
- new GpuTlbEntry(0, newEntry->vaddr, newEntry->paddr, true);
+ new GpuTlbEntry(0, virtPageAddr, pte->paddr, true);
} else {
sender_state->tlbEntry =
new GpuTlbEntry(0, 0, 0, false);
@@ -1533,10 +1534,11 @@ namespace X86ISA
assert(alignedVaddr == virt_page_addr);
#endif
- TlbEntry *newEntry = p->pTable->lookup(vaddr);
- if (!newEntry && sender_state->tlbMode != BaseTLB::Execute &&
+ const EmulationPageTable::Entry *pte =
+ p->pTable->lookup(vaddr);
+ if (!pte && sender_state->tlbMode != BaseTLB::Execute &&
p->fixupStackFault(vaddr)) {
- newEntry = p->pTable->lookup(vaddr);
+ pte = p->pTable->lookup(vaddr);
}
if (!sender_state->prefetch) {
@@ -1545,23 +1547,23 @@ namespace X86ISA
assert(success);
DPRINTF(GPUTLB, "Mapping %#x to %#x\n", alignedVaddr,
- newEntry->pageStart());
+ pte->paddr);
sender_state->tlbEntry =
- new GpuTlbEntry(0, newEntry->vaddr,
- newEntry->paddr, success);
+ new GpuTlbEntry(0, virt_page_addr,
+ pte->paddr, success);
} else {
// If this was a prefetch, then do the normal thing if it
// was a successful translation. Otherwise, send an empty
// TLB entry back so that it can be figured out as empty and
// handled accordingly.
- if (newEntry) {
+ if (pte) {
DPRINTF(GPUTLB, "Mapping %#x to %#x\n", alignedVaddr,
- newEntry->pageStart());
+ pte->paddr);
sender_state->tlbEntry =
- new GpuTlbEntry(0, newEntry->vaddr,
- newEntry->paddr, success);
+ new GpuTlbEntry(0, virt_page_addr,
+ pte->paddr, success);
} else {
DPRINTF(GPUPrefetch, "Prefetch failed %#x\n",
alignedVaddr);