summaryrefslogtreecommitdiff
path: root/src/cpu/o3
diff options
context:
space:
mode:
authorGabor Dozsa <gabor.dozsa@arm.com>2020-01-06 10:55:36 +0000
committerGiacomo Travaglini <giacomo.travaglini@arm.com>2020-01-07 17:47:43 +0000
commit6816e3e39fd5a23098d9997062e7c35952ad95dd (patch)
tree8cb946d2a2c6772ef37096d0d3d2ebe2d8c58fef /src/cpu/o3
parente018030c237b827ae5766a0e1e89f3eed5fd155a (diff)
downloadgem5-6816e3e39fd5a23098d9997062e7c35952ad95dd.tar.xz
cpu: Use enums for O3CPU store value forwarding
This is aligning with MinorCPU, where an enum is tagging a Full, Partial and No address coverage. Change-Id: I0e0ba9b88c6f08c04430859e88135c61c56e6884 Reviewed-on: https://gem5-review.googlesource.com/c/public/gem5/+/23951 Reviewed-by: Bobby R. Bruce <bbruce@ucdavis.edu> Reviewed-by: Jason Lowe-Power <jason@lowepower.com> Maintainer: Jason Lowe-Power <jason@lowepower.com> Tested-by: kokoro <noreply+kokoro@google.com>
Diffstat (limited to 'src/cpu/o3')
-rw-r--r--src/cpu/o3/lsq_unit.hh54
1 file changed, 35 insertions, 19 deletions
diff --git a/src/cpu/o3/lsq_unit.hh b/src/cpu/o3/lsq_unit.hh
index cd512ced7..7c3e0e026 100644
--- a/src/cpu/o3/lsq_unit.hh
+++ b/src/cpu/o3/lsq_unit.hh
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2014,2017-2018 ARM Limited
+ * Copyright (c) 2012-2014,2017-2018,2020 ARM Limited
* All rights reserved
*
* The license below extends only to copyright in the software and shall
@@ -209,6 +209,14 @@ class LSQUnit
};
using LQEntry = LSQEntry;
+ /** Coverage of one address range with another */
+ enum class AddrRangeCoverage
+ {
+ PartialAddrRangeCoverage, /* Two ranges partly overlap */
+ FullAddrRangeCoverage, /* One range fully covers another */
+ NoAddrRangeCoverage /* Two ranges are disjoint */
+ };
+
public:
using LoadQueue = CircularQueue<LQEntry>;
using StoreQueue = CircularQueue<SQEntry>;
@@ -707,6 +715,8 @@ LSQUnit<Impl>::read(LSQRequest *req, int load_idx)
bool lower_load_has_store_part = req_s < st_e;
bool upper_load_has_store_part = req_e > st_s;
+ auto coverage = AddrRangeCoverage::NoAddrRangeCoverage;
+
// If the store entry is not atomic (atomic does not have valid
// data), the store has all of the data needed, and
// the load is not LLSC, then
@@ -715,6 +725,29 @@ LSQUnit<Impl>::read(LSQRequest *req, int load_idx)
store_has_lower_limit && store_has_upper_limit &&
!req->mainRequest()->isLLSC()) {
+ coverage = AddrRangeCoverage::FullAddrRangeCoverage;
+ } else if (
+ // This is the partial store-load forwarding case where a store
+ // has only part of the load's data and the load isn't LLSC
+ (!req->mainRequest()->isLLSC() &&
+ ((store_has_lower_limit && lower_load_has_store_part) ||
+ (store_has_upper_limit && upper_load_has_store_part) ||
+ (lower_load_has_store_part && upper_load_has_store_part))) ||
+ // The load is LLSC, and the store has all or part of the
+ // load's data
+ (req->mainRequest()->isLLSC() &&
+ ((store_has_lower_limit || upper_load_has_store_part) &&
+ (store_has_upper_limit || lower_load_has_store_part))) ||
+ // The store entry is atomic and has all or part of the load's
+ // data
+ (store_it->instruction()->isAtomic() &&
+ ((store_has_lower_limit || upper_load_has_store_part) &&
+ (store_has_upper_limit || lower_load_has_store_part)))) {
+
+ coverage = AddrRangeCoverage::PartialAddrRangeCoverage;
+ }
+
+ if (coverage == AddrRangeCoverage::FullAddrRangeCoverage) {
// Get shift amount for offset into the store's data.
int shift_amt = req->mainRequest()->getVaddr() -
store_it->instruction()->effAddr;
@@ -761,24 +794,7 @@ LSQUnit<Impl>::read(LSQRequest *req, int load_idx)
++lsqForwLoads;
return NoFault;
- } else if (
- // This is the partial store-load forwarding case where a store
- // has only part of the load's data and the load isn't LLSC
- (!req->mainRequest()->isLLSC() &&
- ((store_has_lower_limit && lower_load_has_store_part) ||
- (store_has_upper_limit && upper_load_has_store_part) ||
- (lower_load_has_store_part && upper_load_has_store_part))) ||
- // The load is LLSC, and the store has all or part of the
- // load's data
- (req->mainRequest()->isLLSC() &&
- ((store_has_lower_limit || upper_load_has_store_part) &&
- (store_has_upper_limit || lower_load_has_store_part))) ||
- // The store entry is atomic and has all or part of the load's
- // data
- (store_it->instruction()->isAtomic() &&
- ((store_has_lower_limit || upper_load_has_store_part) &&
- (store_has_upper_limit || lower_load_has_store_part)))) {
-
+ } else if (coverage == AddrRangeCoverage::PartialAddrRangeCoverage) {
// If it's already been written back, then don't worry about
// stalling on it.
if (store_it->completed()) {