author     Gabe Black <gblack@eecs.umich.edu>  2011-07-31 19:21:17 -0700
committer  Gabe Black <gblack@eecs.umich.edu>  2011-07-31 19:21:17 -0700
commit     206c2e9a0ee04e00100dde25da9b15cbfbaac0d6 (patch)
tree       518901a34efc50696a201d5640caca1948c2a5ae /src/cpu
parent     6308ca27ff357fb9bbb1250d93a7058ef69c7602 (diff)
download   gem5-206c2e9a0ee04e00100dde25da9b15cbfbaac0d6.tar.xz
O3: Implement memory mapped IPRs for O3.
Diffstat (limited to 'src/cpu')
-rw-r--r--  src/cpu/o3/lsq_unit.hh       38
-rw-r--r--  src/cpu/o3/lsq_unit_impl.hh  32
2 files changed, 64 insertions(+), 6 deletions(-)
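
The read-side hunk below makes LSQUnit<Impl>::read() detect requests flagged as memory-mapped IPRs and service them inline: it builds the packet(s), calls TheISA::handleIprRead() directly (twice for a split unaligned access, keeping the larger of the two delays), and schedules the load's WritebackEvent after that delay instead of issuing a D-cache access. The standalone C++ sketch here models only that dispatch and delay logic; every type and function in it (FakeRequest, FakePacket, handleIprRead, iprReadDelay) is an invented stand-in for illustration, not gem5 code.

// Standalone sketch of the IPR read dispatch added in the hunk below.
// All names here are illustrative stand-ins, not gem5 classes.
#include <algorithm>
#include <cstdint>
#include <iostream>

struct FakeRequest {
    uint64_t addr;
    unsigned size;
    bool mmappedIpr;                       // models req->isMmappedIpr()
    bool isMmappedIpr() const { return mmappedIpr; }
    unsigned getSize() const { return size; }
};

struct FakePacket {
    FakeRequest *req;
    uint8_t *data;
    void dataStatic(uint8_t *p) { data = p; }  // point at caller-owned storage
};

// Stand-in for TheISA::handleIprRead(): fill the buffer and return the
// access latency in ticks.
uint64_t handleIprRead(FakePacket *pkt) {
    std::fill(pkt->data, pkt->data + pkt->req->getSize(), 0xAB);
    return 10;                             // fixed latency for the sketch
}

// Delay after which the load's writeback would be scheduled, mirroring the
// "take the larger of the two halves" rule for split accesses.
uint64_t iprReadDelay(FakeRequest *req, FakeRequest *low, FakeRequest *high,
                      uint8_t *memData) {
    if (!low) {                            // not split: one packet, one call
        FakePacket pkt{req, nullptr};
        pkt.dataStatic(memData);
        return handleIprRead(&pkt);
    }
    FakePacket fst{low, nullptr};          // split: two packets, one buffer
    FakePacket snd{high, nullptr};
    fst.dataStatic(memData);
    snd.dataStatic(memData + low->getSize());
    uint64_t d1 = handleIprRead(&fst);
    uint64_t d2 = handleIprRead(&snd);
    return std::max(d1, d2);               // wait for the slower half
}

int main() {
    uint8_t buf[64] = {};
    FakeRequest whole{0x1000, 8, true};
    FakeRequest low{0x0ffc, 4, true};
    FakeRequest high{0x1000, 4, true};
    std::cout << "aligned delay: " << iprReadDelay(&whole, nullptr, nullptr, buf)
              << "\nsplit delay:   " << iprReadDelay(&whole, &low, &high, buf)
              << std::endl;
    return 0;
}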
diff --git a/src/cpu/o3/lsq_unit.hh b/src/cpu/o3/lsq_unit.hh
index d83dc868f..2076d67d1 100644
--- a/src/cpu/o3/lsq_unit.hh
+++ b/src/cpu/o3/lsq_unit.hh
@@ -39,6 +39,7 @@
#include "arch/faults.hh"
#include "arch/locked_mem.hh"
+#include "arch/mmapped_ipr.hh"
#include "base/fast_alloc.hh"
#include "base/hashmap.hh"
#include "config/full_system.hh"
@@ -578,6 +579,43 @@ LSQUnit<Impl>::read(Request *req, Request *sreqLow, Request *sreqHigh,
         load_inst->recordResult = true;
     }
+    if (req->isMmappedIpr()) {
+        assert(!load_inst->memData);
+        load_inst->memData = new uint8_t[64];
+
+        ThreadContext *thread = cpu->tcBase(lsqID);
+        Tick delay;
+        PacketPtr data_pkt =
+            new Packet(req, MemCmd::ReadReq, Packet::Broadcast);
+
+        if (!TheISA::HasUnalignedMemAcc || !sreqLow) {
+            data_pkt->dataStatic(load_inst->memData);
+            delay = TheISA::handleIprRead(thread, data_pkt);
+        } else {
+            assert(sreqLow->isMmappedIpr() && sreqHigh->isMmappedIpr());
+            PacketPtr fst_data_pkt =
+                new Packet(sreqLow, MemCmd::ReadReq, Packet::Broadcast);
+            PacketPtr snd_data_pkt =
+                new Packet(sreqHigh, MemCmd::ReadReq, Packet::Broadcast);
+
+            fst_data_pkt->dataStatic(load_inst->memData);
+            snd_data_pkt->dataStatic(load_inst->memData + sreqLow->getSize());
+
+            delay = TheISA::handleIprRead(thread, fst_data_pkt);
+            unsigned delay2 = TheISA::handleIprRead(thread, snd_data_pkt);
+            if (delay2 > delay)
+                delay = delay2;
+
+            delete sreqLow;
+            delete sreqHigh;
+            delete fst_data_pkt;
+            delete snd_data_pkt;
+        }
+        WritebackEvent *wb = new WritebackEvent(load_inst, data_pkt, this);
+        cpu->schedule(wb, curTick() + delay);
+        return NoFault;
+    }
+
     while (store_idx != -1) {
         // End once we've reached the top of the LSQ
         if (store_idx == storeWBIdx) {
diff --git a/src/cpu/o3/lsq_unit_impl.hh b/src/cpu/o3/lsq_unit_impl.hh
index 2c2b30b8a..79a20a673 100644
--- a/src/cpu/o3/lsq_unit_impl.hh
+++ b/src/cpu/o3/lsq_unit_impl.hh
@@ -716,6 +716,9 @@ LSQUnit<Impl>::writebackStores()
         DynInstPtr inst = storeQueue[storeWBIdx].inst;
         Request *req = storeQueue[storeWBIdx].req;
+        RequestPtr sreqLow = storeQueue[storeWBIdx].sreqLow;
+        RequestPtr sreqHigh = storeQueue[storeWBIdx].sreqHigh;
+
         storeQueue[storeWBIdx].committed = true;
         assert(!inst->memData);
@@ -741,9 +744,6 @@ LSQUnit<Impl>::writebackStores()
             data_pkt->dataStatic(inst->memData);
             data_pkt->senderState = state;
         } else {
-            RequestPtr sreqLow = storeQueue[storeWBIdx].sreqLow;
-            RequestPtr sreqHigh = storeQueue[storeWBIdx].sreqHigh;
-
             // Create two packets if the store is split in two.
             data_pkt = new Packet(sreqLow, command, Packet::Broadcast);
             snd_data_pkt = new Packet(sreqHigh, command, Packet::Broadcast);
@@ -794,20 +794,40 @@ LSQUnit<Impl>::writebackStores()
             state->noWB = true;
         }
-        if (!sendStore(data_pkt)) {
+        bool split =
+            TheISA::HasUnalignedMemAcc && storeQueue[storeWBIdx].isSplit;
+
+        ThreadContext *thread = cpu->tcBase(lsqID);
+
+        if (req->isMmappedIpr()) {
+            assert(!inst->isStoreConditional());
+            TheISA::handleIprWrite(thread, data_pkt);
+            delete data_pkt;
+            if (split) {
+                assert(snd_data_pkt->req->isMmappedIpr());
+                TheISA::handleIprWrite(thread, snd_data_pkt);
+                delete snd_data_pkt;
+                delete sreqLow;
+                delete sreqHigh;
+            }
+            delete state;
+            delete req;
+            completeStore(storeWBIdx);
+            incrStIdx(storeWBIdx);
+        } else if (!sendStore(data_pkt)) {
             DPRINTF(IEW, "D-Cache became blocked when writing [sn:%lli], will"
                     "retry later\n",
                     inst->seqNum);
             // Need to store the second packet, if split.
-            if (TheISA::HasUnalignedMemAcc && storeQueue[storeWBIdx].isSplit) {
+            if (split) {
                 state->pktToSend = true;
                 state->pendingPacket = snd_data_pkt;
             }
         } else {
             // If split, try to send the second packet too
-            if (TheISA::HasUnalignedMemAcc && storeQueue[storeWBIdx].isSplit) {
+            if (split) {
                 assert(snd_data_pkt);
                 // Ensure there are enough ports to use.
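
For reference, the store side above follows the same pattern: when the request at the head of the store writeback queue is a memory-mapped IPR, writebackStores() skips sendStore() and the D-cache entirely, calls TheISA::handleIprWrite() on each packet (two when the store is split), frees the packets, requests, and sender state, and retires the entry via completeStore()/incrStIdx(). The standalone sketch below models only that branch structure; every type and function in it (Pkt, handleIprWrite, sendStore, completeStore) is an invented stand-in, not gem5 code.

// Standalone sketch of the store-writeback dispatch added above.
// All names here are illustrative stand-ins, not gem5 classes.
#include <cassert>
#include <iostream>

struct Pkt {
    bool mmappedIpr;                  // models pkt->req->isMmappedIpr()
};

void handleIprWrite(Pkt *) { std::cout << "IPR write handled inline\n"; }
bool sendStore(Pkt *)      { std::cout << "store sent to D-cache\n"; return true; }
void completeStore()       { std::cout << "store entry retired\n"; }

// Mirrors the new if/else-if chain: IPR stores complete immediately with no
// cache access; everything else goes through the (possibly blocked) cache port.
void writebackOne(Pkt *data_pkt, Pkt *snd_data_pkt, bool split) {
    if (data_pkt->mmappedIpr) {
        handleIprWrite(data_pkt);
        if (split) {
            assert(snd_data_pkt && snd_data_pkt->mmappedIpr);
            handleIprWrite(snd_data_pkt);
        }
        completeStore();              // retire right away, no writeback wait
    } else if (!sendStore(data_pkt)) {
        // D-cache blocked: a real LSQ would stash snd_data_pkt and retry later.
        if (split) {
            std::cout << "second half pending retry\n";
        }
    } else if (split) {
        sendStore(snd_data_pkt);      // first half accepted; send the second
    }
}

int main() {
    Pkt ipr{true};
    Pkt normal{false};
    Pkt second{false};
    writebackOne(&ipr, nullptr, false);     // IPR store, not split
    writebackOne(&normal, &second, true);   // ordinary split store
    return 0;
}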