author     Brad Beckmann <Brad.Beckmann@amd.com>   2010-08-20 11:46:14 -0700
committer  Brad Beckmann <Brad.Beckmann@amd.com>   2010-08-20 11:46:14 -0700
commit     af6b97e3ee2d73fcb2d4bcdbdffc9a6534dfdac8 (patch)
tree       62657e18174edde5d3cf8bb68908a17034cdb59d /src/mem/protocol/MOESI_hammer-dir.sm
parent     f57053473ad369d5baf4a83d17913e5af393a8a8 (diff)
download   gem5-af6b97e3ee2d73fcb2d4bcdbdffc9a6534dfdac8.tar.xz
ruby: Recycle latency fix for hammer
This patch allows each individual message buffer to have a different recycle latency and allows the overall recycle latency to be specified on the command line. The patch also adds profiling info to make sure no one processor's requests are recycled too much.
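For context on how the "overall recycle latency" mentioned above could reach the controllers, a minimal sketch of the command-line plumbing follows. The option name and the recycle_latency controller parameter are illustrative assumptions for this sketch, not details taken from this commit; the per-buffer override corresponds to the recycle_latency="1" argument added to requestToDir in the diff below.

# Hypothetical gem5 configuration fragment (option and parameter names are
# illustrative assumptions, not taken from this commit).
import optparse

parser = optparse.OptionParser()
# Assumed option: one default recycle latency (in cycles) applied to every
# Ruby controller's message buffers.
parser.add_option("--recycle-latency", type="int", default=10,
                  help="default recycle latency for Ruby message buffers")
(options, args) = parser.parse_args()

# Each SLICC-generated controller would then be constructed with that value,
# e.g. dir_cntrl = Directory_Controller(..., recycle_latency=options.recycle_latency).
# A buffer that declares its own recycle_latency, like requestToDir in this
# patch, would override the controller-wide default when recycle() is called.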
Diffstat (limited to 'src/mem/protocol/MOESI_hammer-dir.sm')
-rw-r--r--  src/mem/protocol/MOESI_hammer-dir.sm | 40
1 file changed, 23 insertions(+), 17 deletions(-)
diff --git a/src/mem/protocol/MOESI_hammer-dir.sm b/src/mem/protocol/MOESI_hammer-dir.sm
index d4b36cded..df3062c93 100644
--- a/src/mem/protocol/MOESI_hammer-dir.sm
+++ b/src/mem/protocol/MOESI_hammer-dir.sm
@@ -52,7 +52,7 @@ machine(Directory, "AMD Hammer-like protocol")
MessageBuffer unblockToDir, network="From", virtual_network="5", ordered="false";
MessageBuffer responseToDir, network="From", virtual_network="4", ordered="false";
- MessageBuffer requestToDir, network="From", virtual_network="2", ordered="false";
+ MessageBuffer requestToDir, network="From", virtual_network="2", ordered="false", recycle_latency="1";
MessageBuffer dmaRequestToDir, network="From", virtual_network="0", ordered="true";
// STATES
@@ -309,6 +309,22 @@ machine(Directory, "AMD Hammer-like protocol")
}
}
+ // off-chip memory request/response is done
+ in_port(memQueue_in, MemoryMsg, memBuffer) {
+ if (memQueue_in.isReady()) {
+ peek(memQueue_in, MemoryMsg) {
+ if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
+ trigger(Event:Memory_Data, in_msg.Address);
+ } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
+ trigger(Event:Memory_Ack, in_msg.Address);
+ } else {
+ DEBUG_EXPR(in_msg.Type);
+ error("Invalid message");
+ }
+ }
+ }
+ }
+
in_port(requestQueue_in, RequestMsg, requestToDir) {
if (requestQueue_in.isReady()) {
peek(requestQueue_in, RequestMsg) {
@@ -333,22 +349,6 @@ machine(Directory, "AMD Hammer-like protocol")
}
}
- // off-chip memory request/response is done
- in_port(memQueue_in, MemoryMsg, memBuffer) {
- if (memQueue_in.isReady()) {
- peek(memQueue_in, MemoryMsg) {
- if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
- trigger(Event:Memory_Data, in_msg.Address);
- } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
- trigger(Event:Memory_Ack, in_msg.Address);
- } else {
- DEBUG_EXPR(in_msg.Type);
- error("Invalid message");
- }
- }
- }
- }
-
// Actions
action(r_setMRU, "\rr", desc="manually set the MRU bit for pf entry" ) {
@@ -766,6 +766,9 @@ machine(Directory, "AMD Hammer-like protocol")
}
action(j_popIncomingUnblockQueue, "j", desc="Pop incoming unblock queue") {
+ peek(unblockNetwork_in, ResponseMsg) {
+ APPEND_TRANSITION_COMMENT(in_msg.Sender);
+ }
unblockNetwork_in.dequeue();
}
@@ -880,6 +883,9 @@ machine(Directory, "AMD Hammer-like protocol")
}
action(zz_recycleRequest, "\z", desc="Recycle the request queue") {
+ peek(requestQueue_in, RequestMsg) {
+ APPEND_TRANSITION_COMMENT(in_msg.Requestor);
+ }
requestQueue_in.recycle();
}