path: root/src/mem/protocol/MOESI_CMP_directory-dir.sm
author     Derek Hower <drh5@cs.wisc.edu>  2009-08-04 12:52:52 -0500
committer  Derek Hower <drh5@cs.wisc.edu>  2009-08-04 12:52:52 -0500
commit     33b28fde7aca9bf1ae16b9db09e71ccd44d3ae76 (patch)
tree       fe2a4aee5517aed63f95e56ce4f085793826bdd4 /src/mem/protocol/MOESI_CMP_directory-dir.sm
parent     c1e0bd1df4cf107bd543bcde9c9ab7be41d6dce3 (diff)
download   gem5-33b28fde7aca9bf1ae16b9db09e71ccd44d3ae76.tar.xz
slicc: added MOESI_CMP_directory, DMA SequencerMsg, parameterized controllers
This changeset contains a lot of different changes that are too mingled to separate. They are:

1. Added MOESI_CMP_directory
   I made the changes necessary to bring back MOESI_CMP_directory, including adding a DMA controller. I got rid of MOESI_CMP_directory_m and made MOESI_CMP_directory use a memory controller. Added a new configuration for two-level protocols in general, and MOESI_CMP_directory in particular.

2. DMA Sequencer uses a generic SequencerMsg
   I will eventually make the cache Sequencer use this type as well. It doesn't contain an offset field, just a physical address and a length. MI_example has been updated to deal with this.

3. Parameterized Controllers
   SLICC controllers can now take custom parameters to use for mapping, latencies, etc. Currently, only int parameters are supported.
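As a quick illustration of point 3, here is a condensed sketch of the parameterized-controller syntax as this patch applies it to the directory machine below. The directory_latency parameter and the WB_ACK action come straight from the diff; the body is abbreviated and is not a complete controller.

    machine(Directory, "Directory protocol")
    : int directory_latency   // custom int parameter, supplied by the configuration
    {
      // ...

      action(a_sendWriteBackAck, "a", desc="Send writeback ack to requestor") {
        peek(requestQueue_in, RequestMsg) {
          // the parameter replaces the old string constant latency="DIRECTORY_LATENCY"
          enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
            out_msg.Address := address;
            out_msg.Type := CoherenceRequestType:WB_ACK;
            out_msg.Requestor := in_msg.Requestor;
          }
        }
      }
    }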
Diffstat (limited to 'src/mem/protocol/MOESI_CMP_directory-dir.sm')
-rw-r--r--  src/mem/protocol/MOESI_CMP_directory-dir.sm  | 313
1 files changed, 283 insertions, 30 deletions
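Point 2 is visible in the directory actions further down: the DMA write request that reaches the directory carries an address and a Len rather than an explicit offset, so the offset is recovered from the address itself. A condensed copy of that action from the patch, with an explanatory comment added:

    action(l_writeDMADataToMemory, "\l", desc="Write data from a DMA_WRITE to memory") {
      peek(requestQueue_in, RequestMsg) {
        // No offset field in the request: addressOffset() recovers the byte offset
        // from the physical address, and Len bounds the partial copy into the block.
        directory[address].DataBlk.copyPartial(in_msg.DataBlk,
                                               addressOffset(in_msg.Address),
                                               in_msg.Len);
      }
    }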
diff --git a/src/mem/protocol/MOESI_CMP_directory-dir.sm b/src/mem/protocol/MOESI_CMP_directory-dir.sm
index a016836c2..edd67707e 100644
--- a/src/mem/protocol/MOESI_CMP_directory-dir.sm
+++ b/src/mem/protocol/MOESI_CMP_directory-dir.sm
@@ -31,13 +31,15 @@
* $Id$
*/
-machine(Directory, "Directory protocol") {
+machine(Directory, "Directory protocol")
+: int directory_latency
+{
// ** IN QUEUES **
MessageBuffer foo1, network="From", virtual_network="0", ordered="false"; // a mod-L2 bank -> this Dir
MessageBuffer requestToDir, network="From", virtual_network="1", ordered="false"; // a mod-L2 bank -> this Dir
MessageBuffer responseToDir, network="From", virtual_network="2", ordered="false"; // a mod-L2 bank -> this Dir
-
+
MessageBuffer goo1, network="To", virtual_network="0", ordered="false";
MessageBuffer forwardFromDir, network="To", virtual_network="1", ordered="false";
MessageBuffer responseFromDir, network="To", virtual_network="2", ordered="false"; // Dir -> mod-L2 bank
@@ -56,11 +58,16 @@ machine(Directory, "Directory protocol") {
OO, desc="Blocked, was in owned";
MO, desc="Blocked, going to owner or maybe modified";
MM, desc="Blocked, going to modified";
+ MM_DMA, desc="Blocked, going to I";
MI, desc="Blocked on a writeback";
MIS, desc="Blocked on a writeback, but don't remove from sharers when received";
OS, desc="Blocked on a writeback";
OSS, desc="Blocked on a writeback, but don't remove from sharers when received";
+
+ XI_M, desc="In a stable state, going to I, waiting for the memory controller";
+ XI_U, desc="In a stable state, going to I, waiting for an unblock";
+ OI_D, desc="In O, going to I, waiting for data";
}
// Events
@@ -75,6 +82,11 @@ machine(Directory, "Directory protocol") {
Exclusive_Unblock, desc="The processor become the exclusive owner (E or M) of the line";
Clean_Writeback, desc="The final message as part of a PutX/PutS, no data";
Dirty_Writeback, desc="The final message as part of a PutX/PutS, contains data";
+ Memory_Data, desc="Fetched data from memory arrives";
+ Memory_Ack, desc="Writeback Ack from memory arrives";
+ DMA_READ, desc="DMA Read";
+ DMA_WRITE, desc="DMA Write";
+ Data, desc="Data to directory";
}
// TYPES
@@ -88,15 +100,36 @@ machine(Directory, "Directory protocol") {
int WaitingUnblocks, desc="Number of acks we're waiting for";
}
+ structure(TBE, desc="...") {
+ Address address, desc="Address for this entry";
+ int Len, desc="Length of request";
+ DataBlock DataBlk, desc="DataBlk";
+ MachineID Requestor, desc="original requestor";
+ }
+
external_type(DirectoryMemory) {
Entry lookup(Address);
bool isPresent(Address);
}
+ external_type(TBETable) {
+ TBE lookup(Address);
+ void allocate(Address);
+ void deallocate(Address);
+ bool isPresent(Address);
+ }
+
+ // to simulate detailed DRAM
+ external_type(MemoryControl, inport="yes", outport="yes") {
+
+ }
+
// ** OBJECTS **
- DirectoryMemory directory, constructor_hack="i";
+ DirectoryMemory directory, factory='RubySystem::getDirectory(m_cfg["directory_name"])';
+ MemoryControl memBuffer, factory='RubySystem::getMemoryControl(m_cfg["memory_controller_name"])';
+ TBETable TBEs, template_hack="<Directory_TBE>";
State getState(Address addr) {
return directory[addr].DirectoryState;
@@ -164,6 +197,7 @@ machine(Directory, "Directory protocol") {
out_port(responseNetwork_out, ResponseMsg, responseFromDir);
// out_port(requestQueue_out, ResponseMsg, requestFromDir); // For recycling requests
out_port(goo1_out, ResponseMsg, goo1);
+ out_port(memQueue_out, MemoryMsg, memBuffer);
// ** IN_PORTS **
@@ -188,6 +222,8 @@ machine(Directory, "Directory protocol") {
trigger(Event:Dirty_Writeback, in_msg.Address);
} else if (in_msg.Type == CoherenceResponseType:WRITEBACK_CLEAN_ACK) {
trigger(Event:Clean_Writeback, in_msg.Address);
+ } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
+ trigger(Event:Data, in_msg.Address);
} else {
error("Invalid message");
}
@@ -208,7 +244,27 @@ machine(Directory, "Directory protocol") {
trigger(Event:PUTO, in_msg.Address);
} else if (in_msg.Type == CoherenceRequestType:PUTO_SHARERS) {
trigger(Event:PUTO_SHARERS, in_msg.Address);
+ } else if (in_msg.Type == CoherenceRequestType:DMA_READ) {
+ trigger(Event:DMA_READ, in_msg.Address);
+ } else if (in_msg.Type == CoherenceRequestType:DMA_WRITE) {
+ trigger(Event:DMA_WRITE, in_msg.Address);
+ } else {
+ error("Invalid message");
+ }
+ }
+ }
+ }
+
+ // off-chip memory request/response is done
+ in_port(memQueue_in, MemoryMsg, memBuffer) {
+ if (memQueue_in.isReady()) {
+ peek(memQueue_in, MemoryMsg) {
+ if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
+ trigger(Event:Memory_Data, in_msg.Address);
+ } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
+ trigger(Event:Memory_Ack, in_msg.Address);
} else {
+ DEBUG_EXPR(in_msg.Type);
error("Invalid message");
}
}
@@ -219,7 +275,7 @@ machine(Directory, "Directory protocol") {
action(a_sendWriteBackAck, "a", desc="Send writeback ack to requestor") {
peek(requestQueue_in, RequestMsg) {
- enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
+ enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:WB_ACK;
out_msg.Requestor := in_msg.Requestor;
@@ -231,7 +287,7 @@ machine(Directory, "Directory protocol") {
action(b_sendWriteBackNack, "b", desc="Send writeback nack to requestor") {
peek(requestQueue_in, RequestMsg) {
- enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
+ enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:WB_NACK;
out_msg.Requestor := in_msg.Requestor;
@@ -254,26 +310,21 @@ machine(Directory, "Directory protocol") {
directory[address].Sharers.clear();
}
- action(d_sendData, "d", desc="Send data to requestor") {
- peek(requestQueue_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, latency="MEMORY_LATENCY") {
- // enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+ action(d_sendDataMsg, "d", desc="Send data to requestor") {
+ peek(memQueue_in, MemoryMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, latency="1") {
out_msg.Address := address;
-
- if (in_msg.Type == CoherenceRequestType:GETS && directory[address].Sharers.count() == 0) {
- out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
- } else {
- out_msg.Type := CoherenceResponseType:DATA;
- }
-
out_msg.Sender := machineID;
out_msg.SenderMachine := MachineType:Directory;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.DataBlk := directory[in_msg.Address].DataBlk;
+ out_msg.Destination.add(in_msg.OriginalRequestorMachId);
+ //out_msg.DataBlk := directory[in_msg.Address].DataBlk;
+ out_msg.DataBlk := in_msg.DataBlk;
out_msg.Dirty := false; // By definition, the block is now clean
- out_msg.Acks := directory[address].Sharers.count();
- if (directory[address].Sharers.isElement(in_msg.Requestor)) {
- out_msg.Acks := out_msg.Acks - 1;
+ out_msg.Acks := in_msg.Acks;
+ if (in_msg.ReadX) {
+ out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
+ } else {
+ out_msg.Type := CoherenceResponseType:DATA;
}
out_msg.MessageSize := MessageSizeType:Response_Data;
}
@@ -289,7 +340,7 @@ machine(Directory, "Directory protocol") {
action(f_forwardRequest, "f", desc="Forward request to owner") {
peek(requestQueue_in, RequestMsg) {
- enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
+ enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
out_msg.Address := address;
out_msg.Type := in_msg.Type;
out_msg.Requestor := in_msg.Requestor;
@@ -303,11 +354,27 @@ machine(Directory, "Directory protocol") {
}
}
+ action(f_forwardRequestDirIsRequestor, "\f", desc="Forward request to owner") {
+ peek(requestQueue_in, RequestMsg) {
+ enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
+ out_msg.Address := address;
+ out_msg.Type := in_msg.Type;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.addNetDest(directory[in_msg.Address].Owner);
+ out_msg.Acks := directory[address].Sharers.count();
+ if (directory[address].Sharers.isElement(in_msg.Requestor)) {
+ out_msg.Acks := out_msg.Acks - 1;
+ }
+ out_msg.MessageSize := MessageSizeType:Forwarded_Control;
+ }
+ }
+ }
+
action(g_sendInvalidations, "g", desc="Send invalidations to sharers, not including the requester") {
peek(requestQueue_in, RequestMsg) {
if ((directory[in_msg.Address].Sharers.count() > 1) ||
((directory[in_msg.Address].Sharers.count() > 0) && (directory[in_msg.Address].Sharers.isElement(in_msg.Requestor) == false))) {
- enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
+ enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:INV;
out_msg.Requestor := in_msg.Requestor;
@@ -338,7 +405,7 @@ machine(Directory, "Directory protocol") {
}
}
- action(ll_checkDataInMemory, "\l", desc="Check PUTX/PUTO data is same as in the memory") {
+ action(ll_checkDataInMemory, "\ld", desc="Check PUTX/PUTO data is same as in the memory") {
peek(unblockNetwork_in, ResponseMsg) {
assert(in_msg.Dirty == false);
assert(in_msg.MessageSize == MessageSizeType:Writeback_Control);
@@ -366,6 +433,70 @@ machine(Directory, "Directory protocol") {
assert(directory[address].WaitingUnblocks >= 0);
}
+ action(q_popMemQueue, "q", desc="Pop off-chip request queue") {
+ memQueue_in.dequeue();
+ }
+
+ action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
+ peek(requestQueue_in, RequestMsg) {
+ enqueue(memQueue_out, MemoryMsg, latency="1") {
+ out_msg.Address := address;
+ out_msg.Type := MemoryRequestType:MEMORY_READ;
+ out_msg.Sender := machineID;
+ out_msg.OriginalRequestorMachId := in_msg.Requestor;
+ out_msg.DataBlk := directory[in_msg.Address].DataBlk;
+ out_msg.MessageSize := in_msg.MessageSize;
+ //out_msg.Prefetch := false;
+ // These are not used by memory but are passed back here with the read data:
+ out_msg.ReadX := (in_msg.Type == CoherenceRequestType:GETS && directory[address].Sharers.count() == 0);
+ out_msg.Acks := directory[address].Sharers.count();
+ if (directory[address].Sharers.isElement(in_msg.Requestor)) {
+ out_msg.Acks := out_msg.Acks - 1;
+ }
+ DEBUG_EXPR(out_msg);
+ }
+ }
+ }
+
+ action(qw_queueMemoryWBRequest, "qw", desc="Queue off-chip writeback request") {
+ peek(unblockNetwork_in, ResponseMsg) {
+ enqueue(memQueue_out, MemoryMsg, latency="1") {
+ out_msg.Address := address;
+ out_msg.Type := MemoryRequestType:MEMORY_WB;
+ out_msg.Sender := machineID;
+ if (TBEs.isPresent(address)) {
+ out_msg.OriginalRequestorMachId := TBEs[address].Requestor;
+ }
+ out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.MessageSize := in_msg.MessageSize;
+ //out_msg.Prefetch := false;
+ // Not used:
+ out_msg.ReadX := false;
+ out_msg.Acks := directory[address].Sharers.count(); // for dma requests
+ DEBUG_EXPR(out_msg);
+ }
+ }
+ }
+
+ action(qw_queueMemoryWBRequest2, "/qw", desc="Queue off-chip writeback request") {
+ peek(requestQueue_in, RequestMsg) {
+ enqueue(memQueue_out, MemoryMsg, latency="1") {
+ out_msg.Address := address;
+ out_msg.Type := MemoryRequestType:MEMORY_WB;
+ out_msg.Sender := machineID;
+ out_msg.OriginalRequestorMachId := in_msg.Requestor;
+ out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.MessageSize := in_msg.MessageSize;
+ //out_msg.Prefetch := false;
+ // Not used:
+ out_msg.ReadX := false;
+ out_msg.Acks := directory[address].Sharers.count(); // for dma requests
+ DEBUG_EXPR(out_msg);
+ }
+ }
+ }
+
+
// action(z_stall, "z", desc="Cannot be handled right now.") {
// Special name recognized as do nothing case
// }
@@ -374,26 +505,106 @@ machine(Directory, "Directory protocol") {
requestQueue_in.recycle();
}
+ action(a_sendDMAAck, "\a", desc="Send DMA Ack that write completed, along with Inv Ack count") {
+ peek(memQueue_in, MemoryMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, latency="1") {
+ out_msg.Address := address;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:Directory;
+ out_msg.Destination.add(in_msg.OriginalRequestorMachId);
+ out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.Acks := in_msg.Acks;
+ out_msg.Type := CoherenceResponseType:DMA_ACK;
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+ }
+
+ action(l_writeDMADataToMemory, "\l", desc="Write data from a DMA_WRITE to memory") {
+ peek(requestQueue_in, RequestMsg) {
+ directory[address].DataBlk.copyPartial(in_msg.DataBlk, addressOffset(in_msg.Address), in_msg.Len);
+ }
+ }
+
+ action(l_writeDMADataToMemoryFromTBE, "\ll", desc="Write data from a DMA_WRITE to memory") {
+ directory[address].DataBlk.copyPartial(TBEs[address].DataBlk, addressOffset(address), TBEs[address].Len);
+ }
+
+ action(v_allocateTBE, "v", desc="Allocate TBE entry") {
+ peek (requestQueue_in, RequestMsg) {
+ TBEs.allocate(address);
+ TBEs[address].Len := in_msg.Len;
+ TBEs[address].DataBlk := in_msg.DataBlk;
+ TBEs[address].Requestor := in_msg.Requestor;
+ }
+ }
+
+ action(w_deallocateTBE, "w", desc="Deallocate TBE entry") {
+ TBEs.deallocate(address);
+ }
+
+
+
// TRANSITIONS
transition(I, GETX, MM) {
- d_sendData;
+ qf_queueMemoryFetchRequest;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(I, DMA_READ, XI_M) {
+ qf_queueMemoryFetchRequest;
i_popIncomingRequestQueue;
}
+ transition(I, DMA_WRITE, XI_M) {
+ qw_queueMemoryWBRequest2;
+ l_writeDMADataToMemory;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(XI_M, Memory_Data, XI_U) {
+ d_sendDataMsg; // ack count may be zero
+ q_popMemQueue;
+ }
+
+ transition(XI_M, Memory_Ack, XI_U) {
+ a_sendDMAAck; // ack count may be zero
+ q_popMemQueue;
+ }
+
+ transition(XI_U, Exclusive_Unblock, I) {
+ cc_clearSharers;
+ c_clearOwner;
+ j_popIncomingUnblockQueue;
+ }
+
transition(S, GETX, MM) {
- d_sendData;
+ qf_queueMemoryFetchRequest;
g_sendInvalidations;
i_popIncomingRequestQueue;
}
+ transition(S, DMA_READ, XI_M) {
+ qf_queueMemoryFetchRequest;
+ g_sendInvalidations; // the DMA will collect the invalidations then send an Unblock Exclusive
+ i_popIncomingRequestQueue;
+ }
+
+ transition(S, DMA_WRITE, XI_M) {
+ qw_queueMemoryWBRequest2;
+ l_writeDMADataToMemory;
+ g_sendInvalidations; // the DMA will collect invalidations
+ i_popIncomingRequestQueue;
+ }
+
transition(I, GETS, IS) {
- d_sendData;
+ qf_queueMemoryFetchRequest;
i_popIncomingRequestQueue;
}
transition({S, SS}, GETS, SS) {
- d_sendData;
+ qf_queueMemoryFetchRequest;
n_incrementOutstanding;
i_popIncomingRequestQueue;
}
@@ -414,6 +625,27 @@ machine(Directory, "Directory protocol") {
i_popIncomingRequestQueue;
}
+ transition(O, DMA_READ, XI_U) {
+ f_forwardRequest; // this will cause the data to go to DMA directly
+ g_sendInvalidations; // this will cause acks to be sent to the DMA
+ i_popIncomingRequestQueue;
+ }
+
+ transition({O,M}, DMA_WRITE, OI_D) {
+ f_forwardRequestDirIsRequestor; // need the modified data before we can proceed
+ g_sendInvalidations; // these go to the DMA Controller
+ v_allocateTBE;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(OI_D, Data, XI_M) {
+ qw_queueMemoryWBRequest;
+ l_writeDataToMemory;
+ l_writeDMADataToMemoryFromTBE;
+ w_deallocateTBE;
+ j_popIncomingUnblockQueue;
+ }
+
transition({O, OO}, GETS, OO) {
f_forwardRequest;
n_incrementOutstanding;
@@ -425,6 +657,12 @@ machine(Directory, "Directory protocol") {
i_popIncomingRequestQueue;
}
+ // no exclusive unblock will show up to the directory
+ transition(M, DMA_READ, XI_U) {
+ f_forwardRequest; // this will cause the data to go to DMA directly
+ i_popIncomingRequestQueue;
+ }
+
transition(M, GETS, MO) {
f_forwardRequest;
i_popIncomingRequestQueue;
@@ -457,7 +695,7 @@ machine(Directory, "Directory protocol") {
}
- transition({MM, MO, MI, MIS, OS, OSS}, {GETS, GETX, PUTO, PUTO_SHARERS, PUTX}) {
+ transition({MM, MO, MI, MIS, OS, OSS}, {GETS, GETX, PUTO, PUTO_SHARERS, PUTX, DMA_READ}) {
zz_recycleRequest;
}
@@ -472,7 +710,7 @@ machine(Directory, "Directory protocol") {
j_popIncomingUnblockQueue;
}
- transition({IS, SS, OO}, {GETX, PUTO, PUTO_SHARERS, PUTX}) {
+ transition({IS, SS, OO}, {GETX, PUTO, PUTO_SHARERS, PUTX, DMA_READ}) {
zz_recycleRequest;
}
@@ -519,12 +757,14 @@ machine(Directory, "Directory protocol") {
c_clearOwner;
cc_clearSharers;
l_writeDataToMemory;
+ qw_queueMemoryWBRequest;
j_popIncomingUnblockQueue;
}
transition(MIS, Dirty_Writeback, S) {
c_moveOwnerToSharer;
l_writeDataToMemory;
+ qw_queueMemoryWBRequest;
j_popIncomingUnblockQueue;
}
@@ -536,12 +776,14 @@ machine(Directory, "Directory protocol") {
transition(OS, Dirty_Writeback, S) {
c_clearOwner;
l_writeDataToMemory;
+ qw_queueMemoryWBRequest;
j_popIncomingUnblockQueue;
}
transition(OSS, Dirty_Writeback, S) {
c_moveOwnerToSharer;
l_writeDataToMemory;
+ qw_queueMemoryWBRequest;
j_popIncomingUnblockQueue;
}
@@ -570,4 +812,15 @@ machine(Directory, "Directory protocol") {
transition({OS, OSS}, Unblock, O) {
j_popIncomingUnblockQueue;
}
+
+ transition({I, S, O, M, IS, SS, OO, MO, MM, MI, MIS, OS, OSS}, Memory_Data) {
+ d_sendDataMsg;
+ q_popMemQueue;
+ }
+
+ transition({I, S, O, M, IS, SS, OO, MO, MM, MI, MIS, OS, OSS}, Memory_Ack) {
+ //a_sendAck;
+ q_popMemQueue;
+ }
+
}