author    Derek Hower <drh5@cs.wisc.edu>  2009-08-04 12:52:52 -0500
committer Derek Hower <drh5@cs.wisc.edu>  2009-08-04 12:52:52 -0500
commit    33b28fde7aca9bf1ae16b9db09e71ccd44d3ae76 (patch)
tree      fe2a4aee5517aed63f95e56ce4f085793826bdd4 /src/mem/protocol/MOESI_CMP_directory-dma.sm
parent    c1e0bd1df4cf107bd543bcde9c9ab7be41d6dce3 (diff)
download  gem5-33b28fde7aca9bf1ae16b9db09e71ccd44d3ae76.tar.xz
slicc: added MOESI_CMP_directory, DMA SequencerMsg, parameterized controllers
This changeset contains a lot of different changes that are too mingled to separate. They are:

1. Added MOESI_CMP_directory
   I made the changes necessary to bring back MOESI_CMP_directory, including adding a DMA controller. I got rid of MOESI_CMP_directory_m and made MOESI_CMP_directory use a memory controller. Added a new configuration for two-level protocols in general, and MOESI_CMP_directory in particular.

2. DMA Sequencer uses a generic SequencerMsg
   I will eventually make the cache Sequencer use this type as well. It doesn't contain an offset field, just a physical address and a length. MI_example has been updated to deal with this.

3. Parameterized Controllers
   SLICC controllers can now take custom parameters to use for mapping, latencies, etc. Currently, only int parameters are supported.
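For point 2, here is a minimal sketch of what the generic SequencerMsg carries, inferred only from the fields the new DMA controller reads from it (Type, PhysicalAddress, Len, DataBlk); the actual definition lives elsewhere in the Ruby sources and may include more fields:

    structure(SequencerMsg, desc="Generic request from a sequencer", interface="Message") {
      SequencerRequestType Type,  desc="LD for DMA reads, ST for DMA writes";
      Address PhysicalAddress,    desc="Physical address of the transfer";
      int Len,                    desc="Length of the transfer (no offset field)";
      DataBlock DataBlk,          desc="Data carried by DMA writes";
    }

For point 3, the machine() declaration at the top of the new file below shows the parameter syntax: the int parameters listed after the colon (request_latency, response_latency) are bound per controller at configuration time.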
Diffstat (limited to 'src/mem/protocol/MOESI_CMP_directory-dma.sm')
-rw-r--r--  src/mem/protocol/MOESI_CMP_directory-dma.sm | 267
1 file changed, 267 insertions(+), 0 deletions(-)
diff --git a/src/mem/protocol/MOESI_CMP_directory-dma.sm b/src/mem/protocol/MOESI_CMP_directory-dma.sm
new file mode 100644
index 000000000..74246c730
--- /dev/null
+++ b/src/mem/protocol/MOESI_CMP_directory-dma.sm
@@ -0,0 +1,267 @@
+
+machine(DMA, "DMA Controller")
+: int request_latency,
+ int response_latency
+{
+
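+  // Six virtual-network buffers, three per direction. Only responseFromDir,
+  // reqToDir, and respToDir carry real traffic: the goo1/goo2 in_ports below
+  // assert(false) if anything arrives, and foo1 is declared but never
+  // enqueued to.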
+ MessageBuffer goo1, network="From", virtual_network="0", ordered="false";
+ MessageBuffer goo2, network="From", virtual_network="1", ordered="false";
+ MessageBuffer responseFromDir, network="From", virtual_network="2", ordered="false";
+
+ MessageBuffer foo1, network="To", virtual_network="0", ordered="false";
+ MessageBuffer reqToDir, network="To", virtual_network="1", ordered="false";
+ MessageBuffer respToDir, network="To", virtual_network="2", ordered="false";
+
+ enumeration(State, desc="DMA states", default="DMA_State_READY") {
+ READY, desc="Ready to accept a new request";
+    BUSY_RD, desc="Busy: currently processing a read request";
+    BUSY_WR, desc="Busy: currently processing a write request";
+ }
+
+ enumeration(Event, desc="DMA events") {
+ ReadRequest, desc="A new read request";
+ WriteRequest, desc="A new write request";
+ Data, desc="Data from a DMA memory read";
+ DMA_Ack, desc="DMA write to memory completed";
+ Inv_Ack, desc="Invalidation Ack from a sharer";
+ All_Acks, desc="All acks received";
+ }
+
+ structure(TBE, desc="...") {
+ Address address, desc="Physical address";
+ int NumAcks, default="0", desc="Number of Acks pending";
+ DataBlock DataBlk, desc="Data";
+ }
+
+ external_type(DMASequencer) {
+ void ackCallback();
+ void dataCallback(DataBlock);
+ }
+
+ external_type(TBETable) {
+ TBE lookup(Address);
+ void allocate(Address);
+ void deallocate(Address);
+ bool isPresent(Address);
+ }
+
+ MessageBuffer mandatoryQueue, ordered="false";
+ MessageBuffer triggerQueue, ordered="true";
+ DMASequencer dma_sequencer, factory='RubySystem::getDMASequencer(m_cfg["dma_sequencer"])';
+ TBETable TBEs, template_hack="<DMA_TBE>";
+ State cur_state;
+
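+  // A single global state is tracked rather than per-address state
+  // (getState ignores addr), so only one DMA transfer is handled at a time.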
+ State getState(Address addr) {
+ return cur_state;
+ }
+ void setState(Address addr, State state) {
+ cur_state := state;
+ }
+
+ out_port(reqToDirectory_out, RequestMsg, reqToDir, desc="...");
+ out_port(respToDirectory_out, ResponseMsg, respToDir, desc="...");
+ out_port(foo1_out, ResponseMsg, foo1, desc="...");
+ out_port(triggerQueue_out, TriggerMsg, triggerQueue, desc="...");
+
+ in_port(goo1_in, RequestMsg, goo1) {
+ if (goo1_in.isReady()) {
+ peek(goo1_in, RequestMsg) {
+ assert(false);
+ }
+ }
+ }
+
+ in_port(goo2_in, RequestMsg, goo2) {
+ if (goo2_in.isReady()) {
+ peek(goo2_in, RequestMsg) {
+ assert(false);
+ }
+ }
+ }
+
+ in_port(dmaRequestQueue_in, SequencerMsg, mandatoryQueue, desc="...") {
+ if (dmaRequestQueue_in.isReady()) {
+ peek(dmaRequestQueue_in, SequencerMsg) {
+        if (in_msg.Type == SequencerRequestType:LD) {
+ trigger(Event:ReadRequest, in_msg.PhysicalAddress);
+ } else if (in_msg.Type == SequencerRequestType:ST) {
+ trigger(Event:WriteRequest, in_msg.PhysicalAddress);
+ } else {
+ error("Invalid request type");
+ }
+ }
+ }
+ }
+
+ in_port(dmaResponseQueue_in, ResponseMsg, responseFromDir, desc="...") {
+ if (dmaResponseQueue_in.isReady()) {
+      peek(dmaResponseQueue_in, ResponseMsg) {
+ if (in_msg.Type == CoherenceResponseType:DMA_ACK) {
+ trigger(Event:DMA_Ack, in_msg.Address);
+ } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
+ trigger(Event:Data, in_msg.Address);
+ } else if (in_msg.Type == CoherenceResponseType:ACK) {
+ trigger(Event:Inv_Ack, in_msg.Address);
+ } else {
+ error("Invalid response type");
+ }
+ }
+ }
+ }
+
+ // Trigger Queue
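+  // ALL_ACKS is posted to this queue by o_checkForCompletion below and
+  // marks the end of the current transfer.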
+ in_port(triggerQueue_in, TriggerMsg, triggerQueue) {
+ if (triggerQueue_in.isReady()) {
+ peek(triggerQueue_in, TriggerMsg) {
+ if (in_msg.Type == TriggerType:ALL_ACKS) {
+ trigger(Event:All_Acks, in_msg.Address);
+ } else {
+ error("Unexpected message");
+ }
+ }
+ }
+ }
+
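+  // The request_latency and response_latency machine parameters (declared
+  // at the top of the file) set the enqueue latencies used by the actions
+  // below; this uses the parameterized-controller support added in this
+  // changeset.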
+ action(s_sendReadRequest, "s", desc="Send a DMA read request to memory") {
+ peek(dmaRequestQueue_in, SequencerMsg) {
+ enqueue(reqToDirectory_out, RequestMsg, latency=request_latency) {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceRequestType:DMA_READ;
+ out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.Len := in_msg.Len;
+ out_msg.Destination.add(map_Address_to_Directory(address));
+ out_msg.Requestor := machineID;
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+ }
+
+ action(s_sendWriteRequest, "\s", desc="Send a DMA write request to memory") {
+ peek(dmaRequestQueue_in, SequencerMsg) {
+ enqueue(reqToDirectory_out, RequestMsg, latency=request_latency) {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceRequestType:DMA_WRITE;
+ out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.Len := in_msg.Len;
+ out_msg.Destination.add(map_Address_to_Directory(address));
+ out_msg.Requestor := machineID;
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+ }
+
+  action(a_ackCallback, "a", desc="Notify the DMA sequencer that the write request completed") {
+ dma_sequencer.ackCallback();
+ }
+
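+  // Completion check: when no acks remain outstanding, self-trigger
+  // ALL_ACKS so the finishing transition runs as its own event.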
+ action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
+ if (TBEs[address].NumAcks == 0) {
+ enqueue(triggerQueue_out, TriggerMsg) {
+ out_msg.Address := address;
+ out_msg.Type := TriggerType:ALL_ACKS;
+ }
+ }
+ }
+
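+  // NumAcks counts outstanding acks: Inv_Acks decrement it, and the
+  // directory's final response appears to carry a negative count (note the
+  // "actually increases" comment in the BUSY_WR DMA_Ack transition),
+  // netting NumAcks back to zero once everything expected has arrived.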
+ action(u_updateAckCount, "u", desc="Update ack count") {
+ peek(dmaResponseQueue_in, ResponseMsg) {
+ TBEs[address].NumAcks := TBEs[address].NumAcks - in_msg.Acks;
+ }
+ }
+
+  action(u_sendExclusiveUnblockToDir, "\u", desc="Send exclusive unblock to directory") {
+ enqueue(respToDirectory_out, ResponseMsg, latency=response_latency) {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:UNBLOCK_EXCLUSIVE;
+ out_msg.Destination.add(map_Address_to_Directory(address));
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+
+ action(p_popRequestQueue, "p", desc="Pop request queue") {
+ dmaRequestQueue_in.dequeue();
+ }
+
+  action(p_popResponseQueue, "\p", desc="Pop response queue") {
+ dmaResponseQueue_in.dequeue();
+ }
+
+ action(p_popTriggerQueue, "pp", desc="Pop trigger queue") {
+ triggerQueue_in.dequeue();
+ }
+
+ action(t_updateTBEData, "t", desc="Update TBE Data") {
+ peek(dmaResponseQueue_in, ResponseMsg) {
+ TBEs[address].DataBlk := in_msg.DataBlk;
+ }
+ }
+
+ action(d_dataCallbackFromTBE, "/d", desc="data callback with data from TBE") {
+ dma_sequencer.dataCallback(TBEs[address].DataBlk);
+ }
+
+ action(v_allocateTBE, "v", desc="Allocate TBE entry") {
+ TBEs.allocate(address);
+ }
+
+ action(w_deallocateTBE, "w", desc="Deallocate TBE entry") {
+ TBEs.deallocate(address);
+ }
+
+  action(z_stall, "z", desc="DMA is busy; stall") {
+ // do nothing
+ }
+
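+  // Read path: READY -> BUSY_RD, collect data and invalidation acks,
+  // then back to READY.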
+ transition(READY, ReadRequest, BUSY_RD) {
+ s_sendReadRequest;
+ v_allocateTBE;
+ p_popRequestQueue;
+ }
+
+ transition(BUSY_RD, Inv_Ack) {
+ u_updateAckCount;
+ o_checkForCompletion;
+ p_popResponseQueue;
+ }
+
+ transition(BUSY_RD, Data) {
+ t_updateTBEData;
+ u_updateAckCount;
+ o_checkForCompletion;
+ p_popResponseQueue;
+ }
+
+ transition(BUSY_RD, All_Acks, READY) {
+ d_dataCallbackFromTBE;
+ u_sendExclusiveUnblockToDir;
+ w_deallocateTBE;
+ p_popTriggerQueue;
+ }
+
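+  // Write path: READY -> BUSY_WR, collect acks, then back to READY.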
+ transition(READY, WriteRequest, BUSY_WR) {
+ s_sendWriteRequest;
+ v_allocateTBE;
+ p_popRequestQueue;
+ }
+
+ transition(BUSY_WR, Inv_Ack) {
+ u_updateAckCount;
+ o_checkForCompletion;
+ p_popResponseQueue;
+ }
+
+ transition(BUSY_WR, DMA_Ack) {
+ u_updateAckCount; // actually increases
+ o_checkForCompletion;
+ p_popResponseQueue;
+ }
+
+ transition(BUSY_WR, All_Acks, READY) {
+ a_ackCallback;
+ u_sendExclusiveUnblockToDir;
+ w_deallocateTBE;
+ p_popTriggerQueue;
+ }
+}