Diffstat (limited to 'src/mem/protocol')
-rw-r--r--  src/mem/protocol/MI_example-cache.sm            |  15
-rw-r--r--  src/mem/protocol/MI_example-dir.sm              |  44
-rw-r--r--  src/mem/protocol/MI_example-dma.sm              |  24
-rw-r--r--  src/mem/protocol/MI_example-msg.sm              |   1
-rw-r--r--  src/mem/protocol/MOESI_CMP_directory-L1cache.sm | 137
-rw-r--r--  src/mem/protocol/MOESI_CMP_directory-L2cache.sm |  86
-rw-r--r--  src/mem/protocol/MOESI_CMP_directory-dir.sm     | 313
-rw-r--r--  src/mem/protocol/MOESI_CMP_directory-dma.sm     | 267
-rw-r--r--  src/mem/protocol/MOESI_CMP_directory-msg.sm     |  35
-rw-r--r--  src/mem/protocol/MOESI_CMP_directory.slicc      |   1
-rw-r--r--  src/mem/protocol/MOESI_CMP_directory_m-dir.sm   | 652
-rw-r--r--  src/mem/protocol/MOESI_CMP_directory_m.slicc    |   5
-rw-r--r--  src/mem/protocol/RubySlicc_ComponentMapping.sm  |   7
-rw-r--r--  src/mem/protocol/RubySlicc_Exports.sm           |  33
-rw-r--r--  src/mem/protocol/RubySlicc_Profiler.sm          |   2
-rw-r--r--  src/mem/protocol/RubySlicc_Types.sm             |   5
-rw-r--r--  src/mem/protocol/RubySlicc_Util.sm              |   2
17 files changed, 754 insertions, 875 deletions
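
The common thread through the patches below is that fixed protocol latencies stop being global LATENCY_* string macros and become typed parameters declared in the machine() header, which the actions then reference directly in their enqueue() calls. A minimal SLICC sketch of the pattern, condensed from the MI_example-cache.sm hunks that follow:

    machine(L1Cache, "MI Example L1 Cache")
    : int issue_latency
    {
      action(a_issueRequest, "a", desc="Issue a request") {
        // latency is now the machine parameter, not a quoted macro name
        enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
          out_msg.Address := address;
          out_msg.Type := CoherenceRequestType:GETX;
        }
      }
    }

Where a hop to the on-chip memory controller is intended to take a single cycle, the patches substitute the literal latency="1" rather than introducing another parameter.
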
diff --git a/src/mem/protocol/MI_example-cache.sm b/src/mem/protocol/MI_example-cache.sm
index 16a158f0d..915a0eb99 100644
--- a/src/mem/protocol/MI_example-cache.sm
+++ b/src/mem/protocol/MI_example-cache.sm
@@ -1,5 +1,8 @@
-machine(L1Cache, "MI Example L1 Cache"): LATENCY_CACHE_RESPONSE_LATENCY LATENCY_ISSUE_LATENCY {
+machine(L1Cache, "MI Example L1 Cache")
+: int cache_response_latency,
+ int issue_latency
+{
// NETWORK BUFFERS
MessageBuffer requestFromCache, network="To", virtual_network="0", ordered="true";
@@ -188,8 +191,8 @@ machine(L1Cache, "MI Example L1 Cache"): LATENCY_CACHE_RESPONSE_LATENCY LATENCY
// ACTIONS
action(a_issueRequest, "a", desc="Issue a request") {
- enqueue(requestNetwork_out, RequestMsg, latency="ISSUE_LATENCY") {
- out_msg.Address := address;
+ enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
+ out_msg.Address := address;
out_msg.Type := CoherenceRequestType:GETX;
out_msg.Requestor := machineID;
out_msg.Destination.add(map_Address_to_Directory(address));
@@ -198,7 +201,7 @@ machine(L1Cache, "MI Example L1 Cache"): LATENCY_CACHE_RESPONSE_LATENCY LATENCY
}
action(b_issuePUT, "b", desc="Issue a PUT request") {
- enqueue(requestNetwork_out, RequestMsg, latency="ISSUE_LATENCY") {
+ enqueue(requestNetwork_out, RequestMsg, latency=issue_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:PUTX;
out_msg.Requestor := machineID;
@@ -211,7 +214,7 @@ machine(L1Cache, "MI Example L1 Cache"): LATENCY_CACHE_RESPONSE_LATENCY LATENCY
action(e_sendData, "e", desc="Send data from cache to requestor") {
peek(forwardRequestNetwork_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, latency="CACHE_RESPONSE_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.Sender := machineID;
@@ -224,7 +227,7 @@ machine(L1Cache, "MI Example L1 Cache"): LATENCY_CACHE_RESPONSE_LATENCY LATENCY
action(ee_sendDataFromTBE, "\e", desc="Send data from TBE to requestor") {
peek(forwardRequestNetwork_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, latency="CACHE_RESPONSE_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency=cache_response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.Sender := machineID;
diff --git a/src/mem/protocol/MI_example-dir.sm b/src/mem/protocol/MI_example-dir.sm
index fa8903d47..c764634ec 100644
--- a/src/mem/protocol/MI_example-dir.sm
+++ b/src/mem/protocol/MI_example-dir.sm
@@ -1,5 +1,7 @@
-machine(Directory, "Directory protocol") : LATENCY_TO_MEM_CTRL_LATENCY LATENCY_DIRECTORY_LATENCY LATENCY_MEMORY_LATENCY {
+machine(Directory, "Directory protocol")
+: int directory_latency
+{
MessageBuffer forwardFromDir, network="To", virtual_network="2", ordered="false";
MessageBuffer responseFromDir, network="To", virtual_network="1", ordered="false";
@@ -65,9 +67,9 @@ machine(Directory, "Directory protocol") : LATENCY_TO_MEM_CTRL_LATENCY LATENCY_D
// TBE entries for DMA requests
structure(TBE, desc="TBE entries for outstanding DMA requests") {
+ Address PhysicalAddress, desc="physical address";
State TBEState, desc="Transient State";
DataBlock DataBlk, desc="Data to be written (DMA write only)";
- int Offset, desc="...";
int Len, desc="...";
}
@@ -180,7 +182,7 @@ machine(Directory, "Directory protocol") : LATENCY_TO_MEM_CTRL_LATENCY LATENCY_D
action(a_sendWriteBackAck, "a", desc="Send writeback ack to requestor") {
peek(requestQueue_in, RequestMsg) {
- enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
+ enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:WB_ACK;
out_msg.Requestor := in_msg.Requestor;
@@ -192,7 +194,7 @@ machine(Directory, "Directory protocol") : LATENCY_TO_MEM_CTRL_LATENCY LATENCY_D
action(l_sendWriteBackAck, "la", desc="Send writeback ack to requestor") {
peek(memQueue_in, MemoryMsg) {
- enqueue(forwardNetwork_out, RequestMsg, latency="TO_MEM_CTRL_LATENCY") {
+ enqueue(forwardNetwork_out, RequestMsg, latency="1") {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:WB_ACK;
out_msg.Requestor := in_msg.OriginalRequestorMachId;
@@ -204,7 +206,7 @@ machine(Directory, "Directory protocol") : LATENCY_TO_MEM_CTRL_LATENCY LATENCY_D
action(b_sendWriteBackNack, "b", desc="Send writeback nack to requestor") {
peek(requestQueue_in, RequestMsg) {
- enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
+ enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:WB_NACK;
out_msg.Requestor := in_msg.Requestor;
@@ -240,7 +242,7 @@ machine(Directory, "Directory protocol") : LATENCY_TO_MEM_CTRL_LATENCY LATENCY_D
action(d_sendData, "d", desc="Send data to requestor") {
peek(memQueue_in, MemoryMsg) {
- enqueue(responseNetwork_out, ResponseMsg, latency="TO_MEM_CTRL_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency="1") {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.Sender := machineID;
@@ -265,7 +267,7 @@ machine(Directory, "Directory protocol") : LATENCY_TO_MEM_CTRL_LATENCY LATENCY_D
action(dr_sendDMAData, "dr", desc="Send Data to DMA controller from directory") {
peek(memQueue_in, MemoryMsg) {
- enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="MEMORY_LATENCY") {
+ enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
out_msg.PhysicalAddress := address;
out_msg.LineAddress := address;
out_msg.Type := DMAResponseType:DATA;
@@ -280,7 +282,7 @@ machine(Directory, "Directory protocol") : LATENCY_TO_MEM_CTRL_LATENCY LATENCY_D
action(drp_sendDMAData, "drp", desc="Send Data to DMA controller from incoming PUTX") {
peek(requestQueue_in, RequestMsg) {
- enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="MEMORY_LATENCY") {
+ enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
out_msg.PhysicalAddress := address;
out_msg.LineAddress := address;
out_msg.Type := DMAResponseType:DATA;
@@ -292,7 +294,7 @@ machine(Directory, "Directory protocol") : LATENCY_TO_MEM_CTRL_LATENCY LATENCY_D
}
action(da_sendDMAAck, "da", desc="Send Ack to DMA controller") {
- enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="MEMORY_LATENCY") {
+ enqueue(dmaResponseNetwork_out, DMAResponseMsg, latency="1") {
out_msg.PhysicalAddress := address;
out_msg.LineAddress := address;
out_msg.Type := DMAResponseType:ACK;
@@ -318,7 +320,7 @@ machine(Directory, "Directory protocol") : LATENCY_TO_MEM_CTRL_LATENCY LATENCY_D
APPEND_TRANSITION_COMMENT(directory[in_msg.Address].Owner);
APPEND_TRANSITION_COMMENT("Req: ");
APPEND_TRANSITION_COMMENT(in_msg.Requestor);
- enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
+ enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
out_msg.Address := address;
out_msg.Type := in_msg.Type;
out_msg.Requestor := in_msg.Requestor;
@@ -330,7 +332,7 @@ machine(Directory, "Directory protocol") : LATENCY_TO_MEM_CTRL_LATENCY LATENCY_D
action(inv_sendCacheInvalidate, "inv", desc="Invalidate a cache block") {
peek(dmaRequestQueue_in, DMARequestMsg) {
- enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
+ enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:INV;
out_msg.Requestor := machineID;
@@ -359,14 +361,14 @@ machine(Directory, "Directory protocol") : LATENCY_TO_MEM_CTRL_LATENCY LATENCY_D
}
action(dwt_writeDMADataFromTBE, "dwt", desc="DMA Write data to memory from TBE") {
- directory[address].DataBlk.copyPartial(TBEs[address].DataBlk, TBEs[address].Offset, TBEs[address].Len);
+ directory[address].DataBlk.copyPartial(TBEs[address].DataBlk, addressOffset(TBEs[address].PhysicalAddress), TBEs[address].Len);
}
action(v_allocateTBE, "v", desc="Allocate TBE") {
peek(dmaRequestQueue_in, DMARequestMsg) {
TBEs.allocate(address);
TBEs[address].DataBlk := in_msg.DataBlk;
- TBEs[address].Offset := in_msg.Offset;
+ TBEs[address].PhysicalAddress := in_msg.PhysicalAddress;
TBEs[address].Len := in_msg.Len;
}
}
@@ -389,7 +391,7 @@ machine(Directory, "Directory protocol") : LATENCY_TO_MEM_CTRL_LATENCY LATENCY_D
action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
peek(requestQueue_in, RequestMsg) {
- enqueue(memQueue_out, MemoryMsg, latency="TO_MEM_CTRL_LATENCY") {
+ enqueue(memQueue_out, MemoryMsg, latency="1") {
out_msg.Address := address;
out_msg.Type := MemoryRequestType:MEMORY_READ;
out_msg.Sender := machineID;
@@ -403,7 +405,7 @@ machine(Directory, "Directory protocol") : LATENCY_TO_MEM_CTRL_LATENCY LATENCY_D
action(qf_queueMemoryFetchRequestDMA, "qfd", desc="Queue off-chip fetch request") {
peek(dmaRequestQueue_in, DMARequestMsg) {
- enqueue(memQueue_out, MemoryMsg, latency="TO_MEM_CTRL_LATENCY") {
+ enqueue(memQueue_out, MemoryMsg, latency="1") {
out_msg.Address := address;
out_msg.Type := MemoryRequestType:MEMORY_READ;
out_msg.Sender := machineID;
@@ -416,7 +418,7 @@ machine(Directory, "Directory protocol") : LATENCY_TO_MEM_CTRL_LATENCY LATENCY_D
}
// action(qw_queueMemoryWBRequest, "qw", desc="Queue off-chip writeback request") {
// peek(dmaRequestQueue_in, DMARequestMsg) {
-// enqueue(memQueue_out, MemoryMsg, latency="TO_MEM_CTRL_LATENCY") {
+// enqueue(memQueue_out, MemoryMsg, latency="1") {
// out_msg.Address := address;
// out_msg.Type := MemoryRequestType:MEMORY_WB;
// out_msg.OriginalRequestorMachId := machineID;
@@ -431,12 +433,12 @@ machine(Directory, "Directory protocol") : LATENCY_TO_MEM_CTRL_LATENCY LATENCY_D
action(qw_queueMemoryWBRequest_partial, "qwp", desc="Queue off-chip writeback request") {
peek(dmaRequestQueue_in, DMARequestMsg) {
- enqueue(memQueue_out, MemoryMsg, latency="TO_MEM_CTRL_LATENCY") {
+ enqueue(memQueue_out, MemoryMsg, latency="1") {
out_msg.Address := address;
out_msg.Type := MemoryRequestType:MEMORY_WB;
//out_msg.OriginalRequestorMachId := machineID;
//out_msg.DataBlk := in_msg.DataBlk;
- out_msg.DataBlk.copyPartial(in_msg.DataBlk, in_msg.Offset, in_msg.Len);
+ out_msg.DataBlk.copyPartial(in_msg.DataBlk, addressOffset(in_msg.PhysicalAddress), in_msg.Len);
out_msg.MessageSize := in_msg.MessageSize;
//out_msg.Prefetch := in_msg.Prefetch;
@@ -447,12 +449,12 @@ machine(Directory, "Directory protocol") : LATENCY_TO_MEM_CTRL_LATENCY LATENCY_D
action(qw_queueMemoryWBRequest_partialTBE, "qwt", desc="Queue off-chip writeback request") {
peek(requestQueue_in, RequestMsg) {
- enqueue(memQueue_out, MemoryMsg, latency="TO_MEM_CTRL_LATENCY") {
+ enqueue(memQueue_out, MemoryMsg, latency="1") {
out_msg.Address := address;
out_msg.Type := MemoryRequestType:MEMORY_WB;
out_msg.OriginalRequestorMachId := in_msg.Requestor;
//out_msg.DataBlk := in_msg.DataBlk;
- out_msg.DataBlk.copyPartial(TBEs[address].DataBlk, TBEs[address].Offset, TBEs[address].Len);
+ out_msg.DataBlk.copyPartial(TBEs[address].DataBlk, addressOffset(TBEs[address].PhysicalAddress), TBEs[address].Len);
out_msg.MessageSize := in_msg.MessageSize;
//out_msg.Prefetch := in_msg.Prefetch;
@@ -465,7 +467,7 @@ machine(Directory, "Directory protocol") : LATENCY_TO_MEM_CTRL_LATENCY LATENCY_D
action(l_queueMemoryWBRequest, "lq", desc="Write PUTX data to memory") {
peek(requestQueue_in, RequestMsg) {
- enqueue(memQueue_out, MemoryMsg, latency="TO_MEM_CTRL_LATENCY") {
+ enqueue(memQueue_out, MemoryMsg, latency="1") {
out_msg.Address := address;
out_msg.Type := MemoryRequestType:MEMORY_WB;
out_msg.OriginalRequestorMachId := in_msg.Requestor;
diff --git a/src/mem/protocol/MI_example-dma.sm b/src/mem/protocol/MI_example-dma.sm
index d5de18552..e883288df 100644
--- a/src/mem/protocol/MI_example-dma.sm
+++ b/src/mem/protocol/MI_example-dma.sm
@@ -1,5 +1,7 @@
-machine(DMA, "DMA Controller") {
+machine(DMA, "DMA Controller")
+: int request_latency
+{
MessageBuffer responseFromDir, network="From", virtual_network="4", ordered="true", no_vector="true";
MessageBuffer reqToDirectory, network="To", virtual_network="5", ordered="false", no_vector="true";
@@ -35,12 +37,12 @@ machine(DMA, "DMA Controller") {
out_port(reqToDirectory_out, DMARequestMsg, reqToDirectory, desc="...");
- in_port(dmaRequestQueue_in, DMARequestMsg, mandatoryQueue, desc="...") {
+ in_port(dmaRequestQueue_in, SequencerMsg, mandatoryQueue, desc="...") {
if (dmaRequestQueue_in.isReady()) {
- peek(dmaRequestQueue_in, DMARequestMsg) {
- if (in_msg.Type == DMARequestType:READ ) {
+ peek(dmaRequestQueue_in, SequencerMsg) {
+ if (in_msg.Type == SequencerRequestType:LD ) {
trigger(Event:ReadRequest, in_msg.LineAddress);
- } else if (in_msg.Type == DMARequestType:WRITE) {
+ } else if (in_msg.Type == SequencerRequestType:ST) {
trigger(Event:WriteRequest, in_msg.LineAddress);
} else {
error("Invalid request type");
@@ -64,9 +66,9 @@ machine(DMA, "DMA Controller") {
}
action(s_sendReadRequest, "s", desc="Send a DMA read request to memory") {
- peek(dmaRequestQueue_in, DMARequestMsg) {
- enqueue(reqToDirectory_out, DMARequestMsg) {
- out_msg.PhysicalAddress := address;
+ peek(dmaRequestQueue_in, SequencerMsg) {
+ enqueue(reqToDirectory_out, DMARequestMsg, latency=request_latency) {
+ out_msg.PhysicalAddress := in_msg.PhysicalAddress;
out_msg.LineAddress := in_msg.LineAddress;
out_msg.Type := DMARequestType:READ;
out_msg.DataBlk := in_msg.DataBlk;
@@ -78,9 +80,9 @@ machine(DMA, "DMA Controller") {
}
action(s_sendWriteRequest, "\s", desc="Send a DMA write request to memory") {
- peek(dmaRequestQueue_in, DMARequestMsg) {
- enqueue(reqToDirectory_out, DMARequestMsg) {
- out_msg.PhysicalAddress := address;
+ peek(dmaRequestQueue_in, SequencerMsg) {
+ enqueue(reqToDirectory_out, DMARequestMsg, latency=request_latency) {
+ out_msg.PhysicalAddress := in_msg.PhysicalAddress;
out_msg.LineAddress := in_msg.LineAddress;
out_msg.Type := DMARequestType:WRITE;
out_msg.DataBlk := in_msg.DataBlk;
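
Condensing the MI_example-dma.sm hunks above: the DMA controller now drains SequencerMsg entries from its mandatory queue and reissues them to the directory as DMARequestMsg, delayed by the new request_latency parameter. A sketch of the read path as it stands after this patch (Destination, Len, and MessageSize are set as before and omitted here):

    in_port(dmaRequestQueue_in, SequencerMsg, mandatoryQueue, desc="...") {
      if (dmaRequestQueue_in.isReady()) {
        peek(dmaRequestQueue_in, SequencerMsg) {
          if (in_msg.Type == SequencerRequestType:LD) {
            trigger(Event:ReadRequest, in_msg.LineAddress);
          }
        }
      }
    }

    action(s_sendReadRequest, "s", desc="Send a DMA read request to memory") {
      peek(dmaRequestQueue_in, SequencerMsg) {
        enqueue(reqToDirectory_out, DMARequestMsg, latency=request_latency) {
          // carry the full physical address; the directory is indexed by line address
          out_msg.PhysicalAddress := in_msg.PhysicalAddress;
          out_msg.LineAddress := in_msg.LineAddress;
          out_msg.Type := DMARequestType:READ;
          out_msg.DataBlk := in_msg.DataBlk;
        }
      }
    }
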
diff --git a/src/mem/protocol/MI_example-msg.sm b/src/mem/protocol/MI_example-msg.sm
index 8c0afed2e..d4d557200 100644
--- a/src/mem/protocol/MI_example-msg.sm
+++ b/src/mem/protocol/MI_example-msg.sm
@@ -107,7 +107,6 @@ structure(DMARequestMsg, desc="...", interface="NetworkMessage") {
Address LineAddress, desc="Line address for this request";
NetDest Destination, desc="Destination";
DataBlock DataBlk, desc="DataBlk attached to this request";
- int Offset, desc="The offset into the datablock";
int Len, desc="The length of the request";
MessageSizeType MessageSize, desc="size category of the message";
}
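
Dropping Offset from DMARequestMsg is possible because the byte offset can be recomputed from the physical address that now travels in the message (and in the directory's TBE). The replacement idiom used throughout MI_example-dir.sm above, where addressOffset() is assumed to be a helper provided elsewhere in this change set (RubySlicc_Util.sm is among the touched files):

    // before: out_msg.DataBlk.copyPartial(in_msg.DataBlk, in_msg.Offset, in_msg.Len);
    out_msg.DataBlk.copyPartial(in_msg.DataBlk,
                                addressOffset(in_msg.PhysicalAddress),
                                in_msg.Len);
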
diff --git a/src/mem/protocol/MOESI_CMP_directory-L1cache.sm b/src/mem/protocol/MOESI_CMP_directory-L1cache.sm
index a65ade10f..8fe0d3baf 100644
--- a/src/mem/protocol/MOESI_CMP_directory-L1cache.sm
+++ b/src/mem/protocol/MOESI_CMP_directory-L1cache.sm
@@ -32,7 +32,11 @@
*
*/
-machine(L1Cache, "Directory protocol") {
+machine(L1Cache, "Directory protocol")
+ : int request_latency,
+ int l2_select_low_bit,
+ int l2_select_high_bit
+{
// NODE L1 CACHE
// From this node's L1 cache TO the network
@@ -125,7 +129,7 @@ machine(L1Cache, "Directory protocol") {
external_type(CacheMemory) {
bool cacheAvail(Address);
Address cacheProbe(Address);
- void allocate(Address);
+ void allocate(Address, Entry);
void deallocate(Address);
Entry lookup(Address);
void changePermission(Address, AccessPermission);
@@ -141,11 +145,11 @@ machine(L1Cache, "Directory protocol") {
MessageBuffer mandatoryQueue, ordered="false", abstract_chip_ptr="true";
- Sequencer sequencer, abstract_chip_ptr="true", constructor_hack="i";
+ Sequencer sequencer, factory='RubySystem::getSequencer(m_cfg["sequencer"])';
TBETable TBEs, template_hack="<L1Cache_TBE>";
- CacheMemory L1IcacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L1_CACHE_NUM_SETS_BITS,L1_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_L1I"', abstract_chip_ptr="true";
- CacheMemory L1DcacheMemory, template_hack="<L1Cache_Entry>", constructor_hack='L1_CACHE_NUM_SETS_BITS,L1_CACHE_ASSOC,MachineType_L1Cache,int_to_string(i)+"_L1D"', abstract_chip_ptr="true";
+ CacheMemory L1IcacheMemory, factory='RubySystem::getCache(m_cfg["icache"])';
+ CacheMemory L1DcacheMemory, factory='RubySystem::getCache(m_cfg["dcache"])';
TimerTable useTimerTable;
Entry getCacheEntry(Address addr), return_by_ref="yes" {
@@ -305,7 +309,7 @@ machine(L1Cache, "Directory protocol") {
assert(in_msg.Destination.isElement(machineID));
DEBUG_EXPR("MRM_DEBUG: L1 received");
DEBUG_EXPR(in_msg.Type);
- if (in_msg.Type == CoherenceRequestType:GETX) {
+if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestType:DMA_READ || in_msg.Type == CoherenceRequestType:DMA_WRITE) {
if (in_msg.Requestor == machineID && in_msg.RequestorMachine == MachineType:L1Cache) {
trigger(Event:Own_GETX, in_msg.Address);
} else {
@@ -357,40 +361,40 @@ machine(L1Cache, "Directory protocol") {
// ** INSTRUCTION ACCESS ***
// Check to see if it is in the OTHER L1
- if (L1DcacheMemory.isTagPresent(in_msg.Address)) {
+ if (L1DcacheMemory.isTagPresent(in_msg.LineAddress)) {
// The block is in the wrong L1, put the request on the queue to the shared L2
- trigger(Event:L1_Replacement, in_msg.Address);
+ trigger(Event:L1_Replacement, in_msg.LineAddress);
}
- if (L1IcacheMemory.isTagPresent(in_msg.Address)) {
+ if (L1IcacheMemory.isTagPresent(in_msg.LineAddress)) {
// The tag matches for the L1, so the L1 asks the L2 for it.
- trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
+ trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress);
} else {
- if (L1IcacheMemory.cacheAvail(in_msg.Address)) {
+ if (L1IcacheMemory.cacheAvail(in_msg.LineAddress)) {
// L1 doesn't have the line, but we have space for it in the L1 so let's see if the L2 has it
- trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
+ trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress);
} else {
// No room in the L1, so we need to make room in the L1
- trigger(Event:L1_Replacement, L1IcacheMemory.cacheProbe(in_msg.Address));
+ trigger(Event:L1_Replacement, L1IcacheMemory.cacheProbe(in_msg.LineAddress));
}
}
} else {
// *** DATA ACCESS ***
// Check to see if it is in the OTHER L1
- if (L1IcacheMemory.isTagPresent(in_msg.Address)) {
+ if (L1IcacheMemory.isTagPresent(in_msg.LineAddress)) {
// The block is in the wrong L1, put the request on the queue to the shared L2
- trigger(Event:L1_Replacement, in_msg.Address);
+ trigger(Event:L1_Replacement, in_msg.LineAddress);
}
- if (L1DcacheMemory.isTagPresent(in_msg.Address)) {
+ if (L1DcacheMemory.isTagPresent(in_msg.LineAddress)) {
// The tag matches for the L1, so the L1 asks the L2 for it
- trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
+ trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress);
} else {
- if (L1DcacheMemory.cacheAvail(in_msg.Address)) {
+ if (L1DcacheMemory.cacheAvail(in_msg.LineAddress)) {
// L1 doesn't have the line, but we have space for it in the L1, so let's see if the L2 has it
- trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.Address);
+ trigger(mandatory_request_type_to_event(in_msg.Type), in_msg.LineAddress);
} else {
// No room in the L1, so we need to make room in the L1
- trigger(Event:L1_Replacement, L1DcacheMemory.cacheProbe(in_msg.Address));
+ trigger(Event:L1_Replacement, L1DcacheMemory.cacheProbe(in_msg.LineAddress));
}
}
}
@@ -403,11 +407,12 @@ machine(L1Cache, "Directory protocol") {
action(a_issueGETS, "a", desc="Issue GETS") {
peek(mandatoryQueue_in, CacheMsg) {
- enqueue(requestNetwork_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
+ enqueue(requestNetwork_out, RequestMsg, latency= request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:GETS;
out_msg.Requestor := machineID;
- out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+ out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
+ l2_select_low_bit, l2_select_high_bit));
out_msg.MessageSize := MessageSizeType:Request_Control;
out_msg.AccessMode := in_msg.AccessMode;
out_msg.Prefetch := in_msg.Prefetch;
@@ -417,11 +422,12 @@ machine(L1Cache, "Directory protocol") {
action(b_issueGETX, "b", desc="Issue GETX") {
peek(mandatoryQueue_in, CacheMsg) {
- enqueue(requestNetwork_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
+ enqueue(requestNetwork_out, RequestMsg, latency=request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:GETX;
out_msg.Requestor := machineID;
- out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+ out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
+ l2_select_low_bit, l2_select_high_bit));
out_msg.MessageSize := MessageSizeType:Request_Control;
out_msg.AccessMode := in_msg.AccessMode;
out_msg.Prefetch := in_msg.Prefetch;
@@ -430,34 +436,37 @@ machine(L1Cache, "Directory protocol") {
}
action(d_issuePUTX, "d", desc="Issue PUTX") {
- // enqueue(writebackNetwork_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
- enqueue(requestNetwork_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
+ // enqueue(writebackNetwork_out, RequestMsg, latency=request_latency) {
+ enqueue(requestNetwork_out, RequestMsg, latency=request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:PUTX;
out_msg.Requestor := machineID;
- out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+ out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
+ l2_select_low_bit, l2_select_high_bit));
out_msg.MessageSize := MessageSizeType:Writeback_Control;
}
}
action(dd_issuePUTO, "\d", desc="Issue PUTO") {
- // enqueue(writebackNetwork_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
- enqueue(requestNetwork_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
+ // enqueue(writebackNetwork_out, RequestMsg, latency=request_latency) {
+ enqueue(requestNetwork_out, RequestMsg, latency=request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:PUTO;
out_msg.Requestor := machineID;
- out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+ out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
+ l2_select_low_bit, l2_select_high_bit));
out_msg.MessageSize := MessageSizeType:Writeback_Control;
}
}
action(dd_issuePUTS, "\ds", desc="Issue PUTS") {
- // enqueue(writebackNetwork_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
- enqueue(requestNetwork_out, RequestMsg, latency="L1_REQUEST_LATENCY") {
+ // enqueue(writebackNetwork_out, RequestMsg, latency=request_latency) {
+ enqueue(requestNetwork_out, RequestMsg, latency=request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:PUTS;
out_msg.Requestor := machineID;
- out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+ out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
+ l2_select_low_bit, l2_select_high_bit));
out_msg.MessageSize := MessageSizeType:Writeback_Control;
}
}
@@ -465,11 +474,12 @@ machine(L1Cache, "Directory protocol") {
action(e_sendData, "e", desc="Send data from cache to requestor") {
peek(requestNetwork_in, RequestMsg) {
if (in_msg.RequestorMachine == MachineType:L2Cache) {
- enqueue(responseNetwork_out, ResponseMsg, latency="L1_REQUEST_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.Sender := machineID;
- out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(in_msg.Address, machineID));
+ out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
+ l2_select_low_bit, l2_select_high_bit));
out_msg.DataBlk := getCacheEntry(address).DataBlk;
// out_msg.Dirty := getCacheEntry(address).Dirty;
out_msg.Dirty := false;
@@ -480,7 +490,7 @@ machine(L1Cache, "Directory protocol") {
DEBUG_EXPR(in_msg.Address);
}
else {
- enqueue(responseNetwork_out, ResponseMsg, latency="L1_REQUEST_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.Sender := machineID;
@@ -497,11 +507,12 @@ machine(L1Cache, "Directory protocol") {
}
action(e_sendDataToL2, "ee", desc="Send data from cache to requestor") {
- enqueue(responseNetwork_out, ResponseMsg, latency="L1_REQUEST_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.Sender := machineID;
- out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+ out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
+ l2_select_low_bit, l2_select_high_bit));
out_msg.DataBlk := getCacheEntry(address).DataBlk;
out_msg.Dirty := getCacheEntry(address).Dirty;
out_msg.Acks := 0; // irrelevant
@@ -513,12 +524,13 @@ machine(L1Cache, "Directory protocol") {
action(ee_sendDataExclusive, "\e", desc="Send data from cache to requestor, don't keep a shared copy") {
peek(requestNetwork_in, RequestMsg) {
if (in_msg.RequestorMachine == MachineType:L2Cache) {
- enqueue(responseNetwork_out, ResponseMsg, latency="L1_REQUEST_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
out_msg.Sender := machineID;
out_msg.SenderMachine := MachineType:L1Cache;
- out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(in_msg.Address, machineID));
+ out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
+ l2_select_low_bit, l2_select_high_bit));
out_msg.DataBlk := getCacheEntry(address).DataBlk;
out_msg.Dirty := getCacheEntry(address).Dirty;
out_msg.Acks := in_msg.Acks;
@@ -527,7 +539,7 @@ machine(L1Cache, "Directory protocol") {
DEBUG_EXPR("Sending exclusive data to L2");
}
else {
- enqueue(responseNetwork_out, ResponseMsg, latency="L1_REQUEST_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
out_msg.Sender := machineID;
@@ -546,7 +558,7 @@ machine(L1Cache, "Directory protocol") {
action(f_sendAck, "f", desc="Send ack from cache to requestor") {
peek(requestNetwork_in, RequestMsg) {
if (in_msg.RequestorMachine == MachineType:L1Cache) {
- enqueue(responseNetwork_out, ResponseMsg, latency="L1_REQUEST_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:ACK;
out_msg.Sender := machineID;
@@ -557,12 +569,13 @@ machine(L1Cache, "Directory protocol") {
}
}
else {
- enqueue(responseNetwork_out, ResponseMsg, latency="L1_REQUEST_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:ACK;
out_msg.Sender := machineID;
out_msg.SenderMachine := MachineType:L1Cache;
- out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(in_msg.Address, machineID));
+ out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
+ l2_select_low_bit, l2_select_high_bit));
out_msg.Acks := 0 - 1; // -1
out_msg.MessageSize := MessageSizeType:Response_Control;
}
@@ -571,21 +584,23 @@ machine(L1Cache, "Directory protocol") {
}
action(g_sendUnblock, "g", desc="Send unblock to memory") {
- enqueue(responseNetwork_out, ResponseMsg, latency="L1_REQUEST_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:UNBLOCK;
out_msg.Sender := machineID;
- out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+ out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
+ l2_select_low_bit, l2_select_high_bit));
out_msg.MessageSize := MessageSizeType:Unblock_Control;
}
}
action(gg_sendUnblockExclusive, "\g", desc="Send unblock exclusive to memory") {
- enqueue(responseNetwork_out, ResponseMsg, latency="L1_REQUEST_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:UNBLOCK_EXCLUSIVE;
out_msg.Sender := machineID;
- out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+ out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
+ l2_select_low_bit, l2_select_high_bit));
out_msg.MessageSize := MessageSizeType:Unblock_Control;
}
}
@@ -627,7 +642,6 @@ machine(L1Cache, "Directory protocol") {
action(m_decrementNumberOfMessages, "m", desc="Decrement the number of messages for which we're waiting") {
peek(responseToL1Cache_in, ResponseMsg) {
DEBUG_EXPR("MRM_DEBUG: L1 decrementNumberOfMessages");
- DEBUG_EXPR(id);
DEBUG_EXPR(in_msg.Acks);
TBEs[address].NumPendingMsgs := TBEs[address].NumPendingMsgs - in_msg.Acks;
}
@@ -660,7 +674,7 @@ machine(L1Cache, "Directory protocol") {
action(q_sendDataFromTBEToCache, "q", desc="Send data from TBE to cache") {
peek(requestNetwork_in, RequestMsg) {
if (in_msg.RequestorMachine == MachineType:L1Cache) {
- enqueue(responseNetwork_out, ResponseMsg, latency="L1_REQUEST_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.Sender := machineID;
@@ -673,11 +687,12 @@ machine(L1Cache, "Directory protocol") {
}
}
else {
- enqueue(responseNetwork_out, ResponseMsg, latency="L1_REQUEST_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.Sender := machineID;
- out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address,machineID));
+ out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
+ l2_select_low_bit, l2_select_high_bit));
out_msg.DataBlk := TBEs[address].DataBlk;
// out_msg.Dirty := TBEs[address].Dirty;
out_msg.Dirty := false;
@@ -691,7 +706,7 @@ machine(L1Cache, "Directory protocol") {
action(q_sendExclusiveDataFromTBEToCache, "qq", desc="Send data from TBE to cache") {
peek(requestNetwork_in, RequestMsg) {
if (in_msg.RequestorMachine == MachineType:L1Cache) {
- enqueue(responseNetwork_out, ResponseMsg, latency="L1_REQUEST_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
out_msg.Sender := machineID;
@@ -703,11 +718,12 @@ machine(L1Cache, "Directory protocol") {
}
}
else {
- enqueue(responseNetwork_out, ResponseMsg, latency="L1_REQUEST_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
out_msg.Sender := machineID;
- out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address,machineID));
+ out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
+ l2_select_low_bit, l2_select_high_bit));
out_msg.DataBlk := TBEs[address].DataBlk;
out_msg.Dirty := TBEs[address].Dirty;
out_msg.Acks := in_msg.Acks;
@@ -720,11 +736,12 @@ machine(L1Cache, "Directory protocol") {
// L2 will usually request data for a writeback
action(qq_sendWBDataFromTBEToL2, "\q", desc="Send data from TBE to L2") {
- enqueue(responseNetwork_out, ResponseMsg, latency="L1_REQUEST_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency=request_latency) {
out_msg.Address := address;
out_msg.Sender := machineID;
out_msg.SenderMachine := MachineType:L1Cache;
- out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
+ out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
+ l2_select_low_bit, l2_select_high_bit));
out_msg.Dirty := TBEs[address].Dirty;
if (TBEs[address].Dirty) {
out_msg.Type := CoherenceResponseType:WRITEBACK_DIRTY_DATA;
@@ -770,13 +787,13 @@ machine(L1Cache, "Directory protocol") {
action(ii_allocateL1DCacheBlock, "\i", desc="Set L1 D-cache tag equal to tag of block B.") {
if (L1DcacheMemory.isTagPresent(address) == false) {
- L1DcacheMemory.allocate(address);
+ L1DcacheMemory.allocate(address, new Entry);
}
}
action(jj_allocateL1ICacheBlock, "\j", desc="Set L1 I-cache tag equal to tag of block B.") {
if (L1IcacheMemory.isTagPresent(address) == false) {
- L1IcacheMemory.allocate(address);
+ L1IcacheMemory.allocate(address, new Entry);
}
}
@@ -784,7 +801,7 @@ machine(L1Cache, "Directory protocol") {
action(uu_profileMiss, "\u", desc="Profile the demand miss") {
peek(mandatoryQueue_in, CacheMsg) {
- profile_miss(in_msg, id);
+ // profile_miss(in_msg);
}
}
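
Besides the latency parameters, the substitution repeated throughout this L1 controller is the L2 bank selection: rather than deriving the target bank from the requesting L1's machine ID, the L1 now interleaves on address bits bounded by its new l2_select_low_bit and l2_select_high_bit parameters. The one-line change, as it appears in each action above (mapAddressToRange() comes from RubySlicc_ComponentMapping.sm, which this diff also touches):

    // before: out_msg.Destination.add(map_L1CacheMachId_to_L2Cache(address, machineID));
    out_msg.Destination.add(mapAddressToRange(address, MachineType:L2Cache,
                                              l2_select_low_bit, l2_select_high_bit));
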
diff --git a/src/mem/protocol/MOESI_CMP_directory-L2cache.sm b/src/mem/protocol/MOESI_CMP_directory-L2cache.sm
index fa01f925c..68d3a2cd3 100644
--- a/src/mem/protocol/MOESI_CMP_directory-L2cache.sm
+++ b/src/mem/protocol/MOESI_CMP_directory-L2cache.sm
@@ -32,7 +32,10 @@
*
*/
-machine(L2Cache, "Token protocol") {
+machine(L2Cache, "Token protocol")
+: int response_latency,
+ int request_latency
+{
// L2 BANK QUEUES
// From local bank of L2 cache TO the network
@@ -208,7 +211,7 @@ machine(L2Cache, "Token protocol") {
external_type(CacheMemory) {
bool cacheAvail(Address);
Address cacheProbe(Address);
- void allocate(Address);
+ void allocate(Address, Entry);
void deallocate(Address);
Entry lookup(Address);
void changePermission(Address, AccessPermission);
@@ -225,13 +228,15 @@ machine(L2Cache, "Token protocol") {
TBETable L2_TBEs, template_hack="<L2Cache_TBE>";
- CacheMemory L2cacheMemory, template_hack="<L2Cache_Entry>", constructor_hack='L2_CACHE_NUM_SETS_BITS,L2_CACHE_ASSOC,MachineType_L2Cache,int_to_string(i)+"_L2"';
+ CacheMemory L2cacheMemory, factory='RubySystem::getCache(m_cfg["cache"])';
PerfectCacheMemory localDirectory, template_hack="<L2Cache_DirEntry>";
Entry getL2CacheEntry(Address addr), return_by_ref="yes" {
if (L2cacheMemory.isTagPresent(addr)) {
return L2cacheMemory[addr];
+ } else {
+ return L2cacheMemory[addr];
}
}
@@ -579,7 +584,7 @@ machine(L2Cache, "Token protocol") {
in_port(requestNetwork_in, RequestMsg, GlobalRequestToL2Cache) {
if (requestNetwork_in.isReady()) {
peek(requestNetwork_in, RequestMsg) {
- if (in_msg.Type == CoherenceRequestType:GETX) {
+ if (in_msg.Type == CoherenceRequestType:GETX || in_msg.Type == CoherenceRequestType:DMA_READ || in_msg.Type == CoherenceRequestType:DMA_WRITE) {
if (in_msg.Requestor == machineID) {
trigger(Event:Own_GETX, in_msg.Address);
} else {
@@ -675,7 +680,7 @@ machine(L2Cache, "Token protocol") {
action(a_issueGETS, "a", desc="issue local request globally") {
peek(L1requestNetwork_in, RequestMsg) {
- enqueue(globalRequestNetwork_out, RequestMsg, latency="L2_REQUEST_LATENCY") {
+ enqueue(globalRequestNetwork_out, RequestMsg, latency=request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:GETS;
out_msg.RequestorMachine := MachineType:L2Cache;
@@ -688,7 +693,7 @@ machine(L2Cache, "Token protocol") {
action(a_issueGETX, "\a", desc="issue local request globally") {
peek(L1requestNetwork_in, RequestMsg) {
- enqueue(globalRequestNetwork_out, RequestMsg, latency="L2_REQUEST_LATENCY") {
+ enqueue(globalRequestNetwork_out, RequestMsg, latency=request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:GETX;
out_msg.RequestorMachine := MachineType:L2Cache;
@@ -700,7 +705,7 @@ machine(L2Cache, "Token protocol") {
}
action(b_issuePUTX, "b", desc="Issue PUTX") {
- enqueue(globalRequestNetwork_out, RequestMsg, latency="L2_REQUEST_LATENCY") {
+ enqueue(globalRequestNetwork_out, RequestMsg, latency=request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:PUTX;
out_msg.RequestorMachine := MachineType:L2Cache;
@@ -711,7 +716,7 @@ machine(L2Cache, "Token protocol") {
}
action(b_issuePUTO, "\b", desc="Issue PUTO") {
- enqueue(globalRequestNetwork_out, RequestMsg, latency="L2_REQUEST_LATENCY") {
+ enqueue(globalRequestNetwork_out, RequestMsg, latency=request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:PUTO;
out_msg.Requestor := machineID;
@@ -723,7 +728,7 @@ machine(L2Cache, "Token protocol") {
/* PUTO, but local sharers exist */
action(b_issuePUTO_ls, "\bb", desc="Issue PUTO") {
- enqueue(globalRequestNetwork_out, RequestMsg, latency="L2_REQUEST_LATENCY") {
+ enqueue(globalRequestNetwork_out, RequestMsg, latency=request_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:PUTO_SHARERS;
out_msg.Requestor := machineID;
@@ -734,7 +739,7 @@ machine(L2Cache, "Token protocol") {
}
action(c_sendDataFromTBEToL1GETS, "c", desc="Send data from TBE to L1 requestors in TBE") {
- enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency=response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.Sender := machineID;
@@ -750,7 +755,7 @@ machine(L2Cache, "Token protocol") {
}
action(c_sendDataFromTBEToL1GETX, "\c", desc="Send data from TBE to L1 requestors in TBE") {
- enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency=response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
out_msg.Sender := machineID;
@@ -766,7 +771,7 @@ machine(L2Cache, "Token protocol") {
}
action(c_sendExclusiveDataFromTBEToL1GETS, "\cc", desc="Send data from TBE to L1 requestors in TBE") {
- enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency=response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
out_msg.Sender := machineID;
@@ -779,7 +784,7 @@ machine(L2Cache, "Token protocol") {
}
action(c_sendDataFromTBEToFwdGETX, "cc", desc="Send data from TBE to external GETX") {
- enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency=response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
out_msg.Sender := machineID;
@@ -793,7 +798,7 @@ machine(L2Cache, "Token protocol") {
}
action(c_sendDataFromTBEToFwdGETS, "ccc", desc="Send data from TBE to external GETX") {
- enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency=response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.Sender := machineID;
@@ -810,7 +815,7 @@ machine(L2Cache, "Token protocol") {
}
action(c_sendExclusiveDataFromTBEToFwdGETS, "\ccc", desc="Send data from TBE to external GETX") {
- enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency=response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
out_msg.Sender := machineID;
@@ -827,7 +832,7 @@ machine(L2Cache, "Token protocol") {
action(d_sendDataToL1GETS, "d", desc="Send data directly to L1 requestor") {
peek(L1requestNetwork_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency=response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.Sender := machineID;
@@ -845,7 +850,7 @@ machine(L2Cache, "Token protocol") {
action(d_sendDataToL1GETX, "\d", desc="Send data and a token from TBE to L1 requestor") {
peek(L1requestNetwork_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency=response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
out_msg.Sender := machineID;
@@ -863,7 +868,7 @@ machine(L2Cache, "Token protocol") {
action(dd_sendDataToFwdGETX, "dd", desc="send data") {
peek(requestNetwork_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency=response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
out_msg.Sender := machineID;
@@ -882,7 +887,7 @@ machine(L2Cache, "Token protocol") {
action(dd_sendDataToFwdGETS, "\dd", desc="send data") {
peek(requestNetwork_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency=response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA;
out_msg.Sender := machineID;
@@ -900,7 +905,7 @@ machine(L2Cache, "Token protocol") {
action(dd_sendExclusiveDataToFwdGETS, "\d\d", desc="send data") {
peek(requestNetwork_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency=response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
out_msg.Sender := machineID;
@@ -913,7 +918,7 @@ machine(L2Cache, "Token protocol") {
}
action(e_sendAck, "e", desc="Send ack with the tokens we've collected thus far.") {
- enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency=response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:ACK;
out_msg.Sender := machineID;
@@ -927,7 +932,7 @@ machine(L2Cache, "Token protocol") {
action(e_sendAckToL1Requestor, "\e", desc="Send ack with the tokens we've collected thus far.") {
peek(L1requestNetwork_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency=response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:ACK;
out_msg.Sender := machineID;
@@ -940,7 +945,7 @@ machine(L2Cache, "Token protocol") {
}
action(e_sendAckToL1RequestorFromTBE, "eee", desc="Send ack with the tokens we've collected thus far.") {
- enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency=response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:ACK;
out_msg.Sender := machineID;
@@ -955,14 +960,13 @@ machine(L2Cache, "Token protocol") {
L2_TBEs[address].NumIntPendingAcks := countLocalSharers(address);
DEBUG_EXPR(address);
DEBUG_EXPR(getLocalSharers(address));
- DEBUG_EXPR(id);
DEBUG_EXPR(L2_TBEs[address].NumIntPendingAcks);
if (isLocalOwnerValid(address)) {
L2_TBEs[address].NumIntPendingAcks := L2_TBEs[address].NumIntPendingAcks + 1;
DEBUG_EXPR(getLocalOwner(address));
}
- enqueue( localRequestNetwork_out, RequestMsg, latency="L2_RESPONSE_LATENCY" ) {
+ enqueue( localRequestNetwork_out, RequestMsg, latency=response_latency ) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:INV;
out_msg.Requestor := machineID;
@@ -982,7 +986,7 @@ machine(L2Cache, "Token protocol") {
L2_TBEs[address].NumIntPendingAcks := countLocalSharers(address);
if (countLocalSharers(address) > 0) {
- enqueue( localRequestNetwork_out, RequestMsg, latency="L2_RESPONSE_LATENCY" ) {
+ enqueue( localRequestNetwork_out, RequestMsg, latency=response_latency ) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:INV;
out_msg.Requestor := machineID;
@@ -1013,7 +1017,7 @@ machine(L2Cache, "Token protocol") {
L2_TBEs[address].NumIntPendingAcks := countLocalSharers(address);
}
- enqueue( localRequestNetwork_out, RequestMsg, latency="L2_RESPONSE_LATENCY" ) {
+ enqueue( localRequestNetwork_out, RequestMsg, latency=response_latency ) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:INV;
out_msg.Requestor := in_msg.Requestor;
@@ -1038,7 +1042,7 @@ machine(L2Cache, "Token protocol") {
L2_TBEs[address].NumIntPendingAcks := countLocalSharers(address);
}
}
- enqueue( localRequestNetwork_out, RequestMsg, latency="L2_RESPONSE_LATENCY" ) {
+ enqueue( localRequestNetwork_out, RequestMsg, latency=response_latency ) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:INV;
out_msg.Requestor := L2_TBEs[address].L1_GetX_ID;
@@ -1051,7 +1055,7 @@ machine(L2Cache, "Token protocol") {
action(f_sendUnblock, "f", desc="Send unblock to global directory") {
- enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency=response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:UNBLOCK;
out_msg.Destination.add(map_Address_to_Directory(address));
@@ -1063,7 +1067,7 @@ machine(L2Cache, "Token protocol") {
action(f_sendExclusiveUnblock, "\f", desc="Send unblock to global directory") {
- enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency=response_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceResponseType:UNBLOCK_EXCLUSIVE;
out_msg.Destination.add(map_Address_to_Directory(address));
@@ -1140,7 +1144,7 @@ machine(L2Cache, "Token protocol") {
action(j_forwardGlobalRequestToLocalOwner, "j", desc="Forward external request to local owner") {
peek(requestNetwork_in, RequestMsg) {
- enqueue( localRequestNetwork_out, RequestMsg, latency="L2_RESPONSE_LATENCY" ) {
+ enqueue( localRequestNetwork_out, RequestMsg, latency=response_latency ) {
out_msg.Address := in_msg.Address;
out_msg.Type := in_msg.Type;
out_msg.Requestor := machineID;
@@ -1156,7 +1160,7 @@ machine(L2Cache, "Token protocol") {
action(k_forwardLocalGETSToLocalSharer, "k", desc="Forward local request to local sharer/owner") {
peek(L1requestNetwork_in, RequestMsg) {
- enqueue( localRequestNetwork_out, RequestMsg, latency="L2_RESPONSE_LATENCY" ) {
+ enqueue( localRequestNetwork_out, RequestMsg, latency=response_latency ) {
out_msg.Address := in_msg.Address;
out_msg.Type := CoherenceRequestType:GETS;
out_msg.Requestor := in_msg.Requestor;
@@ -1169,7 +1173,7 @@ machine(L2Cache, "Token protocol") {
}
action(k_forwardLocalGETXToLocalOwner, "\k", desc="Forward local request to local owner") {
- enqueue( localRequestNetwork_out, RequestMsg, latency="L2_RESPONSE_LATENCY" ) {
+ enqueue( localRequestNetwork_out, RequestMsg, latency=response_latency ) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:GETX;
out_msg.Requestor := L2_TBEs[address].L1_GetX_ID;
@@ -1183,7 +1187,7 @@ machine(L2Cache, "Token protocol") {
// same as previous except that it assumes the TBE is present to get number of acks
action(kk_forwardLocalGETXToLocalExclusive, "kk", desc="Forward local request to local owner") {
peek(L1requestNetwork_in, RequestMsg) {
- enqueue( localRequestNetwork_out, RequestMsg, latency="L2_RESPONSE_LATENCY" ) {
+ enqueue( localRequestNetwork_out, RequestMsg, latency=response_latency ) {
out_msg.Address := in_msg.Address;
out_msg.Type := CoherenceRequestType:GETX;
out_msg.Requestor := in_msg.Requestor;
@@ -1197,7 +1201,7 @@ machine(L2Cache, "Token protocol") {
action(kk_forwardLocalGETSToLocalOwner, "\kk", desc="Forward local request to local owner") {
peek(L1requestNetwork_in, RequestMsg) {
- enqueue( localRequestNetwork_out, RequestMsg, latency="L2_RESPONSE_LATENCY" ) {
+ enqueue( localRequestNetwork_out, RequestMsg, latency=response_latency ) {
out_msg.Address := in_msg.Address;
out_msg.Type := CoherenceRequestType:GETS;
out_msg.Requestor := in_msg.Requestor;
@@ -1211,7 +1215,7 @@ machine(L2Cache, "Token protocol") {
action(l_writebackAckNeedData, "l", desc="Send writeback ack to L1 requesting data") {
peek(L1requestNetwork_in, RequestMsg) {
- enqueue( localRequestNetwork_out, RequestMsg, latency="L2_RESPONSE_LATENCY" ) {
+ enqueue( localRequestNetwork_out, RequestMsg, latency=response_latency ) {
out_msg.Address := in_msg.Address;
// out_msg.Type := CoherenceResponseType:WRITEBACK_SEND_DATA;
out_msg.Type := CoherenceRequestType:WB_ACK_DATA;
@@ -1225,7 +1229,7 @@ machine(L2Cache, "Token protocol") {
action(l_writebackAckDropData, "\l", desc="Send writeback ack to L1 indicating to drop data") {
peek(L1requestNetwork_in, RequestMsg) {
- enqueue( localRequestNetwork_out, RequestMsg, latency="L2_RESPONSE_LATENCY" ) {
+ enqueue( localRequestNetwork_out, RequestMsg, latency=response_latency ) {
out_msg.Address := in_msg.Address;
// out_msg.Type := CoherenceResponseType:WRITEBACK_ACK;
out_msg.Type := CoherenceRequestType:WB_ACK;
@@ -1239,7 +1243,7 @@ machine(L2Cache, "Token protocol") {
action(ll_writebackNack, "\ll", desc="Send writeback nack to L1") {
peek(L1requestNetwork_in, RequestMsg) {
- enqueue( localRequestNetwork_out, RequestMsg, latency="L2_RESPONSE_LATENCY" ) {
+ enqueue( localRequestNetwork_out, RequestMsg, latency=response_latency ) {
out_msg.Address := in_msg.Address;
out_msg.Type := CoherenceRequestType:WB_NACK;
out_msg.Requestor := machineID;
@@ -1305,7 +1309,7 @@ machine(L2Cache, "Token protocol") {
action( qq_sendDataFromTBEToMemory, "qq", desc="Send data from TBE to directory") {
- enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+ enqueue(responseNetwork_out, ResponseMsg, latency=response_latency) {
out_msg.Address := address;
out_msg.Sender := machineID;
out_msg.SenderMachine := MachineType:L2Cache;
@@ -1372,7 +1376,7 @@ machine(L2Cache, "Token protocol") {
}
action(vv_allocateL2CacheBlock, "\v", desc="Set L2 cache tag equal to tag of block B.") {
- L2cacheMemory.allocate(address);
+ L2cacheMemory.allocate(address, new Entry);
}
action(rr_deallocateL2CacheBlock, "\r", desc="Deallocate L2 cache block. Sets the cache to not present, allowing a replacement in parallel with a fetch.") {
@@ -1389,7 +1393,7 @@ machine(L2Cache, "Token protocol") {
action(uu_profileMiss, "\u", desc="Profile the demand miss") {
peek(L1requestNetwork_in, RequestMsg) {
// AccessModeType not implemented
- profile_L2Cache_miss(convertToGenericType(in_msg.Type), in_msg.AccessMode, MessageSizeTypeToInt(in_msg.MessageSize), in_msg.Prefetch, machineIDToNodeID(in_msg.Requestor));
+ // profile_L2Cache_miss(convertToGenericType(in_msg.Type), in_msg.AccessMode, MessageSizeTypeToInt(in_msg.MessageSize), in_msg.Prefetch, machineIDToNodeID(in_msg.Requestor));
}
}
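
Two structural changes also recur in this L2 controller and in the other machines: member objects are bound through RubySystem factory strings instead of the old constructor_hack strings, and CacheMemory::allocate() now takes the entry to install. As written in the hunks above:

    CacheMemory L2cacheMemory, factory='RubySystem::getCache(m_cfg["cache"])';

    action(vv_allocateL2CacheBlock, "\v", desc="Set L2 cache tag equal to tag of block B.") {
      // allocate() now receives a freshly constructed entry for the tag
      L2cacheMemory.allocate(address, new Entry);
    }
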
diff --git a/src/mem/protocol/MOESI_CMP_directory-dir.sm b/src/mem/protocol/MOESI_CMP_directory-dir.sm
index a016836c2..edd67707e 100644
--- a/src/mem/protocol/MOESI_CMP_directory-dir.sm
+++ b/src/mem/protocol/MOESI_CMP_directory-dir.sm
@@ -31,13 +31,15 @@
* $Id$
*/
-machine(Directory, "Directory protocol") {
+machine(Directory, "Directory protocol")
+: int directory_latency
+{
// ** IN QUEUES **
MessageBuffer foo1, network="From", virtual_network="0", ordered="false"; // a mod-L2 bank -> this Dir
MessageBuffer requestToDir, network="From", virtual_network="1", ordered="false"; // a mod-L2 bank -> this Dir
MessageBuffer responseToDir, network="From", virtual_network="2", ordered="false"; // a mod-L2 bank -> this Dir
-
+
MessageBuffer goo1, network="To", virtual_network="0", ordered="false";
MessageBuffer forwardFromDir, network="To", virtual_network="1", ordered="false";
MessageBuffer responseFromDir, network="To", virtual_network="2", ordered="false"; // Dir -> mod-L2 bank
@@ -56,11 +58,16 @@ machine(Directory, "Directory protocol") {
OO, desc="Blocked, was in owned";
MO, desc="Blocked, going to owner or maybe modified";
MM, desc="Blocked, going to modified";
+ MM_DMA, desc="Blocked, going to I";
MI, desc="Blocked on a writeback";
MIS, desc="Blocked on a writeback, but don't remove from sharers when received";
OS, desc="Blocked on a writeback";
OSS, desc="Blocked on a writeback, but don't remove from sharers when received";
+
+ XI_M, desc="In a stable state, going to I, waiting for the memory controller";
+ XI_U, desc="In a stable state, going to I, waiting for an unblock";
+ OI_D, desc="In O, going to I, waiting for data";
}
// Events
@@ -75,6 +82,11 @@ machine(Directory, "Directory protocol") {
Exclusive_Unblock, desc="The processor become the exclusive owner (E or M) of the line";
Clean_Writeback, desc="The final message as part of a PutX/PutS, no data";
Dirty_Writeback, desc="The final message as part of a PutX/PutS, contains data";
+ Memory_Data, desc="Fetched data from memory arrives";
+ Memory_Ack, desc="Writeback Ack from memory arrives";
+ DMA_READ, desc="DMA Read";
+ DMA_WRITE, desc="DMA Write";
+ Data, desc="Data to directory";
}
// TYPES
@@ -88,15 +100,36 @@ machine(Directory, "Directory protocol") {
int WaitingUnblocks, desc="Number of acks we're waiting for";
}
+ structure(TBE, desc="...") {
+ Address address, desc="Address for this entry";
+ int Len, desc="Length of request";
+ DataBlock DataBlk, desc="DataBlk";
+ MachineID Requestor, desc="original requestor";
+ }
+
external_type(DirectoryMemory) {
Entry lookup(Address);
bool isPresent(Address);
}
+ external_type(TBETable) {
+ TBE lookup(Address);
+ void allocate(Address);
+ void deallocate(Address);
+ bool isPresent(Address);
+ }
+
+ // to simulate detailed DRAM
+ external_type(MemoryControl, inport="yes", outport="yes") {
+
+ }
+
// ** OBJECTS **
- DirectoryMemory directory, constructor_hack="i";
+ DirectoryMemory directory, factory='RubySystem::getDirectory(m_cfg["directory_name"])';
+ MemoryControl memBuffer, factory='RubySystem::getMemoryControl(m_cfg["memory_controller_name"])';
+ TBETable TBEs, template_hack="<Directory_TBE>";
State getState(Address addr) {
return directory[addr].DirectoryState;
@@ -164,6 +197,7 @@ machine(Directory, "Directory protocol") {
out_port(responseNetwork_out, ResponseMsg, responseFromDir);
// out_port(requestQueue_out, ResponseMsg, requestFromDir); // For recycling requests
out_port(goo1_out, ResponseMsg, goo1);
+ out_port(memQueue_out, MemoryMsg, memBuffer);
// ** IN_PORTS **
@@ -188,6 +222,8 @@ machine(Directory, "Directory protocol") {
trigger(Event:Dirty_Writeback, in_msg.Address);
} else if (in_msg.Type == CoherenceResponseType:WRITEBACK_CLEAN_ACK) {
trigger(Event:Clean_Writeback, in_msg.Address);
+ } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
+ trigger(Event:Data, in_msg.Address);
} else {
error("Invalid message");
}
@@ -208,7 +244,27 @@ machine(Directory, "Directory protocol") {
trigger(Event:PUTO, in_msg.Address);
} else if (in_msg.Type == CoherenceRequestType:PUTO_SHARERS) {
trigger(Event:PUTO_SHARERS, in_msg.Address);
+ } else if (in_msg.Type == CoherenceRequestType:DMA_READ) {
+ trigger(Event:DMA_READ, in_msg.Address);
+ } else if (in_msg.Type == CoherenceRequestType:DMA_WRITE) {
+ trigger(Event:DMA_WRITE, in_msg.Address);
+ } else {
+ error("Invalid message");
+ }
+ }
+ }
+ }
+
+ // off-chip memory request/response is done
+ in_port(memQueue_in, MemoryMsg, memBuffer) {
+ if (memQueue_in.isReady()) {
+ peek(memQueue_in, MemoryMsg) {
+ if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
+ trigger(Event:Memory_Data, in_msg.Address);
+ } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
+ trigger(Event:Memory_Ack, in_msg.Address);
} else {
+ DEBUG_EXPR(in_msg.Type);
error("Invalid message");
}
}
@@ -219,7 +275,7 @@ machine(Directory, "Directory protocol") {
action(a_sendWriteBackAck, "a", desc="Send writeback ack to requestor") {
peek(requestQueue_in, RequestMsg) {
- enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
+ enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:WB_ACK;
out_msg.Requestor := in_msg.Requestor;
@@ -231,7 +287,7 @@ machine(Directory, "Directory protocol") {
action(b_sendWriteBackNack, "b", desc="Send writeback nack to requestor") {
peek(requestQueue_in, RequestMsg) {
- enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
+ enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:WB_NACK;
out_msg.Requestor := in_msg.Requestor;
@@ -254,26 +310,21 @@ machine(Directory, "Directory protocol") {
directory[address].Sharers.clear();
}
- action(d_sendData, "d", desc="Send data to requestor") {
- peek(requestQueue_in, RequestMsg) {
- enqueue(responseNetwork_out, ResponseMsg, latency="MEMORY_LATENCY") {
- // enqueue(responseNetwork_out, ResponseMsg, latency="L2_RESPONSE_LATENCY") {
+ action(d_sendDataMsg, "d", desc="Send data to requestor") {
+ peek(memQueue_in, MemoryMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, latency="1") {
out_msg.Address := address;
-
- if (in_msg.Type == CoherenceRequestType:GETS && directory[address].Sharers.count() == 0) {
- out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
- } else {
- out_msg.Type := CoherenceResponseType:DATA;
- }
-
out_msg.Sender := machineID;
out_msg.SenderMachine := MachineType:Directory;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.DataBlk := directory[in_msg.Address].DataBlk;
+ out_msg.Destination.add(in_msg.OriginalRequestorMachId);
+ //out_msg.DataBlk := directory[in_msg.Address].DataBlk;
+ out_msg.DataBlk := in_msg.DataBlk;
out_msg.Dirty := false; // By definition, the block is now clean
- out_msg.Acks := directory[address].Sharers.count();
- if (directory[address].Sharers.isElement(in_msg.Requestor)) {
- out_msg.Acks := out_msg.Acks - 1;
+ out_msg.Acks := in_msg.Acks;
+ if (in_msg.ReadX) {
+ out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
+ } else {
+ out_msg.Type := CoherenceResponseType:DATA;
}
out_msg.MessageSize := MessageSizeType:Response_Data;
}
@@ -289,7 +340,7 @@ machine(Directory, "Directory protocol") {
action(f_forwardRequest, "f", desc="Forward request to owner") {
peek(requestQueue_in, RequestMsg) {
- enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
+ enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
out_msg.Address := address;
out_msg.Type := in_msg.Type;
out_msg.Requestor := in_msg.Requestor;
@@ -303,11 +354,27 @@ machine(Directory, "Directory protocol") {
}
}
+ action(f_forwardRequestDirIsRequestor, "\f", desc="Forward request to owner") {
+ peek(requestQueue_in, RequestMsg) {
+ enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
+ out_msg.Address := address;
+ out_msg.Type := in_msg.Type;
+ out_msg.Requestor := machineID;
+ out_msg.Destination.addNetDest(directory[in_msg.Address].Owner);
+ out_msg.Acks := directory[address].Sharers.count();
+ if (directory[address].Sharers.isElement(in_msg.Requestor)) {
+ out_msg.Acks := out_msg.Acks - 1;
+ }
+ out_msg.MessageSize := MessageSizeType:Forwarded_Control;
+ }
+ }
+ }
+
action(g_sendInvalidations, "g", desc="Send invalidations to sharers, not including the requester") {
peek(requestQueue_in, RequestMsg) {
if ((directory[in_msg.Address].Sharers.count() > 1) ||
((directory[in_msg.Address].Sharers.count() > 0) && (directory[in_msg.Address].Sharers.isElement(in_msg.Requestor) == false))) {
- enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
+ enqueue(forwardNetwork_out, RequestMsg, latency=directory_latency) {
out_msg.Address := address;
out_msg.Type := CoherenceRequestType:INV;
out_msg.Requestor := in_msg.Requestor;
@@ -338,7 +405,7 @@ machine(Directory, "Directory protocol") {
}
}
- action(ll_checkDataInMemory, "\l", desc="Check PUTX/PUTO data is same as in the memory") {
+ action(ll_checkDataInMemory, "\ld", desc="Check PUTX/PUTO data is same as in the memory") {
peek(unblockNetwork_in, ResponseMsg) {
assert(in_msg.Dirty == false);
assert(in_msg.MessageSize == MessageSizeType:Writeback_Control);
@@ -366,6 +433,70 @@ machine(Directory, "Directory protocol") {
assert(directory[address].WaitingUnblocks >= 0);
}
+ action(q_popMemQueue, "q", desc="Pop off-chip request queue") {
+ memQueue_in.dequeue();
+ }
+
+ action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
+ peek(requestQueue_in, RequestMsg) {
+ enqueue(memQueue_out, MemoryMsg, latency="1") {
+ out_msg.Address := address;
+ out_msg.Type := MemoryRequestType:MEMORY_READ;
+ out_msg.Sender := machineID;
+ out_msg.OriginalRequestorMachId := in_msg.Requestor;
+ out_msg.DataBlk := directory[in_msg.Address].DataBlk;
+ out_msg.MessageSize := in_msg.MessageSize;
+ //out_msg.Prefetch := false;
+ // These are not used by memory but are passed back here with the read data:
+ out_msg.ReadX := (in_msg.Type == CoherenceRequestType:GETS && directory[address].Sharers.count() == 0);
+ out_msg.Acks := directory[address].Sharers.count();
+ if (directory[address].Sharers.isElement(in_msg.Requestor)) {
+ out_msg.Acks := out_msg.Acks - 1;
+ }
+ DEBUG_EXPR(out_msg);
+ }
+ }
+ }
+
+ action(qw_queueMemoryWBRequest, "qw", desc="Queue off-chip writeback request") {
+ peek(unblockNetwork_in, ResponseMsg) {
+ enqueue(memQueue_out, MemoryMsg, latency="1") {
+ out_msg.Address := address;
+ out_msg.Type := MemoryRequestType:MEMORY_WB;
+ out_msg.Sender := machineID;
+ if (TBEs.isPresent(address)) {
+ out_msg.OriginalRequestorMachId := TBEs[address].Requestor;
+ }
+ out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.MessageSize := in_msg.MessageSize;
+ //out_msg.Prefetch := false;
+ // Not used:
+ out_msg.ReadX := false;
+ out_msg.Acks := directory[address].Sharers.count(); // for dma requests
+ DEBUG_EXPR(out_msg);
+ }
+ }
+ }
+
+ action(qw_queueMemoryWBRequest2, "/qw", desc="Queue off-chip writeback request") {
+ peek(requestQueue_in, RequestMsg) {
+ enqueue(memQueue_out, MemoryMsg, latency="1") {
+ out_msg.Address := address;
+ out_msg.Type := MemoryRequestType:MEMORY_WB;
+ out_msg.Sender := machineID;
+ out_msg.OriginalRequestorMachId := in_msg.Requestor;
+ out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.MessageSize := in_msg.MessageSize;
+ //out_msg.Prefetch := false;
+ // Not used:
+ out_msg.ReadX := false;
+ out_msg.Acks := directory[address].Sharers.count(); // for dma requests
+ DEBUG_EXPR(out_msg);
+ }
+ }
+ }
+
+
// action(z_stall, "z", desc="Cannot be handled right now.") {
// Special name recognized as do nothing case
// }
@@ -374,26 +505,106 @@ machine(Directory, "Directory protocol") {
requestQueue_in.recycle();
}
+ action(a_sendDMAAck, "\a", desc="Send DMA Ack that write completed, along with Inv Ack count") {
+ peek(memQueue_in, MemoryMsg) {
+ enqueue(responseNetwork_out, ResponseMsg, latency="1") {
+ out_msg.Address := address;
+ out_msg.Sender := machineID;
+ out_msg.SenderMachine := MachineType:Directory;
+ out_msg.Destination.add(in_msg.OriginalRequestorMachId);
+ out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.Acks := in_msg.Acks;
+ out_msg.Type := CoherenceResponseType:DMA_ACK;
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+ }
+
+ action(l_writeDMADataToMemory, "\l", desc="Write data from a DMA_WRITE to memory") {
+ peek(requestQueue_in, RequestMsg) {
+ directory[address].DataBlk.copyPartial(in_msg.DataBlk, addressOffset(in_msg.Address), in_msg.Len);
+ }
+ }
+
+  action(l_writeDMADataToMemoryFromTBE, "\ll", desc="Write DMA_WRITE data held in the TBE to memory") {
+ directory[address].DataBlk.copyPartial(TBEs[address].DataBlk, addressOffset(address), TBEs[address].Len);
+ }
+
+ action(v_allocateTBE, "v", desc="Allocate TBE entry") {
+ peek (requestQueue_in, RequestMsg) {
+ TBEs.allocate(address);
+ TBEs[address].Len := in_msg.Len;
+ TBEs[address].DataBlk := in_msg.DataBlk;
+ TBEs[address].Requestor := in_msg.Requestor;
+ }
+ }
+
+ action(w_deallocateTBE, "w", desc="Deallocate TBE entry") {
+ TBEs.deallocate(address);
+ }
+
+
+
// TRANSITIONS
transition(I, GETX, MM) {
- d_sendData;
+ qf_queueMemoryFetchRequest;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(I, DMA_READ, XI_M) {
+ qf_queueMemoryFetchRequest;
i_popIncomingRequestQueue;
}
+ transition(I, DMA_WRITE, XI_M) {
+ qw_queueMemoryWBRequest2;
+ l_writeDMADataToMemory;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(XI_M, Memory_Data, XI_U) {
+ d_sendDataMsg; // ack count may be zero
+ q_popMemQueue;
+ }
+
+ transition(XI_M, Memory_Ack, XI_U) {
+ a_sendDMAAck; // ack count may be zero
+ q_popMemQueue;
+ }
+
+ transition(XI_U, Exclusive_Unblock, I) {
+ cc_clearSharers;
+ c_clearOwner;
+ j_popIncomingUnblockQueue;
+ }
+
transition(S, GETX, MM) {
- d_sendData;
+ qf_queueMemoryFetchRequest;
g_sendInvalidations;
i_popIncomingRequestQueue;
}
+ transition(S, DMA_READ, XI_M) {
+ qf_queueMemoryFetchRequest;
+ g_sendInvalidations; // the DMA will collect the invalidations then send an Unblock Exclusive
+ i_popIncomingRequestQueue;
+ }
+
+ transition(S, DMA_WRITE, XI_M) {
+ qw_queueMemoryWBRequest2;
+ l_writeDMADataToMemory;
+ g_sendInvalidations; // the DMA will collect invalidations
+ i_popIncomingRequestQueue;
+ }
+
transition(I, GETS, IS) {
- d_sendData;
+ qf_queueMemoryFetchRequest;
i_popIncomingRequestQueue;
}
transition({S, SS}, GETS, SS) {
- d_sendData;
+ qf_queueMemoryFetchRequest;
n_incrementOutstanding;
i_popIncomingRequestQueue;
}
@@ -414,6 +625,27 @@ machine(Directory, "Directory protocol") {
i_popIncomingRequestQueue;
}
+ transition(O, DMA_READ, XI_U) {
+ f_forwardRequest; // this will cause the data to go to DMA directly
+ g_sendInvalidations; // this will cause acks to be sent to the DMA
+ i_popIncomingRequestQueue;
+ }
+
+ transition({O,M}, DMA_WRITE, OI_D) {
+ f_forwardRequestDirIsRequestor; // need the modified data before we can proceed
+ g_sendInvalidations; // these go to the DMA Controller
+ v_allocateTBE;
+ i_popIncomingRequestQueue;
+ }
+
+ transition(OI_D, Data, XI_M) {
+ qw_queueMemoryWBRequest;
+ l_writeDataToMemory;
+ l_writeDMADataToMemoryFromTBE;
+ w_deallocateTBE;
+ j_popIncomingUnblockQueue;
+ }
+
transition({O, OO}, GETS, OO) {
f_forwardRequest;
n_incrementOutstanding;
@@ -425,6 +657,12 @@ machine(Directory, "Directory protocol") {
i_popIncomingRequestQueue;
}
+  // no exclusive unblock will arrive at the directory
+ transition(M, DMA_READ, XI_U) {
+ f_forwardRequest; // this will cause the data to go to DMA directly
+ i_popIncomingRequestQueue;
+ }
+
transition(M, GETS, MO) {
f_forwardRequest;
i_popIncomingRequestQueue;
@@ -457,7 +695,7 @@ machine(Directory, "Directory protocol") {
}
- transition({MM, MO, MI, MIS, OS, OSS}, {GETS, GETX, PUTO, PUTO_SHARERS, PUTX}) {
+ transition({MM, MO, MI, MIS, OS, OSS}, {GETS, GETX, PUTO, PUTO_SHARERS, PUTX, DMA_READ}) {
zz_recycleRequest;
}
@@ -472,7 +710,7 @@ machine(Directory, "Directory protocol") {
j_popIncomingUnblockQueue;
}
- transition({IS, SS, OO}, {GETX, PUTO, PUTO_SHARERS, PUTX}) {
+ transition({IS, SS, OO}, {GETX, PUTO, PUTO_SHARERS, PUTX, DMA_READ}) {
zz_recycleRequest;
}
@@ -519,12 +757,14 @@ machine(Directory, "Directory protocol") {
c_clearOwner;
cc_clearSharers;
l_writeDataToMemory;
+ qw_queueMemoryWBRequest;
j_popIncomingUnblockQueue;
}
transition(MIS, Dirty_Writeback, S) {
c_moveOwnerToSharer;
l_writeDataToMemory;
+ qw_queueMemoryWBRequest;
j_popIncomingUnblockQueue;
}
@@ -536,12 +776,14 @@ machine(Directory, "Directory protocol") {
transition(OS, Dirty_Writeback, S) {
c_clearOwner;
l_writeDataToMemory;
+ qw_queueMemoryWBRequest;
j_popIncomingUnblockQueue;
}
transition(OSS, Dirty_Writeback, S) {
c_moveOwnerToSharer;
l_writeDataToMemory;
+ qw_queueMemoryWBRequest;
j_popIncomingUnblockQueue;
}
@@ -570,4 +812,15 @@ machine(Directory, "Directory protocol") {
transition({OS, OSS}, Unblock, O) {
j_popIncomingUnblockQueue;
}
+
+ transition({I, S, O, M, IS, SS, OO, MO, MM, MI, MIS, OS, OSS}, Memory_Data) {
+ d_sendDataMsg;
+ q_popMemQueue;
+ }
+
+ transition({I, S, O, M, IS, SS, OO, MO, MM, MI, MIS, OS, OSS}, Memory_Ack) {
+ //a_sendAck;
+ q_popMemQueue;
+ }
+
}
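
Aside: a minimal, hypothetical C++ sketch (illustrative names only, not gem5 code) of the bookkeeping that qf_queueMemoryFetchRequest above piggybacks on the MEMORY_READ message and that d_sendDataMsg later copies into the coherence response: whether the reply may be exclusive, and how many invalidation acks the requestor should expect.

    #include <cassert>
    #include <set>

    // What qf_queueMemoryFetchRequest attaches to the MEMORY_READ message.
    struct FetchHints {
        bool readX;  // reply with DATA_EXCLUSIVE instead of DATA
        int  acks;   // invalidation acks the requestor should expect
    };

    // A GETS that finds no sharers can be answered exclusively, and a
    // requestor that is already a sharer never waits for its own ack.
    FetchHints computeFetchHints(bool isGETS, const std::set<int>& sharers,
                                 int requestor) {
        FetchHints h;
        h.readX = isGETS && sharers.empty();
        h.acks = static_cast<int>(sharers.size());
        if (sharers.count(requestor) != 0)
            h.acks -= 1;
        return h;
    }

    int main() {
        // GETX from node 2 while nodes 1, 2, 3 share the block: two acks expected.
        FetchHints h = computeFetchHints(false, std::set<int>{1, 2, 3}, 2);
        assert(!h.readX && h.acks == 2);

        // GETS with no sharers: hand the data out exclusively, no acks.
        h = computeFetchHints(true, std::set<int>{}, 5);
        assert(h.readX && h.acks == 0);
        return 0;
    }

Because d_sendDataMsg peeks only the returning MemoryMsg, anything it needs from the original request (the requestor, the ack count, whether the reply may be exclusive) has to ride along on the memory message, which is what the ReadX and Acks fields above model.
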
diff --git a/src/mem/protocol/MOESI_CMP_directory-dma.sm b/src/mem/protocol/MOESI_CMP_directory-dma.sm
new file mode 100644
index 000000000..74246c730
--- /dev/null
+++ b/src/mem/protocol/MOESI_CMP_directory-dma.sm
@@ -0,0 +1,267 @@
+
+machine(DMA, "DMA Controller")
+: int request_latency,
+ int response_latency
+{
+
+ MessageBuffer goo1, network="From", virtual_network="0", ordered="false";
+ MessageBuffer goo2, network="From", virtual_network="1", ordered="false";
+ MessageBuffer responseFromDir, network="From", virtual_network="2", ordered="false";
+
+ MessageBuffer foo1, network="To", virtual_network="0", ordered="false";
+ MessageBuffer reqToDir, network="To", virtual_network="1", ordered="false";
+ MessageBuffer respToDir, network="To", virtual_network="2", ordered="false";
+
+ enumeration(State, desc="DMA states", default="DMA_State_READY") {
+ READY, desc="Ready to accept a new request";
+    BUSY_RD, desc="Busy servicing a DMA read request";
+    BUSY_WR, desc="Busy servicing a DMA write request";
+ }
+
+ enumeration(Event, desc="DMA events") {
+ ReadRequest, desc="A new read request";
+ WriteRequest, desc="A new write request";
+ Data, desc="Data from a DMA memory read";
+ DMA_Ack, desc="DMA write to memory completed";
+ Inv_Ack, desc="Invalidation Ack from a sharer";
+ All_Acks, desc="All acks received";
+ }
+
+ structure(TBE, desc="...") {
+ Address address, desc="Physical address";
+ int NumAcks, default="0", desc="Number of Acks pending";
+ DataBlock DataBlk, desc="Data";
+ }
+
+ external_type(DMASequencer) {
+ void ackCallback();
+ void dataCallback(DataBlock);
+ }
+
+ external_type(TBETable) {
+ TBE lookup(Address);
+ void allocate(Address);
+ void deallocate(Address);
+ bool isPresent(Address);
+ }
+
+ MessageBuffer mandatoryQueue, ordered="false";
+ MessageBuffer triggerQueue, ordered="true";
+ DMASequencer dma_sequencer, factory='RubySystem::getDMASequencer(m_cfg["dma_sequencer"])';
+ TBETable TBEs, template_hack="<DMA_TBE>";
+ State cur_state;
+
+ State getState(Address addr) {
+ return cur_state;
+ }
+ void setState(Address addr, State state) {
+ cur_state := state;
+ }
+
+ out_port(reqToDirectory_out, RequestMsg, reqToDir, desc="...");
+ out_port(respToDirectory_out, ResponseMsg, respToDir, desc="...");
+ out_port(foo1_out, ResponseMsg, foo1, desc="...");
+ out_port(triggerQueue_out, TriggerMsg, triggerQueue, desc="...");
+
+ in_port(goo1_in, RequestMsg, goo1) {
+ if (goo1_in.isReady()) {
+ peek(goo1_in, RequestMsg) {
+ assert(false);
+ }
+ }
+ }
+
+ in_port(goo2_in, RequestMsg, goo2) {
+ if (goo2_in.isReady()) {
+ peek(goo2_in, RequestMsg) {
+ assert(false);
+ }
+ }
+ }
+
+ in_port(dmaRequestQueue_in, SequencerMsg, mandatoryQueue, desc="...") {
+ if (dmaRequestQueue_in.isReady()) {
+ peek(dmaRequestQueue_in, SequencerMsg) {
+ if (in_msg.Type == SequencerRequestType:LD ) {
+ trigger(Event:ReadRequest, in_msg.PhysicalAddress);
+ } else if (in_msg.Type == SequencerRequestType:ST) {
+ trigger(Event:WriteRequest, in_msg.PhysicalAddress);
+ } else {
+ error("Invalid request type");
+ }
+ }
+ }
+ }
+
+ in_port(dmaResponseQueue_in, ResponseMsg, responseFromDir, desc="...") {
+ if (dmaResponseQueue_in.isReady()) {
+      peek(dmaResponseQueue_in, ResponseMsg) {
+ if (in_msg.Type == CoherenceResponseType:DMA_ACK) {
+ trigger(Event:DMA_Ack, in_msg.Address);
+ } else if (in_msg.Type == CoherenceResponseType:DATA_EXCLUSIVE) {
+ trigger(Event:Data, in_msg.Address);
+ } else if (in_msg.Type == CoherenceResponseType:ACK) {
+ trigger(Event:Inv_Ack, in_msg.Address);
+ } else {
+ error("Invalid response type");
+ }
+ }
+ }
+ }
+
+ // Trigger Queue
+ in_port(triggerQueue_in, TriggerMsg, triggerQueue) {
+ if (triggerQueue_in.isReady()) {
+ peek(triggerQueue_in, TriggerMsg) {
+ if (in_msg.Type == TriggerType:ALL_ACKS) {
+ trigger(Event:All_Acks, in_msg.Address);
+ } else {
+ error("Unexpected message");
+ }
+ }
+ }
+ }
+
+ action(s_sendReadRequest, "s", desc="Send a DMA read request to memory") {
+ peek(dmaRequestQueue_in, SequencerMsg) {
+ enqueue(reqToDirectory_out, RequestMsg, latency=request_latency) {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceRequestType:DMA_READ;
+ out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.Len := in_msg.Len;
+ out_msg.Destination.add(map_Address_to_Directory(address));
+ out_msg.Requestor := machineID;
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+ }
+
+ action(s_sendWriteRequest, "\s", desc="Send a DMA write request to memory") {
+ peek(dmaRequestQueue_in, SequencerMsg) {
+ enqueue(reqToDirectory_out, RequestMsg, latency=request_latency) {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceRequestType:DMA_WRITE;
+ out_msg.DataBlk := in_msg.DataBlk;
+ out_msg.Len := in_msg.Len;
+ out_msg.Destination.add(map_Address_to_Directory(address));
+ out_msg.Requestor := machineID;
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+ }
+
+ action(a_ackCallback, "a", desc="Notify dma controller that write request completed") {
+ dma_sequencer.ackCallback();
+ }
+
+ action(o_checkForCompletion, "o", desc="Check if we have received all the messages required for completion") {
+ if (TBEs[address].NumAcks == 0) {
+ enqueue(triggerQueue_out, TriggerMsg) {
+ out_msg.Address := address;
+ out_msg.Type := TriggerType:ALL_ACKS;
+ }
+ }
+ }
+
+ action(u_updateAckCount, "u", desc="Update ack count") {
+ peek(dmaResponseQueue_in, ResponseMsg) {
+ TBEs[address].NumAcks := TBEs[address].NumAcks - in_msg.Acks;
+ }
+ }
+
+  action(u_sendExclusiveUnblockToDir, "\u", desc="Send exclusive unblock to directory") {
+ enqueue(respToDirectory_out, ResponseMsg, latency=response_latency) {
+ out_msg.Address := address;
+ out_msg.Type := CoherenceResponseType:UNBLOCK_EXCLUSIVE;
+ out_msg.Destination.add(map_Address_to_Directory(address));
+ out_msg.MessageSize := MessageSizeType:Writeback_Control;
+ }
+ }
+
+ action(p_popRequestQueue, "p", desc="Pop request queue") {
+ dmaRequestQueue_in.dequeue();
+ }
+
+  action(p_popResponseQueue, "\p", desc="Pop response queue") {
+ dmaResponseQueue_in.dequeue();
+ }
+
+ action(p_popTriggerQueue, "pp", desc="Pop trigger queue") {
+ triggerQueue_in.dequeue();
+ }
+
+ action(t_updateTBEData, "t", desc="Update TBE Data") {
+ peek(dmaResponseQueue_in, ResponseMsg) {
+ TBEs[address].DataBlk := in_msg.DataBlk;
+ }
+ }
+
+ action(d_dataCallbackFromTBE, "/d", desc="data callback with data from TBE") {
+ dma_sequencer.dataCallback(TBEs[address].DataBlk);
+ }
+
+ action(v_allocateTBE, "v", desc="Allocate TBE entry") {
+ TBEs.allocate(address);
+ }
+
+ action(w_deallocateTBE, "w", desc="Deallocate TBE entry") {
+ TBEs.deallocate(address);
+ }
+
+  action(z_stall, "z", desc="DMA is busy; stall") {
+ // do nothing
+ }
+
+
+
+ transition(READY, ReadRequest, BUSY_RD) {
+ s_sendReadRequest;
+ v_allocateTBE;
+ p_popRequestQueue;
+ }
+
+ transition(BUSY_RD, Inv_Ack) {
+ u_updateAckCount;
+ o_checkForCompletion;
+ p_popResponseQueue;
+ }
+
+ transition(BUSY_RD, Data) {
+ t_updateTBEData;
+ u_updateAckCount;
+ o_checkForCompletion;
+ p_popResponseQueue;
+ }
+
+ transition(BUSY_RD, All_Acks, READY) {
+ d_dataCallbackFromTBE;
+ u_sendExclusiveUnblockToDir;
+ w_deallocateTBE;
+ p_popTriggerQueue;
+ }
+
+ transition(READY, WriteRequest, BUSY_WR) {
+ s_sendWriteRequest;
+ v_allocateTBE;
+ p_popRequestQueue;
+ }
+
+ transition(BUSY_WR, Inv_Ack) {
+ u_updateAckCount;
+ o_checkForCompletion;
+ p_popResponseQueue;
+ }
+
+ transition(BUSY_WR, DMA_Ack) {
+ u_updateAckCount; // actually increases
+ o_checkForCompletion;
+ p_popResponseQueue;
+ }
+
+ transition(BUSY_WR, All_Acks, READY) {
+ a_ackCallback;
+ u_sendExclusiveUnblockToDir;
+ w_deallocateTBE;
+ p_popTriggerQueue;
+ }
+}
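
Aside: the DMA controller above handles one transfer at a time (READY to BUSY_RD or BUSY_WR and back) and detects completion purely from a running ack counter kept in its TBE: the data or DMA_ACK response charges the counter with the expected number of invalidation acks, and each Inv_Ack discharges it. The toy C++ sketch below (hypothetical names, a simplified sign convention, and an explicit "response seen" flag that the SLICC version gets from its action ordering) shows the idea, not the Ruby implementation.

    #include <cassert>
    #include <functional>
    #include <utility>

    // Toy model of the DMA TBE completion check: one outstanding transfer,
    // one running ack counter, and a callback fired when it balances out.
    class DmaTransfer {
      public:
        explicit DmaTransfer(std::function<void()> onComplete)
            : numAcks_(0), sawResponse_(false), onComplete_(std::move(onComplete)) {}

        // The Data (read) or DMA_ACK (write) message arrives, carrying the
        // number of invalidation acks this transfer still has to absorb.
        void responseArrived(int expectedAcks) {
            sawResponse_ = true;
            numAcks_ += expectedAcks;
            checkForCompletion();
        }

        // Each invalidation ack from a former sharer discharges the counter.
        void invAckArrived() {
            numAcks_ -= 1;
            checkForCompletion();
        }

      private:
        void checkForCompletion() {
            // Complete only once the response has arrived and every ack is in.
            if (sawResponse_ && numAcks_ == 0)
                onComplete_();
        }

        int numAcks_;
        bool sawResponse_;
        std::function<void()> onComplete_;
    };

    int main() {
        bool done = false;
        DmaTransfer xfer([&done] { done = true; });
        xfer.invAckArrived();     // an ack may race ahead of the data
        xfer.responseArrived(2);  // the response says two sharers must ack
        assert(!done);
        xfer.invAckArrived();     // second ack: counter balances, callback fires
        assert(done);
        return 0;
    }

In the SLICC above, the same role is played by TBEs[address].NumAcks together with u_updateAckCount and o_checkForCompletion, which queues the ALL_ACKS trigger.
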
diff --git a/src/mem/protocol/MOESI_CMP_directory-msg.sm b/src/mem/protocol/MOESI_CMP_directory-msg.sm
index 08b4abec3..edbff0c96 100644
--- a/src/mem/protocol/MOESI_CMP_directory-msg.sm
+++ b/src/mem/protocol/MOESI_CMP_directory-msg.sm
@@ -44,6 +44,9 @@ enumeration(CoherenceRequestType, desc="...") {
WB_ACK_DATA, desc="Writeback ack";
WB_NACK, desc="Writeback neg. ack";
INV, desc="Invalidation";
+
+ DMA_READ, desc="DMA Read";
+ DMA_WRITE, desc="DMA Write";
}
// CoherenceResponseType
@@ -56,6 +59,8 @@ enumeration(CoherenceResponseType, desc="...") {
WRITEBACK_CLEAN_DATA, desc="Clean writeback (contains data)";
WRITEBACK_CLEAN_ACK, desc="Clean writeback (contains no data)";
WRITEBACK_DIRTY_DATA, desc="Dirty writeback (contains data)";
+
+ DMA_ACK, desc="Ack that a DMA write completed";
}
// TriggerType
@@ -72,10 +77,12 @@ structure(TriggerMsg, desc="...", interface="Message") {
// RequestMsg (and also forwarded requests)
structure(RequestMsg, desc="...", interface="NetworkMessage") {
Address Address, desc="Physical address for this request";
+ int Len, desc="Length of Request";
CoherenceRequestType Type, desc="Type of request (GetS, GetX, PutX, etc)";
MachineID Requestor, desc="Node who initiated the request";
MachineType RequestorMachine, desc="type of component";
NetDest Destination, desc="Multicast destination mask";
+ DataBlock DataBlk, desc="data for the cache line (DMA WRITE request)";
int Acks, desc="How many acks to expect";
MessageSizeType MessageSize, desc="size category of the message";
AccessModeType AccessMode, desc="user/supervisor access type";
@@ -95,32 +102,4 @@ structure(ResponseMsg, desc="...", interface="NetworkMessage") {
MessageSizeType MessageSize, desc="size category of the message";
}
-GenericRequestType convertToGenericType(CoherenceRequestType type) {
- if(type == CoherenceRequestType:PUTX) {
- return GenericRequestType:PUTX;
- } else if(type == CoherenceRequestType:GETS) {
- return GenericRequestType:GETS;
- } else if(type == CoherenceRequestType:GETX) {
- return GenericRequestType:GETX;
- } else if(type == CoherenceRequestType:PUTS) {
- return GenericRequestType:PUTS;
- } else if(type == CoherenceRequestType:PUTX) {
- return GenericRequestType:PUTS;
- } else if(type == CoherenceRequestType:PUTO) {
- return GenericRequestType:PUTO;
- } else if(type == CoherenceRequestType:PUTO_SHARERS) {
- return GenericRequestType:PUTO;
- } else if(type == CoherenceRequestType:INV) {
- return GenericRequestType:INV;
- } else if(type == CoherenceRequestType:WB_ACK) {
- return GenericRequestType:WB_ACK;
- } else if(type == CoherenceRequestType:WB_ACK_DATA) {
- return GenericRequestType:WB_ACK;
- } else if(type == CoherenceRequestType:WB_NACK) {
- return GenericRequestType:NACK;
- } else {
- DEBUG_EXPR(type);
- error("invalid CoherenceRequestType");
- }
-}
diff --git a/src/mem/protocol/MOESI_CMP_directory.slicc b/src/mem/protocol/MOESI_CMP_directory.slicc
index c552d7157..f288aa4b0 100644
--- a/src/mem/protocol/MOESI_CMP_directory.slicc
+++ b/src/mem/protocol/MOESI_CMP_directory.slicc
@@ -1,5 +1,6 @@
MOESI_CMP_directory-msg.sm
MOESI_CMP_directory-L2cache.sm
MOESI_CMP_directory-L1cache.sm
+MOESI_CMP_directory-dma.sm
MOESI_CMP_directory-dir.sm
standard_CMP-protocol.sm
diff --git a/src/mem/protocol/MOESI_CMP_directory_m-dir.sm b/src/mem/protocol/MOESI_CMP_directory_m-dir.sm
deleted file mode 100644
index 3a4d875c1..000000000
--- a/src/mem/protocol/MOESI_CMP_directory_m-dir.sm
+++ /dev/null
@@ -1,652 +0,0 @@
-
-/*
- * Copyright (c) 1999-2005 Mark D. Hill and David A. Wood
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions are
- * met: redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer;
- * redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution;
- * neither the name of the copyright holders nor the names of its
- * contributors may be used to endorse or promote products derived from
- * this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
- * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
- * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
- * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
- * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
- * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
- * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * $Id$
- */
-
-machine(Directory, "Directory protocol") {
-
- // ** IN QUEUES **
- MessageBuffer foo1, network="From", virtual_network="0", ordered="false"; // a mod-L2 bank -> this Dir
- MessageBuffer requestToDir, network="From", virtual_network="1", ordered="false"; // a mod-L2 bank -> this Dir
- MessageBuffer responseToDir, network="From", virtual_network="2", ordered="false"; // a mod-L2 bank -> this Dir
-
- MessageBuffer goo1, network="To", virtual_network="0", ordered="false";
- MessageBuffer forwardFromDir, network="To", virtual_network="1", ordered="false";
- MessageBuffer responseFromDir, network="To", virtual_network="2", ordered="false"; // Dir -> mod-L2 bank
-
-
- // STATES
- enumeration(State, desc="Directory states", default="Directory_State_I") {
- // Base states
- I, desc="Invalid";
- S, desc="Shared";
- O, desc="Owner";
- M, desc="Modified";
-
- IS, desc="Blocked, was in idle";
- SS, desc="Blocked, was in shared";
- OO, desc="Blocked, was in owned";
- MO, desc="Blocked, going to owner or maybe modified";
- MM, desc="Blocked, going to modified";
-
- MI, desc="Blocked on a writeback";
- MIS, desc="Blocked on a writeback, but don't remove from sharers when received";
- OS, desc="Blocked on a writeback";
- OSS, desc="Blocked on a writeback, but don't remove from sharers when received";
- }
-
- // Events
- enumeration(Event, desc="Directory events") {
- GETX, desc="A GETX arrives";
- GETS, desc="A GETS arrives";
- PUTX, desc="A PUTX arrives";
- PUTO, desc="A PUTO arrives";
- PUTO_SHARERS, desc="A PUTO arrives, but don't remove from sharers list";
- Unblock, desc="An unblock message arrives";
- Last_Unblock, desc="An unblock message arrives, we're not waiting for any additional unblocks";
- Exclusive_Unblock, desc="The processor become the exclusive owner (E or M) of the line";
- Clean_Writeback, desc="The final message as part of a PutX/PutS, no data";
- Dirty_Writeback, desc="The final message as part of a PutX/PutS, contains data";
- Memory_Data, desc="Fetched data from memory arrives";
- Memory_Ack, desc="Writeback Ack from memory arrives";
- }
-
- // TYPES
-
- // DirectoryEntry
- structure(Entry, desc="...") {
- State DirectoryState, desc="Directory state";
- DataBlock DataBlk, desc="data for the block";
- NetDest Sharers, desc="Sharers for this block";
- NetDest Owner, desc="Owner of this block";
- int WaitingUnblocks, desc="Number of acks we're waiting for";
- }
-
- external_type(DirectoryMemory) {
- Entry lookup(Address);
- bool isPresent(Address);
- }
-
- // to simulate detailed DRAM
- external_type(MemoryControl, inport="yes", outport="yes") {
-
- }
-
-
- // ** OBJECTS **
-
- DirectoryMemory directory, constructor_hack="i";
- MemoryControl memBuffer, constructor_hack="i";
-
- State getState(Address addr) {
- return directory[addr].DirectoryState;
- }
-
- void setState(Address addr, State state) {
- if (directory.isPresent(addr)) {
-
- if (state == State:I) {
- assert(directory[addr].Owner.count() == 0);
- assert(directory[addr].Sharers.count() == 0);
- }
-
- if (state == State:S) {
- assert(directory[addr].Owner.count() == 0);
- }
-
- if (state == State:O) {
- assert(directory[addr].Owner.count() == 1);
- assert(directory[addr].Sharers.isSuperset(directory[addr].Owner) == false);
- }
-
- if (state == State:M) {
- assert(directory[addr].Owner.count() == 1);
- assert(directory[addr].Sharers.count() == 0);
- }
-
- if ((state != State:SS) && (state != State:OO)) {
- assert(directory[addr].WaitingUnblocks == 0);
- }
-
- if ( (directory[addr].DirectoryState != State:I) && (state == State:I) ) {
- directory[addr].DirectoryState := state;
- // disable coherence checker
- // sequencer.checkCoherence(addr);
- }
- else {
- directory[addr].DirectoryState := state;
- }
- }
- }
-
- // if no sharers, then directory can be considered both a sharer and exclusive w.r.t. coherence checking
- bool isBlockShared(Address addr) {
- if (directory.isPresent(addr)) {
- if (directory[addr].DirectoryState == State:I) {
- return true;
- }
- }
- return false;
- }
-
- bool isBlockExclusive(Address addr) {
- if (directory.isPresent(addr)) {
- if (directory[addr].DirectoryState == State:I) {
- return true;
- }
- }
- return false;
- }
-
-
- // ** OUT_PORTS **
- out_port(forwardNetwork_out, RequestMsg, forwardFromDir);
- out_port(responseNetwork_out, ResponseMsg, responseFromDir);
-// out_port(requestQueue_out, ResponseMsg, requestFromDir); // For recycling requests
- out_port(goo1_out, ResponseMsg, goo1);
- out_port(memQueue_out, MemoryMsg, memBuffer);
-
- // ** IN_PORTS **
-
- in_port(foo1_in, ResponseMsg, foo1) {
-
- }
-
- // in_port(unblockNetwork_in, ResponseMsg, unblockToDir) {
- // if (unblockNetwork_in.isReady()) {
- in_port(unblockNetwork_in, ResponseMsg, responseToDir) {
- if (unblockNetwork_in.isReady()) {
- peek(unblockNetwork_in, ResponseMsg) {
- if (in_msg.Type == CoherenceResponseType:UNBLOCK) {
- if (directory[in_msg.Address].WaitingUnblocks == 1) {
- trigger(Event:Last_Unblock, in_msg.Address);
- } else {
- trigger(Event:Unblock, in_msg.Address);
- }
- } else if (in_msg.Type == CoherenceResponseType:UNBLOCK_EXCLUSIVE) {
- trigger(Event:Exclusive_Unblock, in_msg.Address);
- } else if (in_msg.Type == CoherenceResponseType:WRITEBACK_DIRTY_DATA) {
- trigger(Event:Dirty_Writeback, in_msg.Address);
- } else if (in_msg.Type == CoherenceResponseType:WRITEBACK_CLEAN_ACK) {
- trigger(Event:Clean_Writeback, in_msg.Address);
- } else {
- error("Invalid message");
- }
- }
- }
- }
-
- in_port(requestQueue_in, RequestMsg, requestToDir) {
- if (requestQueue_in.isReady()) {
- peek(requestQueue_in, RequestMsg) {
- if (in_msg.Type == CoherenceRequestType:GETS) {
- trigger(Event:GETS, in_msg.Address);
- } else if (in_msg.Type == CoherenceRequestType:GETX) {
- trigger(Event:GETX, in_msg.Address);
- } else if (in_msg.Type == CoherenceRequestType:PUTX) {
- trigger(Event:PUTX, in_msg.Address);
- } else if (in_msg.Type == CoherenceRequestType:PUTO) {
- trigger(Event:PUTO, in_msg.Address);
- } else if (in_msg.Type == CoherenceRequestType:PUTO_SHARERS) {
- trigger(Event:PUTO_SHARERS, in_msg.Address);
- } else {
- error("Invalid message");
- }
- }
- }
- }
-
- // off-chip memory request/response is done
- in_port(memQueue_in, MemoryMsg, memBuffer) {
- if (memQueue_in.isReady()) {
- peek(memQueue_in, MemoryMsg) {
- if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
- trigger(Event:Memory_Data, in_msg.Address);
- } else if (in_msg.Type == MemoryRequestType:MEMORY_WB) {
- trigger(Event:Memory_Ack, in_msg.Address);
- } else {
- DEBUG_EXPR(in_msg.Type);
- error("Invalid message");
- }
- }
- }
- }
-
- // Actions
-
- action(a_sendWriteBackAck, "a", desc="Send writeback ack to requestor") {
- peek(requestQueue_in, RequestMsg) {
- enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
- out_msg.Address := address;
- out_msg.Type := CoherenceRequestType:WB_ACK;
- out_msg.Requestor := in_msg.Requestor;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- }
- }
- }
-
- action(b_sendWriteBackNack, "b", desc="Send writeback nack to requestor") {
- peek(requestQueue_in, RequestMsg) {
- enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
- out_msg.Address := address;
- out_msg.Type := CoherenceRequestType:WB_NACK;
- out_msg.Requestor := in_msg.Requestor;
- out_msg.Destination.add(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Writeback_Control;
- }
- }
- }
-
- action(c_clearOwner, "c", desc="Clear the owner field") {
- directory[address].Owner.clear();
- }
-
- action(c_moveOwnerToSharer, "cc", desc="Move owner to sharers") {
- directory[address].Sharers.addNetDest(directory[address].Owner);
- directory[address].Owner.clear();
- }
-
- action(cc_clearSharers, "\c", desc="Clear the sharers field") {
- directory[address].Sharers.clear();
- }
-
- action(d_sendDataMsg, "d", desc="Send data to requestor") {
- peek(memQueue_in, MemoryMsg) {
- enqueue(responseNetwork_out, ResponseMsg, latency="1") {
- out_msg.Address := address;
- out_msg.Sender := machineID;
- out_msg.SenderMachine := MachineType:Directory;
- out_msg.Destination.add(in_msg.OriginalRequestorMachId);
- //out_msg.DataBlk := directory[in_msg.Address].DataBlk;
- out_msg.DataBlk := in_msg.DataBlk;
- out_msg.Dirty := false; // By definition, the block is now clean
- out_msg.Acks := in_msg.Acks;
- if (in_msg.ReadX) {
- out_msg.Type := CoherenceResponseType:DATA_EXCLUSIVE;
- } else {
- out_msg.Type := CoherenceResponseType:DATA;
- }
- out_msg.MessageSize := MessageSizeType:Response_Data;
- }
- }
- }
-
- action(e_ownerIsUnblocker, "e", desc="The owner is now the unblocker") {
- peek(unblockNetwork_in, ResponseMsg) {
- directory[address].Owner.clear();
- directory[address].Owner.add(in_msg.Sender);
- }
- }
-
- action(f_forwardRequest, "f", desc="Forward request to owner") {
- peek(requestQueue_in, RequestMsg) {
- enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
- out_msg.Address := address;
- out_msg.Type := in_msg.Type;
- out_msg.Requestor := in_msg.Requestor;
- out_msg.Destination.addNetDest(directory[in_msg.Address].Owner);
- out_msg.Acks := directory[address].Sharers.count();
- if (directory[address].Sharers.isElement(in_msg.Requestor)) {
- out_msg.Acks := out_msg.Acks - 1;
- }
- out_msg.MessageSize := MessageSizeType:Forwarded_Control;
- }
- }
- }
-
- action(g_sendInvalidations, "g", desc="Send invalidations to sharers, not including the requester") {
- peek(requestQueue_in, RequestMsg) {
- if ((directory[in_msg.Address].Sharers.count() > 1) ||
- ((directory[in_msg.Address].Sharers.count() > 0) && (directory[in_msg.Address].Sharers.isElement(in_msg.Requestor) == false))) {
- enqueue(forwardNetwork_out, RequestMsg, latency="DIRECTORY_LATENCY") {
- out_msg.Address := address;
- out_msg.Type := CoherenceRequestType:INV;
- out_msg.Requestor := in_msg.Requestor;
- // out_msg.Destination := directory[in_msg.Address].Sharers;
- out_msg.Destination.addNetDest(directory[in_msg.Address].Sharers);
- out_msg.Destination.remove(in_msg.Requestor);
- out_msg.MessageSize := MessageSizeType:Invalidate_Control;
- }
- }
- }
- }
-
- action(i_popIncomingRequestQueue, "i", desc="Pop incoming request queue") {
- requestQueue_in.dequeue();
- }
-
- action(j_popIncomingUnblockQueue, "j", desc="Pop incoming unblock queue") {
- unblockNetwork_in.dequeue();
- }
-
- action(l_writeDataToMemory, "l", desc="Write PUTX/PUTO data to memory") {
- peek(unblockNetwork_in, ResponseMsg) {
- assert(in_msg.Dirty);
- assert(in_msg.MessageSize == MessageSizeType:Writeback_Data);
- directory[in_msg.Address].DataBlk := in_msg.DataBlk;
- DEBUG_EXPR(in_msg.Address);
- DEBUG_EXPR(in_msg.DataBlk);
- }
- }
-
- action(ll_checkDataInMemory, "\l", desc="Check PUTX/PUTO data is same as in the memory") {
- peek(unblockNetwork_in, ResponseMsg) {
- assert(in_msg.Dirty == false);
- assert(in_msg.MessageSize == MessageSizeType:Writeback_Control);
-
- // NOTE: The following check would not be valid in a real
- // implementation. We include the data in the "dataless"
- // message so we can assert the clean data matches the datablock
- // in memory
- assert(directory[in_msg.Address].DataBlk == in_msg.DataBlk);
- }
- }
-
- action(m_addUnlockerToSharers, "m", desc="Add the unlocker to the sharer list") {
- peek(unblockNetwork_in, ResponseMsg) {
- directory[address].Sharers.add(in_msg.Sender);
- }
- }
-
- action(n_incrementOutstanding, "n", desc="Increment outstanding requests") {
- directory[address].WaitingUnblocks := directory[address].WaitingUnblocks + 1;
- }
-
- action(o_decrementOutstanding, "o", desc="Decrement outstanding requests") {
- directory[address].WaitingUnblocks := directory[address].WaitingUnblocks - 1;
- assert(directory[address].WaitingUnblocks >= 0);
- }
-
- action(q_popMemQueue, "q", desc="Pop off-chip request queue") {
- memQueue_in.dequeue();
- }
-
- action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
- peek(requestQueue_in, RequestMsg) {
- enqueue(memQueue_out, MemoryMsg, latency="1") {
- out_msg.Address := address;
- out_msg.Type := MemoryRequestType:MEMORY_READ;
- out_msg.Sender := machineID;
- out_msg.OriginalRequestorMachId := in_msg.Requestor;
- out_msg.DataBlk := directory[in_msg.Address].DataBlk;
- out_msg.MessageSize := in_msg.MessageSize;
- //out_msg.Prefetch := false;
- // These are not used by memory but are passed back here with the read data:
- out_msg.ReadX := (in_msg.Type == CoherenceRequestType:GETS && directory[address].Sharers.count() == 0);
- out_msg.Acks := directory[address].Sharers.count();
- if (directory[address].Sharers.isElement(in_msg.Requestor)) {
- out_msg.Acks := out_msg.Acks - 1;
- }
- DEBUG_EXPR(out_msg);
- }
- }
- }
-
- action(qw_queueMemoryWBRequest, "qw", desc="Queue off-chip writeback request") {
- peek(unblockNetwork_in, ResponseMsg) {
- enqueue(memQueue_out, MemoryMsg, latency="1") {
- out_msg.Address := address;
- out_msg.Type := MemoryRequestType:MEMORY_WB;
- out_msg.Sender := machineID;
- //out_msg.OriginalRequestorMachId := in_msg.Requestor;
- out_msg.DataBlk := in_msg.DataBlk;
- out_msg.MessageSize := in_msg.MessageSize;
- //out_msg.Prefetch := false;
- // Not used:
- out_msg.ReadX := false;
- out_msg.Acks := 0;
- DEBUG_EXPR(out_msg);
- }
- }
- }
-
-
- // action(z_stall, "z", desc="Cannot be handled right now.") {
- // Special name recognized as do nothing case
- // }
-
- action(zz_recycleRequest, "\z", desc="Recycle the request queue") {
- requestQueue_in.recycle();
- }
-
- // TRANSITIONS
-
- transition(I, GETX, MM) {
- qf_queueMemoryFetchRequest;
- i_popIncomingRequestQueue;
- }
-
- transition(S, GETX, MM) {
- qf_queueMemoryFetchRequest;
- g_sendInvalidations;
- i_popIncomingRequestQueue;
- }
-
- transition(I, GETS, IS) {
- qf_queueMemoryFetchRequest;
- i_popIncomingRequestQueue;
- }
-
- transition({S, SS}, GETS, SS) {
- qf_queueMemoryFetchRequest;
- n_incrementOutstanding;
- i_popIncomingRequestQueue;
- }
-
- transition({I, S}, PUTO) {
- b_sendWriteBackNack;
- i_popIncomingRequestQueue;
- }
-
- transition({I, S, O}, PUTX) {
- b_sendWriteBackNack;
- i_popIncomingRequestQueue;
- }
-
- transition(O, GETX, MM) {
- f_forwardRequest;
- g_sendInvalidations;
- i_popIncomingRequestQueue;
- }
-
- transition({O, OO}, GETS, OO) {
- f_forwardRequest;
- n_incrementOutstanding;
- i_popIncomingRequestQueue;
- }
-
- transition(M, GETX, MM) {
- f_forwardRequest;
- i_popIncomingRequestQueue;
- }
-
- transition(M, GETS, MO) {
- f_forwardRequest;
- i_popIncomingRequestQueue;
- }
-
- transition(M, PUTX, MI) {
- a_sendWriteBackAck;
- i_popIncomingRequestQueue;
- }
-
- // happens if M->O transition happens on-chip
- transition(M, PUTO, MI) {
- a_sendWriteBackAck;
- i_popIncomingRequestQueue;
- }
-
- transition(M, PUTO_SHARERS, MIS) {
- a_sendWriteBackAck;
- i_popIncomingRequestQueue;
- }
-
- transition(O, PUTO, OS) {
- a_sendWriteBackAck;
- i_popIncomingRequestQueue;
- }
-
- transition(O, PUTO_SHARERS, OSS) {
- a_sendWriteBackAck;
- i_popIncomingRequestQueue;
- }
-
-
- transition({MM, MO, MI, MIS, OS, OSS}, {GETS, GETX, PUTO, PUTO_SHARERS, PUTX}) {
- zz_recycleRequest;
- }
-
- transition({MM, MO}, Exclusive_Unblock, M) {
- cc_clearSharers;
- e_ownerIsUnblocker;
- j_popIncomingUnblockQueue;
- }
-
- transition(MO, Unblock, O) {
- m_addUnlockerToSharers;
- j_popIncomingUnblockQueue;
- }
-
- transition({IS, SS, OO}, {GETX, PUTO, PUTO_SHARERS, PUTX}) {
- zz_recycleRequest;
- }
-
- transition(IS, GETS) {
- zz_recycleRequest;
- }
-
- transition(IS, Unblock, S) {
- m_addUnlockerToSharers;
- j_popIncomingUnblockQueue;
- }
-
- transition(IS, Exclusive_Unblock, M) {
- cc_clearSharers;
- e_ownerIsUnblocker;
- j_popIncomingUnblockQueue;
- }
-
- transition(SS, Unblock) {
- m_addUnlockerToSharers;
- o_decrementOutstanding;
- j_popIncomingUnblockQueue;
- }
-
- transition(SS, Last_Unblock, S) {
- m_addUnlockerToSharers;
- o_decrementOutstanding;
- j_popIncomingUnblockQueue;
- }
-
- transition(OO, Unblock) {
- m_addUnlockerToSharers;
- o_decrementOutstanding;
- j_popIncomingUnblockQueue;
- }
-
- transition(OO, Last_Unblock, O) {
- m_addUnlockerToSharers;
- o_decrementOutstanding;
- j_popIncomingUnblockQueue;
- }
-
- transition(MI, Dirty_Writeback, I) {
- c_clearOwner;
- cc_clearSharers;
- l_writeDataToMemory;
- qw_queueMemoryWBRequest;
- j_popIncomingUnblockQueue;
- }
-
- transition(MIS, Dirty_Writeback, S) {
- c_moveOwnerToSharer;
- l_writeDataToMemory;
- qw_queueMemoryWBRequest;
- j_popIncomingUnblockQueue;
- }
-
- transition(MIS, Clean_Writeback, S) {
- c_moveOwnerToSharer;
- j_popIncomingUnblockQueue;
- }
-
- transition(OS, Dirty_Writeback, S) {
- c_clearOwner;
- l_writeDataToMemory;
- qw_queueMemoryWBRequest;
- j_popIncomingUnblockQueue;
- }
-
- transition(OSS, Dirty_Writeback, S) {
- c_moveOwnerToSharer;
- l_writeDataToMemory;
- qw_queueMemoryWBRequest;
- j_popIncomingUnblockQueue;
- }
-
- transition(OSS, Clean_Writeback, S) {
- c_moveOwnerToSharer;
- j_popIncomingUnblockQueue;
- }
-
- transition(MI, Clean_Writeback, I) {
- c_clearOwner;
- cc_clearSharers;
- ll_checkDataInMemory;
- j_popIncomingUnblockQueue;
- }
-
- transition(OS, Clean_Writeback, S) {
- c_clearOwner;
- ll_checkDataInMemory;
- j_popIncomingUnblockQueue;
- }
-
- transition({MI, MIS}, Unblock, M) {
- j_popIncomingUnblockQueue;
- }
-
- transition({OS, OSS}, Unblock, O) {
- j_popIncomingUnblockQueue;
- }
-
- transition({I, S, O, M, IS, SS, OO, MO, MM, MI, MIS, OS, OSS}, Memory_Data) {
- d_sendDataMsg;
- q_popMemQueue;
- }
-
- transition({I, S, O, M, IS, SS, OO, MO, MM, MI, MIS, OS, OSS}, Memory_Ack) {
- //a_sendAck;
- q_popMemQueue;
- }
-
-}
diff --git a/src/mem/protocol/MOESI_CMP_directory_m.slicc b/src/mem/protocol/MOESI_CMP_directory_m.slicc
deleted file mode 100644
index 3abe8603a..000000000
--- a/src/mem/protocol/MOESI_CMP_directory_m.slicc
+++ /dev/null
@@ -1,5 +0,0 @@
-MOESI_CMP_directory-msg.sm
-MOESI_CMP_directory-L2cache.sm
-MOESI_CMP_directory-L1cache.sm
-MOESI_CMP_directory_m-dir.sm
-standard_CMP-protocol.sm
diff --git a/src/mem/protocol/RubySlicc_ComponentMapping.sm b/src/mem/protocol/RubySlicc_ComponentMapping.sm
index 022bb6862..559e54a8c 100644
--- a/src/mem/protocol/RubySlicc_ComponentMapping.sm
+++ b/src/mem/protocol/RubySlicc_ComponentMapping.sm
@@ -30,14 +30,11 @@
// Mapping functions
// NodeID map_address_to_node(Address addr);
+MachineID mapAddressToRange(Address addr, MachineType type, int low, int high);
MachineID map_Address_to_DMA(Address addr);
MachineID map_Address_to_Directory(Address addr);
NodeID map_Address_to_DirectoryNode(Address addr);
-MachineID map_Address_to_CentralArbiterNode(Address addr);
-NodeID oldmap_L1RubyNode_to_L2Cache(Address addr, NodeID L1RubyNode);
-MachineID map_L1CacheMachId_to_L2Cache(Address addr, MachineID L1CacheMachId);
-MachineID map_L2ChipId_to_L2Cache(Address addr, NodeID L2ChipId);
-// MachineID map_L1RubyNode_to_Arb(NodeID L1RubyNode);
+
MachineID getL1MachineID(NodeID L1RubyNode);
NodeID getChipID(MachineID L2machID);
diff --git a/src/mem/protocol/RubySlicc_Exports.sm b/src/mem/protocol/RubySlicc_Exports.sm
index a8b58b96c..412fd0de0 100644
--- a/src/mem/protocol/RubySlicc_Exports.sm
+++ b/src/mem/protocol/RubySlicc_Exports.sm
@@ -39,7 +39,10 @@ external_type(string, primitive="yes");
external_type(uint64, primitive="yes");
external_type(Time, primitive="yes", default="0");
external_type(Address);
-
+external_type(DataBlock, desc="..."){
+ void clear();
+ void copyPartial(DataBlock, int, int);
+}
// Declarations of external types that are common to all protocols
@@ -131,12 +134,12 @@ enumeration(CacheRequestType, desc="...", default="CacheRequestType_NULL") {
IO, desc="I/O";
REPLACEMENT, desc="Replacement";
COMMIT, desc="Commit version";
- LD_XACT, desc="Transactional Load";
- LDX_XACT, desc="Transactional Load-Intend-To-Modify";
- ST_XACT, desc="Transactional Store";
- BEGIN_XACT, desc="Begin Transaction";
- COMMIT_XACT, desc="Commit Transaction";
- ABORT_XACT, desc="Abort Transaction";
+ NULL, desc="Invalid request type";
+}
+
+enumeration(SequencerRequestType, desc="...", default="SequencerRequestType_NULL") {
+ LD, desc="Load";
+ ST, desc="Store";
NULL, desc="Invalid request type";
}
@@ -167,7 +170,9 @@ enumeration(GenericRequestType, desc="...", default="GenericRequestType_NULL") {
ST_XACT, desc="Transactional Store";
BEGIN_XACT, desc="Begin Transaction";
COMMIT_XACT, desc="Commit Transaction";
- ABORT_XACT, desc="Abort Transaction";
+ ABORT_XACT, desc="Abort Transaction";
+ DMA_READ, desc="DMA READ";
+ DMA_WRITE, desc="DMA WRITE";
NULL, desc="null request type";
}
@@ -232,6 +237,18 @@ structure(CacheMsg, desc="...", interface="Message") {
PrefetchBit Prefetch, desc="Is this a prefetch request";
}
+// SequencerMsg
+structure(SequencerMsg, desc="...", interface="Message") {
+ Address LineAddress, desc="Line address for this request";
+ Address PhysicalAddress, desc="Physical address for this request";
+ SequencerRequestType Type, desc="Type of request (LD, ST, etc)";
+ Address ProgramCounter, desc="Program counter of the instruction that caused the miss";
+ AccessModeType AccessMode, desc="user/supervisor access type";
+ DataBlock DataBlk, desc="Data";
+ int Len, desc="size in bytes of access";
+ PrefetchBit Prefetch, desc="Is this a prefetch request";
+}
+
// MaskPredictorType
enumeration(MaskPredictorType, "MaskPredictorType_Undefined", desc="...") {
Undefined, desc="Undefined";
diff --git a/src/mem/protocol/RubySlicc_Profiler.sm b/src/mem/protocol/RubySlicc_Profiler.sm
index 7a7fbdae1..d360af160 100644
--- a/src/mem/protocol/RubySlicc_Profiler.sm
+++ b/src/mem/protocol/RubySlicc_Profiler.sm
@@ -34,7 +34,7 @@ void profileCacheCLBsize(int size, int numStaleI);
void profileMemoryCLBsize(int size, int numStaleI);
// used by 2level exclusive cache protocols
-void profile_miss(CacheMsg msg, NodeID id);
+void profile_miss(CacheMsg msg);
// used by non-fast path protocols
void profile_L1Cache_miss(CacheMsg msg, NodeID l1cacheID);
diff --git a/src/mem/protocol/RubySlicc_Types.sm b/src/mem/protocol/RubySlicc_Types.sm
index aa5648a9e..9679b7b6f 100644
--- a/src/mem/protocol/RubySlicc_Types.sm
+++ b/src/mem/protocol/RubySlicc_Types.sm
@@ -29,11 +29,6 @@
// External Types
-external_type(DataBlock, desc="..."){
- void clear();
- void copyPartial(DataBlock, int, int);
-}
-
external_type(MessageBuffer, buffer="yes", inport="yes", outport="yes");
external_type(OutPort, primitive="yes");
diff --git a/src/mem/protocol/RubySlicc_Util.sm b/src/mem/protocol/RubySlicc_Util.sm
index 7f7ebf5ed..b37725402 100644
--- a/src/mem/protocol/RubySlicc_Util.sm
+++ b/src/mem/protocol/RubySlicc_Util.sm
@@ -57,5 +57,5 @@ int N_tokens();
bool distributedPersistentEnabled();
Address setOffset(Address addr, int offset);
Address makeLineAddress(Address addr);
-
+int addressOffset(Address addr);
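
Aside: the directory's DMA_WRITE path merges a sub-line payload into the backing block with DataBlk.copyPartial(in_msg.DataBlk, addressOffset(in_msg.Address), in_msg.Len), using the addressOffset helper exported above. Below is a toy C++ sketch of those two helpers, assuming a 64-byte line and same-offset copy semantics; it is an illustration of the intended behavior, not Ruby's actual DataBlock.

    #include <array>
    #include <cassert>
    #include <cstdint>
    #include <cstring>

    constexpr int kLineBytes = 64;  // assumed cache-line size

    // Byte offset of an address within its cache line.
    int addressOffset(uint64_t addr) {
        return static_cast<int>(addr % kLineBytes);
    }

    // Minimal stand-in for Ruby's DataBlock, for illustration only.
    struct DataBlock {
        std::array<uint8_t, kLineBytes> bytes{};

        // Copy bytes [offset, offset+len) of 'src' into the same range of this
        // block; this is how a sub-line DMA write is merged into memory's copy.
        void copyPartial(const DataBlock& src, int offset, int len) {
            assert(offset >= 0 && len >= 0 && offset + len <= kLineBytes);
            std::memcpy(bytes.data() + offset, src.bytes.data() + offset, len);
        }
    };

    int main() {
        DataBlock line;  // current contents of the line in memory
        DataBlock dma;   // payload of the DMA_WRITE request
        dma.bytes.fill(0xAB);

        uint64_t addr = 0x1000 + 16;  // the write starts 16 bytes into its line
        line.copyPartial(dma, addressOffset(addr), 8);

        assert(line.bytes[15] == 0x00 && line.bytes[16] == 0xAB &&
               line.bytes[23] == 0xAB && line.bytes[24] == 0x00);
        return 0;
    }

l_writeDMADataToMemoryFromTBE uses the same helpers when the payload has been parked in a TBE while the owner's dirty copy is collected first (the OI_D path in the directory).
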