author    Nilay Vaish <nilay@cs.wisc.edu>   2014-11-06 05:42:21 -0600
committer Nilay Vaish <nilay@cs.wisc.edu>   2014-11-06 05:42:21 -0600
commit    3022d463fbe1f969aadf7284ade996539c9454f9 (patch)
tree      7cd252e05ba750a4abe282db2d53957189e19173 /src/mem/protocol
parent    68ddfab8a4fa6f56c5f8bff6d91facd39abe353b (diff)
ruby: interface with classic memory controller
This patch is the final one in the series. The whole series, and this patch in particular, were written with the aim of interfacing ruby's directory controller with the memory controller in the classic memory system. This is being done because ruby's memory controller has not been kept up to date with the changes going on in DRAMs, while classic's memory controller is more up to date and supports multiple different DRAM types. It also brings classic and ruby ever closer together. The patch additionally changes ruby's memory controller to expose the same interface.
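In SLICC terms, the change boils down to the pattern below: a condensed sketch drawn from the hunks that follow, where the action and in_port shapes match MESI_Two_Level-dir.sm and the events triggered are illustrative of what each protocol does. Directory actions now call memory helpers implemented in AbstractController, and memory responses come back on a controller-owned responseFromMemory buffer rather than through a MemoryControl object:

    // Helpers now provided by AbstractController (declared in
    // RubySlicc_Defines.sm) for timing access to the memory behind
    // the classic memory controller:
    //   void queueMemoryRead(MachineID id, Address addr, Cycles latency);
    //   void queueMemoryWrite(MachineID id, Address addr, Cycles latency,
    //                         DataBlock block);
    //   void queueMemoryWritePartial(MachineID id, Address addr, Cycles latency,
    //                                DataBlock block, int size);

    // A directory action issues a fetch through a helper instead of
    // enqueueing a MemoryMsg on the old memQueue_out port:
    action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
      peek(requestNetwork_in, RequestMsg) {
        queueMemoryRead(in_msg.Requestor, address, to_mem_ctrl_latency);
      }
    }

    // Responses arrive on the controller-owned responseFromMemory buffer;
    // the trigger targets shown here stand in for each protocol's own events:
    in_port(memQueue_in, MemoryMsg, responseFromMemory) {
      if (memQueue_in.isReady()) {
        peek(memQueue_in, MemoryMsg) {
          if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
            trigger(Event:Memory_Data, in_msg.Addr, TBEs[in_msg.Addr]);
          } else {
            trigger(Event:Memory_Ack, in_msg.Addr, TBEs[in_msg.Addr]);
          }
        }
      }
    }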
Diffstat (limited to 'src/mem/protocol')
-rw-r--r--  src/mem/protocol/MESI_Two_Level-dir.sm        81
-rw-r--r--  src/mem/protocol/MI_example-dir.sm            76
-rw-r--r--  src/mem/protocol/MOESI_CMP_directory-dir.sm   85
-rw-r--r--  src/mem/protocol/MOESI_CMP_token-dir.sm       90
-rw-r--r--  src/mem/protocol/MOESI_hammer-dir.sm         106
-rw-r--r--  src/mem/protocol/RubySlicc_Defines.sm         16
-rw-r--r--  src/mem/protocol/RubySlicc_Types.sm            6
7 files changed, 142 insertions, 318 deletions
diff --git a/src/mem/protocol/MESI_Two_Level-dir.sm b/src/mem/protocol/MESI_Two_Level-dir.sm
index 939ae2a36..fa9d1f3d3 100644
--- a/src/mem/protocol/MESI_Two_Level-dir.sm
+++ b/src/mem/protocol/MESI_Two_Level-dir.sm
@@ -28,7 +28,6 @@
machine(Directory, "MESI Two Level directory protocol")
: DirectoryMemory * directory;
- MemoryControl * memBuffer;
Cycles to_mem_ctrl_latency := 1;
Cycles directory_latency := 6;
@@ -154,17 +153,21 @@ machine(Directory, "MESI Two Level directory protocol")
if(is_valid(tbe)) {
testAndRead(addr, tbe.DataBlk, pkt);
} else {
- memBuffer.functionalRead(pkt);
+ functionalMemoryRead(pkt);
}
}
int functionalWrite(Address addr, Packet *pkt) {
+ int num_functional_writes := 0;
+
TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
- testAndWrite(addr, tbe.DataBlk, pkt);
+ num_functional_writes := num_functional_writes +
+ testAndWrite(addr, tbe.DataBlk, pkt);
}
- return memBuffer.functionalWrite(pkt);
+ num_functional_writes := num_functional_writes + functionalMemoryWrite(pkt);
+ return num_functional_writes;
}
void setAccessPermission(Address addr, State state) {
@@ -182,7 +185,6 @@ machine(Directory, "MESI Two Level directory protocol")
// ** OUT_PORTS **
out_port(responseNetwork_out, ResponseMsg, responseFromDir);
- out_port(memQueue_out, MemoryMsg, memBuffer);
// ** IN_PORTS **
@@ -223,7 +225,7 @@ machine(Directory, "MESI Two Level directory protocol")
}
// off-chip memory request/response is done
- in_port(memQueue_in, MemoryMsg, memBuffer, rank = 2) {
+ in_port(memQueue_in, MemoryMsg, responseFromMemory, rank = 2) {
if (memQueue_in.isReady()) {
peek(memQueue_in, MemoryMsg) {
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
@@ -300,46 +302,21 @@ machine(Directory, "MESI Two Level directory protocol")
action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
peek(requestNetwork_in, RequestMsg) {
- enqueue(memQueue_out, MemoryMsg, to_mem_ctrl_latency) {
- out_msg.Addr := address;
- out_msg.Type := MemoryRequestType:MEMORY_READ;
- out_msg.Sender := machineID;
- out_msg.OriginalRequestorMachId := in_msg.Requestor;
- out_msg.MessageSize := in_msg.MessageSize;
- out_msg.Prefetch := in_msg.Prefetch;
-
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
+ queueMemoryRead(in_msg.Requestor, address, to_mem_ctrl_latency);
}
}
action(qw_queueMemoryWBRequest, "qw", desc="Queue off-chip writeback request") {
peek(responseNetwork_in, ResponseMsg) {
- enqueue(memQueue_out, MemoryMsg, to_mem_ctrl_latency) {
- out_msg.Addr := address;
- out_msg.Type := MemoryRequestType:MEMORY_WB;
- out_msg.Sender := machineID;
- out_msg.OriginalRequestorMachId := in_msg.Sender;
- out_msg.DataBlk := in_msg.DataBlk;
- out_msg.MessageSize := in_msg.MessageSize;
- //out_msg.Prefetch := in_msg.Prefetch;
-
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
+ queueMemoryWrite(in_msg.Sender, address, to_mem_ctrl_latency,
+ in_msg.DataBlk);
}
}
//added by SS for dma
action(qf_queueMemoryFetchRequestDMA, "qfd", desc="Queue off-chip fetch request") {
peek(requestNetwork_in, RequestMsg) {
- enqueue(memQueue_out, MemoryMsg, to_mem_ctrl_latency) {
- out_msg.Addr := address;
- out_msg.Type := MemoryRequestType:MEMORY_READ;
- out_msg.Sender := machineID;
- out_msg.OriginalRequestorMachId := machineID;
- out_msg.MessageSize := in_msg.MessageSize;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
+ queueMemoryRead(in_msg.Requestor, address, to_mem_ctrl_latency);
}
}
@@ -359,16 +336,11 @@ machine(Directory, "MESI Two Level directory protocol")
}
}
- action(qw_queueMemoryWBRequest_partial, "qwp", desc="Queue off-chip writeback request") {
- peek(requestNetwork_in, RequestMsg) {
- enqueue(memQueue_out, MemoryMsg, to_mem_ctrl_latency) {
- out_msg.Addr := address;
- out_msg.Type := MemoryRequestType:MEMORY_WB;
- out_msg.OriginalRequestorMachId := machineID;
- out_msg.DataBlk.copyPartial(in_msg.DataBlk, addressOffset(address), in_msg.Len);
- out_msg.MessageSize := in_msg.MessageSize;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
+ action(qw_queueMemoryWBRequest_partial, "qwp",
+ desc="Queue off-chip writeback request") {
+ peek(requestNetwork_in, RequestMsg) {
+ queueMemoryWritePartial(machineID, address, to_mem_ctrl_latency,
+ in_msg.DataBlk, in_msg.Len);
}
}
@@ -424,22 +396,11 @@ machine(Directory, "MESI Two Level directory protocol")
}
}
- action(qw_queueMemoryWBRequest_partialTBE, "qwt", desc="Queue off-chip writeback request") {
+ action(qw_queueMemoryWBRequest_partialTBE, "qwt",
+ desc="Queue off-chip writeback request") {
peek(responseNetwork_in, ResponseMsg) {
- enqueue(memQueue_out, MemoryMsg, to_mem_ctrl_latency) {
- assert(is_valid(tbe));
- out_msg.Addr := address;
- out_msg.Type := MemoryRequestType:MEMORY_WB;
- out_msg.OriginalRequestorMachId := in_msg.Sender;
- //out_msg.DataBlk := in_msg.DataBlk;
- //out_msg.DataBlk.copyPartial(tbe.DataBlk, tbe.Offset, tbe.Len);
- out_msg.DataBlk.copyPartial(tbe.DataBlk, addressOffset(tbe.PhysicalAddress), tbe.Len);
-
- out_msg.MessageSize := in_msg.MessageSize;
- //out_msg.Prefetch := in_msg.Prefetch;
-
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
+ queueMemoryWritePartial(in_msg.Sender, tbe.PhysicalAddress,
+ to_mem_ctrl_latency, tbe.DataBlk, tbe.Len);
}
}
diff --git a/src/mem/protocol/MI_example-dir.sm b/src/mem/protocol/MI_example-dir.sm
index 60662080a..def7053ea 100644
--- a/src/mem/protocol/MI_example-dir.sm
+++ b/src/mem/protocol/MI_example-dir.sm
@@ -29,8 +29,8 @@
machine(Directory, "Directory protocol")
: DirectoryMemory * directory;
- MemoryControl * memBuffer;
Cycles directory_latency := 12;
+ Cycles to_memory_controller_latency := 1;
MessageBuffer * forwardFromDir, network="To", virtual_network="3",
ordered="false", vnet_type="forward";
@@ -178,17 +178,21 @@ machine(Directory, "Directory protocol")
if(is_valid(tbe)) {
testAndRead(addr, tbe.DataBlk, pkt);
} else {
- memBuffer.functionalRead(pkt);
+ functionalMemoryRead(pkt);
}
}
int functionalWrite(Address addr, Packet *pkt) {
+ int num_functional_writes := 0;
+
TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
- testAndWrite(addr, tbe.DataBlk, pkt);
+ num_functional_writes := num_functional_writes +
+ testAndWrite(addr, tbe.DataBlk, pkt);
}
- return memBuffer.functionalWrite(pkt);
+ num_functional_writes := num_functional_writes + functionalMemoryWrite(pkt);
+ return num_functional_writes;
}
// ** OUT_PORTS **
@@ -197,10 +201,7 @@ machine(Directory, "Directory protocol")
out_port(requestQueue_out, ResponseMsg, requestToDir); // For recycling requests
out_port(dmaResponseNetwork_out, DMAResponseMsg, dmaResponseFromDir);
-//added by SS
- out_port(memQueue_out, MemoryMsg, memBuffer);
// ** IN_PORTS **
-
in_port(dmaRequestQueue_in, DMARequestMsg, dmaRequestToDir) {
if (dmaRequestQueue_in.isReady()) {
peek(dmaRequestQueue_in, DMARequestMsg) {
@@ -239,7 +240,7 @@ machine(Directory, "Directory protocol")
//added by SS
// off-chip memory request/response is done
- in_port(memQueue_in, MemoryMsg, memBuffer) {
+ in_port(memQueue_in, MemoryMsg, responseFromMemory) {
if (memQueue_in.isReady()) {
peek(memQueue_in, MemoryMsg) {
TBE tbe := TBEs[in_msg.Addr];
@@ -440,73 +441,36 @@ machine(Directory, "Directory protocol")
action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
peek(requestQueue_in, RequestMsg) {
- enqueue(memQueue_out, MemoryMsg, 1) {
- out_msg.Addr := address;
- out_msg.Type := MemoryRequestType:MEMORY_READ;
- out_msg.Sender := machineID;
- out_msg.OriginalRequestorMachId := in_msg.Requestor;
- out_msg.MessageSize := in_msg.MessageSize;
- DPRINTF(RubySlicc,"%s\n", out_msg);
- }
+ queueMemoryRead(in_msg.Requestor, address, to_memory_controller_latency);
}
}
action(qf_queueMemoryFetchRequestDMA, "qfd", desc="Queue off-chip fetch request") {
peek(dmaRequestQueue_in, DMARequestMsg) {
- enqueue(memQueue_out, MemoryMsg, 1) {
- out_msg.Addr := address;
- out_msg.Type := MemoryRequestType:MEMORY_READ;
- out_msg.Sender := machineID;
- //out_msg.OriginalRequestorMachId := machineID;
- out_msg.MessageSize := in_msg.MessageSize;
- DPRINTF(RubySlicc,"%s\n", out_msg);
- }
+ queueMemoryRead(in_msg.Requestor, address, to_memory_controller_latency);
}
}
action(qw_queueMemoryWBRequest_partial, "qwp", desc="Queue off-chip writeback request") {
- peek(dmaRequestQueue_in, DMARequestMsg) {
- enqueue(memQueue_out, MemoryMsg, 1) {
- out_msg.Addr := address;
- out_msg.Type := MemoryRequestType:MEMORY_WB;
- out_msg.DataBlk.copyPartial(
- in_msg.DataBlk, addressOffset(in_msg.PhysicalAddress), in_msg.Len);
- out_msg.MessageSize := in_msg.MessageSize;
- DPRINTF(RubySlicc,"%s\n", out_msg);
- }
+ peek(dmaRequestQueue_in, DMARequestMsg) {
+ queueMemoryWritePartial(in_msg.Requestor, address,
+ to_memory_controller_latency, in_msg.DataBlk,
+ in_msg.Len);
}
}
action(qw_queueMemoryWBRequest_partialTBE, "qwt", desc="Queue off-chip writeback request") {
peek(requestQueue_in, RequestMsg) {
- enqueue(memQueue_out, MemoryMsg, 1) {
- assert(is_valid(tbe));
- out_msg.Addr := address;
- out_msg.Type := MemoryRequestType:MEMORY_WB;
- out_msg.OriginalRequestorMachId := in_msg.Requestor;
-
- // get incoming data
- out_msg.DataBlk.copyPartial(
- tbe.DataBlk, addressOffset(tbe.PhysicalAddress), tbe.Len);
- out_msg.MessageSize := in_msg.MessageSize;
- DPRINTF(RubySlicc,"%s\n", out_msg);
- }
+ queueMemoryWritePartial(in_msg.Requestor, address,
+ to_memory_controller_latency, tbe.DataBlk,
+ tbe.Len);
}
}
-
action(l_queueMemoryWBRequest, "lq", desc="Write PUTX data to memory") {
peek(requestQueue_in, RequestMsg) {
- enqueue(memQueue_out, MemoryMsg, 1) {
- out_msg.Addr := address;
- out_msg.Type := MemoryRequestType:MEMORY_WB;
- out_msg.Sender := machineID;
- out_msg.OriginalRequestorMachId := in_msg.Requestor;
- out_msg.DataBlk := in_msg.DataBlk;
- out_msg.MessageSize := in_msg.MessageSize;
-
- DPRINTF(RubySlicc,"%s\n", out_msg);
- }
+ queueMemoryWrite(in_msg.Requestor, address, to_memory_controller_latency,
+ in_msg.DataBlk);
}
}
diff --git a/src/mem/protocol/MOESI_CMP_directory-dir.sm b/src/mem/protocol/MOESI_CMP_directory-dir.sm
index a6b93fa54..3e19897f3 100644
--- a/src/mem/protocol/MOESI_CMP_directory-dir.sm
+++ b/src/mem/protocol/MOESI_CMP_directory-dir.sm
@@ -28,8 +28,8 @@
machine(Directory, "Directory protocol")
: DirectoryMemory * directory;
- MemoryControl * memBuffer;
Cycles directory_latency := 6;
+ Cycles to_memory_controller_latency := 1;
// Message Queues
MessageBuffer * requestToDir, network="From", virtual_network="1",
@@ -191,11 +191,13 @@ machine(Directory, "Directory protocol")
}
void functionalRead(Address addr, Packet *pkt) {
- memBuffer.functionalRead(pkt);
+ functionalMemoryRead(pkt);
}
int functionalWrite(Address addr, Packet *pkt) {
- return memBuffer.functionalWrite(pkt);
+ int num_functional_writes := 0;
+ num_functional_writes := num_functional_writes + functionalMemoryWrite(pkt);
+ return num_functional_writes;
}
// if no sharers, then directory can be considered
@@ -222,7 +224,6 @@ machine(Directory, "Directory protocol")
// ** OUT_PORTS **
out_port(forwardNetwork_out, RequestMsg, forwardFromDir);
out_port(responseNetwork_out, ResponseMsg, responseFromDir);
- out_port(memQueue_out, MemoryMsg, memBuffer);
// ** IN_PORTS **
@@ -286,7 +287,7 @@ machine(Directory, "Directory protocol")
}
// off-chip memory request/response is done
- in_port(memQueue_in, MemoryMsg, memBuffer) {
+ in_port(memQueue_in, MemoryMsg, responseFromMemory) {
if (memQueue_in.isReady()) {
peek(memQueue_in, MemoryMsg) {
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
@@ -465,41 +466,18 @@ machine(Directory, "Directory protocol")
action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
peek(requestQueue_in, RequestMsg) {
- enqueue(memQueue_out, MemoryMsg, 1) {
- out_msg.Addr := address;
- out_msg.Type := MemoryRequestType:MEMORY_READ;
- out_msg.Sender := machineID;
- out_msg.OriginalRequestorMachId := in_msg.Requestor;
- out_msg.MessageSize := in_msg.MessageSize;
- //out_msg.Prefetch := false;
- // These are not used by memory but are passed back here with the read data:
- out_msg.ReadX := (in_msg.Type == CoherenceRequestType:GETS &&
- getDirectoryEntry(address).Sharers.count() == 0);
- out_msg.Acks := getDirectoryEntry(address).Sharers.count();
- if (getDirectoryEntry(address).Sharers.isElement(in_msg.Requestor)) {
- out_msg.Acks := out_msg.Acks - 1;
- }
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
+ queueMemoryRead(in_msg.Requestor, address, to_memory_controller_latency);
}
}
action(qw_queueMemoryWBRequest, "qw", desc="Queue off-chip writeback request") {
peek(unblockNetwork_in, ResponseMsg) {
- enqueue(memQueue_out, MemoryMsg, 1) {
- out_msg.Addr := address;
- out_msg.Type := MemoryRequestType:MEMORY_WB;
- out_msg.Sender := machineID;
- if (is_valid(tbe)) {
- out_msg.OriginalRequestorMachId := tbe.Requestor;
- }
- out_msg.DataBlk := in_msg.DataBlk;
- out_msg.MessageSize := in_msg.MessageSize;
- //out_msg.Prefetch := false;
- // Not used:
- out_msg.ReadX := false;
- out_msg.Acks := getDirectoryEntry(address).Sharers.count(); // for dma requests
- DPRINTF(RubySlicc, "%s\n", out_msg);
+ if (is_valid(tbe)) {
+ queueMemoryWrite(tbe.Requestor, address, to_memory_controller_latency,
+ in_msg.DataBlk);
+ } else {
+ queueMemoryWrite(in_msg.Sender, address, to_memory_controller_latency,
+ in_msg.DataBlk);
}
}
}
@@ -507,41 +485,18 @@ machine(Directory, "Directory protocol")
action(qw_queueMemoryWBRequestFromMessageAndTBE, "qwmt",
desc="Queue off-chip writeback request") {
peek(unblockNetwork_in, ResponseMsg) {
- enqueue(memQueue_out, MemoryMsg, 1) {
- out_msg.Addr := address;
- out_msg.Type := MemoryRequestType:MEMORY_WB;
- out_msg.Sender := machineID;
- if (is_valid(tbe)) {
- out_msg.OriginalRequestorMachId := tbe.Requestor;
- }
- out_msg.DataBlk := in_msg.DataBlk;
- out_msg.DataBlk.copyPartial(tbe.DataBlk,
- addressOffset(tbe.PhysicalAddress), tbe.Len);
-
- out_msg.MessageSize := in_msg.MessageSize;
- // Not used:
- out_msg.ReadX := false;
- out_msg.Acks := getDirectoryEntry(address).Sharers.count(); // for dma requests
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
+ DataBlock DataBlk := in_msg.DataBlk;
+ DataBlk.copyPartial(tbe.DataBlk, addressOffset(tbe.PhysicalAddress),
+ tbe.Len);
+ queueMemoryWrite(tbe.Requestor, address, to_memory_controller_latency,
+ DataBlk);
}
}
action(qw_queueMemoryWBRequest2, "/qw", desc="Queue off-chip writeback request") {
peek(requestQueue_in, RequestMsg) {
- enqueue(memQueue_out, MemoryMsg, 1) {
- out_msg.Addr := address;
- out_msg.Type := MemoryRequestType:MEMORY_WB;
- out_msg.Sender := machineID;
- out_msg.OriginalRequestorMachId := in_msg.Requestor;
- out_msg.DataBlk := in_msg.DataBlk;
- out_msg.MessageSize := in_msg.MessageSize;
- //out_msg.Prefetch := false;
- // Not used:
- out_msg.ReadX := false;
- out_msg.Acks := getDirectoryEntry(address).Sharers.count(); // for dma requests
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
+ queueMemoryWrite(in_msg.Requestor, address, to_memory_controller_latency,
+ in_msg.DataBlk);
}
}
diff --git a/src/mem/protocol/MOESI_CMP_token-dir.sm b/src/mem/protocol/MOESI_CMP_token-dir.sm
index 8d6abd93c..a70ef6073 100644
--- a/src/mem/protocol/MOESI_CMP_token-dir.sm
+++ b/src/mem/protocol/MOESI_CMP_token-dir.sm
@@ -28,12 +28,12 @@
machine(Directory, "Token protocol")
: DirectoryMemory * directory;
- MemoryControl * memBuffer;
int l2_select_num_bits;
Cycles directory_latency := 5;
bool distributed_persistent := "True";
Cycles fixed_timeout_latency := 100;
Cycles reissue_wakeup_latency := 10;
+ Cycles to_memory_controller_latency := 1;
// Message Queues from dir to other controllers / network
MessageBuffer * dmaResponseFromDir, network="To", virtual_network="5",
@@ -148,8 +148,7 @@ machine(Directory, "Token protocol")
structure(TBE, desc="TBE entries for outstanding DMA requests") {
Address PhysicalAddress, desc="physical address";
State TBEState, desc="Transient State";
- DataBlock DmaDataBlk, desc="DMA Data to be written. Partial blocks need to merged with system memory";
- DataBlock DataBlk, desc="The current view of system memory";
+ DataBlock DataBlk, desc="Current view of the associated address range";
int Len, desc="...";
MachineID DmaRequestor, desc="DMA requestor";
bool WentPersistent, desc="Did the DMA request require a persistent request";
@@ -250,17 +249,21 @@ machine(Directory, "Token protocol")
if(is_valid(tbe)) {
testAndRead(addr, tbe.DataBlk, pkt);
} else {
- memBuffer.functionalRead(pkt);
+ functionalMemoryRead(pkt);
}
}
int functionalWrite(Address addr, Packet *pkt) {
+ int num_functional_writes := 0;
+
TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
- testAndWrite(addr, tbe.DataBlk, pkt);
+ num_functional_writes := num_functional_writes +
+ testAndWrite(addr, tbe.DataBlk, pkt);
}
- return memBuffer.functionalWrite(pkt);
+ num_functional_writes := num_functional_writes + functionalMemoryWrite(pkt);
+ return num_functional_writes;
}
// ** OUT_PORTS **
@@ -269,15 +272,9 @@ machine(Directory, "Token protocol")
out_port(requestNetwork_out, RequestMsg, requestFromDir);
out_port(dmaResponseNetwork_out, DMAResponseMsg, dmaResponseFromDir);
- //
- // Memory buffer for memory controller to DIMM communication
- //
- out_port(memQueue_out, MemoryMsg, memBuffer);
-
// ** IN_PORTS **
-
// off-chip memory request/response is done
- in_port(memQueue_in, MemoryMsg, memBuffer) {
+ in_port(memQueue_in, MemoryMsg, responseFromMemory) {
if (memQueue_in.isReady()) {
peek(memQueue_in, MemoryMsg) {
if (in_msg.Type == MemoryRequestType:MEMORY_READ) {
@@ -653,73 +650,39 @@ machine(Directory, "Token protocol")
action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
peek(requestNetwork_in, RequestMsg) {
- enqueue(memQueue_out, MemoryMsg, 1) {
- out_msg.Addr := address;
- out_msg.Type := MemoryRequestType:MEMORY_READ;
- out_msg.Sender := machineID;
- out_msg.OriginalRequestorMachId := in_msg.Requestor;
- out_msg.MessageSize := in_msg.MessageSize;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
+ queueMemoryRead(in_msg.Requestor, address, to_memory_controller_latency);
}
}
action(qp_queueMemoryForPersistent, "qp", desc="Queue off-chip fetch request") {
- enqueue(memQueue_out, MemoryMsg, 1) {
- out_msg.Addr := address;
- out_msg.Type := MemoryRequestType:MEMORY_READ;
- out_msg.Sender := machineID;
- out_msg.OriginalRequestorMachId := persistentTable.findSmallest(address);
- out_msg.MessageSize := MessageSizeType:Request_Control;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
+ queueMemoryRead(persistentTable.findSmallest(address), address,
+ to_memory_controller_latency);
}
action(fd_memoryDma, "fd", desc="Queue off-chip fetch request") {
peek(dmaRequestQueue_in, DMARequestMsg) {
- enqueue(memQueue_out, MemoryMsg, 1) {
- out_msg.Addr := address;
- out_msg.Type := MemoryRequestType:MEMORY_READ;
- out_msg.Sender := machineID;
- out_msg.OriginalRequestorMachId := in_msg.Requestor;
- out_msg.MessageSize := in_msg.MessageSize;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
+ queueMemoryRead(in_msg.Requestor, address, to_memory_controller_latency);
}
}
action(lq_queueMemoryWbRequest, "lq", desc="Write data to memory") {
peek(responseNetwork_in, ResponseMsg) {
- enqueue(memQueue_out, MemoryMsg, 1) {
- out_msg.Addr := address;
- out_msg.Type := MemoryRequestType:MEMORY_WB;
- out_msg.MessageSize := in_msg.MessageSize;
- out_msg.DataBlk := in_msg.DataBlk;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
+ queueMemoryWrite(in_msg.Sender, address, to_memory_controller_latency,
+ in_msg.DataBlk);
}
}
action(ld_queueMemoryDmaWriteFromTbe, "ld", desc="Write DMA data to memory") {
- enqueue(memQueue_out, MemoryMsg, 1) {
- out_msg.Addr := address;
- out_msg.Type := MemoryRequestType:MEMORY_WB;
- // first, initialize the data blk to the current version of system memory
- out_msg.DataBlk := tbe.DataBlk;
- // then add the dma write data
- out_msg.DataBlk.copyPartial(
- tbe.DmaDataBlk, addressOffset(tbe.PhysicalAddress), tbe.Len);
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
+ queueMemoryWritePartial(tbe.DmaRequestor, address,
+ to_memory_controller_latency, tbe.DataBlk,
+ tbe.Len);
}
- action(lr_queueMemoryDmaReadWriteback, "lr", desc="Write DMA data from read to memory") {
- enqueue(memQueue_out, MemoryMsg, 1) {
- out_msg.Addr := address;
- out_msg.Type := MemoryRequestType:MEMORY_WB;
- // first, initialize the data blk to the current version of system memory
- out_msg.DataBlk := tbe.DataBlk;
- DPRINTF(RubySlicc, "%s\n", out_msg);
+ action(lr_queueMemoryDmaReadWriteback, "lr",
+ desc="Write DMA data from read to memory") {
+ peek(responseNetwork_in, ResponseMsg) {
+ queueMemoryWrite(machineID, address, to_memory_controller_latency,
+ in_msg.DataBlk);
}
}
@@ -727,7 +690,7 @@ machine(Directory, "Token protocol")
peek(dmaRequestQueue_in, DMARequestMsg) {
TBEs.allocate(address);
set_tbe(TBEs[address]);
- tbe.DmaDataBlk := in_msg.DataBlk;
+ tbe.DataBlk := in_msg.DataBlk;
tbe.PhysicalAddress := in_msg.PhysicalAddress;
tbe.Len := in_msg.Len;
tbe.DmaRequestor := in_msg.Requestor;
@@ -769,7 +732,10 @@ machine(Directory, "Token protocol")
action(rd_recordDataInTbe, "rd", desc="Record data in TBE") {
peek(responseNetwork_in, ResponseMsg) {
+ DataBlock DataBlk := tbe.DataBlk;
tbe.DataBlk := in_msg.DataBlk;
+ tbe.DataBlk.copyPartial(DataBlk, addressOffset(tbe.PhysicalAddress),
+ tbe.Len);
}
}
diff --git a/src/mem/protocol/MOESI_hammer-dir.sm b/src/mem/protocol/MOESI_hammer-dir.sm
index 43d48c6d2..e04573128 100644
--- a/src/mem/protocol/MOESI_hammer-dir.sm
+++ b/src/mem/protocol/MOESI_hammer-dir.sm
@@ -36,8 +36,8 @@
machine(Directory, "AMD Hammer-like protocol")
: DirectoryMemory * directory;
CacheMemory * probeFilter;
- MemoryControl * memBuffer;
- Cycles memory_controller_latency := 2;
+ Cycles from_memory_controller_latency := 2;
+ Cycles to_memory_controller_latency := 1;
bool probe_filter_enabled := "False";
bool full_bit_dir_enabled := "False";
@@ -271,17 +271,21 @@ machine(Directory, "AMD Hammer-like protocol")
if(is_valid(tbe)) {
testAndRead(addr, tbe.DataBlk, pkt);
} else {
- memBuffer.functionalRead(pkt);
+ functionalMemoryRead(pkt);
}
}
int functionalWrite(Address addr, Packet *pkt) {
+ int num_functional_writes := 0;
+
TBE tbe := TBEs[addr];
if(is_valid(tbe)) {
- testAndWrite(addr, tbe.DataBlk, pkt);
+ num_functional_writes := num_functional_writes +
+ testAndWrite(addr, tbe.DataBlk, pkt);
}
- return memBuffer.functionalWrite(pkt);
+ num_functional_writes := num_functional_writes + functionalMemoryWrite(pkt);
+ return num_functional_writes;
}
Event cache_request_to_event(CoherenceRequestType type) {
@@ -305,11 +309,6 @@ machine(Directory, "AMD Hammer-like protocol")
out_port(dmaResponseNetwork_out, DMAResponseMsg, dmaResponseFromDir);
out_port(triggerQueue_out, TriggerMsg, triggerQueue);
- //
- // Memory buffer for memory controller to DIMM communication
- //
- out_port(memQueue_out, MemoryMsg, memBuffer);
-
// ** IN_PORTS **
// Trigger Queue
@@ -389,7 +388,7 @@ machine(Directory, "AMD Hammer-like protocol")
}
// off-chip memory request/response is done
- in_port(memQueue_in, MemoryMsg, memBuffer, rank=2) {
+ in_port(memQueue_in, MemoryMsg, responseFromMemory, rank=2) {
if (memQueue_in.isReady()) {
peek(memQueue_in, MemoryMsg) {
PfEntry pf_entry := getProbeFilterEntry(in_msg.Addr);
@@ -503,7 +502,7 @@ machine(Directory, "AMD Hammer-like protocol")
action(a_sendWriteBackAck, "a", desc="Send writeback ack to requestor") {
peek(requestQueue_in, RequestMsg) {
- enqueue(forwardNetwork_out, RequestMsg, memory_controller_latency) {
+ enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
out_msg.Addr := address;
out_msg.Type := CoherenceRequestType:WB_ACK;
out_msg.Requestor := in_msg.Requestor;
@@ -516,7 +515,7 @@ machine(Directory, "AMD Hammer-like protocol")
action(oc_sendBlockAck, "oc", desc="Send block ack to the owner") {
peek(requestQueue_in, RequestMsg) {
if (((probe_filter_enabled || full_bit_dir_enabled) && (in_msg.Requestor == cache_entry.Owner)) || machineCount(MachineType:L1Cache) == 1) {
- enqueue(forwardNetwork_out, RequestMsg, memory_controller_latency) {
+ enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
out_msg.Addr := address;
out_msg.Type := CoherenceRequestType:BLOCK_ACK;
out_msg.Requestor := in_msg.Requestor;
@@ -529,7 +528,7 @@ machine(Directory, "AMD Hammer-like protocol")
action(b_sendWriteBackNack, "b", desc="Send writeback nack to requestor") {
peek(requestQueue_in, RequestMsg) {
- enqueue(forwardNetwork_out, RequestMsg, memory_controller_latency) {
+ enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
out_msg.Addr := address;
out_msg.Type := CoherenceRequestType:WB_NACK;
out_msg.Requestor := in_msg.Requestor;
@@ -847,27 +846,13 @@ machine(Directory, "AMD Hammer-like protocol")
action(qf_queueMemoryFetchRequest, "qf", desc="Queue off-chip fetch request") {
peek(requestQueue_in, RequestMsg) {
- enqueue(memQueue_out, MemoryMsg, 1) {
- out_msg.Addr := address;
- out_msg.Type := MemoryRequestType:MEMORY_READ;
- out_msg.Sender := machineID;
- out_msg.OriginalRequestorMachId := in_msg.Requestor;
- out_msg.MessageSize := in_msg.MessageSize;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
+ queueMemoryRead(in_msg.Requestor, address, to_memory_controller_latency);
}
}
action(qd_queueMemoryRequestFromDmaRead, "qd", desc="Queue off-chip fetch request") {
peek(dmaRequestQueue_in, DMARequestMsg) {
- enqueue(memQueue_out, MemoryMsg, 1) {
- out_msg.Addr := address;
- out_msg.Type := MemoryRequestType:MEMORY_READ;
- out_msg.Sender := machineID;
- out_msg.OriginalRequestorMachId := in_msg.Requestor;
- out_msg.MessageSize := in_msg.MessageSize;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
+ queueMemoryRead(in_msg.Requestor, address, to_memory_controller_latency);
}
}
@@ -880,7 +865,7 @@ machine(Directory, "AMD Hammer-like protocol")
fwd_set := cache_entry.Sharers;
fwd_set.remove(machineIDToNodeID(in_msg.Requestor));
if (fwd_set.count() > 0) {
- enqueue(forwardNetwork_out, RequestMsg, memory_controller_latency) {
+ enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
out_msg.Addr := address;
out_msg.Type := in_msg.Type;
out_msg.Requestor := in_msg.Requestor;
@@ -895,7 +880,7 @@ machine(Directory, "AMD Hammer-like protocol")
}
} else {
peek(requestQueue_in, RequestMsg) {
- enqueue(forwardNetwork_out, RequestMsg, memory_controller_latency) {
+ enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
out_msg.Addr := address;
out_msg.Type := in_msg.Type;
out_msg.Requestor := in_msg.Requestor;
@@ -915,7 +900,7 @@ machine(Directory, "AMD Hammer-like protocol")
if (full_bit_dir_enabled) {
assert(cache_entry.Sharers.count() > 0);
peek(requestQueue_in, RequestMsg) {
- enqueue(forwardNetwork_out, RequestMsg, memory_controller_latency) {
+ enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
out_msg.Addr := address;
out_msg.Type := CoherenceRequestType:INV;
out_msg.Requestor := machineID;
@@ -924,7 +909,7 @@ machine(Directory, "AMD Hammer-like protocol")
}
}
} else {
- enqueue(forwardNetwork_out, RequestMsg, memory_controller_latency) {
+ enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
out_msg.Addr := address;
out_msg.Type := CoherenceRequestType:INV;
out_msg.Requestor := machineID;
@@ -937,7 +922,7 @@ machine(Directory, "AMD Hammer-like protocol")
action(io_invalidateOwnerRequest, "io", desc="invalidate all copies") {
if (machineCount(MachineType:L1Cache) > 1) {
- enqueue(forwardNetwork_out, RequestMsg, memory_controller_latency) {
+ enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
assert(is_valid(cache_entry));
out_msg.Addr := address;
out_msg.Type := CoherenceRequestType:INV;
@@ -956,7 +941,7 @@ machine(Directory, "AMD Hammer-like protocol")
fwd_set := cache_entry.Sharers;
fwd_set.remove(machineIDToNodeID(in_msg.Requestor));
if (fwd_set.count() > 0) {
- enqueue(forwardNetwork_out, RequestMsg, memory_controller_latency) {
+ enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
out_msg.Addr := address;
out_msg.Type := in_msg.Type;
out_msg.Requestor := in_msg.Requestor;
@@ -969,7 +954,7 @@ machine(Directory, "AMD Hammer-like protocol")
}
}
} else {
- enqueue(forwardNetwork_out, RequestMsg, memory_controller_latency) {
+ enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
out_msg.Addr := address;
out_msg.Type := in_msg.Type;
out_msg.Requestor := in_msg.Requestor;
@@ -1005,7 +990,7 @@ machine(Directory, "AMD Hammer-like protocol")
// decouple the two.
//
peek(unblockNetwork_in, ResponseMsg) {
- enqueue(forwardNetwork_out, RequestMsg, memory_controller_latency) {
+ enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
assert(is_valid(tbe));
out_msg.Addr := address;
out_msg.Type := CoherenceRequestType:MERGED_GETS;
@@ -1026,7 +1011,7 @@ machine(Directory, "AMD Hammer-like protocol")
assert(machineCount(MachineType:L1Cache) > 1);
if (probe_filter_enabled || full_bit_dir_enabled) {
peek(requestQueue_in, RequestMsg) {
- enqueue(forwardNetwork_out, RequestMsg, memory_controller_latency) {
+ enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
assert(is_valid(cache_entry));
out_msg.Addr := address;
out_msg.Type := in_msg.Type;
@@ -1040,7 +1025,7 @@ machine(Directory, "AMD Hammer-like protocol")
}
} else {
peek(requestQueue_in, RequestMsg) {
- enqueue(forwardNetwork_out, RequestMsg, memory_controller_latency) {
+ enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
out_msg.Addr := address;
out_msg.Type := in_msg.Type;
out_msg.Requestor := in_msg.Requestor;
@@ -1060,7 +1045,7 @@ machine(Directory, "AMD Hammer-like protocol")
if (probe_filter_enabled || full_bit_dir_enabled) {
peek(requestQueue_in, RequestMsg) {
if (in_msg.Requestor != cache_entry.Owner) {
- enqueue(forwardNetwork_out, RequestMsg, memory_controller_latency) {
+ enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
assert(is_valid(cache_entry));
out_msg.Addr := address;
out_msg.Type := in_msg.Type;
@@ -1075,7 +1060,7 @@ machine(Directory, "AMD Hammer-like protocol")
}
} else {
peek(requestQueue_in, RequestMsg) {
- enqueue(forwardNetwork_out, RequestMsg, memory_controller_latency) {
+ enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
out_msg.Addr := address;
out_msg.Type := in_msg.Type;
out_msg.Requestor := in_msg.Requestor;
@@ -1094,7 +1079,7 @@ machine(Directory, "AMD Hammer-like protocol")
assert(is_valid(tbe));
if (tbe.NumPendingMsgs > 0) {
peek(dmaRequestQueue_in, DMARequestMsg) {
- enqueue(forwardNetwork_out, RequestMsg, memory_controller_latency) {
+ enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
out_msg.Addr := address;
out_msg.Type := CoherenceRequestType:GETX;
//
@@ -1113,7 +1098,7 @@ machine(Directory, "AMD Hammer-like protocol")
assert(is_valid(tbe));
if (tbe.NumPendingMsgs > 0) {
peek(dmaRequestQueue_in, DMARequestMsg) {
- enqueue(forwardNetwork_out, RequestMsg, memory_controller_latency) {
+ enqueue(forwardNetwork_out, RequestMsg, from_memory_controller_latency) {
out_msg.Addr := address;
out_msg.Type := CoherenceRequestType:GETS;
//
@@ -1221,38 +1206,21 @@ machine(Directory, "AMD Hammer-like protocol")
action(l_queueMemoryWBRequest, "lq", desc="Write PUTX data to memory") {
peek(unblockNetwork_in, ResponseMsg) {
- enqueue(memQueue_out, MemoryMsg, 1) {
- assert(in_msg.Dirty);
- assert(in_msg.MessageSize == MessageSizeType:Writeback_Data);
- out_msg.Addr := address;
- out_msg.Type := MemoryRequestType:MEMORY_WB;
- out_msg.DataBlk := in_msg.DataBlk;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
+ queueMemoryWrite(in_msg.Sender, address, to_memory_controller_latency,
+ in_msg.DataBlk);
}
}
action(ld_queueMemoryDmaWrite, "ld", desc="Write DMA data to memory") {
- enqueue(memQueue_out, MemoryMsg, 1) {
- assert(is_valid(tbe));
- out_msg.Addr := address;
- out_msg.Type := MemoryRequestType:MEMORY_WB;
- // first, initialize the data blk to the current version of system memory
- out_msg.DataBlk := tbe.DataBlk;
- // then add the dma write data
- out_msg.DataBlk.copyPartial(tbe.DmaDataBlk, addressOffset(tbe.PhysicalAddress), tbe.Len);
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
+ assert(is_valid(tbe));
+ queueMemoryWritePartial(tbe.DmaRequestor, tbe.PhysicalAddress,
+ to_memory_controller_latency, tbe.DmaDataBlk,
+ tbe.Len);
}
action(ly_queueMemoryWriteFromTBE, "ly", desc="Write data to memory from TBE") {
- enqueue(memQueue_out, MemoryMsg, 1) {
- assert(is_valid(tbe));
- out_msg.Addr := address;
- out_msg.Type := MemoryRequestType:MEMORY_WB;
- out_msg.DataBlk := tbe.DataBlk;
- DPRINTF(RubySlicc, "%s\n", out_msg);
- }
+ queueMemoryWrite(machineID, address, to_memory_controller_latency,
+ tbe.DataBlk);
}
action(ll_checkIncomingWriteback, "\l", desc="Check PUTX/PUTO response message") {
diff --git a/src/mem/protocol/RubySlicc_Defines.sm b/src/mem/protocol/RubySlicc_Defines.sm
index 1480790bf..514a307df 100644
--- a/src/mem/protocol/RubySlicc_Defines.sm
+++ b/src/mem/protocol/RubySlicc_Defines.sm
@@ -31,3 +31,19 @@
NodeID version;
MachineID machineID;
NodeID clusterID;
+MessageBuffer responseFromMemory, ordered="false";
+
+// Functions implemented in the AbstractController class for
+// making timing access to the memory maintained by the
+// memory controllers.
+void queueMemoryRead(MachineID id, Address addr, Cycles latency);
+void queueMemoryWrite(MachineID id, Address addr, Cycles latency,
+ DataBlock block);
+void queueMemoryWritePartial(MachineID id, Address addr, Cycles latency,
+ DataBlock block, int size);
+
+// Functions implemented in the AbstractController class for
+// making functional access to the memory maintained by the
+// memory controllers.
+void functionalMemoryRead(Packet *pkt);
+bool functionalMemoryWrite(Packet *pkt);
diff --git a/src/mem/protocol/RubySlicc_Types.sm b/src/mem/protocol/RubySlicc_Types.sm
index 2d0658e68..fb506781c 100644
--- a/src/mem/protocol/RubySlicc_Types.sm
+++ b/src/mem/protocol/RubySlicc_Types.sm
@@ -161,12 +161,6 @@ structure (WireBuffer, inport="yes", outport="yes", external = "yes") {
}
-structure (MemoryControl, inport="yes", outport="yes", external = "yes") {
- void recordRequestType(CacheRequestType);
- void functionalRead(Packet *pkt);
- int functionalWrite(Packet *pkt);
-}
-
structure (DMASequencer, external = "yes") {
void ackCallback();
void dataCallback(DataBlock);