Diffstat (limited to 'src/mem')
-rw-r--r-- src/mem/protocol/MESI_CMP_directory-L1cache.sm | 6
-rw-r--r-- src/mem/protocol/MESI_CMP_directory-L2cache.sm | 6
-rw-r--r-- src/mem/protocol/MESI_CMP_directory-dir.sm | 4
-rw-r--r-- src/mem/protocol/MESI_CMP_directory-dma.sm | 2
-rw-r--r-- src/mem/protocol/MI_example-cache.sm | 4
-rw-r--r-- src/mem/protocol/MI_example-dir.sm | 2
-rw-r--r-- src/mem/protocol/MI_example-dma.sm | 2
-rw-r--r-- src/mem/protocol/MOESI_CMP_directory-L1cache.sm | 5
-rw-r--r-- src/mem/protocol/MOESI_CMP_directory-L2cache.sm | 4
-rw-r--r-- src/mem/protocol/MOESI_CMP_directory-dir.sm | 2
-rw-r--r-- src/mem/protocol/MOESI_CMP_directory-dma.sm | 4
-rw-r--r-- src/mem/protocol/MOESI_CMP_token-L1cache.sm | 34
-rw-r--r-- src/mem/protocol/MOESI_CMP_token-L2cache.sm | 4
-rw-r--r-- src/mem/protocol/MOESI_CMP_token-dir.sm | 9
-rw-r--r-- src/mem/protocol/MOESI_CMP_token-dma.sm | 2
-rw-r--r-- src/mem/protocol/MOESI_hammer-cache.sm | 6
-rw-r--r-- src/mem/protocol/MOESI_hammer-dir.sm | 2
-rw-r--r-- src/mem/protocol/MOESI_hammer-dma.sm | 2
-rw-r--r-- src/mem/protocol/Network_test-cache.sm | 2
-rw-r--r-- src/mem/protocol/RubySlicc_Exports.sm | 1
-rw-r--r-- src/mem/protocol/RubySlicc_Types.sm | 2
-rw-r--r-- src/mem/protocol/RubySlicc_Util.sm | 1
-rw-r--r-- src/mem/ruby/buffers/MessageBuffer.cc | 28
-rw-r--r-- src/mem/ruby/buffers/MessageBuffer.hh | 18
-rw-r--r-- src/mem/ruby/buffers/MessageBufferNode.hh | 18
-rw-r--r-- src/mem/ruby/common/Consumer.cc | 7
-rw-r--r-- src/mem/ruby/common/Consumer.hh | 5
-rw-r--r-- src/mem/ruby/network/BasicLink.hh | 2
-rw-r--r-- src/mem/ruby/network/BasicLink.py | 2
-rw-r--r-- src/mem/ruby/network/garnet/fixed-pipeline/GarnetLink_d.py | 2
-rw-r--r-- src/mem/ruby/network/garnet/fixed-pipeline/InputUnit_d.hh | 2
-rw-r--r-- src/mem/ruby/network/garnet/fixed-pipeline/NetworkInterface_d.cc | 10
-rw-r--r-- src/mem/ruby/network/garnet/fixed-pipeline/NetworkLink_d.hh | 2
-rw-r--r-- src/mem/ruby/network/garnet/fixed-pipeline/OutputUnit_d.hh | 2
-rw-r--r-- src/mem/ruby/network/garnet/fixed-pipeline/Router_d.cc | 6
-rw-r--r-- src/mem/ruby/network/garnet/fixed-pipeline/SWallocator_d.cc | 2
-rw-r--r-- src/mem/ruby/network/garnet/fixed-pipeline/Switch_d.cc | 2
-rw-r--r-- src/mem/ruby/network/garnet/fixed-pipeline/VCallocator_d.cc | 2
-rw-r--r-- src/mem/ruby/network/garnet/flexible-pipeline/GarnetLink.py | 2
-rw-r--r-- src/mem/ruby/network/garnet/flexible-pipeline/NetworkInterface.cc | 12
-rw-r--r-- src/mem/ruby/network/garnet/flexible-pipeline/NetworkLink.hh | 3
-rw-r--r-- src/mem/ruby/network/garnet/flexible-pipeline/Router.cc | 16
-rw-r--r-- src/mem/ruby/network/garnet/flexible-pipeline/Router.hh | 2
-rw-r--r-- src/mem/ruby/network/simple/PerfectSwitch.cc | 2
-rw-r--r-- src/mem/ruby/network/simple/Switch.cc | 6
-rw-r--r-- src/mem/ruby/network/simple/Switch.hh | 2
-rw-r--r-- src/mem/ruby/network/simple/Throttle.cc | 10
-rw-r--r-- src/mem/ruby/network/simple/Throttle.hh | 17
-rw-r--r-- src/mem/ruby/slicc_interface/AbstractCacheEntry.hh | 2
-rw-r--r-- src/mem/ruby/slicc_interface/AbstractController.hh | 2
-rw-r--r-- src/mem/ruby/slicc_interface/Controller.py | 2
-rw-r--r-- src/mem/ruby/slicc_interface/RubySlicc_Util.hh | 5
-rw-r--r-- src/mem/ruby/system/Cache.py | 2
-rw-r--r-- src/mem/ruby/system/CacheMemory.hh | 4
-rw-r--r-- src/mem/ruby/system/RubyMemoryControl.cc | 4
-rw-r--r-- src/mem/ruby/system/RubyMemoryControl.hh | 6
-rw-r--r-- src/mem/ruby/system/RubyMemoryControl.py | 6
-rw-r--r-- src/mem/ruby/system/Sequencer.cc | 2
-rw-r--r-- src/mem/ruby/system/TimerTable.cc | 5
-rw-r--r-- src/mem/ruby/system/TimerTable.hh | 7
-rw-r--r-- src/mem/ruby/system/WireBuffer.cc | 12
-rw-r--r-- src/mem/ruby/system/WireBuffer.hh | 2
-rw-r--r-- src/mem/slicc/ast/EnqueueStatementAST.py | 6
-rw-r--r-- src/mem/slicc/ast/FuncCallExprAST.py | 4
-rw-r--r-- src/mem/slicc/ast/InfixOperatorExprAST.py | 43
-rw-r--r-- src/mem/slicc/symbols/StateMachine.py | 13
-rw-r--r-- src/mem/slicc/symbols/Type.py | 2
67 files changed, 218 insertions, 199 deletions
diff --git a/src/mem/protocol/MESI_CMP_directory-L1cache.sm b/src/mem/protocol/MESI_CMP_directory-L1cache.sm
index bcfb20297..113421842 100644
--- a/src/mem/protocol/MESI_CMP_directory-L1cache.sm
+++ b/src/mem/protocol/MESI_CMP_directory-L1cache.sm
@@ -33,9 +33,9 @@ machine(L1Cache, "MESI Directory L1 Cache CMP")
CacheMemory * L1DcacheMemory,
Prefetcher * prefetcher = 'NULL',
int l2_select_num_bits,
- int l1_request_latency = 2,
- int l1_response_latency = 2,
- int to_l2_latency = 1,
+ Cycles l1_request_latency = 2,
+ Cycles l1_response_latency = 2,
+ Cycles to_l2_latency = 1,
bool send_evictions,
bool enable_prefetch = "False"
{
diff --git a/src/mem/protocol/MESI_CMP_directory-L2cache.sm b/src/mem/protocol/MESI_CMP_directory-L2cache.sm
index 849714c49..645b2d94c 100644
--- a/src/mem/protocol/MESI_CMP_directory-L2cache.sm
+++ b/src/mem/protocol/MESI_CMP_directory-L2cache.sm
@@ -34,9 +34,9 @@
machine(L2Cache, "MESI Directory L2 Cache CMP")
: CacheMemory * L2cacheMemory,
- int l2_request_latency = 2,
- int l2_response_latency = 2,
- int to_l1_latency = 1
+ Cycles l2_request_latency = 2,
+ Cycles l2_response_latency = 2,
+ Cycles to_l1_latency = 1
{
// L2 BANK QUEUES
// From local bank of L2 cache TO the network
diff --git a/src/mem/protocol/MESI_CMP_directory-dir.sm b/src/mem/protocol/MESI_CMP_directory-dir.sm
index 0dbbafafa..38054136d 100644
--- a/src/mem/protocol/MESI_CMP_directory-dir.sm
+++ b/src/mem/protocol/MESI_CMP_directory-dir.sm
@@ -38,8 +38,8 @@
machine(Directory, "MESI_CMP_filter_directory protocol")
: DirectoryMemory * directory,
MemoryControl * memBuffer,
- int to_mem_ctrl_latency = 1,
- int directory_latency = 6
+ Cycles to_mem_ctrl_latency = 1,
+ Cycles directory_latency = 6
{
MessageBuffer requestToDir, network="From", virtual_network="0", ordered="false", vnet_type="request";
diff --git a/src/mem/protocol/MESI_CMP_directory-dma.sm b/src/mem/protocol/MESI_CMP_directory-dma.sm
index 374463164..2f8d9d4f2 100644
--- a/src/mem/protocol/MESI_CMP_directory-dma.sm
+++ b/src/mem/protocol/MESI_CMP_directory-dma.sm
@@ -29,7 +29,7 @@
machine(DMA, "DMA Controller")
: DMASequencer * dma_sequencer,
- int request_latency = 6
+ Cycles request_latency = 6
{
MessageBuffer responseFromDir, network="From", virtual_network="1", ordered="true", vnet_type="response", no_vector="true";
diff --git a/src/mem/protocol/MI_example-cache.sm b/src/mem/protocol/MI_example-cache.sm
index 91f060a38..e62317efa 100644
--- a/src/mem/protocol/MI_example-cache.sm
+++ b/src/mem/protocol/MI_example-cache.sm
@@ -30,8 +30,8 @@
machine(L1Cache, "MI Example L1 Cache")
: Sequencer * sequencer,
CacheMemory * cacheMemory,
- int cache_response_latency = 12,
- int issue_latency = 2,
+ Cycles cache_response_latency = 12,
+ Cycles issue_latency = 2,
bool send_evictions
{
diff --git a/src/mem/protocol/MI_example-dir.sm b/src/mem/protocol/MI_example-dir.sm
index edb571c1f..e1fd53594 100644
--- a/src/mem/protocol/MI_example-dir.sm
+++ b/src/mem/protocol/MI_example-dir.sm
@@ -30,7 +30,7 @@
machine(Directory, "Directory protocol")
: DirectoryMemory * directory,
MemoryControl * memBuffer,
- int directory_latency = 12
+ Cycles directory_latency = 12
{
MessageBuffer forwardFromDir, network="To", virtual_network="3", ordered="false", vnet_type="forward";
diff --git a/src/mem/protocol/MI_example-dma.sm b/src/mem/protocol/MI_example-dma.sm
index c7c576a2c..5d67da465 100644
--- a/src/mem/protocol/MI_example-dma.sm
+++ b/src/mem/protocol/MI_example-dma.sm
@@ -29,7 +29,7 @@
machine(DMA, "DMA Controller")
: DMASequencer * dma_sequencer,
- int request_latency = 6
+ Cycles request_latency = 6
{
MessageBuffer responseFromDir, network="From", virtual_network="1", ordered="true", vnet_type="response", no_vector="true";
diff --git a/src/mem/protocol/MOESI_CMP_directory-L1cache.sm b/src/mem/protocol/MOESI_CMP_directory-L1cache.sm
index f6ed32009..101b7abd6 100644
--- a/src/mem/protocol/MOESI_CMP_directory-L1cache.sm
+++ b/src/mem/protocol/MOESI_CMP_directory-L1cache.sm
@@ -37,7 +37,8 @@ machine(L1Cache, "Directory protocol")
CacheMemory * L1IcacheMemory,
CacheMemory * L1DcacheMemory,
int l2_select_num_bits,
- int request_latency = 2,
+ Cycles request_latency = 2,
+ Cycles use_timeout_latency = 50,
bool send_evictions
{
@@ -696,7 +697,7 @@ machine(L1Cache, "Directory protocol")
}
action(o_scheduleUseTimeout, "oo", desc="Schedule a use timeout.") {
- useTimerTable.set(address, 50);
+ useTimerTable.set(address, use_timeout_latency);
}
action(ub_dmaUnblockL2Cache, "ub", desc="Send dma ack to l2 cache") {
diff --git a/src/mem/protocol/MOESI_CMP_directory-L2cache.sm b/src/mem/protocol/MOESI_CMP_directory-L2cache.sm
index ba78cff9f..6c61d3eb6 100644
--- a/src/mem/protocol/MOESI_CMP_directory-L2cache.sm
+++ b/src/mem/protocol/MOESI_CMP_directory-L2cache.sm
@@ -34,8 +34,8 @@
machine(L2Cache, "Token protocol")
: CacheMemory * L2cacheMemory,
- int response_latency = 2,
- int request_latency = 2
+ Cycles response_latency = 2,
+ Cycles request_latency = 2
{
// L2 BANK QUEUES
diff --git a/src/mem/protocol/MOESI_CMP_directory-dir.sm b/src/mem/protocol/MOESI_CMP_directory-dir.sm
index f458fccd8..57ef10ae1 100644
--- a/src/mem/protocol/MOESI_CMP_directory-dir.sm
+++ b/src/mem/protocol/MOESI_CMP_directory-dir.sm
@@ -33,7 +33,7 @@
machine(Directory, "Directory protocol")
: DirectoryMemory * directory,
MemoryControl * memBuffer,
- int directory_latency = 6
+ Cycles directory_latency = 6
{
// ** IN QUEUES **
diff --git a/src/mem/protocol/MOESI_CMP_directory-dma.sm b/src/mem/protocol/MOESI_CMP_directory-dma.sm
index 6d10305ea..18ab25f65 100644
--- a/src/mem/protocol/MOESI_CMP_directory-dma.sm
+++ b/src/mem/protocol/MOESI_CMP_directory-dma.sm
@@ -1,8 +1,8 @@
machine(DMA, "DMA Controller")
: DMASequencer * dma_sequencer,
- int request_latency = 14,
- int response_latency = 14
+ Cycles request_latency = 14,
+ Cycles response_latency = 14
{
MessageBuffer responseFromDir, network="From", virtual_network="2", ordered="false", vnet_type="response";
diff --git a/src/mem/protocol/MOESI_CMP_token-L1cache.sm b/src/mem/protocol/MOESI_CMP_token-L1cache.sm
index 5533a34dc..365a963b9 100644
--- a/src/mem/protocol/MOESI_CMP_token-L1cache.sm
+++ b/src/mem/protocol/MOESI_CMP_token-L1cache.sm
@@ -38,10 +38,14 @@ machine(L1Cache, "Token protocol")
CacheMemory * L1DcacheMemory,
int l2_select_num_bits,
int N_tokens,
- int l1_request_latency = 2,
- int l1_response_latency = 2,
+
+ Cycles l1_request_latency = 2,
+ Cycles l1_response_latency = 2,
int retry_threshold = 1,
- int fixed_timeout_latency = 100,
+ Cycles fixed_timeout_latency = 100,
+ Cycles reissue_wakeup_latency = 10,
+ Cycles use_timeout_latency = 50,
+
bool dynamic_timeout_enabled = true,
bool no_mig_atomic = true,
bool send_evictions
@@ -195,19 +199,20 @@ machine(L1Cache, "Token protocol")
int outstandingRequests, default="0";
int outstandingPersistentRequests, default="0";
- int averageLatencyHysteresis, default="(8)"; // Constant that provides hysteresis for calculated the estimated average
- int averageLatencyCounter, default="(500 << (*m_L1Cache_averageLatencyHysteresis_ptr))";
+ // Constant that provides hysteresis for calculated the estimated average
+ int averageLatencyHysteresis, default="(8)";
+ Cycles averageLatencyCounter,
+ default="(Cycles(500) << (*m_L1Cache_averageLatencyHysteresis_ptr))";
- int averageLatencyEstimate() {
+ Cycles averageLatencyEstimate() {
DPRINTF(RubySlicc, "%d\n",
(averageLatencyCounter >> averageLatencyHysteresis));
//profile_average_latency_estimate( (averageLatencyCounter >> averageLatencyHysteresis) );
return averageLatencyCounter >> averageLatencyHysteresis;
}
- void updateAverageLatencyEstimate(int latency) {
+ void updateAverageLatencyEstimate(Cycles latency) {
DPRINTF(RubySlicc, "%d\n", latency);
- assert(latency >= 0);
// By subtracting the current average and then adding the most
// recent sample, we calculate an estimate of the recent average.
@@ -781,7 +786,7 @@ machine(L1Cache, "Token protocol")
// IssueCount.
// Set a wakeup timer
- reissueTimerTable.set(address, 10);
+ reissueTimerTable.set(address, reissue_wakeup_latency);
}
} else {
@@ -834,7 +839,7 @@ machine(L1Cache, "Token protocol")
// Set a wakeup timer
if (dynamic_timeout_enabled) {
- reissueTimerTable.set(address, 1.25 * averageLatencyEstimate());
+ reissueTimerTable.set(address, (5 * averageLatencyEstimate()) / 4);
} else {
reissueTimerTable.set(address, fixed_timeout_latency);
}
@@ -902,10 +907,9 @@ machine(L1Cache, "Token protocol")
// IssueCount.
// Set a wakeup timer
- reissueTimerTable.set(address, 10);
+ reissueTimerTable.set(address, reissue_wakeup_latency);
}
-
} else {
// Make a normal request
enqueue(requestNetwork_out, RequestMsg, latency = l1_request_latency) {
@@ -961,7 +965,7 @@ machine(L1Cache, "Token protocol")
// Set a wakeup timer
if (dynamic_timeout_enabled) {
- reissueTimerTable.set(address, 1.25 * averageLatencyEstimate());
+ reissueTimerTable.set(address, (5 * averageLatencyEstimate()) / 4);
} else {
reissueTimerTable.set(address, fixed_timeout_latency);
}
@@ -1381,7 +1385,7 @@ machine(L1Cache, "Token protocol")
}
action(o_scheduleUseTimeout, "o", desc="Schedule a use timeout.") {
- useTimerTable.set(address, 50);
+ useTimerTable.set(address, use_timeout_latency);
}
action(p_informL2AboutTokenLoss, "p", desc="Inform L2 about loss of all tokens") {
@@ -1448,7 +1452,7 @@ machine(L1Cache, "Token protocol")
// Update average latency
if (tbe.IssueCount <= 1) {
if (tbe.ExternalResponse == true) {
- updateAverageLatencyEstimate(time_to_int(curCycle()) - time_to_int(tbe.IssueTime));
+ updateAverageLatencyEstimate(TimeToCycles(curCycle() - tbe.IssueTime));
}
}
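
Note on the timeout hunks above: the floating-point expression 1.25 * averageLatencyEstimate() is replaced with (5 * averageLatencyEstimate()) / 4 because the estimate is now a Cycles value and stays in integer arithmetic, so the scale factor must be written as an integer ratio. A minimal sketch of that scaling, using an illustrative stand-in type rather than gem5's Cycles:

    #include <cstdint>
    #include <iostream>

    // Illustrative stand-in for a cycle count; the real Cycles type lives in gem5.
    struct Cyc { uint64_t c; };

    Cyc operator*(int s, Cyc a) { return Cyc{s * a.c}; }
    Cyc operator/(Cyc a, int s) { return Cyc{a.c / s}; }

    int main()
    {
        Cyc avg{42};
        // 1.25 * avg would drag the count through floating point; scaling by the
        // integer ratio 5/4 keeps everything in whole cycles (42 -> 52 here).
        Cyc timeout = (5 * avg) / 4;
        std::cout << timeout.c << "\n";
        return 0;
    }
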
diff --git a/src/mem/protocol/MOESI_CMP_token-L2cache.sm b/src/mem/protocol/MOESI_CMP_token-L2cache.sm
index 411390a5e..da8bcc6fa 100644
--- a/src/mem/protocol/MOESI_CMP_token-L2cache.sm
+++ b/src/mem/protocol/MOESI_CMP_token-L2cache.sm
@@ -35,8 +35,8 @@
machine(L2Cache, "Token protocol")
: CacheMemory * L2cacheMemory,
int N_tokens,
- int l2_request_latency = 5,
- int l2_response_latency = 5,
+ Cycles l2_request_latency = 5,
+ Cycles l2_response_latency = 5,
bool filtering_enabled = true
{
diff --git a/src/mem/protocol/MOESI_CMP_token-dir.sm b/src/mem/protocol/MOESI_CMP_token-dir.sm
index 5a604555e..84e888d55 100644
--- a/src/mem/protocol/MOESI_CMP_token-dir.sm
+++ b/src/mem/protocol/MOESI_CMP_token-dir.sm
@@ -36,9 +36,10 @@ machine(Directory, "Token protocol")
: DirectoryMemory * directory,
MemoryControl * memBuffer,
int l2_select_num_bits,
- int directory_latency = 5,
+ Cycles directory_latency = 5,
bool distributed_persistent = true,
- int fixed_timeout_latency = 100
+ Cycles fixed_timeout_latency = 100,
+ Cycles reissue_wakeup_latency = 10
{
MessageBuffer dmaResponseFromDir, network="To", virtual_network="5", ordered="true", vnet_type="response";
@@ -470,7 +471,7 @@ machine(Directory, "Token protocol")
// IssueCount.
// Set a wakeup timer
- reissueTimerTable.set(address, 10);
+ reissueTimerTable.set(address, reissue_wakeup_latency);
}
}
@@ -540,7 +541,7 @@ machine(Directory, "Token protocol")
// IssueCount.
// Set a wakeup timer
- reissueTimerTable.set(address, 10);
+ reissueTimerTable.set(address, reissue_wakeup_latency);
}
}
diff --git a/src/mem/protocol/MOESI_CMP_token-dma.sm b/src/mem/protocol/MOESI_CMP_token-dma.sm
index 98666998a..637778fcf 100644
--- a/src/mem/protocol/MOESI_CMP_token-dma.sm
+++ b/src/mem/protocol/MOESI_CMP_token-dma.sm
@@ -29,7 +29,7 @@
machine(DMA, "DMA Controller")
: DMASequencer * dma_sequencer,
- int request_latency = 6
+ Cycles request_latency = 6
{
MessageBuffer responseFromDir, network="From", virtual_network="5", ordered="true", vnet_type="response", no_vector="true";
diff --git a/src/mem/protocol/MOESI_hammer-cache.sm b/src/mem/protocol/MOESI_hammer-cache.sm
index e4d01df65..fc2a9da90 100644
--- a/src/mem/protocol/MOESI_hammer-cache.sm
+++ b/src/mem/protocol/MOESI_hammer-cache.sm
@@ -38,9 +38,9 @@ machine(L1Cache, "AMD Hammer-like protocol")
CacheMemory * L1IcacheMemory,
CacheMemory * L1DcacheMemory,
CacheMemory * L2cacheMemory,
- int cache_response_latency = 10,
- int issue_latency = 2,
- int l2_cache_hit_latency = 10,
+ Cycles cache_response_latency = 10,
+ Cycles issue_latency = 2,
+ Cycles l2_cache_hit_latency = 10,
bool no_mig_atomic = true,
bool send_evictions
{
diff --git a/src/mem/protocol/MOESI_hammer-dir.sm b/src/mem/protocol/MOESI_hammer-dir.sm
index f923ddeab..40f4db73e 100644
--- a/src/mem/protocol/MOESI_hammer-dir.sm
+++ b/src/mem/protocol/MOESI_hammer-dir.sm
@@ -37,7 +37,7 @@ machine(Directory, "AMD Hammer-like protocol")
: DirectoryMemory * directory,
CacheMemory * probeFilter,
MemoryControl * memBuffer,
- int memory_controller_latency = 2,
+ Cycles memory_controller_latency = 2,
bool probe_filter_enabled = false,
bool full_bit_dir_enabled = false
{
diff --git a/src/mem/protocol/MOESI_hammer-dma.sm b/src/mem/protocol/MOESI_hammer-dma.sm
index bfb3cb98d..fd7734677 100644
--- a/src/mem/protocol/MOESI_hammer-dma.sm
+++ b/src/mem/protocol/MOESI_hammer-dma.sm
@@ -29,7 +29,7 @@
machine(DMA, "DMA Controller")
: DMASequencer * dma_sequencer,
- int request_latency = 6
+ Cycles request_latency = 6
{
MessageBuffer responseFromDir, network="From", virtual_network="1", ordered="true", vnet_type="response", no_vector="true";
diff --git a/src/mem/protocol/Network_test-cache.sm b/src/mem/protocol/Network_test-cache.sm
index 1e49e1d7b..81e648783 100644
--- a/src/mem/protocol/Network_test-cache.sm
+++ b/src/mem/protocol/Network_test-cache.sm
@@ -33,7 +33,7 @@
machine(L1Cache, "Network_test L1 Cache")
: Sequencer * sequencer,
- int issue_latency = 2
+ Cycles issue_latency = 2
{
// NETWORK BUFFERS
diff --git a/src/mem/protocol/RubySlicc_Exports.sm b/src/mem/protocol/RubySlicc_Exports.sm
index 6c8cfc832..8ca1ed32c 100644
--- a/src/mem/protocol/RubySlicc_Exports.sm
+++ b/src/mem/protocol/RubySlicc_Exports.sm
@@ -37,6 +37,7 @@ external_type(Time, primitive="yes", default="0");
external_type(PacketPtr, primitive="yes");
external_type(Packet, primitive="yes");
external_type(Address);
+external_type(Cycles, primitive="yes");
structure(DataBlock, external = "yes", desc="..."){
void clear();
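
Declaring Cycles as a primitive external type is what allows the .sm parameter lists above to use it directly. The C++ side is, roughly, a thin wrapper around an unsigned count with an explicit constructor, so a bare int can no longer flow silently into an interface that expects a cycle count; the sketch below is an assumption about its shape, not the actual gem5 definition:

    #include <cstdint>

    // Assumed shape of a strongly typed cycle count (not the real gem5 header):
    // explicit construction keeps a bare int from silently becoming a latency.
    class CyclesSketch
    {
      public:
        explicit CyclesSketch(uint64_t c = 0) : c_(c) {}

        operator uint64_t() const { return c_; }

        CyclesSketch operator+(CyclesSketch o) const { return CyclesSketch(c_ + o.c_); }
        CyclesSketch operator<<(int s) const { return CyclesSketch(c_ << s); }
        CyclesSketch operator>>(int s) const { return CyclesSketch(c_ >> s); }

      private:
        uint64_t c_;
    };

    int main()
    {
        CyclesSketch counter = CyclesSketch(500) << 8;  // like averageLatencyCounter
        CyclesSketch estimate = counter >> 8;           // back to 500
        return estimate == CyclesSketch(500) ? 0 : 1;
    }
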
diff --git a/src/mem/protocol/RubySlicc_Types.sm b/src/mem/protocol/RubySlicc_Types.sm
index b4601e9e6..0f6cd0b96 100644
--- a/src/mem/protocol/RubySlicc_Types.sm
+++ b/src/mem/protocol/RubySlicc_Types.sm
@@ -171,7 +171,7 @@ structure (DMASequencer, external = "yes") {
structure (TimerTable, inport="yes", external = "yes") {
bool isReady();
Address readyAddress();
- void set(Address, int);
+ void set(Address, Cycles);
void unset(Address);
bool isSet(Address);
}
diff --git a/src/mem/protocol/RubySlicc_Util.sm b/src/mem/protocol/RubySlicc_Util.sm
index 65f47d5aa..238884503 100644
--- a/src/mem/protocol/RubySlicc_Util.sm
+++ b/src/mem/protocol/RubySlicc_Util.sm
@@ -33,6 +33,7 @@ void error(std::string msg);
void assert(bool condition);
int random(int number);
Time zero_time();
+Cycles TimeToCycles(Time t);
NodeID intToID(int nodenum);
int IDToInt(NodeID id);
int time_to_int(Time time);
diff --git a/src/mem/ruby/buffers/MessageBuffer.cc b/src/mem/ruby/buffers/MessageBuffer.cc
index 446586f5a..f39e35738 100644
--- a/src/mem/ruby/buffers/MessageBuffer.cc
+++ b/src/mem/ruby/buffers/MessageBuffer.cc
@@ -39,6 +39,7 @@ using namespace std;
using m5::stl_helpers::operator<<;
MessageBuffer::MessageBuffer(const string &name)
+ : m_last_arrival_time(0)
{
m_msg_counter = 0;
m_consumer_ptr = NULL;
@@ -48,7 +49,6 @@ MessageBuffer::MessageBuffer(const string &name)
m_strict_fifo = true;
m_size = 0;
m_max_size = -1;
- m_last_arrival_time = 0;
m_randomization = true;
m_size_last_time_size_checked = 0;
m_time_last_time_size_checked = 0;
@@ -139,19 +139,19 @@ MessageBuffer::peekAtHeadOfQueue() const
}
// FIXME - move me somewhere else
-int
+Cycles
random_time()
{
- int time = 1;
- time += random() & 0x3; // [0...3]
+ Cycles time(1);
+ time += Cycles(random() & 0x3); // [0...3]
if ((random() & 0x7) == 0) { // 1 in 8 chance
- time += 100 + (random() % 0xf); // 100 + [1...15]
+ time += Cycles(100 + (random() % 0xf)); // 100 + [1...15]
}
return time;
}
void
-MessageBuffer::enqueue(MsgPtr message, Time delta)
+MessageBuffer::enqueue(MsgPtr message, Cycles delta)
{
m_msg_counter++;
m_size++;
@@ -170,8 +170,9 @@ MessageBuffer::enqueue(MsgPtr message, Time delta)
// Calculate the arrival time of the message, that is, the first
// cycle the message can be dequeued.
assert(delta>0);
- Time current_time = m_clockobj_ptr->curCycle();
- Time arrival_time = 0;
+ Cycles current_time(m_clockobj_ptr->curCycle());
+ Cycles arrival_time(0);
+
if (!RubySystem::getRandomization() || (m_randomization == false)) {
// No randomization
arrival_time = current_time + delta;
@@ -304,6 +305,7 @@ MessageBuffer::recycle()
MessageBufferNode node = m_prio_heap.front();
pop_heap(m_prio_heap.begin(), m_prio_heap.end(),
greater<MessageBufferNode>());
+
node.m_time = m_clockobj_ptr->curCycle() + m_recycle_latency;
m_prio_heap.back() = node;
push_heap(m_prio_heap.begin(), m_prio_heap.end(),
@@ -317,6 +319,7 @@ MessageBuffer::reanalyzeMessages(const Address& addr)
{
DPRINTF(RubyQueue, "ReanalyzeMessages\n");
assert(m_stall_msg_map.count(addr) > 0);
+ Cycles nextCycle(m_clockobj_ptr->curCycle() + Cycles(1));
//
// Put all stalled messages associated with this address back on the
@@ -324,8 +327,7 @@ MessageBuffer::reanalyzeMessages(const Address& addr)
//
while(!m_stall_msg_map[addr].empty()) {
m_msg_counter++;
- MessageBufferNode msgNode(m_clockobj_ptr->curCycle() + 1,
- m_msg_counter,
+ MessageBufferNode msgNode(nextCycle, m_msg_counter,
m_stall_msg_map[addr].front());
m_prio_heap.push_back(msgNode);
@@ -342,6 +344,7 @@ void
MessageBuffer::reanalyzeAllMessages()
{
DPRINTF(RubyQueue, "ReanalyzeAllMessages %s\n");
+ Cycles nextCycle(m_clockobj_ptr->curCycle() + Cycles(1));
//
// Put all stalled messages associated with this address back on the
@@ -353,14 +356,13 @@ MessageBuffer::reanalyzeAllMessages()
while(!(map_iter->second).empty()) {
m_msg_counter++;
- MessageBufferNode msgNode(m_clockobj_ptr->curCycle() + 1,
- m_msg_counter,
+ MessageBufferNode msgNode(nextCycle, m_msg_counter,
(map_iter->second).front());
m_prio_heap.push_back(msgNode);
push_heap(m_prio_heap.begin(), m_prio_heap.end(),
greater<MessageBufferNode>());
-
+
m_consumer_ptr->scheduleEventAbsolute(msgNode.m_time);
(map_iter->second).pop_front();
}
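
Because MessageBuffer::enqueue() now takes a Cycles delta, call sites must state the unit explicitly (Cycles(1) instead of 1), and a forgotten wrapper becomes a compile error rather than a silently reinterpreted latency. A hedged usage sketch with placeholder names (enqueueExample is not the Ruby API):

    #include <cstdint>

    class CyclesArg
    {
      public:
        explicit CyclesArg(uint64_t c) : c_(c) {}
        uint64_t value() const { return c_; }
      private:
        uint64_t c_;
    };

    // Placeholder mirroring the edited MessageBuffer::enqueue(MsgPtr, Cycles) shape.
    void enqueueExample(int msg_id, CyclesArg delta)
    {
        (void)msg_id;
        (void)delta;
    }

    int main()
    {
        enqueueExample(1, CyclesArg(3));   // OK: the latency is explicitly in cycles
        // enqueueExample(1, 3);           // would no longer compile: no implicit
        //                                 // conversion from a bare int latency
        return 0;
    }
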
diff --git a/src/mem/ruby/buffers/MessageBuffer.hh b/src/mem/ruby/buffers/MessageBuffer.hh
index 581f03453..5bad45f2d 100644
--- a/src/mem/ruby/buffers/MessageBuffer.hh
+++ b/src/mem/ruby/buffers/MessageBuffer.hh
@@ -54,11 +54,8 @@ class MessageBuffer
std::string name() const { return m_name; }
- void
- setRecycleLatency(int recycle_latency)
- {
- m_recycle_latency = recycle_latency;
- }
+ void setRecycleLatency(Cycles recycle_latency)
+ { m_recycle_latency = recycle_latency; }
void reanalyzeMessages(const Address& addr);
void reanalyzeAllMessages();
@@ -74,7 +71,7 @@ class MessageBuffer
std::pop_heap(m_prio_heap.begin(), m_prio_heap.end(),
std::greater<MessageBufferNode>());
m_prio_heap.pop_back();
- enqueue(node.m_msgptr, 1);
+ enqueue(node.m_msgptr, Cycles(1));
}
bool areNSlotsAvailable(int n);
@@ -114,8 +111,8 @@ class MessageBuffer
return m_prio_heap.front().m_msgptr;
}
- void enqueue(MsgPtr message) { enqueue(message, 1); }
- void enqueue(MsgPtr message, Time delta);
+ void enqueue(MsgPtr message) { enqueue(message, Cycles(1)); }
+ void enqueue(MsgPtr message, Cycles delta);
//! returns delay ticks of the message.
Time dequeue_getDelayCycles(MsgPtr& message);
@@ -160,7 +157,7 @@ class MessageBuffer
private:
//added by SS
- int m_recycle_latency;
+ Cycles m_recycle_latency;
// Private Methods
Time setAndReturnDelayCycles(MsgPtr message);
@@ -204,7 +201,8 @@ class MessageBuffer
bool m_strict_fifo;
bool m_ordering_set;
bool m_randomization;
- Time m_last_arrival_time;
+
+ Cycles m_last_arrival_time;
int m_input_link_id;
int m_vnet_id;
diff --git a/src/mem/ruby/buffers/MessageBufferNode.hh b/src/mem/ruby/buffers/MessageBufferNode.hh
index 5e7a52e12..57f09a913 100644
--- a/src/mem/ruby/buffers/MessageBufferNode.hh
+++ b/src/mem/ruby/buffers/MessageBufferNode.hh
@@ -37,22 +37,18 @@ class MessageBufferNode
{
public:
MessageBufferNode()
- {
- m_time = 0;
- m_msg_counter = 0;
- }
+ : m_time(0), m_msg_counter(0)
+ {}
- MessageBufferNode(const Time& time, int counter, const MsgPtr& msgptr)
- {
- m_time = time;
- m_msgptr = msgptr;
- m_msg_counter = counter;
- }
+ MessageBufferNode(const Cycles& time, uint64_t counter,
+ const MsgPtr& msgptr)
+ : m_time(time), m_msg_counter(counter), m_msgptr(msgptr)
+ {}
void print(std::ostream& out) const;
public:
- Time m_time;
+ Cycles m_time;
uint64 m_msg_counter; // FIXME, should this be a 64-bit value?
MsgPtr m_msgptr;
};
diff --git a/src/mem/ruby/common/Consumer.cc b/src/mem/ruby/common/Consumer.cc
index a829f4d99..b7d31ccb0 100644
--- a/src/mem/ruby/common/Consumer.cc
+++ b/src/mem/ruby/common/Consumer.cc
@@ -29,13 +29,14 @@
#include "mem/ruby/common/Consumer.hh"
void
-Consumer::scheduleEvent(Time timeDelta)
+Consumer::scheduleEvent(Cycles timeDelta)
{
- scheduleEventAbsolute(timeDelta + em->curCycle());
+ timeDelta += em->curCycle();
+ scheduleEventAbsolute(timeDelta);
}
void
-Consumer::scheduleEventAbsolute(Time timeAbs)
+Consumer::scheduleEventAbsolute(Cycles timeAbs)
{
Tick evt_time = em->clockPeriod() * timeAbs;
if (!alreadyScheduled(evt_time)) {
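
scheduleEventAbsolute() now takes an absolute cycle number and converts it to a tick by multiplying with the object's clock period, as the hunk shows. A small illustration of that conversion with assumed numbers (1000 ticks per cycle is an example value, not a gem5 constant):

    #include <cassert>
    #include <cstdint>

    using Tick = uint64_t;

    int main()
    {
        // Illustrative only: a 1 GHz clock with 1 ps tick resolution gives
        // 1000 ticks per cycle, so waking at cycle 25 means tick 25000.
        const Tick clock_period_ticks = 1000;
        uint64_t wakeup_cycle = 25;
        Tick evt_time = clock_period_ticks * wakeup_cycle;
        assert(evt_time == 25000);
        return 0;
    }
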
diff --git a/src/mem/ruby/common/Consumer.hh b/src/mem/ruby/common/Consumer.hh
index 9e089e992..33c78d780 100644
--- a/src/mem/ruby/common/Consumer.hh
+++ b/src/mem/ruby/common/Consumer.hh
@@ -38,7 +38,6 @@
#include <iostream>
#include <set>
-#include "mem/ruby/common/TypeDefines.hh"
#include "sim/clocked_object.hh"
class Consumer
@@ -88,8 +87,8 @@ class Consumer
m_scheduled_wakeups.erase(time);
}
- void scheduleEvent(Time timeDelta);
- void scheduleEventAbsolute(Time timeAbs);
+ void scheduleEvent(Cycles timeDelta);
+ void scheduleEventAbsolute(Cycles timeAbs);
private:
Tick m_last_scheduled_wakeup;
diff --git a/src/mem/ruby/network/BasicLink.hh b/src/mem/ruby/network/BasicLink.hh
index 11832f9c3..634b9143e 100644
--- a/src/mem/ruby/network/BasicLink.hh
+++ b/src/mem/ruby/network/BasicLink.hh
@@ -52,7 +52,7 @@ class BasicLink : public SimObject
void print(std::ostream& out) const;
- int m_latency;
+ Cycles m_latency;
int m_bandwidth_factor;
int m_weight;
};
diff --git a/src/mem/ruby/network/BasicLink.py b/src/mem/ruby/network/BasicLink.py
index 841208578..8fc83c9e2 100644
--- a/src/mem/ruby/network/BasicLink.py
+++ b/src/mem/ruby/network/BasicLink.py
@@ -34,7 +34,7 @@ class BasicLink(SimObject):
type = 'BasicLink'
cxx_header = "mem/ruby/network/BasicLink.hh"
link_id = Param.Int("ID in relation to other links")
- latency = Param.Int(1, "latency")
+ latency = Param.Cycles(1, "latency")
# The following banwidth factor does not translate to the same value for
# both the simple and Garnet models. For the most part, the bandwidth
# factor is the width of the link in bytes, expect for certain situations
diff --git a/src/mem/ruby/network/garnet/fixed-pipeline/GarnetLink_d.py b/src/mem/ruby/network/garnet/fixed-pipeline/GarnetLink_d.py
index bbd785e2c..14c3f543c 100644
--- a/src/mem/ruby/network/garnet/fixed-pipeline/GarnetLink_d.py
+++ b/src/mem/ruby/network/garnet/fixed-pipeline/GarnetLink_d.py
@@ -37,7 +37,7 @@ class NetworkLink_d(ClockedObject):
type = 'NetworkLink_d'
cxx_header = "mem/ruby/network/garnet/fixed-pipeline/NetworkLink_d.hh"
link_id = Param.Int(Parent.link_id, "link id")
- link_latency = Param.Int(Parent.latency, "link latency")
+ link_latency = Param.Cycles(Parent.latency, "link latency")
vcs_per_vnet = Param.Int(Parent.vcs_per_vnet,
"virtual channels per virtual network")
virt_nets = Param.Int(Parent.number_of_virtual_networks,
diff --git a/src/mem/ruby/network/garnet/fixed-pipeline/InputUnit_d.hh b/src/mem/ruby/network/garnet/fixed-pipeline/InputUnit_d.hh
index 3ebf7c6e9..07c6bec3a 100644
--- a/src/mem/ruby/network/garnet/fixed-pipeline/InputUnit_d.hh
+++ b/src/mem/ruby/network/garnet/fixed-pipeline/InputUnit_d.hh
@@ -90,7 +90,7 @@ class InputUnit_d : public Consumer
{
flit_d *t_flit = new flit_d(in_vc, free_signal, curTime);
creditQueue->insert(t_flit);
- m_credit_link->scheduleEvent(1);
+ m_credit_link->scheduleEvent(Cycles(1));
}
inline int
diff --git a/src/mem/ruby/network/garnet/fixed-pipeline/NetworkInterface_d.cc b/src/mem/ruby/network/garnet/fixed-pipeline/NetworkInterface_d.cc
index f0e117aad..c58b38c52 100644
--- a/src/mem/ruby/network/garnet/fixed-pipeline/NetworkInterface_d.cc
+++ b/src/mem/ruby/network/garnet/fixed-pipeline/NetworkInterface_d.cc
@@ -244,14 +244,14 @@ NetworkInterface_d::wakeup()
free_signal = true;
outNode_ptr[t_flit->get_vnet()]->enqueue(
- t_flit->get_msg_ptr(), 1);
+ t_flit->get_msg_ptr(), Cycles(1));
}
// Simply send a credit back since we are not buffering
// this flit in the NI
flit_d *credit_flit = new flit_d(t_flit->get_vc(), free_signal,
m_net_ptr->curCycle());
creditQueue->insert(credit_flit);
- m_ni_credit_link->scheduleEvent(1);
+ m_ni_credit_link->scheduleEvent(Cycles(1));
int vnet = t_flit->get_vnet();
m_net_ptr->increment_received_flits(vnet);
@@ -324,7 +324,7 @@ NetworkInterface_d::scheduleOutputLink()
t_flit->set_time(m_net_ptr->curCycle() + 1);
outSrcQueue->insert(t_flit);
// schedule the out link
- outNetLink->scheduleEvent(1);
+ outNetLink->scheduleEvent(Cycles(1));
if (t_flit->get_type() == TAIL_ ||
t_flit->get_type() == HEAD_TAIL_) {
@@ -351,13 +351,13 @@ NetworkInterface_d::checkReschedule()
{
for (int vnet = 0; vnet < m_virtual_networks; vnet++) {
if (inNode_ptr[vnet]->isReady()) { // Is there a message waiting
- scheduleEvent(1);
+ scheduleEvent(Cycles(1));
return;
}
}
for (int vc = 0; vc < m_num_vcs; vc++) {
if (m_ni_buffers[vc]->isReadyForNext(m_net_ptr->curCycle())) {
- scheduleEvent(1);
+ scheduleEvent(Cycles(1));
return;
}
}
diff --git a/src/mem/ruby/network/garnet/fixed-pipeline/NetworkLink_d.hh b/src/mem/ruby/network/garnet/fixed-pipeline/NetworkLink_d.hh
index c52c903e0..14f6a6527 100644
--- a/src/mem/ruby/network/garnet/fixed-pipeline/NetworkLink_d.hh
+++ b/src/mem/ruby/network/garnet/fixed-pipeline/NetworkLink_d.hh
@@ -72,7 +72,7 @@ class NetworkLink_d : public ClockedObject, public Consumer
protected:
int m_id;
- int m_latency;
+ Cycles m_latency;
int channel_width;
GarnetNetwork_d *m_net_ptr;
diff --git a/src/mem/ruby/network/garnet/fixed-pipeline/OutputUnit_d.hh b/src/mem/ruby/network/garnet/fixed-pipeline/OutputUnit_d.hh
index 4fa7dcb90..4b5b851e2 100644
--- a/src/mem/ruby/network/garnet/fixed-pipeline/OutputUnit_d.hh
+++ b/src/mem/ruby/network/garnet/fixed-pipeline/OutputUnit_d.hh
@@ -84,7 +84,7 @@ class OutputUnit_d : public Consumer
insert_flit(flit_d *t_flit)
{
m_out_buffer->insert(t_flit);
- m_out_link->scheduleEvent(1);
+ m_out_link->scheduleEvent(Cycles(1));
}
private:
diff --git a/src/mem/ruby/network/garnet/fixed-pipeline/Router_d.cc b/src/mem/ruby/network/garnet/fixed-pipeline/Router_d.cc
index 2a759eb87..eaa147c41 100644
--- a/src/mem/ruby/network/garnet/fixed-pipeline/Router_d.cc
+++ b/src/mem/ruby/network/garnet/fixed-pipeline/Router_d.cc
@@ -135,13 +135,13 @@ Router_d::route_req(flit_d *t_flit, InputUnit_d *in_unit, int invc)
void
Router_d::vcarb_req()
{
- m_vc_alloc->scheduleEvent(1);
+ m_vc_alloc->scheduleEvent(Cycles(1));
}
void
Router_d::swarb_req()
{
- m_sw_alloc->scheduleEvent(1);
+ m_sw_alloc->scheduleEvent(Cycles(1));
}
void
@@ -154,7 +154,7 @@ void
Router_d::update_sw_winner(int inport, flit_d *t_flit)
{
m_switch->update_sw_winner(inport, t_flit);
- m_switch->scheduleEvent(1);
+ m_switch->scheduleEvent(Cycles(1));
}
void
diff --git a/src/mem/ruby/network/garnet/fixed-pipeline/SWallocator_d.cc b/src/mem/ruby/network/garnet/fixed-pipeline/SWallocator_d.cc
index ab3f4b761..49f2e8c57 100644
--- a/src/mem/ruby/network/garnet/fixed-pipeline/SWallocator_d.cc
+++ b/src/mem/ruby/network/garnet/fixed-pipeline/SWallocator_d.cc
@@ -220,7 +220,7 @@ SWallocator_d::check_for_wakeup()
for (int j = 0; j < m_num_vcs; j++) {
if (m_input_unit[i]->need_stage_nextcycle(j, ACTIVE_, SA_,
m_router->curCycle())) {
- scheduleEvent(1);
+ scheduleEvent(Cycles(1));
return;
}
}
diff --git a/src/mem/ruby/network/garnet/fixed-pipeline/Switch_d.cc b/src/mem/ruby/network/garnet/fixed-pipeline/Switch_d.cc
index db7446f7a..0b2c3a227 100644
--- a/src/mem/ruby/network/garnet/fixed-pipeline/Switch_d.cc
+++ b/src/mem/ruby/network/garnet/fixed-pipeline/Switch_d.cc
@@ -90,7 +90,7 @@ Switch_d::check_for_wakeup()
{
for (int inport = 0; inport < m_num_inports; inport++) {
if (m_switch_buffer[inport]->isReadyForNext(m_router->curCycle())) {
- scheduleEvent(1);
+ scheduleEvent(Cycles(1));
break;
}
}
diff --git a/src/mem/ruby/network/garnet/fixed-pipeline/VCallocator_d.cc b/src/mem/ruby/network/garnet/fixed-pipeline/VCallocator_d.cc
index 012837362..9569810e8 100644
--- a/src/mem/ruby/network/garnet/fixed-pipeline/VCallocator_d.cc
+++ b/src/mem/ruby/network/garnet/fixed-pipeline/VCallocator_d.cc
@@ -260,7 +260,7 @@ VCallocator_d::check_for_wakeup()
for (int j = 0; j < m_num_vcs; j++) {
if (m_input_unit[i]->need_stage_nextcycle(j, VC_AB_, VA_,
m_router->curCycle())) {
- scheduleEvent(1);
+ scheduleEvent(Cycles(1));
return;
}
}
diff --git a/src/mem/ruby/network/garnet/flexible-pipeline/GarnetLink.py b/src/mem/ruby/network/garnet/flexible-pipeline/GarnetLink.py
index 41049884f..9903c9cd6 100644
--- a/src/mem/ruby/network/garnet/flexible-pipeline/GarnetLink.py
+++ b/src/mem/ruby/network/garnet/flexible-pipeline/GarnetLink.py
@@ -37,7 +37,7 @@ class NetworkLink(ClockedObject):
type = 'NetworkLink'
cxx_header = "mem/ruby/network/garnet/flexible-pipeline/NetworkLink.hh"
link_id = Param.Int(Parent.link_id, "link id")
- link_latency = Param.Int(Parent.latency, "link latency")
+ link_latency = Param.Cycles(Parent.latency, "link latency")
vcs_per_vnet = Param.Int(Parent.vcs_per_vnet,
"virtual channels per virtual network")
virt_nets = Param.Int(Parent.number_of_virtual_networks,
diff --git a/src/mem/ruby/network/garnet/flexible-pipeline/NetworkInterface.cc b/src/mem/ruby/network/garnet/flexible-pipeline/NetworkInterface.cc
index 7267da36d..f0d59f4b4 100644
--- a/src/mem/ruby/network/garnet/flexible-pipeline/NetworkInterface.cc
+++ b/src/mem/ruby/network/garnet/flexible-pipeline/NetworkInterface.cc
@@ -193,7 +193,7 @@ NetworkInterface::grant_vc(int out_port, int vc, Time grant_time)
{
assert(m_out_vc_state[vc]->isInState(VC_AB_, grant_time));
m_out_vc_state[vc]->grant_vc(grant_time);
- scheduleEvent(1);
+ scheduleEvent(Cycles(1));
}
// The tail flit corresponding to this vc has been buffered at the next hop
@@ -203,7 +203,7 @@ NetworkInterface::release_vc(int out_port, int vc, Time release_time)
{
assert(m_out_vc_state[vc]->isInState(ACTIVE_, release_time));
m_out_vc_state[vc]->setState(IDLE_, release_time);
- scheduleEvent(1);
+ scheduleEvent(Cycles(1));
}
// Looking for a free output vc
@@ -270,7 +270,7 @@ NetworkInterface::wakeup()
m_id, m_net_ptr->curCycle());
outNode_ptr[t_flit->get_vnet()]->enqueue(
- t_flit->get_msg_ptr(), 1);
+ t_flit->get_msg_ptr(), Cycles(1));
// signal the upstream router that this vc can be freed now
inNetLink->release_vc_link(t_flit->get_vc(),
@@ -316,7 +316,7 @@ NetworkInterface::scheduleOutputLink()
outSrcQueue->insert(t_flit);
// schedule the out link
- outNetLink->scheduleEvent(1);
+ outNetLink->scheduleEvent(Cycles(1));
return;
}
}
@@ -328,13 +328,13 @@ NetworkInterface::checkReschedule()
{
for (int vnet = 0; vnet < m_virtual_networks; vnet++) {
if (inNode_ptr[vnet]->isReady()) { // Is there a message waiting
- scheduleEvent(1);
+ scheduleEvent(Cycles(1));
return;
}
}
for (int vc = 0; vc < m_num_vcs; vc++) {
if (m_ni_buffers[vc]->isReadyForNext(m_net_ptr->curCycle())) {
- scheduleEvent(1);
+ scheduleEvent(Cycles(1));
return;
}
}
diff --git a/src/mem/ruby/network/garnet/flexible-pipeline/NetworkLink.hh b/src/mem/ruby/network/garnet/flexible-pipeline/NetworkLink.hh
index 45dbe7f52..3bee9f659 100644
--- a/src/mem/ruby/network/garnet/flexible-pipeline/NetworkLink.hh
+++ b/src/mem/ruby/network/garnet/flexible-pipeline/NetworkLink.hh
@@ -81,7 +81,8 @@ class NetworkLink : public ClockedObject, public FlexibleConsumer
uint32_t functionalWrite(Packet *);
protected:
- int m_id, m_latency;
+ int m_id;
+ Cycles m_latency;
int m_in_port, m_out_port;
int m_link_utilized;
std::vector<int> m_vc_load;
diff --git a/src/mem/ruby/network/garnet/flexible-pipeline/Router.cc b/src/mem/ruby/network/garnet/flexible-pipeline/Router.cc
index ca82f0757..71ee1d0bf 100644
--- a/src/mem/ruby/network/garnet/flexible-pipeline/Router.cc
+++ b/src/mem/ruby/network/garnet/flexible-pipeline/Router.cc
@@ -130,7 +130,7 @@ Router::isBufferNotFull(int vc, int inport)
// This has to be updated and arbitration performed
void
Router::request_vc(int in_vc, int in_port, NetDest destination,
- Time request_time)
+ Cycles request_time)
{
assert(m_in_vc_state[in_port][in_vc]->isInState(IDLE_, request_time));
@@ -231,7 +231,7 @@ Router::grant_vc(int out_port, int vc, Time grant_time)
{
assert(m_out_vc_state[out_port][vc]->isInState(VC_AB_, grant_time));
m_out_vc_state[out_port][vc]->grant_vc(grant_time);
- scheduleEvent(1);
+ scheduleEvent(Cycles(1));
}
void
@@ -239,7 +239,7 @@ Router::release_vc(int out_port, int vc, Time release_time)
{
assert(m_out_vc_state[out_port][vc]->isInState(ACTIVE_, release_time));
m_out_vc_state[out_port][vc]->setState(IDLE_, release_time);
- scheduleEvent(1);
+ scheduleEvent(Cycles(1));
}
// This function calculated the output port for a particular destination.
@@ -274,7 +274,8 @@ Router::routeCompute(flit *m_flit, int inport)
m_router_buffers[outport][outvc]->insert(m_flit);
if (m_net_ptr->getNumPipeStages() > 1)
- scheduleEvent(m_net_ptr->getNumPipeStages() - 1 );
+ scheduleEvent(Cycles(m_net_ptr->getNumPipeStages() - 1));
+
if ((m_flit->get_type() == HEAD_) || (m_flit->get_type() == HEAD_TAIL_)) {
NetworkMessage *nm =
safe_cast<NetworkMessage*>(m_flit->get_msg_ptr().get());
@@ -290,6 +291,7 @@ Router::routeCompute(flit *m_flit, int inport)
curCycle());
}
}
+
if ((m_flit->get_type() == TAIL_) || (m_flit->get_type() == HEAD_TAIL_)) {
m_in_vc_state[inport][invc]->setState(IDLE_, curCycle() + 1);
m_in_link[inport]->release_vc_link(invc, curCycle() + 1);
@@ -361,7 +363,7 @@ Router::scheduleOutputLinks()
m_router_buffers[port][vc_tolookat]->getTopFlit();
t_flit->set_time(curCycle() + 1 );
m_out_src_queue[port]->insert(t_flit);
- m_out_link[port]->scheduleEvent(1);
+ m_out_link[port]->scheduleEvent(Cycles(1));
break; // done for this port
}
}
@@ -383,7 +385,7 @@ Router::checkReschedule()
for (int port = 0; port < m_out_link.size(); port++) {
for (int vc = 0; vc < m_num_vcs; vc++) {
if (m_router_buffers[port][vc]->isReadyForNext(curCycle())) {
- scheduleEvent(1);
+ scheduleEvent(Cycles(1));
return;
}
}
@@ -396,7 +398,7 @@ Router::check_arbiter_reschedule()
for (int port = 0; port < m_in_link.size(); port++) {
for (int vc = 0; vc < m_num_vcs; vc++) {
if (m_in_vc_state[port][vc]->isInState(VC_AB_, curCycle() + 1)) {
- m_vc_arbiter->scheduleEvent(1);
+ m_vc_arbiter->scheduleEvent(Cycles(1));
return;
}
}
diff --git a/src/mem/ruby/network/garnet/flexible-pipeline/Router.hh b/src/mem/ruby/network/garnet/flexible-pipeline/Router.hh
index 988ec3a55..aa31fd939 100644
--- a/src/mem/ruby/network/garnet/flexible-pipeline/Router.hh
+++ b/src/mem/ruby/network/garnet/flexible-pipeline/Router.hh
@@ -60,7 +60,7 @@ class Router : public BasicRouter, public FlexibleConsumer
int link_weight);
void wakeup();
void request_vc(int in_vc, int in_port, NetDest destination,
- Time request_time);
+ Cycles request_time);
bool isBufferNotFull(int vc, int inport);
void grant_vc(int out_port, int vc, Time grant_time);
void release_vc(int out_port, int vc, Time release_time);
diff --git a/src/mem/ruby/network/simple/PerfectSwitch.cc b/src/mem/ruby/network/simple/PerfectSwitch.cc
index 37035f95f..687bdbd86 100644
--- a/src/mem/ruby/network/simple/PerfectSwitch.cc
+++ b/src/mem/ruby/network/simple/PerfectSwitch.cc
@@ -267,7 +267,7 @@ PerfectSwitch::wakeup()
// There were not enough resources
if (!enough) {
- scheduleEvent(1);
+ scheduleEvent(Cycles(1));
DPRINTF(RubyNetwork, "Can't deliver message since a node "
"is blocked\n");
DPRINTF(RubyNetwork, "Message: %s\n", (*net_msg_ptr));
diff --git a/src/mem/ruby/network/simple/Switch.cc b/src/mem/ruby/network/simple/Switch.cc
index d9abc4cc7..76d37c321 100644
--- a/src/mem/ruby/network/simple/Switch.cc
+++ b/src/mem/ruby/network/simple/Switch.cc
@@ -72,12 +72,12 @@ Switch::addInPort(const vector<MessageBuffer*>& in)
void
Switch::addOutPort(const vector<MessageBuffer*>& out,
- const NetDest& routing_table_entry, int link_latency, int bw_multiplier)
+ const NetDest& routing_table_entry, Cycles link_latency, int bw_multiplier)
{
// Create a throttle
Throttle* throttle_ptr = new Throttle(m_id, m_throttles.size(),
- link_latency, bw_multiplier, m_network_ptr->getEndpointBandwidth(),
- this);
+ link_latency, bw_multiplier, m_network_ptr->getEndpointBandwidth(),
+ this);
m_throttles.push_back(throttle_ptr);
// Create one buffer per vnet (these are intermediaryQueues)
diff --git a/src/mem/ruby/network/simple/Switch.hh b/src/mem/ruby/network/simple/Switch.hh
index 2757e6511..8c265a4bf 100644
--- a/src/mem/ruby/network/simple/Switch.hh
+++ b/src/mem/ruby/network/simple/Switch.hh
@@ -63,7 +63,7 @@ class Switch : public BasicRouter
void init();
void addInPort(const std::vector<MessageBuffer*>& in);
void addOutPort(const std::vector<MessageBuffer*>& out,
- const NetDest& routing_table_entry, int link_latency,
+ const NetDest& routing_table_entry, Cycles link_latency,
int bw_multiplier);
const Throttle* getThrottle(LinkID link_number) const;
const std::vector<Throttle*>* getThrottles() const;
diff --git a/src/mem/ruby/network/simple/Throttle.cc b/src/mem/ruby/network/simple/Throttle.cc
index b591cc81b..bb5d9cf53 100644
--- a/src/mem/ruby/network/simple/Throttle.cc
+++ b/src/mem/ruby/network/simple/Throttle.cc
@@ -48,7 +48,7 @@ const int PRIORITY_SWITCH_LIMIT = 128;
static int network_message_to_size(NetworkMessage* net_msg_ptr);
-Throttle::Throttle(int sID, NodeID node, int link_latency,
+Throttle::Throttle(int sID, NodeID node, Cycles link_latency,
int link_bandwidth_multiplier, int endpoint_bandwidth,
ClockedObject *em)
: Consumer(em)
@@ -57,7 +57,7 @@ Throttle::Throttle(int sID, NodeID node, int link_latency,
m_sID = sID;
}
-Throttle::Throttle(NodeID node, int link_latency,
+Throttle::Throttle(NodeID node, Cycles link_latency,
int link_bandwidth_multiplier, int endpoint_bandwidth,
ClockedObject *em)
: Consumer(em)
@@ -67,8 +67,8 @@ Throttle::Throttle(NodeID node, int link_latency,
}
void
-Throttle::init(NodeID node, int link_latency, int link_bandwidth_multiplier,
- int endpoint_bandwidth)
+Throttle::init(NodeID node, Cycles link_latency,
+ int link_bandwidth_multiplier, int endpoint_bandwidth)
{
m_node = node;
m_vnets = 0;
@@ -222,7 +222,7 @@ Throttle::wakeup()
// We are out of bandwidth for this cycle, so wakeup next
// cycle and continue
- scheduleEvent(1);
+ scheduleEvent(Cycles(1));
}
}
diff --git a/src/mem/ruby/network/simple/Throttle.hh b/src/mem/ruby/network/simple/Throttle.hh
index 077382041..b75161164 100644
--- a/src/mem/ruby/network/simple/Throttle.hh
+++ b/src/mem/ruby/network/simple/Throttle.hh
@@ -52,10 +52,10 @@ class MessageBuffer;
class Throttle : public Consumer
{
public:
- Throttle(int sID, NodeID node, int link_latency,
+ Throttle(int sID, NodeID node, Cycles link_latency,
int link_bandwidth_multiplier, int endpoint_bandwidth,
ClockedObject *em);
- Throttle(NodeID node, int link_latency, int link_bandwidth_multiplier,
+ Throttle(NodeID node, Cycles link_latency, int link_bandwidth_multiplier,
int endpoint_bandwidth, ClockedObject *em);
~Throttle() {}
@@ -70,13 +70,10 @@ class Throttle : public Consumer
void clearStats();
// The average utilization (a percent) since last clearStats()
double getUtilization() const;
- int
- getLinkBandwidth() const
- {
- return m_endpoint_bandwidth * m_link_bandwidth_multiplier;
- }
- int getLatency() const { return m_link_latency; }
+ int getLinkBandwidth() const
+ { return m_endpoint_bandwidth * m_link_bandwidth_multiplier; }
+ Cycles getLatency() const { return m_link_latency; }
const std::vector<std::vector<int> >&
getCounters() const
{
@@ -88,7 +85,7 @@ class Throttle : public Consumer
void print(std::ostream& out) const;
private:
- void init(NodeID node, int link_latency, int link_bandwidth_multiplier,
+ void init(NodeID node, Cycles link_latency, int link_bandwidth_multiplier,
int endpoint_bandwidth);
void addVirtualNetwork(MessageBuffer* in_ptr, MessageBuffer* out_ptr,
ClockedObject *em);
@@ -106,7 +103,7 @@ class Throttle : public Consumer
int m_sID;
NodeID m_node;
int m_link_bandwidth_multiplier;
- int m_link_latency;
+ Cycles m_link_latency;
int m_wakeups_wo_switch;
int m_endpoint_bandwidth;
diff --git a/src/mem/ruby/slicc_interface/AbstractCacheEntry.hh b/src/mem/ruby/slicc_interface/AbstractCacheEntry.hh
index 69333f481..e92f96fd5 100644
--- a/src/mem/ruby/slicc_interface/AbstractCacheEntry.hh
+++ b/src/mem/ruby/slicc_interface/AbstractCacheEntry.hh
@@ -52,8 +52,6 @@ class AbstractCacheEntry : public AbstractEntry
void changePermission(AccessPermission new_perm);
Address m_Address; // Address of this block, required by CacheMemory
- Time m_LastRef; // Last time this block was referenced, required
- // by CacheMemory
int m_locked; // Holds info whether the address is locked,
// required for implementing LL/SC
};
diff --git a/src/mem/ruby/slicc_interface/AbstractController.hh b/src/mem/ruby/slicc_interface/AbstractController.hh
index c452da723..44981a7e8 100644
--- a/src/mem/ruby/slicc_interface/AbstractController.hh
+++ b/src/mem/ruby/slicc_interface/AbstractController.hh
@@ -106,7 +106,7 @@ class AbstractController : public ClockedObject, public Consumer
protected:
int m_transitions_per_cycle;
int m_buffer_size;
- int m_recycle_latency;
+ Cycles m_recycle_latency;
std::string m_name;
NodeID m_version;
Network* m_net_ptr;
diff --git a/src/mem/ruby/slicc_interface/Controller.py b/src/mem/ruby/slicc_interface/Controller.py
index aa8f35145..5c2fd9b71 100644
--- a/src/mem/ruby/slicc_interface/Controller.py
+++ b/src/mem/ruby/slicc_interface/Controller.py
@@ -40,6 +40,6 @@ class RubyController(ClockedObject):
transitions_per_cycle = \
Param.Int(32, "no. of SLICC state machine transitions per cycle")
buffer_size = Param.Int(0, "max buffer size 0 means infinite")
- recycle_latency = Param.Int(10, "")
+ recycle_latency = Param.Cycles(10, "")
number_of_TBEs = Param.Int(256, "")
ruby_system = Param.RubySystem("");
diff --git a/src/mem/ruby/slicc_interface/RubySlicc_Util.hh b/src/mem/ruby/slicc_interface/RubySlicc_Util.hh
index f55b6eae4..622efd04c 100644
--- a/src/mem/ruby/slicc_interface/RubySlicc_Util.hh
+++ b/src/mem/ruby/slicc_interface/RubySlicc_Util.hh
@@ -37,9 +37,8 @@
#include "debug/RubySlicc.hh"
#include "mem/ruby/common/Address.hh"
-#include "mem/ruby/common/Global.hh"
#include "mem/ruby/slicc_interface/RubySlicc_ComponentMapping.hh"
-#include "mem/ruby/system/System.hh"
+#include "mem/packet.hh"
inline int
random(int n)
@@ -53,6 +52,8 @@ zero_time()
return 0;
}
+inline Cycles TimeToCycles(Time t) { return Cycles(t); }
+
inline NodeID
intToID(int nodenum)
{
diff --git a/src/mem/ruby/system/Cache.py b/src/mem/ruby/system/Cache.py
index 4b0269822..d4af1320a 100644
--- a/src/mem/ruby/system/Cache.py
+++ b/src/mem/ruby/system/Cache.py
@@ -36,7 +36,7 @@ class RubyCache(SimObject):
cxx_class = 'CacheMemory'
cxx_header = "mem/ruby/system/CacheMemory.hh"
size = Param.MemorySize("capacity in bytes");
- latency = Param.Int("");
+ latency = Param.Cycles("");
assoc = Param.Int("");
replacement_policy = Param.String("PSEUDO_LRU", "");
start_index_bit = Param.Int(6, "index start, default 6 for 64-byte line");
diff --git a/src/mem/ruby/system/CacheMemory.hh b/src/mem/ruby/system/CacheMemory.hh
index a4950a09b..6b436082f 100644
--- a/src/mem/ruby/system/CacheMemory.hh
+++ b/src/mem/ruby/system/CacheMemory.hh
@@ -92,7 +92,7 @@ class CacheMemory : public SimObject
AbstractCacheEntry* lookup(const Address& address);
const AbstractCacheEntry* lookup(const Address& address) const;
- int getLatency() const { return m_latency; }
+ Cycles getLatency() const { return m_latency; }
// Hook for checkpointing the contents of the cache
void recordCacheContents(int cntrl, CacheRecorder* tr) const;
@@ -144,7 +144,7 @@ class CacheMemory : public SimObject
private:
const std::string m_cache_name;
- int m_latency;
+ Cycles m_latency;
// Data Members (m_prefix)
bool m_is_instruction_only_cache;
diff --git a/src/mem/ruby/system/RubyMemoryControl.cc b/src/mem/ruby/system/RubyMemoryControl.cc
index 620113719..121551299 100644
--- a/src/mem/ruby/system/RubyMemoryControl.cc
+++ b/src/mem/ruby/system/RubyMemoryControl.cc
@@ -374,10 +374,10 @@ RubyMemoryControl::printStats(ostream& out) const
// Queue up a completed request to send back to directory
void
-RubyMemoryControl::enqueueToDirectory(MemoryNode req, int latency)
+RubyMemoryControl::enqueueToDirectory(MemoryNode req, Cycles latency)
{
Time arrival_time = curTick() + (latency * clock);
- Time ruby_arrival_time = arrival_time / g_system_ptr->clockPeriod();
+ Cycles ruby_arrival_time = g_system_ptr->ticksToCycles(arrival_time);
req.m_time = ruby_arrival_time;
m_response_queue.push_back(req);
diff --git a/src/mem/ruby/system/RubyMemoryControl.hh b/src/mem/ruby/system/RubyMemoryControl.hh
index 53e8fabef..bd94abaa6 100644
--- a/src/mem/ruby/system/RubyMemoryControl.hh
+++ b/src/mem/ruby/system/RubyMemoryControl.hh
@@ -100,7 +100,7 @@ class RubyMemoryControl : public MemoryControl
uint32_t functionalWriteBuffers(Packet *pkt);
private:
- void enqueueToDirectory(MemoryNode req, int latency);
+ void enqueueToDirectory(MemoryNode req, Cycles latency);
const int getRank(int bank) const;
bool queueReady(int bank);
void issueRequest(int bank);
@@ -128,11 +128,11 @@ class RubyMemoryControl : public MemoryControl
int m_rank_rank_delay;
int m_read_write_delay;
int m_basic_bus_busy_time;
- int m_mem_ctl_latency;
+ Cycles m_mem_ctl_latency;
int m_refresh_period;
int m_mem_random_arbitrate;
int m_tFaw;
- int m_mem_fixed_delay;
+ Cycles m_mem_fixed_delay;
int m_total_banks;
int m_total_ranks;
diff --git a/src/mem/ruby/system/RubyMemoryControl.py b/src/mem/ruby/system/RubyMemoryControl.py
index 7764938d3..e46b3f223 100644
--- a/src/mem/ruby/system/RubyMemoryControl.py
+++ b/src/mem/ruby/system/RubyMemoryControl.py
@@ -50,8 +50,8 @@ class RubyMemoryControl(MemoryControl):
rank_rank_delay = Param.Int(1, "");
read_write_delay = Param.Int(2, "");
basic_bus_busy_time = Param.Int(2, "");
- mem_ctl_latency = Param.Int(12, "");
- refresh_period = Param.Int(1560, "");
+ mem_ctl_latency = Param.Cycles(12, "");
+ refresh_period = Param.Cycles(1560, "");
tFaw = Param.Int(0, "");
mem_random_arbitrate = Param.Int(0, "");
- mem_fixed_delay = Param.Int(0, "");
+ mem_fixed_delay = Param.Cycles(0, "");
diff --git a/src/mem/ruby/system/Sequencer.cc b/src/mem/ruby/system/Sequencer.cc
index 9b30fdbd5..d4cfe77b1 100644
--- a/src/mem/ruby/system/Sequencer.cc
+++ b/src/mem/ruby/system/Sequencer.cc
@@ -668,7 +668,7 @@ Sequencer::issueRequest(PacketPtr pkt, RubyRequestType secondary_type)
msg->getPhysicalAddress(),
RubyRequestType_to_string(secondary_type));
- Time latency = 0; // initialzed to an null value
+ Cycles latency(0); // initialzed to an null value
if (secondary_type == RubyRequestType_IFETCH)
latency = m_instCache_ptr->getLatency();
diff --git a/src/mem/ruby/system/TimerTable.cc b/src/mem/ruby/system/TimerTable.cc
index 992401c50..d87f11662 100644
--- a/src/mem/ruby/system/TimerTable.cc
+++ b/src/mem/ruby/system/TimerTable.cc
@@ -66,12 +66,13 @@ TimerTable::readyAddress() const
}
void
-TimerTable::set(const Address& address, Time relative_latency)
+TimerTable::set(const Address& address, Cycles relative_latency)
{
assert(address == line_address(address));
assert(relative_latency > 0);
assert(!m_map.count(address));
- Time ready_time = m_clockobj_ptr->curCycle() + relative_latency;
+
+ Cycles ready_time = m_clockobj_ptr->curCycle() + relative_latency;
m_map[address] = ready_time;
assert(m_consumer_ptr != NULL);
m_consumer_ptr->scheduleEventAbsolute(ready_time);
diff --git a/src/mem/ruby/system/TimerTable.hh b/src/mem/ruby/system/TimerTable.hh
index ecd95ee19..95af2eaa7 100644
--- a/src/mem/ruby/system/TimerTable.hh
+++ b/src/mem/ruby/system/TimerTable.hh
@@ -64,7 +64,10 @@ class TimerTable
bool isReady() const;
const Address& readyAddress() const;
bool isSet(const Address& address) const { return !!m_map.count(address); }
- void set(const Address& address, Time relative_latency);
+ void set(const Address& address, Cycles relative_latency);
+ void set(const Address& address, uint64_t relative_latency)
+ { set(address, Cycles(relative_latency)); }
+
void unset(const Address& address);
void print(std::ostream& out) const;
@@ -79,7 +82,7 @@ class TimerTable
// use a std::map for the address map as this container is sorted
// and ensures a well-defined iteration order
- typedef std::map<Address, Time> AddressMap;
+ typedef std::map<Address, Cycles> AddressMap;
AddressMap m_map;
mutable bool m_next_valid;
mutable Time m_next_time; // Only valid if m_next_valid is true
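
The extra TimerTable::set(Address, uint64_t) overload simply wraps its argument in Cycles and forwards, so protocol code that still passes plain integer timeouts keeps compiling. The same pattern in isolation, with illustrative names:

    #include <cstdint>
    #include <iostream>

    class Cyc
    {
      public:
        explicit Cyc(uint64_t c) : c_(c) {}
        uint64_t value() const { return c_; }
      private:
        uint64_t c_;
    };

    // Mirrors the TimerTable pattern: the integer overload wraps and forwards,
    // so callers that still pass plain numbers keep working.
    void setTimer(int address, Cyc relative_latency)
    {
        std::cout << "addr " << address << " fires in "
                  << relative_latency.value() << " cycles\n";
    }

    void setTimer(int address, uint64_t relative_latency)
    {
        setTimer(address, Cyc(relative_latency));
    }

    int main()
    {
        setTimer(0x40, Cyc(50));  // new-style call
        setTimer(0x80, 50);       // old-style call, forwarded through the wrapper
        return 0;
    }
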
diff --git a/src/mem/ruby/system/WireBuffer.cc b/src/mem/ruby/system/WireBuffer.cc
index b5a2849ce..fba53b902 100644
--- a/src/mem/ruby/system/WireBuffer.cc
+++ b/src/mem/ruby/system/WireBuffer.cc
@@ -70,12 +70,13 @@ WireBuffer::~WireBuffer()
}
void
-WireBuffer::enqueue(MsgPtr message, int latency)
+WireBuffer::enqueue(MsgPtr message, Cycles latency)
{
m_msg_counter++;
- Time current_time = g_system_ptr->getTime();
- Time arrival_time = current_time + latency;
+ Cycles current_time = g_system_ptr->getTime();
+ Cycles arrival_time = current_time + latency;
assert(arrival_time > current_time);
+
MessageBufferNode thisNode(arrival_time, m_msg_counter, message);
m_message_queue.push_back(thisNode);
if (m_consumer_ptr != NULL) {
@@ -122,11 +123,12 @@ WireBuffer::recycle()
MessageBufferNode node = m_message_queue.front();
pop_heap(m_message_queue.begin(), m_message_queue.end(),
greater<MessageBufferNode>());
- node.m_time = g_system_ptr->getTime() + 1;
+
+ node.m_time = g_system_ptr->getTime() + Cycles(1);
m_message_queue.back() = node;
push_heap(m_message_queue.begin(), m_message_queue.end(),
greater<MessageBufferNode>());
- m_consumer_ptr->scheduleEventAbsolute(g_system_ptr->getTime() + 1);
+ m_consumer_ptr->scheduleEventAbsolute(node.m_time);
}
bool
diff --git a/src/mem/ruby/system/WireBuffer.hh b/src/mem/ruby/system/WireBuffer.hh
index 07da965c8..d71bf4520 100644
--- a/src/mem/ruby/system/WireBuffer.hh
+++ b/src/mem/ruby/system/WireBuffer.hh
@@ -72,7 +72,7 @@ class WireBuffer : public SimObject
void setDescription(const std::string& name) { m_description = name; };
std::string getDescription() { return m_description; };
- void enqueue(MsgPtr message, int latency );
+ void enqueue(MsgPtr message, Cycles latency);
void dequeue();
const Message* peek();
MessageBufferNode peekNode();
diff --git a/src/mem/slicc/ast/EnqueueStatementAST.py b/src/mem/slicc/ast/EnqueueStatementAST.py
index a8785c9af..329ed30a3 100644
--- a/src/mem/slicc/ast/EnqueueStatementAST.py
+++ b/src/mem/slicc/ast/EnqueueStatementAST.py
@@ -53,8 +53,8 @@ class EnqueueStatementAST(StatementAST):
self.symtab.newSymbol(v)
# Declare message
- code("${{msg_type.ident}} *out_msg = \
- new ${{msg_type.ident}}(curCycle());")
+ code("${{msg_type.ident}} *out_msg = "\
+ "new ${{msg_type.ident}}(curCycle());")
# The other statements
t = self.statements.generate(code, None)
@@ -67,7 +67,7 @@ class EnqueueStatementAST(StatementAST):
try:
# see if this is an integer
latency = int(latency)
- args.append("%s" % latency)
+ args.append("Cycles(%s)" % latency)
except ValueError:
# if not, it should be a member
args.append("m_%s" % latency)
diff --git a/src/mem/slicc/ast/FuncCallExprAST.py b/src/mem/slicc/ast/FuncCallExprAST.py
index b0ab931de..fc42a8a3e 100644
--- a/src/mem/slicc/ast/FuncCallExprAST.py
+++ b/src/mem/slicc/ast/FuncCallExprAST.py
@@ -142,7 +142,7 @@ class FuncCallExprAST(ExprAST):
}
if (result == TransitionResult_ResourceStall) {
- scheduleEvent(1);
+ scheduleEvent(Cycles(1));
// Cannot do anything with this transition, go check next doable transition (mostly likely of next port)
}
@@ -173,7 +173,7 @@ class FuncCallExprAST(ExprAST):
}
if (result1 == TransitionResult_ResourceStall) {
- scheduleEvent(1);
+ scheduleEvent(Cycles(1));
// Cannot do anything with this transition, go check next
// doable transition (mostly likely of next port)
}
diff --git a/src/mem/slicc/ast/InfixOperatorExprAST.py b/src/mem/slicc/ast/InfixOperatorExprAST.py
index c5f384c4b..2f62813df 100644
--- a/src/mem/slicc/ast/InfixOperatorExprAST.py
+++ b/src/mem/slicc/ast/InfixOperatorExprAST.py
@@ -47,7 +47,7 @@ class InfixOperatorExprAST(ExprAST):
rtype = self.right.generate(rcode)
# Figure out what the input and output types should be
- if self.op in ("==", "!="):
+ if self.op in ("==", "!=", ">=", "<=", ">", "<"):
output = "bool"
if (ltype != rtype):
self.error("Type mismatch: left and right operands of " +
@@ -55,30 +55,35 @@ class InfixOperatorExprAST(ExprAST):
"left: '%s', right: '%s'",
self.op, ltype, rtype)
else:
+ expected_types = []
+ output = None
+
if self.op in ("&&", "||"):
# boolean inputs and output
- inputs = "bool"
- output = "bool"
- elif self.op in ("==", "!=", ">=", "<=", ">", "<"):
- # Integer inputs, boolean output
- inputs = "int"
- output = "bool"
+ expected_types = [("bool", "bool", "bool")]
+ elif self.op in ("<<", ">>"):
+ expected_types = [("int", "int", "int"),
+ ("Cycles", "int", "Cycles")]
+ elif self.op in ("+", "-", "*", "/"):
+ expected_types = [("int", "int", "int"),
+ ("Time", "Time", "Time"),
+ ("Cycles", "Cycles", "Cycles"),
+ ("Cycles", "int", "Cycles"),
+ ("int", "Cycles", "Cycles")]
else:
- # integer inputs and output
- inputs = "int"
- output = "int"
+ self.error("No operator matched with {0}!" .format(self.op))
- inputs_type = self.symtab.find(inputs, Type)
+ for expected_type in expected_types:
+ left_input_type = self.symtab.find(expected_type[0], Type)
+ right_input_type = self.symtab.find(expected_type[1], Type)
- if inputs_type != ltype:
- self.left.error("Type mismatch: left operand of operator " +
- "'%s' expects type '%s', actual was '%s'",
- self.op, inputs, ltype)
+ if (left_input_type == ltype) and (right_input_type == rtype):
+ output = expected_type[2]
- if inputs_type != rtype:
- self.right.error("Type mismatch: right operand of operator " +
- "'%s' expects type '%s', actual was '%s'",
- self.op, inputs, rtype)
+ if output == None:
+ self.error("Type mismatch: operands ({0}, {1}) for operator " \
+ "'{2}' failed to match with the expected types" .
+ format(ltype, rtype, self.op))
# All is well
fix = code.nofix()
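
The rewritten operator table only accepts expressions whose (left, right) type pair appears in expected_types, e.g. Cycles >> int or Cycles + int yielding Cycles. The generated C++ then relies on matching operator overloads being available on the Cycles type; a rough sketch of what those rows presuppose (names are stand-ins, not gem5 code):

    #include <cstdint>

    // Each (left, right, result) row in the SLICC table corresponds to an
    // overload like these; Cyc is an illustrative stand-in type.
    struct Cyc { uint64_t c; };

    Cyc operator+(Cyc a, Cyc b) { return {a.c + b.c}; }  // (Cycles, Cycles) -> Cycles
    Cyc operator+(Cyc a, int b) { return {a.c + b}; }    // (Cycles, int)    -> Cycles
    Cyc operator+(int a, Cyc b) { return {a + b.c}; }    // (int, Cycles)    -> Cycles
    Cyc operator>>(Cyc a, int s) { return {a.c >> s}; }  // (Cycles, int)    -> Cycles

    int main()
    {
        Cyc base{500};
        Cyc avg = base >> 3;   // 62, mirrors averageLatencyCounter >> hysteresis
        Cyc total = avg + 2;   // mixed Cycles/int addition stays a Cycles value
        return total.c == 64 ? 0 : 1;
    }
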
diff --git a/src/mem/slicc/symbols/StateMachine.py b/src/mem/slicc/symbols/StateMachine.py
index d1e7dc147..e66743255 100644
--- a/src/mem/slicc/symbols/StateMachine.py
+++ b/src/mem/slicc/symbols/StateMachine.py
@@ -32,7 +32,8 @@ from slicc.symbols.Var import Var
import slicc.generate.html as html
import re
-python_class_map = {"int": "Int",
+python_class_map = {
+ "int": "Int",
"uint32_t" : "UInt32",
"std::string": "String",
"bool": "Bool",
@@ -42,8 +43,9 @@ python_class_map = {"int": "Int",
"DirectoryMemory": "RubyDirectoryMemory",
"MemoryControl": "MemoryControl",
"DMASequencer": "DMASequencer",
- "Prefetcher":"Prefetcher"
- }
+ "Prefetcher":"Prefetcher",
+ "Cycles":"Cycles",
+ }
class StateMachine(Symbol):
def __init__(self, symtab, ident, location, pairs, config_parameters):
@@ -629,7 +631,8 @@ $vid->setDescription("[Version " + to_string(m_version) + ", ${ident}, name=${{v
if vtype.isBuffer:
if "recycle_latency" in var:
- code('$vid->setRecycleLatency(${{var["recycle_latency"]}});')
+ code('$vid->setRecycleLatency( ' \
+ 'Cycles(${{var["recycle_latency"]}}));')
else:
code('$vid->setRecycleLatency(m_recycle_latency);')
@@ -1055,7 +1058,7 @@ ${ident}_Controller::wakeup()
m_fully_busy_cycles++;
// Wakeup in another cycle and try again
- scheduleEvent(1);
+ scheduleEvent(Cycles(1));
break;
}
''')
diff --git a/src/mem/slicc/symbols/Type.py b/src/mem/slicc/symbols/Type.py
index ebf187630..ee1b8102a 100644
--- a/src/mem/slicc/symbols/Type.py
+++ b/src/mem/slicc/symbols/Type.py
@@ -423,6 +423,8 @@ operator<<(std::ostream& out, const ${{self.c_ident}}& obj)
#include <iostream>
#include "mem/protocol/${{self.c_ident}}.hh"
+#include "mem/ruby/common/Global.hh"
+#include "mem/ruby/system/System.hh"
using namespace std;
''')