author    Polina Dudnik <pdudnik@gmail.com>    2009-08-21 15:52:46 -0500
committer Polina Dudnik <pdudnik@gmail.com>    2009-08-21 15:52:46 -0500
commit    a4fc1bad94f028111dcf36ac53c42e5dab79605d (patch)
tree      5573cc6c1b9472dd6f21e513de785ab066a2ff2d /src/mem/ruby/system
parent    c438b2e431066d9324238e1c678ef68a3b721f33 (diff)
[mq]: first_patch
Diffstat (limited to 'src/mem/ruby/system')
-rw-r--r--  src/mem/ruby/system/DMASequencer.hh |  1
-rw-r--r--  src/mem/ruby/system/RubyPort.hh     |  2
-rw-r--r--  src/mem/ruby/system/Sequencer.cc    | 55
-rw-r--r--  src/mem/ruby/system/Sequencer.hh    |  4
4 files changed, 41 insertions(+), 21 deletions(-)
diff --git a/src/mem/ruby/system/DMASequencer.hh b/src/mem/ruby/system/DMASequencer.hh
index 1f60b95ec..77c0a2258 100644
--- a/src/mem/ruby/system/DMASequencer.hh
+++ b/src/mem/ruby/system/DMASequencer.hh
@@ -25,6 +25,7 @@ public:
void init(const vector<string> & argv);
/* external interface */
int64_t makeRequest(const RubyRequest & request);
+ bool isReady(const RubyRequest & request, bool dont_set = false) { assert(0); return false;};
// void issueRequest(uint64_t paddr, uint8* data, int len, bool rw);
bool busy() { return m_is_busy;}
diff --git a/src/mem/ruby/system/RubyPort.hh b/src/mem/ruby/system/RubyPort.hh
index 2f391070f..cc7fd8d1f 100644
--- a/src/mem/ruby/system/RubyPort.hh
+++ b/src/mem/ruby/system/RubyPort.hh
@@ -21,6 +21,8 @@ public:
virtual int64_t makeRequest(const RubyRequest & request) = 0;
+ virtual bool isReady(const RubyRequest & request, bool dont_set = false) = 0;
+
void registerHitCallback(void (*hit_callback)(int64_t request_id)) {
assert(m_hit_callback == NULL); // can't assign hit_callback twice
m_hit_callback = hit_callback;
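
Note: the new pure-virtual isReady() gives the front-end a way to probe a Ruby port before committing to makeRequest(). A minimal caller sketch, assuming hypothetical helpers get_port(), build_request(), and retry_later() that are not part of this patch:

    // Hypothetical caller (not in this patch): probe the port first so that an
    // in-flight atomic sequence from another processor can stall this request.
    RubyPort *port = get_port();               // assumed accessor
    RubyRequest req = build_request();         // assumed builder
    if (port->isReady(req)) {
        int64_t id = port->makeRequest(req);   // returns -1 if a matching request is already outstanding
        // the registered hit_callback fires with this id when the request completes
    } else {
        retry_later(req);                      // assumed back-off path
    }
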
diff --git a/src/mem/ruby/system/Sequencer.cc b/src/mem/ruby/system/Sequencer.cc
index 780c1128e..9549cc340 100644
--- a/src/mem/ruby/system/Sequencer.cc
+++ b/src/mem/ruby/system/Sequencer.cc
@@ -61,7 +61,7 @@ void Sequencer::init(const vector<string> & argv)
m_instCache_ptr = NULL;
m_dataCache_ptr = NULL;
m_controller = NULL;
- m_servicing_atomic = -1;
+ m_servicing_atomic = 200;
m_atomics_counter = 0;
for (size_t i=0; i<argv.size(); i+=2) {
if ( argv[i] == "controller") {
@@ -108,6 +108,7 @@ void Sequencer::wakeup() {
WARN_MSG("Possible Deadlock detected");
WARN_EXPR(request);
WARN_EXPR(m_version);
+ WARN_EXPR(request->ruby_request.paddr);
WARN_EXPR(keys.size());
WARN_EXPR(current_time);
WARN_EXPR(request->issue_time);
@@ -344,13 +345,22 @@ void Sequencer::hitCallback(SequencerRequest* srequest, DataBlock& data) {
data.setData(ruby_request.data, request_address.getOffset(), ruby_request.len);
}
}
-
+ if (type == RubyRequestType_RMW_Write) {
+ if (m_servicing_atomic != ruby_request.proc_id) {
+ assert(0);
+ }
+ assert(m_atomics_counter > 0);
+ m_atomics_counter--;
+ if (m_atomics_counter == 0) {
+ m_servicing_atomic = 200;
+ }
+ }
m_hit_callback(srequest->id);
delete srequest;
}
// Returns true if the sequencer already has a load or store outstanding
-bool Sequencer::isReady(const RubyRequest& request) {
+bool Sequencer::isReady(const RubyRequest& request, bool dont_set) {
// POLINA: check if we are currently flushing the write buffer, if so Ruby is returned as not ready
// to simulate stalling of the front-end
// Do we stall all the sequencers? If it is atomic instruction - yes!
@@ -365,27 +375,30 @@ bool Sequencer::isReady(const RubyRequest& request) {
return false;
}
- if (m_servicing_atomic != -1 && m_servicing_atomic != (int)request.proc_id) {
+ assert(request.proc_id != 100);
+ if (m_servicing_atomic != 200 && m_servicing_atomic != request.proc_id) {
assert(m_atomics_counter > 0);
return false;
}
else {
- if (request.type == RubyRequestType_RMW_Read) {
- if (m_servicing_atomic == -1) {
- assert(m_atomics_counter == 0);
- m_servicing_atomic = (int)request.proc_id;
+ if (!dont_set) {
+ if (request.type == RubyRequestType_RMW_Read) {
+ if (m_servicing_atomic == 200) {
+ assert(m_atomics_counter == 0);
+ m_servicing_atomic = request.proc_id;
+ }
+ else {
+ assert(m_servicing_atomic == request.proc_id);
+ }
+ m_atomics_counter++;
}
else {
- assert(m_servicing_atomic == (int)request.proc_id);
- }
- m_atomics_counter++;
- }
- else if (request.type == RubyRequestType_RMW_Write) {
- assert(m_servicing_atomic == (int)request.proc_id);
- assert(m_atomics_counter > 0);
- m_atomics_counter--;
- if (m_atomics_counter == 0) {
- m_servicing_atomic = -1;
+ if (m_servicing_atomic == request.proc_id) {
+ if (request.type != RubyRequestType_RMW_Write) {
+ m_servicing_atomic = 200;
+ m_atomics_counter = 0;
+ }
+ }
}
}
}
@@ -405,7 +418,7 @@ int64_t Sequencer::makeRequest(const RubyRequest & request)
int64_t id = makeUniqueRequestID();
SequencerRequest *srequest = new SequencerRequest(request, id, g_eventQueue_ptr->getTime());
bool found = insertRequest(srequest);
- if (!found)
+ if (!found) {
if (request.type == RubyRequestType_Locked_Write) {
// NOTE: it is OK to check the locked flag here as the mandatory queue will be checked first
// ensuring that nothing comes between checking the flag and servicing the store
@@ -423,6 +436,10 @@ int64_t Sequencer::makeRequest(const RubyRequest & request)
// TODO: issue hardware prefetches here
return id;
+ }
+ else {
+ assert(0);
+ }
}
else {
return -1;
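
Note: taken together, the Sequencer changes serialize atomic (read-modify-write) sequences with two pieces of state. m_servicing_atomic names the processor that currently owns the atomic sequence, with the literal 200 apparently acting as the "no owner" sentinel now that the field is unsigned, and m_atomics_counter counts RMW reads not yet paired with their RMW write; the release now happens in hitCallback() when the last RMW write completes. A condensed, self-contained model of that bookkeeping, for illustration only (kNoProc stands in for the literal 200):

    #include <cassert>

    // Illustrative model only; the real code lives in Sequencer::isReady()
    // and Sequencer::hitCallback().
    struct AtomicTracker {
        static const unsigned kNoProc = 200;   // stand-in for the literal 200
        unsigned servicing = kNoProc;          // CPU currently owning the RMW sequence
        int counter = 0;                       // RMW reads awaiting their RMW write

        // Mirrors isReady() when dont_set == false.
        bool ready(unsigned proc_id, bool rmw_read, bool rmw_write) {
            if (servicing != kNoProc && servicing != proc_id)
                return false;                  // another CPU owns the sequence: stall
            if (rmw_read) {
                if (servicing == kNoProc)
                    servicing = proc_id;       // claim the sequence on the first RMW read
                counter++;
            } else if (servicing == proc_id && !rmw_write) {
                servicing = kNoProc;           // a non-RMW access from the owner ends the sequence
                counter = 0;
            }
            return true;
        }

        // Mirrors the new RMW_Write handling in hitCallback().
        void rmw_write_done(unsigned proc_id) {
            assert(servicing == proc_id);
            assert(counter > 0);
            if (--counter == 0)
                servicing = kNoProc;           // release once the last RMW write completes
        }
    };
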
diff --git a/src/mem/ruby/system/Sequencer.hh b/src/mem/ruby/system/Sequencer.hh
index 2b1f023c5..e75cdaa3a 100644
--- a/src/mem/ruby/system/Sequencer.hh
+++ b/src/mem/ruby/system/Sequencer.hh
@@ -84,7 +84,7 @@ public:
// called by Tester or Simics
int64_t makeRequest(const RubyRequest & request);
- bool isReady(const RubyRequest& request);
+ bool isReady(const RubyRequest& request, bool dont_set = false);
bool empty() const;
void print(ostream& out) const;
@@ -125,7 +125,7 @@ private:
// Global outstanding request count, across all request tables
int m_outstanding_count;
bool m_deadlock_check_scheduled;
- int m_servicing_atomic;
+ unsigned m_servicing_atomic;
int m_atomics_counter;
};
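
Note: the dont_set flag added alongside these declarations lets a caller query readiness without mutating the atomic-tracking state, since the updates in isReady() are guarded by if (!dont_set). A hypothetical peek, not taken from this patch:

    // Hypothetical peek (not in this patch): check readiness without claiming
    // the atomic sequence or touching m_atomics_counter.
    if (sequencer->isReady(req, true /* dont_set */)) {
        // Safe to plan the issue; state is only updated when isReady() runs
        // again with dont_set == false on the actual issue path.
    }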