author     Nilay Vaish <nilay@cs.wisc.edu>    2014-11-06 00:55:09 -0600
committer  Nilay Vaish <nilay@cs.wisc.edu>    2014-11-06 00:55:09 -0600
commit     8ccfd9defa930d5c2904134d7a7286682e721db9 (patch)
tree       48f55df4f9446255c44c649a6d203793dd801e62
parent     ae82551496155588786751a3a92191069488d7f3 (diff)
ruby: dma sequencer: remove RubyPort as parent class
As of now, DMASequencer inherits from the RubyPort class, but the code in RubyPort is heavily tailored for the CPU Sequencer; parts of it are not required at all for the DMA sequencer. Moreover, the next patch uses the DMA sequencer for carrying out memory accesses for all the I/O devices. Hence, it is better to have a leaner DMA sequencer.
-rw-r--r--  src/mem/ruby/system/DMASequencer.cc  195
-rw-r--r--  src/mem/ruby/system/DMASequencer.hh   75
-rw-r--r--  src/mem/ruby/system/Sequencer.py      13
3 files changed, 274 insertions(+), 9 deletions(-)
diff --git a/src/mem/ruby/system/DMASequencer.cc b/src/mem/ruby/system/DMASequencer.cc
index 2a458a408..66b6e404a 100644
--- a/src/mem/ruby/system/DMASequencer.cc
+++ b/src/mem/ruby/system/DMASequencer.cc
@@ -28,26 +28,212 @@
#include <memory>
+#include "debug/Config.hh"
+#include "debug/Drain.hh"
#include "debug/RubyDma.hh"
#include "debug/RubyStats.hh"
#include "mem/protocol/SequencerMsg.hh"
-#include "mem/protocol/SequencerRequestType.hh"
#include "mem/ruby/system/DMASequencer.hh"
#include "mem/ruby/system/System.hh"
+#include "sim/system.hh"
DMASequencer::DMASequencer(const Params *p)
- : RubyPort(p)
+ : MemObject(p), m_version(p->version), m_controller(NULL),
+ m_mandatory_q_ptr(NULL), m_usingRubyTester(p->using_ruby_tester),
+ slave_port(csprintf("%s.slave", name()), this, p->access_phys_mem, 0),
+ drainManager(NULL), system(p->system), retry(false),
+ access_phys_mem(p->access_phys_mem)
{
+ assert(m_version != -1);
}
void
DMASequencer::init()
{
- RubyPort::init();
+ MemObject::init();
+ assert(m_controller != NULL);
+ m_mandatory_q_ptr = m_controller->getMandatoryQueue();
+ m_mandatory_q_ptr->setSender(this);
m_is_busy = false;
m_data_block_mask = ~ (~0 << RubySystem::getBlockSizeBits());
}
+BaseSlavePort &
+DMASequencer::getSlavePort(const std::string &if_name, PortID idx)
+{
+ // used by the DMA devices to connect to the Ruby interconnect; any
+ // port name other than "slave" is passed along to the base class
+ if (if_name != "slave") {
+ // pass it along to our super class
+ return MemObject::getSlavePort(if_name, idx);
+ } else {
+ return slave_port;
+ }
+}
+
+DMASequencer::MemSlavePort::MemSlavePort(const std::string &_name,
+ DMASequencer *_port, bool _access_phys_mem, PortID id)
+ : QueuedSlavePort(_name, _port, queue, id), queue(*_port, *this),
+ access_phys_mem(_access_phys_mem)
+{
+ DPRINTF(RubyDma, "Created slave memport on ruby sequencer %s\n", _name);
+}
+
+bool
+DMASequencer::MemSlavePort::recvTimingReq(PacketPtr pkt)
+{
+ DPRINTF(RubyDma, "Timing request for address %#x on port %d\n",
+ pkt->getAddr(), id);
+ DMASequencer *seq = static_cast<DMASequencer *>(&owner);
+
+ if (pkt->memInhibitAsserted())
+ panic("DMASequencer should never see an inhibited request\n");
+
+ assert(isPhysMemAddress(pkt->getAddr()));
+ assert(Address(pkt->getAddr()).getOffset() + pkt->getSize() <=
+ RubySystem::getBlockSizeBytes());
+
+ // Submit the ruby request
+ RequestStatus requestStatus = seq->makeRequest(pkt);
+
+ // If the request successfully issued then we should return true.
+ // Otherwise, we need to tell the port to retry at a later point
+ // and return false.
+ if (requestStatus == RequestStatus_Issued) {
+ DPRINTF(RubyDma, "Request %s 0x%x issued\n", pkt->cmdString(),
+ pkt->getAddr());
+ return true;
+ }
+
+ // Unless one is using the ruby tester, record the stalled M5 port for
+ // later retry when the sequencer becomes free.
+ if (!seq->m_usingRubyTester) {
+ seq->retry = true;
+ }
+
+ DPRINTF(RubyDma, "Request for address %#x did not issued because %s\n",
+ pkt->getAddr(), RequestStatus_to_string(requestStatus));
+
+ return false;
+}
+
+void
+DMASequencer::ruby_hit_callback(PacketPtr pkt)
+{
+ DPRINTF(RubyDma, "Hit callback for %s 0x%x\n", pkt->cmdString(),
+ pkt->getAddr());
+
+ // The packet was destined for memory and has not yet been turned
+ // into a response
+ assert(system->isMemAddr(pkt->getAddr()));
+ assert(pkt->isRequest());
+ slave_port.hitCallback(pkt);
+
+ // If we had to stall the slave port, wake it up because
+ // the sequencer likely has free resources now.
+ if (retry) {
+ retry = false;
+ DPRINTF(RubyDma,"Sequencer may now be free. SendRetry to port %s\n",
+ slave_port.name());
+ slave_port.sendRetry();
+ }
+
+ testDrainComplete();
+}
+
+void
+DMASequencer::testDrainComplete()
+{
+ //If we weren't able to drain before, we might be able to now.
+ if (drainManager != NULL) {
+ unsigned int drainCount = outstandingCount();
+ DPRINTF(Drain, "Drain count: %u\n", drainCount);
+ if (drainCount == 0) {
+ DPRINTF(Drain, "DMASequencer done draining, signaling drain done\n");
+ drainManager->signalDrainDone();
+ // Clear the drain manager once we're done with it.
+ drainManager = NULL;
+ }
+ }
+}
+
+unsigned int
+DMASequencer::getChildDrainCount(DrainManager *dm)
+{
+ int count = 0;
+ count += slave_port.drain(dm);
+ DPRINTF(Config, "count after slave port check %d\n", count);
+ return count;
+}
+
+unsigned int
+DMASequencer::drain(DrainManager *dm)
+{
+ if (isDeadlockEventScheduled()) {
+ descheduleDeadlockEvent();
+ }
+
+ // If the DMASequencer is not empty, then it needs to clear all outstanding
+ // requests before it should call drainManager->signalDrainDone()
+ DPRINTF(Config, "outstanding count %d\n", outstandingCount());
+ bool need_drain = outstandingCount() > 0;
+
+ //
+ // Also, get the number of child ports that will also need to clear
+ // their buffered requests before they call drainManager->signalDrainDone()
+ //
+ unsigned int child_drain_count = getChildDrainCount(dm);
+
+ // Set status
+ if (need_drain) {
+ drainManager = dm;
+
+ DPRINTF(Drain, "DMASequencer not drained\n");
+ setDrainState(Drainable::Draining);
+ return child_drain_count + 1;
+ }
+
+ drainManager = NULL;
+ setDrainState(Drainable::Drained);
+ return child_drain_count;
+}
+
+void
+DMASequencer::MemSlavePort::hitCallback(PacketPtr pkt)
+{
+ bool needsResponse = pkt->needsResponse();
+ bool accessPhysMem = access_phys_mem;
+
+ assert(!pkt->isLLSC());
+ assert(!pkt->isFlush());
+
+ DPRINTF(RubyDma, "Hit callback needs response %d\n", needsResponse);
+
+ if (accessPhysMem) {
+ DMASequencer *seq = static_cast<DMASequencer *>(&owner);
+ seq->system->getPhysMem().access(pkt);
+ } else if (needsResponse) {
+ pkt->makeResponse();
+ }
+
+ // turn packet around to go back to requester if response expected
+ if (needsResponse) {
+ DPRINTF(RubyDma, "Sending packet back over port\n");
+ // send next cycle
+ schedTimingResp(pkt, curTick() + g_system_ptr->clockPeriod());
+ } else {
+ delete pkt;
+ }
+ DPRINTF(RubyDma, "Hit callback done!\n");
+}
+
+bool
+DMASequencer::MemSlavePort::isPhysMemAddress(Addr addr) const
+{
+ DMASequencer *seq = static_cast<DMASequencer *>(&owner);
+ return seq->system->isMemAddr(addr);
+}
+
RequestStatus
DMASequencer::makeRequest(PacketPtr pkt)
{
@@ -168,7 +354,8 @@ DMASequencer::ackCallback()
}
void
-DMASequencer::recordRequestType(DMASequencerRequestType requestType) {
+DMASequencer::recordRequestType(DMASequencerRequestType requestType)
+{
DPRINTF(RubyStats, "Recorded statistic: %s\n",
DMASequencerRequestType_to_string(requestType));
}
diff --git a/src/mem/ruby/system/DMASequencer.hh b/src/mem/ruby/system/DMASequencer.hh
index 13d79182d..a24db2d34 100644
--- a/src/mem/ruby/system/DMASequencer.hh
+++ b/src/mem/ruby/system/DMASequencer.hh
@@ -33,10 +33,16 @@
#include <memory>
#include "mem/protocol/DMASequencerRequestType.hh"
+#include "mem/protocol/RequestStatus.hh"
#include "mem/ruby/common/DataBlock.hh"
-#include "mem/ruby/system/RubyPort.hh"
+#include "mem/ruby/network/MessageBuffer.hh"
+#include "mem/ruby/system/System.hh"
+#include "mem/mem_object.hh"
+#include "mem/tport.hh"
#include "params/DMASequencer.hh"
+class AbstractController;
+
struct DMARequest
{
uint64_t start_paddr;
@@ -48,12 +54,45 @@ struct DMARequest
PacketPtr pkt;
};
-class DMASequencer : public RubyPort
+class DMASequencer : public MemObject
{
public:
typedef DMASequencerParams Params;
DMASequencer(const Params *);
void init();
+
+ public:
+ class MemSlavePort : public QueuedSlavePort
+ {
+ private:
+ SlavePacketQueue queue;
+ bool access_phys_mem;
+
+ public:
+ MemSlavePort(const std::string &_name, DMASequencer *_port,
+ bool _access_phys_mem, PortID id);
+ void hitCallback(PacketPtr pkt);
+ void evictionCallback(const Address& address);
+
+ protected:
+ bool recvTimingReq(PacketPtr pkt);
+
+ Tick recvAtomic(PacketPtr pkt)
+ { panic("DMASequencer::MemSlavePort::recvAtomic() not implemented!\n"); }
+
+ void recvFunctional(PacketPtr pkt)
+ { panic("DMASequencer::MemSlavePort::recvFunctional() not implemented!\n"); }
+
+ AddrRangeList getAddrRanges() const
+ { AddrRangeList ranges; return ranges; }
+
+ private:
+ bool isPhysMemAddress(Addr addr) const;
+ };
+
+ BaseSlavePort &getSlavePort(const std::string &if_name,
+ PortID idx = InvalidPortID);
+
/* external interface */
RequestStatus makeRequest(PacketPtr pkt);
bool busy() { return m_is_busy;}
@@ -61,6 +100,12 @@ class DMASequencer : public RubyPort
bool isDeadlockEventScheduled() const { return false; }
void descheduleDeadlockEvent() {}
+ // Called by the controller to give the sequencer a pointer.
+ // A pointer to the controller is needed for atomic support.
+ void setController(AbstractController* _cntrl) { m_controller = _cntrl; }
+ uint32_t getId() { return m_version; }
+ unsigned int drain(DrainManager *dm);
+
/* SLICC callback */
void dataCallback(const DataBlock & dblk);
void ackCallback();
@@ -69,8 +114,34 @@ class DMASequencer : public RubyPort
private:
void issueNext();
+ void ruby_hit_callback(PacketPtr pkt);
+ void testDrainComplete();
+
+ /**
+ * Called by the PIO port when receiving a timing response.
+ *
+ * @param pkt Response packet
+ * @param master_port_id Port id of the PIO port
+ *
+ * @return Whether successfully sent
+ */
+ bool recvTimingResp(PacketPtr pkt, PortID master_port_id);
+ unsigned int getChildDrainCount(DrainManager *dm);
private:
+ uint32_t m_version;
+ AbstractController* m_controller;
+ MessageBuffer* m_mandatory_q_ptr;
+ bool m_usingRubyTester;
+
+ MemSlavePort slave_port;
+
+ DrainManager *drainManager;
+ System* system;
+
+ bool retry;
+ bool access_phys_mem;
+
bool m_is_busy;
uint64_t m_data_block_mask;
DMARequest active_request;
diff --git a/src/mem/ruby/system/Sequencer.py b/src/mem/ruby/system/Sequencer.py
index 8ebd63dee..8bad83db5 100644
--- a/src/mem/ruby/system/Sequencer.py
+++ b/src/mem/ruby/system/Sequencer.py
@@ -52,7 +52,6 @@ class RubyPort(MemObject):
support_data_reqs = Param.Bool(True, "data cache requests supported")
support_inst_reqs = Param.Bool(True, "inst cache requests supported")
-
class RubyPortProxy(RubyPort):
type = 'RubyPortProxy'
cxx_header = "mem/ruby/system/RubyPortProxy.hh"
@@ -71,7 +70,15 @@ class RubySequencer(RubyPort):
"max outstanding cycles for a request before deadlock/livelock declared")
using_network_tester = Param.Bool(False, "")
-class DMASequencer(RubyPort):
+class DMASequencer(MemObject):
type = 'DMASequencer'
cxx_header = "mem/ruby/system/DMASequencer.hh"
- access_phys_mem = True
+ version = Param.Int(0, "")
+
+ slave = SlavePort("Device slave port")
+
+ using_ruby_tester = Param.Bool(False, "")
+ access_phys_mem = Param.Bool(True,
+ "should the dma atomically update phys_mem")
+ ruby_system = Param.RubySystem(Parent.any, "")
+ system = Param.System(Parent.any, "system object")
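
For context, here is a minimal sketch of how a Ruby config script might instantiate the reworked, standalone DMASequencer after this patch. The DMA_Controller class, its dma_sequencer parameter, and the io_device object are assumptions modeled on the existing Ruby protocol configs, not part of this patch; only DMASequencer's own parameters come from the Sequencer.py hunk above.

from m5.objects import *  # gem5 config environment assumed

# Hedged sketch of config-side usage; ruby_system and io_device are
# assumed to exist in the enclosing script.
dma_seq = DMASequencer(version=0, ruby_system=ruby_system)
# The protocol's DMA controller is expected to hand itself to the
# sequencer via setController(), so that DMASequencer::init() can
# fetch the controller's mandatory queue.
dma_cntrl = DMA_Controller(version=0, dma_sequencer=dma_seq,
                           ruby_system=ruby_system)
# Devices now connect directly to the sequencer's new slave port,
# rather than going through the RubyPort machinery.
io_device.dma = dma_seq.slave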