author     Andreas Hansson <andreas.hansson@arm.com>   2012-03-22 06:36:27 -0400
committer  Andreas Hansson <andreas.hansson@arm.com>   2012-03-22 06:36:27 -0400
commit     c2d2ea99e3efe13bc50d410e2eeae9dd6757e57f (patch)
tree       5836cc125091b436dee3fbc32ef26e1eeed49a6c /src/mem
parent     fb395b56dd2432b862c550bad7b4bbe1f205ec59 (diff)
download   gem5-c2d2ea99e3efe13bc50d410e2eeae9dd6757e57f.tar.xz
MEM: Split SimpleTimingPort into PacketQueue and ports
This patch decouples the queueing and the port interactions to simplify the
introduction of the master and slave ports. By separating the queueing
functionality from the port itself, it becomes much easier to distinguish
between master and slave ports, and still retain the queueing ability for
both (without code duplication).

As part of the split into a PacketQueue and a port, there is now also a
hierarchy of two port classes, QueuedPort and SimpleTimingPort. The
QueuedPort is useful for ports that want to leave the transmission of
outgoing packets to the queue, and is used by both master and slave ports.
The SimpleTimingPort inherits from the QueuedPort and adds the
implementation of recvTiming and recvFunctional through recvAtomic. The
PioPort and MessagePort are cleaned up as part of the changes.

--HG--
rename : src/mem/tport.cc => src/mem/packet_queue.cc
rename : src/mem/tport.hh => src/mem/packet_queue.hh
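For orientation, the following is a minimal sketch (not part of the patch) of
what a device-side slave port looks like under the new hierarchy: only
recvAtomic has to be provided, since SimpleTimingPort derives recvTiming and
recvFunctional from it and its embedded PacketQueue handles the deferred
responses. MyDevicePort and the 1000-tick latency are illustrative, and Port
hooks a real device would also supply (e.g. address ranges) are omitted.

    // Hypothetical sketch, not part of this patch.
    #include "mem/tport.hh"

    class MyDevicePort : public SimpleTimingPort
    {
      public:
        MyDevicePort(const std::string& name, MemObject* owner)
            : SimpleTimingPort(name, owner)
        { }

      protected:
        // The only hook that must be provided; SimpleTimingPort
        // implements recvTiming and recvFunctional in terms of it and
        // schedules any timing response through its packet queue.
        virtual Tick recvAtomic(PacketPtr pkt)
        {
            if (pkt->needsResponse())
                pkt->makeAtomicResponse(); // turn the request around
            return 1000; // arbitrary fixed access latency in ticks
        }
    };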
Diffstat (limited to 'src/mem')
-rw-r--r--  src/mem/SConscript               |    2
-rw-r--r--  src/mem/cache/base.cc            |   11
-rw-r--r--  src/mem/cache/base.hh            |   35
-rw-r--r--  src/mem/cache/cache.hh           |   36
-rw-r--r--  src/mem/cache/cache_impl.hh      |   13
-rw-r--r--  src/mem/mport.hh                 |   15
-rw-r--r--  src/mem/packet_queue.cc          |  205
-rw-r--r--  src/mem/packet_queue.hh          |  218
-rw-r--r--  src/mem/physical.cc              |    2
-rw-r--r--  src/mem/qport.hh                 |  101
-rw-r--r--  src/mem/ruby/system/RubyPort.cc  |   15
-rw-r--r--  src/mem/ruby/system/RubyPort.hh  |   10
-rw-r--r--  src/mem/tport.cc                 |  180
-rw-r--r--  src/mem/tport.hh                 |  160
14 files changed, 643 insertions, 360 deletions
diff --git a/src/mem/SConscript b/src/mem/SConscript
index fe43f71be..cc5e3a37a 100644
--- a/src/mem/SConscript
+++ b/src/mem/SConscript
@@ -40,6 +40,7 @@ Source('mem_object.cc')
Source('mport.cc')
Source('packet.cc')
Source('port.cc')
+Source('packet_queue.cc')
Source('tport.cc')
Source('port_proxy.cc')
Source('fs_translating_port_proxy.cc')
@@ -57,6 +58,7 @@ DebugFlag('BusBridge')
DebugFlag('LLSC')
DebugFlag('MMU')
DebugFlag('MemoryAccess')
+DebugFlag('PacketQueue')
DebugFlag('ProtocolTrace')
DebugFlag('RubyCache')
diff --git a/src/mem/cache/base.cc b/src/mem/cache/base.cc
index fb2757616..a2cb59a76 100644
--- a/src/mem/cache/base.cc
+++ b/src/mem/cache/base.cc
@@ -54,18 +54,11 @@
using namespace std;
-BaseCache::CacheMasterPort::CacheMasterPort(const std::string &_name,
- BaseCache *_cache,
- const std::string &_label)
- : SimpleTimingPort(_name, _cache, _label)
-{
-}
-
BaseCache::CacheSlavePort::CacheSlavePort(const std::string &_name,
BaseCache *_cache,
const std::string &_label)
- : SimpleTimingPort(_name, _cache, _label), blocked(false),
- mustSendRetry(false), sendRetryEvent(this)
+ : QueuedPort(_name, _cache, queue), queue(*_cache, *this, _label),
+ blocked(false), mustSendRetry(false), sendRetryEvent(this)
{
}
diff --git a/src/mem/cache/base.hh b/src/mem/cache/base.hh
index 2a79fb354..c13d27d42 100644
--- a/src/mem/cache/base.hh
+++ b/src/mem/cache/base.hh
@@ -64,8 +64,8 @@
#include "mem/cache/mshr_queue.hh"
#include "mem/mem_object.hh"
#include "mem/packet.hh"
+#include "mem/qport.hh"
#include "mem/request.hh"
-#include "mem/tport.hh"
#include "params/BaseCache.hh"
#include "sim/eventq.hh"
#include "sim/full_system.hh"
@@ -118,7 +118,7 @@ class BaseCache : public MemObject
* and the sendDeferredPacket of the timing port is modified to
* consider both the transmit list and the requests from the MSHR.
*/
- class CacheMasterPort : public SimpleTimingPort
+ class CacheMasterPort : public QueuedPort
{
public:
@@ -131,22 +131,31 @@ class BaseCache : public MemObject
void requestBus(RequestCause cause, Tick time)
{
DPRINTF(CachePort, "Asserting bus request for cause %d\n", cause);
- schedSendEvent(time);
+ queue.schedSendEvent(time);
}
+ /**
+ * Schedule the transmission of a response packet at a given
+ * point in time.
+ *
+ * @param pkt response packet
+ * @param time time to send the response
+ */
void respond(PacketPtr pkt, Tick time) {
- schedSendTiming(pkt, time);
+ queue.schedSendTiming(pkt, time);
}
protected:
CacheMasterPort(const std::string &_name, BaseCache *_cache,
- const std::string &_label);
+ PacketQueue &_queue) :
+ QueuedPort(_name, _cache, _queue)
+ { }
/**
* Memory-side port always snoops.
*
- * return always true
+ * @return always true
*/
virtual bool isSnooping() { return true; }
};
@@ -159,7 +168,7 @@ class BaseCache : public MemObject
* incoming requests. If blocked, the port will issue a retry once
* unblocked.
*/
- class CacheSlavePort : public SimpleTimingPort
+ class CacheSlavePort : public QueuedPort
{
public:
@@ -170,8 +179,15 @@ class BaseCache : public MemObject
/** Return to normal operation and accept new requests. */
void clearBlocked();
+ /**
+ * Schedule the transmission of a response packet at a given
+ * point in time.
+ *
+ * @param pkt response packet
+ * @param time time to send the response
+ */
void respond(PacketPtr pkt, Tick time) {
- schedSendTiming(pkt, time);
+ queue.schedSendTiming(pkt, time);
}
protected:
@@ -179,6 +195,9 @@ class BaseCache : public MemObject
CacheSlavePort(const std::string &_name, BaseCache *_cache,
const std::string &_label);
+ /** A normal packet queue used to store responses. */
+ PacketQueue queue;
+
bool blocked;
bool mustSendRetry;
diff --git a/src/mem/cache/cache.hh b/src/mem/cache/cache.hh
index 288395584..782749aab 100644
--- a/src/mem/cache/cache.hh
+++ b/src/mem/cache/cache.hh
@@ -109,6 +109,34 @@ class Cache : public BaseCache
};
/**
+ * Override the default behaviour of sendDeferredPacket to enable
+ * the memory-side cache port to also send requests based on the
+ * current MSHR status. This queue has a reference to our specific
+ * cache implementation and is used by the MemSidePort.
+ */
+ class MemSidePacketQueue : public PacketQueue
+ {
+
+ protected:
+
+ Cache<TagStore> &cache;
+
+ public:
+
+ MemSidePacketQueue(Cache<TagStore> &cache, Port &port,
+ const std::string &label) :
+ PacketQueue(cache, port, label), cache(cache) { }
+
+ /**
+ * Override the normal sendDeferredPacket and do not only
+ * consider the transmit list (used for responses), but also
+ * requests.
+ */
+ virtual void sendDeferredPacket();
+
+ };
+
+ /**
* The memory-side port extends the base cache master port with
* access functions for functional, atomic and timing snoops.
*/
@@ -116,6 +144,9 @@ class Cache : public BaseCache
{
private:
+ /** The cache-specific queue. */
+ MemSidePacketQueue _queue;
+
// a pointer to our specific cache implementation
Cache<TagStore> *cache;
@@ -134,11 +165,6 @@ class Cache : public BaseCache
MemSidePort(const std::string &_name, Cache<TagStore> *_cache,
const std::string &_label);
-
- /**
- * Overload sendDeferredPacket of SimpleTimingPort.
- */
- virtual void sendDeferredPacket();
};
/** Tag and data Storage */
diff --git a/src/mem/cache/cache_impl.hh b/src/mem/cache/cache_impl.hh
index f6efc3fb8..2463071de 100644
--- a/src/mem/cache/cache_impl.hh
+++ b/src/mem/cache/cache_impl.hh
@@ -1646,7 +1646,7 @@ Cache<TagStore>::MemSidePort::recvFunctional(PacketPtr pkt)
template<class TagStore>
void
-Cache<TagStore>::MemSidePort::sendDeferredPacket()
+Cache<TagStore>::MemSidePacketQueue::sendDeferredPacket()
{
// if we have a response packet waiting we have to start with that
if (deferredPacketReady()) {
@@ -1654,7 +1654,7 @@ Cache<TagStore>::MemSidePort::sendDeferredPacket()
trySendTiming();
} else {
// check for request packets (requests & writebacks)
- PacketPtr pkt = cache->getTimingPacket();
+ PacketPtr pkt = cache.getTimingPacket();
if (pkt == NULL) {
// can happen if e.g. we attempt a writeback and fail, but
// before the retry, the writeback is eliminated because
@@ -1663,7 +1663,7 @@ Cache<TagStore>::MemSidePort::sendDeferredPacket()
} else {
MSHR *mshr = dynamic_cast<MSHR*>(pkt->senderState);
- waitingOnRetry = !sendTiming(pkt);
+ waitingOnRetry = !port.sendTiming(pkt);
if (waitingOnRetry) {
DPRINTF(CachePort, "now waiting on a retry\n");
@@ -1679,7 +1679,7 @@ Cache<TagStore>::MemSidePort::sendDeferredPacket()
// care about this packet and might override it before
// it gets retried
} else {
- cache->markInService(mshr, pkt);
+ cache.markInService(mshr, pkt);
}
}
}
@@ -1688,7 +1688,7 @@ Cache<TagStore>::MemSidePort::sendDeferredPacket()
// next send, not only looking at the response transmit list, but
// also considering when the next MSHR is ready
if (!waitingOnRetry) {
- scheduleSend(cache->nextMSHRReadyTime());
+ scheduleSend(cache.nextMSHRReadyTime());
}
}
@@ -1696,6 +1696,7 @@ template<class TagStore>
Cache<TagStore>::
MemSidePort::MemSidePort(const std::string &_name, Cache<TagStore> *_cache,
const std::string &_label)
- : BaseCache::CacheMasterPort(_name, _cache, _label), cache(_cache)
+ : BaseCache::CacheMasterPort(_name, _cache, _queue),
+ _queue(*_cache, *this, _label), cache(_cache)
{
}
diff --git a/src/mem/mport.hh b/src/mem/mport.hh
index 062dcca0b..7f167c227 100644
--- a/src/mem/mport.hh
+++ b/src/mem/mport.hh
@@ -31,6 +31,7 @@
#ifndef __MEM_MPORT_HH__
#define __MEM_MPORT_HH__
+#include "mem/mem_object.hh"
#include "mem/tport.hh"
/*
@@ -40,27 +41,21 @@
* the underpinnings of SimpleTimingPort, but it tweaks some of the external
* functions.
*/
-
class MessagePort : public SimpleTimingPort
{
+
public:
- MessagePort(std::string pname, MemObject *_owner = NULL) :
- SimpleTimingPort(pname, _owner)
+ MessagePort(const std::string &name, MemObject *owner) :
+ SimpleTimingPort(name, owner)
{}
virtual ~MessagePort()
{}
- void
- recvFunctional(PacketPtr pkt)
- {
- recvAtomic(pkt);
- }
+ protected:
Tick recvAtomic(PacketPtr pkt);
- protected:
-
virtual Tick recvMessage(PacketPtr pkt) = 0;
// Accept and ignore responses.
diff --git a/src/mem/packet_queue.cc b/src/mem/packet_queue.cc
new file mode 100644
index 000000000..29914bea2
--- /dev/null
+++ b/src/mem/packet_queue.cc
@@ -0,0 +1,205 @@
+/*
+ * Copyright (c) 2012 ARM Limited
+ * All rights reserved.
+ *
+ * The license below extends only to copyright in the software and shall
+ * not be construed as granting a license to any other intellectual
+ * property including but not limited to intellectual property relating
+ * to a hardware implementation of the functionality of the software
+ * licensed hereunder. You may use the software subject to the license
+ * terms below provided that you ensure that this notice is replicated
+ * unmodified and in its entirety in all distributions of the software,
+ * modified or unmodified, in source code or in binary form.
+ *
+ * Copyright (c) 2006 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Ali Saidi
+ * Andreas Hansson
+ */
+
+#include "debug/PacketQueue.hh"
+#include "mem/packet_queue.hh"
+
+using namespace std;
+
+PacketQueue::PacketQueue(EventManager& _em, Port& _port,
+ const std::string _label)
+ : em(_em), label(_label), sendEvent(this), drainEvent(NULL), port(_port),
+ waitingOnRetry(false)
+{
+}
+
+PacketQueue::~PacketQueue()
+{
+}
+
+void
+PacketQueue::retry()
+{
+ DPRINTF(PacketQueue, "Queue %s received retry\n", name());
+ assert(waitingOnRetry);
+ sendDeferredPacket();
+}
+
+bool
+PacketQueue::checkFunctional(PacketPtr pkt)
+{
+ pkt->pushLabel(label);
+
+ DeferredPacketIterator i = transmitList.begin();
+ DeferredPacketIterator end = transmitList.end();
+ bool found = false;
+
+ while (!found && i != end) {
+ // If the buffered packet contains data, and it overlaps the
+ // current packet, then update data
+ found = pkt->checkFunctional(i->pkt);
+ ++i;
+ }
+
+ pkt->popLabel();
+
+ return found;
+}
+
+void
+PacketQueue::schedSendEvent(Tick when)
+{
+ // if we are waiting on a retry, do not schedule a send event, and
+ // instead rely on retry being called
+ if (waitingOnRetry) {
+ assert(!sendEvent.scheduled());
+ return;
+ }
+
+ if (!sendEvent.scheduled()) {
+ em.schedule(&sendEvent, when);
+ } else if (sendEvent.when() > when) {
+ em.reschedule(&sendEvent, when);
+ }
+}
+
+void
+PacketQueue::schedSendTiming(PacketPtr pkt, Tick when)
+{
+ assert(when > curTick());
+
+ // nothing on the list, or earlier than current front element,
+ // schedule an event
+ if (transmitList.empty() || when < transmitList.front().tick) {
+ // note that currently we ignore a potentially outstanding retry
+ // and could in theory put a new packet at the head of the
+ // transmit list before retrying the existing packet
+ transmitList.push_front(DeferredPacket(when, pkt));
+ schedSendEvent(when);
+ return;
+ }
+
+ // list is non-empty and this belongs at the end
+ if (when >= transmitList.back().tick) {
+ transmitList.push_back(DeferredPacket(when, pkt));
+ return;
+ }
+
+ // this belongs in the middle somewhere, insertion sort
+ DeferredPacketIterator i = transmitList.begin();
+ ++i; // already checked for insertion at front
+ while (i != transmitList.end() && when >= i->tick)
+ ++i;
+ transmitList.insert(i, DeferredPacket(when, pkt));
+}
+
+void PacketQueue::trySendTiming()
+{
+ assert(deferredPacketReady());
+
+ // take the next packet off the list here, as we might return to
+ // ourselves through the sendTiming call below
+ DeferredPacket dp = transmitList.front();
+ transmitList.pop_front();
+
+ // attempt to send the packet and remember the outcome
+ waitingOnRetry = !port.sendTiming(dp.pkt);
+
+ if (waitingOnRetry) {
+ // put the packet back at the front of the list (packet should
+ // not have changed since it wasn't accepted)
+ assert(!sendEvent.scheduled());
+ transmitList.push_front(dp);
+ }
+}
+
+void
+PacketQueue::scheduleSend(Tick time)
+{
+ // the next ready time is either determined by the next deferred packet,
+ // or in the cache through the MSHR ready time
+ Tick nextReady = std::min(deferredPacketReadyTime(), time);
+
+ if (nextReady != MaxTick) {
+ // if the sendTiming caused someone else to call our
+ // recvTiming we could already have an event scheduled, check
+ if (!sendEvent.scheduled())
+ em.schedule(&sendEvent, std::max(nextReady, curTick() + 1));
+ } else {
+ // no more to send, so if we're draining, we may be done
+ if (drainEvent && !sendEvent.scheduled()) {
+ drainEvent->process();
+ drainEvent = NULL;
+ }
+ }
+}
+
+void
+PacketQueue::sendDeferredPacket()
+{
+ // try to send what is on the list, this will set waitingOnRetry
+ // accordingly
+ trySendTiming();
+
+ // if we succeeded and are not waiting for a retry, schedule the
+ // next send
+ if (!waitingOnRetry) {
+ scheduleSend();
+ }
+}
+
+void
+PacketQueue::processSendEvent()
+{
+ assert(!waitingOnRetry);
+ sendDeferredPacket();
+}
+
+unsigned int
+PacketQueue::drain(Event *de)
+{
+ if (transmitList.empty() && !sendEvent.scheduled())
+ return 0;
+ drainEvent = de;
+ return 1;
+}
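As a rough illustration of how a port's receive hooks interact with its
PacketQueue after this change, consider the sketch below; MyPort and
doFunctionalAccess are illustrative names, not taken from the patch.
QueuedPort (added in src/mem/qport.hh further down) packages exactly this
recvRetry forwarding, together with checkFunctional and drain.

    #include "mem/packet_queue.hh"
    #include "mem/port.hh"

    // Illustrative port owning a PacketQueue; never instantiated here.
    struct MyPort : public Port
    {
        PacketQueue queue;

        MyPort(const std::string& name, MemObject* owner)
            : Port(name, owner), queue(*owner, *this)
        { }

        // hypothetical hook performing the actual functional access
        void doFunctionalAccess(PacketPtr pkt);

        virtual void recvFunctional(PacketPtr pkt)
        {
            // queued responses may hold newer data; check them before
            // falling back to the module's own functional access
            if (!queue.checkFunctional(pkt))
                doFunctionalAccess(pkt);
        }

        virtual void recvRetry()
        {
            // the peer is ready again; let the queue resend the
            // deferred packet whose sendTiming previously failed
            queue.retry();
        }
    };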
diff --git a/src/mem/packet_queue.hh b/src/mem/packet_queue.hh
new file mode 100644
index 000000000..ac868802b
--- /dev/null
+++ b/src/mem/packet_queue.hh
@@ -0,0 +1,218 @@
+/*
+ * Copyright (c) 2012 ARM Limited
+ * All rights reserved.
+ *
+ * The license below extends only to copyright in the software and shall
+ * not be construed as granting a license to any other intellectual
+ * property including but not limited to intellectual property relating
+ * to a hardware implementation of the functionality of the software
+ * licensed hereunder. You may use the software subject to the license
+ * terms below provided that you ensure that this notice is replicated
+ * unmodified and in its entirety in all distributions of the software,
+ * modified or unmodified, in source code or in binary form.
+ *
+ * Copyright (c) 2006 The Regents of The University of Michigan
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Ali Saidi
+ * Andreas Hansson
+ */
+
+#ifndef __MEM_PACKET_QUEUE_HH__
+#define __MEM_PACKET_QUEUE_HH__
+
+/**
+ * @file
+ * Declaration of a simple PacketQueue that is associated with
+ * a port on which it attempts to send packets according to the time
+ * stamp given to them at insertion. The packet queue is responsible
+ * for the flow control of the port, but relies on the module
+ * notifying the queue when a transfer ends.
+ */
+
+#include <list>
+
+#include "mem/port.hh"
+#include "sim/eventq.hh"
+
+/**
+ * A packet queue is a class that holds deferred packets and later
+ * sends them using the associated slave port or master port.
+ */
+class PacketQueue
+{
+ private:
+ /** A deferred packet, buffered to transmit later. */
+ class DeferredPacket {
+ public:
+ Tick tick; ///< The tick when the packet is ready to transmit
+ PacketPtr pkt; ///< Pointer to the packet to transmit
+ DeferredPacket(Tick t, PacketPtr p)
+ : tick(t), pkt(p)
+ {}
+ };
+
+ typedef std::list<DeferredPacket> DeferredPacketList;
+ typedef std::list<DeferredPacket>::iterator DeferredPacketIterator;
+
+ /** A list of outgoing timing response packets that haven't been
+ * serviced yet. */
+ DeferredPacketList transmitList;
+
+ /** The manager which is used for the event queue */
+ EventManager& em;
+
+ /** Label to use for print request packets label stack. */
+ const std::string label;
+
+ /** This function attempts to send deferred packets. Scheduled to
+ * be called in the future via SendEvent. */
+ void processSendEvent();
+
+ /**
+ * Event used to call processSendEvent.
+ **/
+ EventWrapper<PacketQueue, &PacketQueue::processSendEvent> sendEvent;
+
+ /** If we need to drain, keep the drain event around until we're done
+ * here.*/
+ Event *drainEvent;
+
+ protected:
+
+ /** The port used to send the packets. */
+ Port& port;
+
+ /** Remember whether we're awaiting a retry from the bus. */
+ bool waitingOnRetry;
+
+ /** Check whether we have a packet ready to go on the transmit list. */
+ bool deferredPacketReady()
+ { return !transmitList.empty() && transmitList.front().tick <= curTick(); }
+
+ Tick deferredPacketReadyTime()
+ { return transmitList.empty() ? MaxTick : transmitList.front().tick; }
+
+ /**
+ * Attempt to send the packet at the head of the transmit
+ * list. Caller must guarantee that the list is non-empty and that
+ * the head packet is scheduled for curTick() (or earlier). Note
+ * that a subclass of the PacketQueue can override this method and
+ * thus change the behaviour (as done by the cache).
+ */
+ virtual void sendDeferredPacket();
+
+ /**
+ * Attempt to send the packet at the front of the transmit list,
+ * and set waitingOnRetry accordingly. The packet is temporarily
+ * taken off the list, but put back at the front if not
+ * successfully sent.
+ */
+ void trySendTiming();
+
+ /**
+ * Based on the transmit list, or the provided time, schedule a
+ * send event if there are packets to send. If we are idle and
+ * asked to drain then do so.
+ *
+ * @param time an alternative time for the next send event
+ */
+ void scheduleSend(Tick time = MaxTick);
+
+ /**
+ * Simple ports are generally used as slave ports (i.e. they
+ * respond to requests) and thus do not expect to receive any
+ * range changes (as the neighbouring port has a master role and
+ * does not have any address ranges). A subclass can override the
+ * default behaviour if needed.
+ */
+ virtual void recvRangeChange() { }
+
+ public:
+
+ /**
+ * Create a packet queue, linked to an event manager, a port used
+ * to send the packets, and potentially give it a label that will
+ * be used for functional print request packets.
+ *
+ * @param _em Event manager used for scheduling this queue
+ * @param _port Port used to send the packets
+ * @param _label Label to push on the label stack for print request packets
+ */
+ PacketQueue(EventManager& _em, Port& _port,
+ const std::string _label = "PacketQueue");
+
+ /**
+ * Virtual destructor since the class may be used as a base class.
+ */
+ virtual ~PacketQueue();
+
+ /**
+ * Provide a name to simplify debugging, based on the port name.
+ *
+ * @return the name of the associated port with a "-queue" suffix
+ */
+ const std::string name() const { return port.name() + "-queue"; }
+
+ /** Check the list of buffered packets against the supplied
+ * functional request. */
+ bool checkFunctional(PacketPtr pkt);
+
+ /**
+ * Schedule a send event if not already waiting for a retry. If the
+ * requested time is before an already scheduled send event, it
+ * will be rescheduled.
+ *
+ * @param when time at which to schedule the send event
+ */
+ void schedSendEvent(Tick when);
+
+ /**
+ * Add a packet to the transmit list, and ensure that a
+ * processSendEvent is called in the future.
+ *
+ * @param pkt Packet to send
+ * @param when Absolute time (in ticks) to send packet
+ */
+ void schedSendTiming(PacketPtr pkt, Tick when);
+
+ /**
+ * Used by a port to notify the queue that a retry was received
+ * and that the queue can proceed and retry sending the packet
+ * that caused the wait.
+ */
+ void retry();
+
+ /**
+ * Hook for draining the packet queue.
+ *
+ * @param de An event which is used to signal back to the caller
+ * @return A number indicating how many times process will be called
+ */
+ unsigned int drain(Event *de);
+};
+
+#endif // __MEM_PACKET_QUEUE_HH__
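The sendDeferredPacket hook declared above is virtual precisely so that a
subclass can mix request packets into the stream of queued responses, as the
cache's MemSidePacketQueue does earlier in this patch. A hedged sketch of
such a subclass follows; MyOwner, nextRequest and nextRequestTime are
hypothetical stand-ins for the owner's MSHR-like hooks.

    #include "mem/packet_queue.hh"

    // Hypothetical owner that can supply request packets on demand.
    struct MyOwner
    {
        PacketPtr nextRequest();
        Tick nextRequestTime();
    };

    class MyRequestQueue : public PacketQueue
    {
      private:
        MyOwner& owner;

      public:
        MyRequestQueue(EventManager& em, Port& port, MyOwner& _owner,
                       const std::string& label)
            : PacketQueue(em, port, label), owner(_owner)
        { }

      protected:
        virtual void sendDeferredPacket()
        {
            if (deferredPacketReady()) {
                // a queued response is due first
                trySendTiming();
            } else {
                // otherwise try to issue a new request from the owner
                PacketPtr pkt = owner.nextRequest();
                if (pkt)
                    waitingOnRetry = !port.sendTiming(pkt);
            }

            // plan the next send around the owner's ready time as well
            if (!waitingOnRetry)
                scheduleSend(owner.nextRequestTime());
        }
    };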
diff --git a/src/mem/physical.cc b/src/mem/physical.cc
index 999ad0cdb..f11fbb947 100644
--- a/src/mem/physical.cc
+++ b/src/mem/physical.cc
@@ -496,7 +496,7 @@ PhysicalMemory::MemoryPort::recvFunctional(PacketPtr pkt)
{
pkt->pushLabel(memory->name());
- if (!checkFunctional(pkt)) {
+ if (!queue.checkFunctional(pkt)) {
// Default implementation of SimpleTimingPort::recvFunctional()
// calls recvAtomic() and throws away the latency; we can save a
// little here by just not calculating the latency.
diff --git a/src/mem/qport.hh b/src/mem/qport.hh
new file mode 100644
index 000000000..39612d22f
--- /dev/null
+++ b/src/mem/qport.hh
@@ -0,0 +1,101 @@
+/*
+ * Copyright (c) 2012 ARM Limited
+ * All rights reserved.
+ *
+ * The license below extends only to copyright in the software and shall
+ * not be construed as granting a license to any other intellectual
+ * property including but not limited to intellectual property relating
+ * to a hardware implementation of the functionality of the software
+ * licensed hereunder. You may use the software subject to the license
+ * terms below provided that you ensure that this notice is replicated
+ * unmodified and in its entirety in all distributions of the software,
+ * modified or unmodified, in source code or in binary form.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met: redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer;
+ * redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution;
+ * neither the name of the copyright holders nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Authors: Andreas Hansson
+ */
+
+#ifndef __MEM_QPORT_HH__
+#define __MEM_QPORT_HH__
+
+/**
+ * @file
+ * Declaration of the queued port.
+ */
+
+#include "mem/packet_queue.hh"
+#include "mem/port.hh"
+
+/**
+ * A queued port is a port that has an infinite queue for outgoing
+ * packets and thus decouples the module that wants to send
+ * requests/responses from the flow control (retry mechanism) of the
+ * port. A queued port can be used by both a master and a slave. The
+ * queue is a parameter to allow tailoring of the queue implementation
+ * (used in the cache).
+ */
+class QueuedPort : public Port
+{
+
+ protected:
+
+ /** Packet queue used to store outgoing requests and responses. */
+ PacketQueue &queue;
+
+ /** This function is notification that the device should attempt to send a
+ * packet again. */
+ virtual void recvRetry() { queue.retry(); }
+
+ virtual void recvRangeChange() { }
+
+ public:
+
+ /**
+ * Create a QueuedPort with a given name, owner, and a supplied
+ * implementation of a packet queue. The external definition of
+ * the queue enables e.g. the cache to implement a specific queue
+ * behaviour in a subclass, and provide the latter to the
+ * QueuedPort constructor.
+ */
+ QueuedPort(const std::string& name, MemObject* owner, PacketQueue &queue) :
+ Port(name, owner), queue(queue)
+ { }
+
+ virtual ~QueuedPort() { }
+
+ /** Check the list of buffered packets against the supplied
+ * functional request. */
+ bool checkFunctional(PacketPtr pkt) { return queue.checkFunctional(pkt); }
+
+ /**
+ * Hook for draining the queued port.
+ *
+ * @param de an event which is used to signal back to the caller
+ * @returns a number indicating how many times process will be called
+ */
+ unsigned int drain(Event *de) { return queue.drain(de); }
+};
+
+#endif // __MEM_QPORT_HH__
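For comparison with the cache and Ruby port changes elsewhere in this patch,
here is a minimal sketch of a QueuedPort subclass that supplies a plain
PacketQueue; MySlavePort and its respond helper are illustrative and not
defined by the patch.

    #include "mem/qport.hh"

    class MySlavePort : public QueuedPort
    {
      private:
        /** A plain packet queue used to store outgoing responses. */
        PacketQueue respQueue;

      public:
        // the base class only stores a reference, so handing it a
        // not-yet-constructed member (as the cache does) is fine
        MySlavePort(const std::string& name, MemObject* owner)
            : QueuedPort(name, owner, respQueue),
              respQueue(*owner, *this, "MySlavePort")
        { }

        // hand a response to the queue; the QueuedPort base class and
        // the queue take care of sendTiming, retries and draining
        void respond(PacketPtr pkt, Tick when)
        { respQueue.schedSendTiming(pkt, when); }

      protected:
        // handle the request and eventually call respond() (omitted)
        virtual bool recvTiming(PacketPtr pkt) { return true; }
        virtual Tick recvAtomic(PacketPtr pkt) { return 0; }
        virtual void recvFunctional(PacketPtr pkt) { }
    };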
diff --git a/src/mem/ruby/system/RubyPort.cc b/src/mem/ruby/system/RubyPort.cc
index aff129b50..aca6604c6 100644
--- a/src/mem/ruby/system/RubyPort.cc
+++ b/src/mem/ruby/system/RubyPort.cc
@@ -98,21 +98,18 @@ RubyPort::getPort(const std::string &if_name, int idx)
RubyPort::PioPort::PioPort(const std::string &_name,
RubyPort *_port)
- : SimpleTimingPort(_name, _port)
+ : QueuedPort(_name, _port, queue), queue(*_port, *this), ruby_port(_port)
{
DPRINTF(RubyPort, "creating port to ruby sequencer to cpu %s\n", _name);
- ruby_port = _port;
}
RubyPort::M5Port::M5Port(const std::string &_name, RubyPort *_port,
RubySystem *_system, bool _access_phys_mem)
- : SimpleTimingPort(_name, _port)
+ : QueuedPort(_name, _port, queue), queue(*_port, *this),
+ ruby_port(_port), ruby_system(_system),
+ _onRetryList(false), access_phys_mem(_access_phys_mem)
{
DPRINTF(RubyPort, "creating port from ruby sequcner to cpu %s\n", _name);
- ruby_port = _port;
- ruby_system = _system;
- _onRetryList = false;
- access_phys_mem = _access_phys_mem;
}
Tick
@@ -648,7 +645,7 @@ bool
RubyPort::M5Port::sendNextCycle(PacketPtr pkt)
{
//minimum latency, must be > 0
- schedSendTiming(pkt, curTick() + (1 * g_eventQueue_ptr->getClock()));
+ queue.schedSendTiming(pkt, curTick() + (1 * g_eventQueue_ptr->getClock()));
return true;
}
@@ -656,7 +653,7 @@ bool
RubyPort::PioPort::sendNextCycle(PacketPtr pkt)
{
//minimum latency, must be > 0
- schedSendTiming(pkt, curTick() + (1 * g_eventQueue_ptr->getClock()));
+ queue.schedSendTiming(pkt, curTick() + (1 * g_eventQueue_ptr->getClock()));
return true;
}
diff --git a/src/mem/ruby/system/RubyPort.hh b/src/mem/ruby/system/RubyPort.hh
index 4aa132131..bef291d63 100644
--- a/src/mem/ruby/system/RubyPort.hh
+++ b/src/mem/ruby/system/RubyPort.hh
@@ -46,9 +46,11 @@ class AbstractController;
class RubyPort : public MemObject
{
public:
- class M5Port : public SimpleTimingPort
+ class M5Port : public QueuedPort
{
private:
+
+ PacketQueue queue;
RubyPort *ruby_port;
RubySystem* ruby_system;
bool _onRetryList;
@@ -81,9 +83,12 @@ class RubyPort : public MemObject
friend class M5Port;
- class PioPort : public SimpleTimingPort
+ class PioPort : public QueuedPort
{
private:
+
+ PacketQueue queue;
+
RubyPort *ruby_port;
public:
@@ -93,6 +98,7 @@ class RubyPort : public MemObject
protected:
virtual bool recvTiming(PacketPtr pkt);
virtual Tick recvAtomic(PacketPtr pkt);
+ virtual void recvFunctional(PacketPtr pkt) { }
};
friend class PioPort;
diff --git a/src/mem/tport.cc b/src/mem/tport.cc
index cbb7ed2ac..bf3d59a8f 100644
--- a/src/mem/tport.cc
+++ b/src/mem/tport.cc
@@ -41,49 +41,21 @@
* Andreas Hansson
*/
-#include "debug/Bus.hh"
#include "mem/mem_object.hh"
#include "mem/tport.hh"
-using namespace std;
-
-SimpleTimingPort::SimpleTimingPort(const string &_name, MemObject *_owner,
- const string _label)
- : Port(_name, _owner), label(_label), sendEvent(this), drainEvent(NULL),
- waitingOnRetry(false)
-{
-}
-
-SimpleTimingPort::~SimpleTimingPort()
-{
-}
-
-bool
-SimpleTimingPort::checkFunctional(PacketPtr pkt)
+SimpleTimingPort::SimpleTimingPort(const std::string& _name,
+ MemObject* _owner) :
+ QueuedPort(_name, _owner, queue), queue(*_owner, *this)
{
- pkt->pushLabel(label);
-
- DeferredPacketIterator i = transmitList.begin();
- DeferredPacketIterator end = transmitList.end();
- bool found = false;
-
- while (!found && i != end) {
- // If the buffered packet contains data, and it overlaps the
- // current packet, then update data
- found = pkt->checkFunctional(i->pkt);
- ++i;
- }
-
- pkt->popLabel();
-
- return found;
}
void
SimpleTimingPort::recvFunctional(PacketPtr pkt)
{
- if (!checkFunctional(pkt)) {
- // Just do an atomic access and throw away the returned latency
+ assert(pkt->isRequest());
+ if (!queue.checkFunctional(pkt)) {
+ // do an atomic access and throw away the returned latency
recvAtomic(pkt);
}
}
@@ -91,12 +63,6 @@ SimpleTimingPort::recvFunctional(PacketPtr pkt)
bool
SimpleTimingPort::recvTiming(PacketPtr pkt)
{
- // If the device is only a slave, it should only be sending
- // responses, which should never get nacked. There used to be
- // code to hanldle nacks here, but I'm pretty sure it didn't work
- // correctly with the drain code, so that would need to be fixed
- // if we ever added it back.
-
if (pkt->memInhibitAsserted()) {
// snooper will supply based on copy of packet
// still target's responsibility to delete packet
@@ -111,142 +77,10 @@ SimpleTimingPort::recvTiming(PacketPtr pkt)
// recvAtomic() should already have turned packet into
// atomic response
assert(pkt->isResponse());
- schedSendTiming(pkt, curTick() + latency);
+ queue.schedSendTiming(pkt, curTick() + latency);
} else {
delete pkt;
}
return true;
}
-
-void
-SimpleTimingPort::schedSendEvent(Tick when)
-{
- // if we are waiting on a retry, do not schedule a send event, and
- // instead rely on retry being called
- if (waitingOnRetry) {
- assert(!sendEvent.scheduled());
- return;
- }
-
- if (!sendEvent.scheduled()) {
- owner->schedule(&sendEvent, when);
- } else if (sendEvent.when() > when) {
- owner->reschedule(&sendEvent, when);
- }
-}
-
-void
-SimpleTimingPort::schedSendTiming(PacketPtr pkt, Tick when)
-{
- assert(when > curTick());
- assert(when < curTick() + SimClock::Int::ms);
-
- // Nothing is on the list: add it and schedule an event
- if (transmitList.empty() || when < transmitList.front().tick) {
- transmitList.push_front(DeferredPacket(when, pkt));
- schedSendEvent(when);
- return;
- }
-
- // list is non-empty & this belongs at the end
- if (when >= transmitList.back().tick) {
- transmitList.push_back(DeferredPacket(when, pkt));
- return;
- }
-
- // this belongs in the middle somewhere
- DeferredPacketIterator i = transmitList.begin();
- i++; // already checked for insertion at front
- DeferredPacketIterator end = transmitList.end();
-
- for (; i != end; ++i) {
- if (when < i->tick) {
- transmitList.insert(i, DeferredPacket(when, pkt));
- return;
- }
- }
- assert(false); // should never get here
-}
-
-void SimpleTimingPort::trySendTiming()
-{
- assert(deferredPacketReady());
- // take the next packet off the list here, as we might return to
- // ourselves through the sendTiming call below
- DeferredPacket dp = transmitList.front();
- transmitList.pop_front();
-
- // attempt to send the packet and remember the outcome
- waitingOnRetry = !sendTiming(dp.pkt);
-
- if (waitingOnRetry) {
- // put the packet back at the front of the list (packet should
- // not have changed since it wasn't accepted)
- assert(!sendEvent.scheduled());
- transmitList.push_front(dp);
- }
-}
-
-void
-SimpleTimingPort::scheduleSend(Tick time)
-{
- // the next ready time is either determined by the next deferred packet,
- // or in the cache through the MSHR ready time
- Tick nextReady = std::min(deferredPacketReadyTime(), time);
- if (nextReady != MaxTick) {
- // if the sendTiming caused someone else to call our
- // recvTiming we could already have an event scheduled, check
- if (!sendEvent.scheduled())
- owner->schedule(&sendEvent, std::max(nextReady, curTick() + 1));
- } else {
- // no more to send, so if we're draining, we may be done
- if (drainEvent && !sendEvent.scheduled()) {
- drainEvent->process();
- drainEvent = NULL;
- }
- }
-}
-
-void
-SimpleTimingPort::sendDeferredPacket()
-{
- // try to send what is on the list
- trySendTiming();
-
- // if we succeeded and are not waiting for a retry, schedule the
- // next send
- if (!waitingOnRetry) {
- scheduleSend();
- }
-}
-
-
-void
-SimpleTimingPort::recvRetry()
-{
- DPRINTF(Bus, "Received retry\n");
- // note that in the cache we get a retry even though we may not
- // have a packet to retry (we could potentially decide on a new
- // packet every time we retry)
- assert(waitingOnRetry);
- sendDeferredPacket();
-}
-
-
-void
-SimpleTimingPort::processSendEvent()
-{
- assert(!waitingOnRetry);
- sendDeferredPacket();
-}
-
-
-unsigned int
-SimpleTimingPort::drain(Event *de)
-{
- if (transmitList.empty() && !sendEvent.scheduled())
- return 0;
- drainEvent = de;
- return 1;
-}
diff --git a/src/mem/tport.hh b/src/mem/tport.hh
index d720f227c..c77166386 100644
--- a/src/mem/tport.hh
+++ b/src/mem/tport.hh
@@ -50,117 +50,20 @@
* Declaration of SimpleTimingPort.
*/
-#include <list>
-#include <string>
-
-#include "mem/port.hh"
-#include "sim/eventq.hh"
+#include "mem/qport.hh"
/**
- * A simple port for interfacing objects that basically have only
- * functional memory behavior (e.g. I/O devices) to the memory system.
- * Both timing and functional accesses are implemented in terms of
- * atomic accesses. A derived port class thus only needs to provide
- * recvAtomic() to support all memory access modes.
- *
- * The tricky part is handling recvTiming(), where the response must
- * be scheduled separately via a later call to sendTiming(). This
- * feature is handled by scheduling an internal event that calls
- * sendTiming() after a delay, and optionally rescheduling the
- * response if it is nacked.
+ * The simple timing port uses a queued port to implement
+ * recvFunctional and recvTiming through recvAtomic. It is always a
+ * slave port.
*/
-class SimpleTimingPort : public Port
+class SimpleTimingPort : public QueuedPort
{
- protected:
- /** A deferred packet, buffered to transmit later. */
- class DeferredPacket {
- public:
- Tick tick; ///< The tick when the packet is ready to transmit
- PacketPtr pkt; ///< Pointer to the packet to transmit
- DeferredPacket(Tick t, PacketPtr p)
- : tick(t), pkt(p)
- {}
- };
-
- typedef std::list<DeferredPacket> DeferredPacketList;
- typedef std::list<DeferredPacket>::iterator DeferredPacketIterator;
-
- /** A list of outgoing timing response packets that haven't been
- * serviced yet. */
- DeferredPacketList transmitList;
-
- /** Label to use for print request packets label stack. */
- const std::string label;
-
- /** This function attempts to send deferred packets. Scheduled to
- * be called in the future via SendEvent. */
- void processSendEvent();
-
- /**
- * This class is used to implemented sendTiming() with a delay. When
- * a delay is requested a the event is scheduled if it isn't already.
- * When the event time expires it attempts to send the packet.
- * If it cannot, the packet sent when recvRetry() is called.
- **/
- EventWrapper<SimpleTimingPort,
- &SimpleTimingPort::processSendEvent> sendEvent;
- /** If we need to drain, keep the drain event around until we're done
- * here.*/
- Event *drainEvent;
-
- /** Remember whether we're awaiting a retry from the bus. */
- bool waitingOnRetry;
-
- /** Check whether we have a packet ready to go on the transmit list. */
- bool deferredPacketReady()
- { return !transmitList.empty() && transmitList.front().tick <= curTick(); }
-
- Tick deferredPacketReadyTime()
- { return transmitList.empty() ? MaxTick : transmitList.front().tick; }
-
- /**
- * Schedule a send even if not already waiting for a retry. If the
- * requested time is before an already scheduled send event it
- * will be rescheduled.
- *
- * @param when
- */
- void schedSendEvent(Tick when);
-
- /** Schedule a sendTiming() event to be called in the future.
- * @param pkt packet to send
- * @param absolute time (in ticks) to send packet
- */
- void schedSendTiming(PacketPtr pkt, Tick when);
-
- /** Attempt to send the packet at the head of the deferred packet
- * list. Caller must guarantee that the deferred packet list is
- * non-empty and that the head packet is scheduled for curTick() (or
- * earlier).
- */
- virtual void sendDeferredPacket();
-
- /**
- * Attempt to send the packet at the front of the transmit list,
- * and set waitingOnRetry accordingly. The packet is temporarily
- * taken off the list, but put back at the front if not
- * successfully sent.
- */
- void trySendTiming();
-
- /**
- * Based on the transmit list, or the provided time, schedule a
- * send event if there are packets to send. If we are idle and
- * asked to drain then do so.
- *
- * @param time an alternative time for the next send event
- */
- void scheduleSend(Tick time = MaxTick);
+ protected:
- /** This function is notification that the device should attempt to send a
- * packet again. */
- virtual void recvRetry();
+ /** The packet queue used to store outgoing responses. */
+ PacketQueue queue;
/** Implemented using recvAtomic(). */
void recvFunctional(PacketPtr pkt);
@@ -168,42 +71,25 @@ class SimpleTimingPort : public Port
/** Implemented using recvAtomic(). */
bool recvTiming(PacketPtr pkt);
- /**
- * Simple ports are generally used as slave ports (i.e. the
- * respond to requests) and thus do not expect to receive any
- * range changes (as the neighbouring port has a master role and
- * do not have any address ranges. A subclass can override the
- * default behaviuor if needed.
- */
- virtual void recvRangeChange() { }
-
+ virtual Tick recvAtomic(PacketPtr pkt) = 0;
public:
- SimpleTimingPort(const std::string &_name, MemObject *_owner,
- const std::string _label = "SimpleTimingPort");
- ~SimpleTimingPort();
- /** Check the list of buffered packets against the supplied
- * functional request. */
- bool checkFunctional(PacketPtr pkt);
+ /**
+ * Create a new SimpleTimingPort that relies on a packet queue to
+ * hold responses, and implements recvTiming and recvFunctional
+ * through calls to recvAtomic. Once a request arrives, it is
+ * passed to recvAtomic, and in the case of a timing access any
+ * response is scheduled to be sent after the delay of the atomic
+ * operation.
+ *
+ * @param name port name
+ * @param owner structural owner
+ */
+ SimpleTimingPort(const std::string& name, MemObject* owner);
+
+ virtual ~SimpleTimingPort() { }
- /** Hook for draining timing accesses from the system. The
- * associated SimObject's drain() functions should be implemented
- * something like this when this class is used:
- \code
- PioDevice::drain(Event *de)
- {
- unsigned int count;
- count = SimpleTimingPort->drain(de);
- if (count)
- changeState(Draining);
- else
- changeState(Drained);
- return count;
- }
- \endcode
- */
- unsigned int drain(Event *de);
};
#endif // __MEM_TPORT_HH__
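The drain example that used to live in the SimpleTimingPort documentation
(removed above) still applies, now going through QueuedPort::drain and
PacketQueue::drain. A sketch of the pattern, with MyDevice and its port
member as illustrative names:

    // Hypothetical fragment mirroring the removed doxygen example.
    unsigned int
    MyDevice::drain(Event *de)
    {
        // the port forwards to PacketQueue::drain, which returns 0 if
        // idle, or 1 and signals de once the queue has drained
        unsigned int count = port.drain(de);
        if (count)
            changeState(Draining);
        else
            changeState(Drained);
        return count;
    }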