Diffstat (limited to 'src/mem')
-rw-r--r-- | src/mem/bridge.cc | 244
-rw-r--r-- | src/mem/bridge.hh | 56
-rw-r--r-- | src/mem/bus.cc | 62
-rw-r--r-- | src/mem/bus.hh | 22
-rw-r--r-- | src/mem/cache/base_cache.cc | 178
-rw-r--r-- | src/mem/cache/base_cache.hh | 27
-rw-r--r-- | src/mem/cache/cache_builder.cc | 6
-rw-r--r-- | src/mem/cache/cache_impl.hh | 28
-rw-r--r-- | src/mem/packet.cc | 2
-rw-r--r-- | src/mem/port.hh | 6
-rw-r--r-- | src/mem/tport.cc | 1
-rw-r--r-- | src/mem/translating_port.cc | 17
-rw-r--r-- | src/mem/translating_port.hh | 13
13 files changed, 460 insertions, 202 deletions
diff --git a/src/mem/bridge.cc b/src/mem/bridge.cc index b787f79ca..9509aea38 100644 --- a/src/mem/bridge.cc +++ b/src/mem/bridge.cc @@ -43,20 +43,25 @@ Bridge::BridgePort::BridgePort(const std::string &_name, Bridge *_bridge, BridgePort *_otherPort, - int _delay, int _queueLimit) + int _delay, int _nack_delay, int _req_limit, + int _resp_limit, bool fix_partial_write) : Port(_name), bridge(_bridge), otherPort(_otherPort), - delay(_delay), outstandingResponses(0), - queueLimit(_queueLimit), sendEvent(this) + delay(_delay), nackDelay(_nack_delay), fixPartialWrite(fix_partial_write), + outstandingResponses(0), queuedRequests(0), inRetry(false), + reqQueueLimit(_req_limit), respQueueLimit(_resp_limit), sendEvent(this) { } -Bridge::Bridge(const std::string &n, int qsa, int qsb, - Tick _delay, int write_ack) - : MemObject(n), - portA(n + "-portA", this, &portB, _delay, qsa), - portB(n + "-portB", this, &portA, _delay, qsa), - ackWrites(write_ack) +Bridge::Bridge(Params *p) + : MemObject(p->name), + portA(p->name + "-portA", this, &portB, p->delay, p->nack_delay, + p->req_size_a, p->resp_size_a, p->fix_partial_write_a), + portB(p->name + "-portB", this, &portA, p->delay, p->nack_delay, + p->req_size_b, p->resp_size_b, p->fix_partial_write_b), + ackWrites(p->write_ack), _params(p) { + if (ackWrites) + panic("No support for acknowledging writes\n"); } Port * @@ -82,35 +87,118 @@ Bridge::init() { // Make sure that both sides are connected to. if (portA.getPeer() == NULL || portB.getPeer() == NULL) - panic("Both ports of bus bridge are not connected to a bus.\n"); + fatal("Both ports of bus bridge are not connected to a bus.\n"); + + if (portA.peerBlockSize() != portB.peerBlockSize()) + fatal("Busses don't have the same block size... Not supported.\n"); +} + +bool +Bridge::BridgePort::respQueueFull() +{ + assert(outstandingResponses >= 0 && outstandingResponses <= respQueueLimit); + return outstandingResponses >= respQueueLimit; } +bool +Bridge::BridgePort::reqQueueFull() +{ + assert(queuedRequests >= 0 && queuedRequests <= reqQueueLimit); + return queuedRequests >= reqQueueLimit; +} /** Function called by the port when the bus is receiving a Timing * transaction.*/ bool Bridge::BridgePort::recvTiming(PacketPtr pkt) { - if (pkt->flags & SNOOP_COMMIT) { - DPRINTF(BusBridge, "recvTiming: src %d dest %d addr 0x%x\n", + if (!(pkt->flags & SNOOP_COMMIT)) + return true; + + + DPRINTF(BusBridge, "recvTiming: src %d dest %d addr 0x%x\n", pkt->getSrc(), pkt->getDest(), pkt->getAddr()); - return otherPort->queueForSendTiming(pkt); + DPRINTF(BusBridge, "Local queue size: %d outreq: %d outresp: %d\n", + sendQueue.size(), queuedRequests, outstandingResponses); + DPRINTF(BusBridge, "Remove queue size: %d outreq: %d outresp: %d\n", + otherPort->sendQueue.size(), otherPort->queuedRequests, + otherPort->outstandingResponses); + + if (pkt->isRequest() && otherPort->reqQueueFull() && pkt->result != + Packet::Nacked) { + DPRINTF(BusBridge, "Remote queue full, nacking\n"); + nackRequest(pkt); + return true; } - else { - // Else it's just a snoop, properly return if we are blocking - return !queueFull(); + + if (pkt->needsResponse() && pkt->result != Packet::Nacked) + if (respQueueFull()) { + DPRINTF(BusBridge, "Local queue full, no space for response, nacking\n"); + DPRINTF(BusBridge, "queue size: %d outreq: %d outstanding resp: %d\n", + sendQueue.size(), queuedRequests, outstandingResponses); + nackRequest(pkt); + return true; + } else { + DPRINTF(BusBridge, "Request Needs response, reserving space\n"); + 
++outstandingResponses; + } + + otherPort->queueForSendTiming(pkt); + + return true; +} + +void +Bridge::BridgePort::nackRequest(PacketPtr pkt) +{ + // Nack the packet + pkt->result = Packet::Nacked; + pkt->setDest(pkt->getSrc()); + + //put it on the list to send + Tick readyTime = curTick + nackDelay; + PacketBuffer *buf = new PacketBuffer(pkt, readyTime, true); + + // nothing on the list, add it and we're done + if (sendQueue.empty()) { + assert(!sendEvent.scheduled()); + sendEvent.schedule(readyTime); + sendQueue.push_back(buf); + return; + } + + assert(sendEvent.scheduled() || inRetry); + + // does it go at the end? + if (readyTime >= sendQueue.back()->ready) { + sendQueue.push_back(buf); + return; + } + + // ok, somewhere in the middle, fun + std::list<PacketBuffer*>::iterator i = sendQueue.begin(); + std::list<PacketBuffer*>::iterator end = sendQueue.end(); + std::list<PacketBuffer*>::iterator begin = sendQueue.begin(); + bool done = false; + + while (i != end && !done) { + if (readyTime < (*i)->ready) { + if (i == begin) + sendEvent.reschedule(readyTime); + sendQueue.insert(i,buf); + done = true; + } + i++; } + assert(done); } -bool +void Bridge::BridgePort::queueForSendTiming(PacketPtr pkt) { - if (queueFull()) - return false; - - if (pkt->isResponse()) { + if (pkt->isResponse() || pkt->result == Packet::Nacked) { // This is a response for a request we forwarded earlier. The // corresponding PacketBuffer should be stored in the packet's // senderState field. @@ -119,12 +207,26 @@ Bridge::BridgePort::queueForSendTiming(PacketPtr pkt) // set up new packet dest & senderState based on values saved // from original request buf->fixResponse(pkt); + + // Check if this packet was expecting a response and it's a nacked + // packet, in which case we will never being seeing it + if (buf->expectResponse && pkt->result == Packet::Nacked) + --outstandingResponses; + + DPRINTF(BusBridge, "restoring sender state: %#X, from packet buffer: %#X\n", pkt->senderState, buf); DPRINTF(BusBridge, " is response, new dest %d\n", pkt->getDest()); delete buf; } + + if (pkt->isRequest() && pkt->result != Packet::Nacked) { + ++queuedRequests; + } + + + Tick readyTime = curTick + delay; PacketBuffer *buf = new PacketBuffer(pkt, readyTime); DPRINTF(BusBridge, "old sender state: %#X, new sender state: %#X\n", @@ -137,10 +239,7 @@ Bridge::BridgePort::queueForSendTiming(PacketPtr pkt) if (sendQueue.empty()) { sendEvent.schedule(readyTime); } - sendQueue.push_back(buf); - - return true; } void @@ -148,28 +247,38 @@ Bridge::BridgePort::trySend() { assert(!sendQueue.empty()); - bool was_full = queueFull(); - PacketBuffer *buf = sendQueue.front(); assert(buf->ready <= curTick); PacketPtr pkt = buf->pkt; + pkt->flags &= ~SNOOP_COMMIT; //CLear it if it was set + + // Ugly! 
@todo When multilevel coherence works this will be removed + if (pkt->cmd == MemCmd::WriteInvalidateReq && fixPartialWrite && + pkt->result != Packet::Nacked) { + PacketPtr funcPkt = new Packet(pkt->req, MemCmd::WriteReq, + Packet::Broadcast); + funcPkt->dataStatic(pkt->getPtr<uint8_t>()); + sendFunctional(funcPkt); + pkt->cmd = MemCmd::WriteReq; + delete funcPkt; + } + DPRINTF(BusBridge, "trySend: origSrc %d dest %d addr 0x%x\n", buf->origSrc, pkt->getDest(), pkt->getAddr()); - pkt->flags &= ~SNOOP_COMMIT; //CLear it if it was set + bool wasReq = pkt->isRequest(); + bool wasNacked = pkt->result == Packet::Nacked; + if (sendTiming(pkt)) { // send successful sendQueue.pop_front(); buf->pkt = NULL; // we no longer own packet, so it's not safe to look at it if (buf->expectResponse) { - // Must wait for response. We just need to count outstanding - // responses (in case we want to cap them); PacketBuffer - // pointer will be recovered on response. - ++outstandingResponses; + // Must wait for response DPRINTF(BusBridge, " successful: awaiting response (%d)\n", outstandingResponses); } else { @@ -178,27 +287,37 @@ Bridge::BridgePort::trySend() delete buf; } + if (!wasNacked) { + if (wasReq) + --queuedRequests; + else + --outstandingResponses; + } + // If there are more packets to send, schedule event to try again. if (!sendQueue.empty()) { buf = sendQueue.front(); + DPRINTF(BusBridge, "Scheduling next send\n"); sendEvent.schedule(std::max(buf->ready, curTick + 1)); } - // Let things start sending again - if (was_full) { - DPRINTF(BusBridge, "Queue was full, sending retry\n"); - otherPort->sendRetry(); - } - } else { DPRINTF(BusBridge, " unsuccessful\n"); + inRetry = true; } + DPRINTF(BusBridge, "trySend: queue size: %d outreq: %d outstanding resp: %d\n", + sendQueue.size(), queuedRequests, outstandingResponses); } void Bridge::BridgePort::recvRetry() { - trySend(); + inRetry = false; + Tick nextReady = sendQueue.front()->ready; + if (nextReady <= curTick) + trySend(); + else + sendEvent.schedule(nextReady); } /** Function called by the port when the bus is receiving a Atomic @@ -206,7 +325,18 @@ Bridge::BridgePort::recvRetry() Tick Bridge::BridgePort::recvAtomic(PacketPtr pkt) { - return otherPort->sendAtomic(pkt) + delay; + // fix partial atomic writes... similar to the timing code that does the + // same... 
will be removed once our code gets this right + if (pkt->cmd == MemCmd::WriteInvalidateReq && fixPartialWrite) { + + PacketPtr funcPkt = new Packet(pkt->req, MemCmd::WriteReq, + Packet::Broadcast); + funcPkt->dataStatic(pkt->getPtr<uint8_t>()); + otherPort->sendFunctional(funcPkt); + delete funcPkt; + pkt->cmd = MemCmd::WriteReq; + } + return delay + otherPort->sendAtomic(pkt); } /** Function called by the port when the bus is receiving a Functional @@ -244,26 +374,48 @@ Bridge::BridgePort::getDeviceAddressRanges(AddrRangeList &resp, BEGIN_DECLARE_SIM_OBJECT_PARAMS(Bridge) - Param<int> queue_size_a; - Param<int> queue_size_b; + Param<int> req_size_a; + Param<int> req_size_b; + Param<int> resp_size_a; + Param<int> resp_size_b; Param<Tick> delay; + Param<Tick> nack_delay; Param<bool> write_ack; + Param<bool> fix_partial_write_a; + Param<bool> fix_partial_write_b; END_DECLARE_SIM_OBJECT_PARAMS(Bridge) BEGIN_INIT_SIM_OBJECT_PARAMS(Bridge) - INIT_PARAM(queue_size_a, "The size of the queue for data coming into side a"), - INIT_PARAM(queue_size_b, "The size of the queue for data coming into side b"), + INIT_PARAM(req_size_a, "The size of the queue for requests coming into side a"), + INIT_PARAM(req_size_b, "The size of the queue for requests coming into side b"), + INIT_PARAM(resp_size_a, "The size of the queue for responses coming into side a"), + INIT_PARAM(resp_size_b, "The size of the queue for responses coming into side b"), INIT_PARAM(delay, "The miminum delay to cross this bridge"), - INIT_PARAM(write_ack, "Acknowledge any writes that are received.") + INIT_PARAM(nack_delay, "The minimum delay to nack a packet"), + INIT_PARAM(write_ack, "Acknowledge any writes that are received."), + INIT_PARAM(fix_partial_write_a, "Fixup any partial block writes that are received"), + INIT_PARAM(fix_partial_write_b, "Fixup any partial block writes that are received") END_INIT_SIM_OBJECT_PARAMS(Bridge) CREATE_SIM_OBJECT(Bridge) { - return new Bridge(getInstanceName(), queue_size_a, queue_size_b, delay, - write_ack); + Bridge::Params *p = new Bridge::Params; + p->name = getInstanceName(); + p->req_size_a = req_size_a; + p->req_size_b = req_size_b; + p->resp_size_a = resp_size_a; + p->resp_size_b = resp_size_b; + p->delay = delay; + p->nack_delay = nack_delay; + p->write_ack = write_ack; + p->fix_partial_write_a = fix_partial_write_a; + p->fix_partial_write_b = fix_partial_write_b; + return new Bridge(p); } REGISTER_SIM_OBJECT("Bridge", Bridge) + + diff --git a/src/mem/bridge.hh b/src/mem/bridge.hh index f7d0d12d0..a47fe3c1e 100644 --- a/src/mem/bridge.hh +++ b/src/mem/bridge.hh @@ -66,6 +66,11 @@ class Bridge : public MemObject /** Minimum delay though this bridge. */ Tick delay; + /** Min delay to respond to a nack. 
*/ + Tick nackDelay; + + bool fixPartialWrite; + class PacketBuffer : public Packet::SenderState { public: @@ -75,12 +80,13 @@ class Bridge : public MemObject short origSrc; bool expectResponse; - PacketBuffer(PacketPtr _pkt, Tick t) + PacketBuffer(PacketPtr _pkt, Tick t, bool nack = false) : ready(t), pkt(_pkt), origSenderState(_pkt->senderState), origSrc(_pkt->getSrc()), - expectResponse(_pkt->needsResponse()) + expectResponse(_pkt->needsResponse() && !nack) + { - if (!pkt->isResponse()) + if (!pkt->isResponse() && !nack && pkt->result != Packet::Nacked) pkt->senderState = this; } @@ -100,19 +106,29 @@ class Bridge : public MemObject std::list<PacketBuffer*> sendQueue; int outstandingResponses; + int queuedRequests; + + /** If we're waiting for a retry to happen.*/ + bool inRetry; /** Max queue size for outbound packets */ - int queueLimit; + int reqQueueLimit; + + /** Max queue size for reserved responses. */ + int respQueueLimit; /** * Is this side blocked from accepting outbound packets? */ - bool queueFull() { return (sendQueue.size() == queueLimit); } + bool respQueueFull(); + bool reqQueueFull(); - bool queueForSendTiming(PacketPtr pkt); + void queueForSendTiming(PacketPtr pkt); void finishSend(PacketBuffer *buf); + void nackRequest(PacketPtr pkt); + /** * Handle send event, scheduled when the packet at the head of * the outbound queue is ready to transmit (for timing @@ -136,11 +152,10 @@ class Bridge : public MemObject SendEvent sendEvent; public: - /** Constructor for the BusPort.*/ - BridgePort(const std::string &_name, - Bridge *_bridge, BridgePort *_otherPort, - int _delay, int _queueLimit); + BridgePort(const std::string &_name, Bridge *_bridge, + BridgePort *_otherPort, int _delay, int _nack_delay, + int _req_limit, int _resp_limit, bool fix_partial_write); protected: @@ -176,13 +191,32 @@ class Bridge : public MemObject bool ackWrites; public: + struct Params + { + std::string name; + int req_size_a; + int req_size_b; + int resp_size_a; + int resp_size_b; + Tick delay; + Tick nack_delay; + bool write_ack; + bool fix_partial_write_a; + bool fix_partial_write_b; + }; + + protected: + Params *_params; + + public: + const Params *params() const { return _params; } /** A function used to return the port associated with this bus object. 
*/ virtual Port *getPort(const std::string &if_name, int idx = -1); virtual void init(); - Bridge(const std::string &n, int qsa, int qsb, Tick _delay, int write_ack); + Bridge(Params *p); }; #endif //__MEM_BUS_HH__ diff --git a/src/mem/bus.cc b/src/mem/bus.cc index b0636ecc2..95d4e2873 100644 --- a/src/mem/bus.cc +++ b/src/mem/bus.cc @@ -48,6 +48,7 @@ Bus::getPort(const std::string &if_name, int idx) if (defaultPort == NULL) { defaultPort = new BusPort(csprintf("%s-default",name()), this, defaultId); + cachedBlockSizeValid = false; return defaultPort; } else fatal("Default port already set\n"); @@ -68,6 +69,7 @@ Bus::getPort(const std::string &if_name, int idx) assert(maxId < std::numeric_limits<typeof(maxId)>::max()); BusPort *bp = new BusPort(csprintf("%s-p%d", name(), id), this, id); interfaces[id] = bp; + cachedBlockSizeValid = false; return bp; } @@ -169,8 +171,9 @@ bool Bus::recvTiming(PacketPtr pkt) { Port *port; - DPRINTF(Bus, "recvTiming: packet src %d dest %d addr 0x%x cmd %s\n", - pkt->getSrc(), pkt->getDest(), pkt->getAddr(), pkt->cmdString()); + DPRINTF(Bus, "recvTiming: packet src %d dest %d addr 0x%x cmd %s result %d\n", + pkt->getSrc(), pkt->getDest(), pkt->getAddr(), pkt->cmdString(), + pkt->result); BusPort *pktPort; if (pkt->getSrc() == defaultId) @@ -182,6 +185,7 @@ Bus::recvTiming(PacketPtr pkt) if (tickNextIdle > curTick || (retryList.size() && (!inRetry || pktPort != retryList.front()))) { addToRetryList(pktPort); + DPRINTF(Bus, "recvTiming: Bus is busy, returning false\n"); return false; } @@ -207,11 +211,12 @@ Bus::recvTiming(PacketPtr pkt) inRetry = false; } occupyBus(pkt); + DPRINTF(Bus, "recvTiming: Packet sucessfully sent\n"); return true; } } else { //Snoop didn't succeed - DPRINTF(Bus, "Adding a retry to RETRY list %d\n", + DPRINTF(Bus, "Adding1 a retry to RETRY list %d\n", pktPort->getId()); addToRetryList(pktPort); return false; @@ -239,13 +244,14 @@ Bus::recvTiming(PacketPtr pkt) } // Packet not successfully sent. Leave or put it on the retry list. - DPRINTF(Bus, "Adding a retry to RETRY list %d\n", + DPRINTF(Bus, "Adding2 a retry to RETRY list %d\n", pktPort->getId()); addToRetryList(pktPort); return false; } else { //Forwarding up from responder, just return true; + DPRINTF(Bus, "recvTiming: can we be here?\n"); return true; } } @@ -253,12 +259,12 @@ Bus::recvTiming(PacketPtr pkt) void Bus::recvRetry(int id) { - DPRINTF(Bus, "Received a retry\n"); + DPRINTF(Bus, "Received a retry from %s\n", id == -1 ? "self" : interfaces[id]->getPeer()->name()); // If there's anything waiting, and the bus isn't busy... if (retryList.size() && curTick >= tickNextIdle) { //retryingPort = retryList.front(); inRetry = true; - DPRINTF(Bus, "Sending a retry\n"); + DPRINTF(Bus, "Sending a retry to %s\n", retryList.front()->getPeer()->name()); retryList.front()->sendRetry(); // If inRetry is still true, sendTiming wasn't called if (inRetry) @@ -274,11 +280,7 @@ Bus::recvRetry(int id) //Burn a cycle for the missed grant. tickNextIdle += clock; - if (!busIdle.scheduled()) { - busIdle.schedule(tickNextIdle); - } else { - busIdle.reschedule(tickNextIdle); - } + busIdle.reschedule(tickNextIdle, true); } } //If we weren't able to drain before, we might be able to now. 
@@ -598,6 +600,37 @@ Bus::addressRanges(AddrRangeList &resp, AddrRangeList &snoop, int id) } } +int +Bus::findBlockSize(int id) +{ + if (cachedBlockSizeValid) + return cachedBlockSize; + + int max_bs = -1, tmp_bs; + range_map<Addr,int>::iterator portIter; + std::vector<DevMap>::iterator snoopIter; + for (portIter = portMap.begin(); portIter != portMap.end(); portIter++) { + tmp_bs = interfaces[portIter->second]->peerBlockSize(); + if (tmp_bs > max_bs) + max_bs = tmp_bs; + } + for (snoopIter = portSnoopList.begin(); + snoopIter != portSnoopList.end(); snoopIter++) { + tmp_bs = interfaces[snoopIter->portId]->peerBlockSize(); + if (tmp_bs > max_bs) + max_bs = tmp_bs; + } + if (max_bs <= 0) + max_bs = defaultBlockSize; + + if (max_bs != 64) + warn_once("Blocksize found to not be 64... hmm... probably not.\n"); + cachedBlockSize = max_bs; + cachedBlockSizeValid = true; + return max_bs; +} + + unsigned int Bus::drain(Event * de) { @@ -618,6 +651,7 @@ BEGIN_DECLARE_SIM_OBJECT_PARAMS(Bus) Param<int> clock; Param<int> width; Param<bool> responder_set; + Param<int> block_size; END_DECLARE_SIM_OBJECT_PARAMS(Bus) @@ -625,12 +659,14 @@ BEGIN_INIT_SIM_OBJECT_PARAMS(Bus) INIT_PARAM(bus_id, "a globally unique bus id"), INIT_PARAM(clock, "bus clock speed"), INIT_PARAM(width, "width of the bus (bits)"), - INIT_PARAM(responder_set, "Is a default responder set by the user") + INIT_PARAM(responder_set, "Is a default responder set by the user"), + INIT_PARAM(block_size, "Default blocksize if no device has one") END_INIT_SIM_OBJECT_PARAMS(Bus) CREATE_SIM_OBJECT(Bus) { - return new Bus(getInstanceName(), bus_id, clock, width, responder_set); + return new Bus(getInstanceName(), bus_id, clock, width, responder_set, + block_size); } REGISTER_SIM_OBJECT("Bus", Bus) diff --git a/src/mem/bus.hh b/src/mem/bus.hh index 0dd7547c5..f0dc67b12 100644 --- a/src/mem/bus.hh +++ b/src/mem/bus.hh @@ -133,6 +133,12 @@ class Bus : public MemObject /** Occupy the bus with transmitting the packet pkt */ void occupyBus(PacketPtr pkt); + /** Ask everyone on the bus what their size is + * @param id id of the busport that made the request + * @return the max of all the sizes + */ + int findBlockSize(int id); + /** Declaration of the buses port type, one will be instantiated for each of the interfaces connecting to the bus. */ class BusPort : public Port @@ -195,8 +201,11 @@ class Bus : public MemObject AddrRangeList &snoop) { bus->addressRanges(resp, snoop, id); } - // Hack to make translating port work without changes - virtual int deviceBlockSize() { return 32; } + // Ask the bus to ask everyone on the bus what their block size is and + // take the max of it. This might need to be changed a bit if we ever + // support multiple block sizes. + virtual int deviceBlockSize() + { return bus->findBlockSize(id); } }; @@ -256,6 +265,10 @@ class Bus : public MemObject /** Has the user specified their own default responder? */ bool responderSet; + int defaultBlockSize; + int cachedBlockSize; + bool cachedBlockSizeValid; + public: /** A function used to return the port associated with this bus object. 
*/ @@ -267,11 +280,12 @@ class Bus : public MemObject unsigned int drain(Event *de); Bus(const std::string &n, int bus_id, int _clock, int _width, - bool responder_set) + bool responder_set, int dflt_blk_size) : MemObject(n), busId(bus_id), clock(_clock), width(_width), tickNextIdle(0), drainEvent(NULL), busIdle(this), inRetry(false), maxId(0), defaultPort(NULL), funcPort(NULL), funcPortId(-4), - responderSet(responder_set) + responderSet(responder_set), defaultBlockSize(dflt_blk_size), + cachedBlockSize(0), cachedBlockSizeValid(false) { //Both the width and clock period must be positive if (width <= 0) diff --git a/src/mem/cache/base_cache.cc b/src/mem/cache/base_cache.cc index ed665dafb..3ed4b84d1 100644 --- a/src/mem/cache/base_cache.cc +++ b/src/mem/cache/base_cache.cc @@ -134,8 +134,7 @@ BaseCache::CachePort::recvRetry() isCpuSide && cache->doSlaveRequest()) { DPRINTF(CachePort, "%s has more responses/requests\n", name()); - BaseCache::CacheEvent * reqCpu = new BaseCache::CacheEvent(this, false); - reqCpu->schedule(curTick + 1); + new BaseCache::RequestEvent(this, curTick + 1); } waitingOnRetry = false; } @@ -178,8 +177,7 @@ BaseCache::CachePort::recvRetry() { DPRINTF(CachePort, "%s has more requests\n", name()); //Still more to issue, rerequest in 1 cycle - BaseCache::CacheEvent * reqCpu = new BaseCache::CacheEvent(this, false); - reqCpu->schedule(curTick + 1); + new BaseCache::RequestEvent(this, curTick + 1); } } else @@ -196,8 +194,7 @@ BaseCache::CachePort::recvRetry() { DPRINTF(CachePort, "%s has more requests\n", name()); //Still more to issue, rerequest in 1 cycle - BaseCache::CacheEvent * reqCpu = new BaseCache::CacheEvent(this, false); - reqCpu->schedule(curTick + 1); + new BaseCache::RequestEvent(this, curTick + 1); } } if (waitingOnRetry) DPRINTF(CachePort, "%s STILL Waiting on retry\n", name()); @@ -228,102 +225,109 @@ BaseCache::CachePort::clearBlocked() } } -BaseCache::CacheEvent::CacheEvent(CachePort *_cachePort, bool _newResponse) - : Event(&mainEventQueue, CPU_Tick_Pri), cachePort(_cachePort), - newResponse(_newResponse) +BaseCache::RequestEvent::RequestEvent(CachePort *_cachePort, Tick when) + : Event(&mainEventQueue, CPU_Tick_Pri), cachePort(_cachePort) { - if (!newResponse) - this->setFlags(AutoDelete); - pkt = NULL; + this->setFlags(AutoDelete); + schedule(when); } void -BaseCache::CacheEvent::process() +BaseCache::RequestEvent::process() { - if (!newResponse) - { - if (cachePort->waitingOnRetry) return; - //We have some responses to drain first - if (!cachePort->drainList.empty()) { - DPRINTF(CachePort, "%s trying to drain a response\n", cachePort->name()); - if (cachePort->sendTiming(cachePort->drainList.front())) { - DPRINTF(CachePort, "%s drains a response succesfully\n", cachePort->name()); - cachePort->drainList.pop_front(); - if (!cachePort->drainList.empty() || - !cachePort->isCpuSide && cachePort->cache->doMasterRequest() || - cachePort->isCpuSide && cachePort->cache->doSlaveRequest()) { - - DPRINTF(CachePort, "%s still has outstanding bus reqs\n", cachePort->name()); - this->schedule(curTick + 1); - } - } - else { - cachePort->waitingOnRetry = true; - DPRINTF(CachePort, "%s now waiting on a retry\n", cachePort->name()); + if (cachePort->waitingOnRetry) return; + //We have some responses to drain first + if (!cachePort->drainList.empty()) { + DPRINTF(CachePort, "%s trying to drain a response\n", cachePort->name()); + if (cachePort->sendTiming(cachePort->drainList.front())) { + DPRINTF(CachePort, "%s drains a response succesfully\n", cachePort->name()); + 
cachePort->drainList.pop_front(); + if (!cachePort->drainList.empty() || + !cachePort->isCpuSide && cachePort->cache->doMasterRequest() || + cachePort->isCpuSide && cachePort->cache->doSlaveRequest()) { + + DPRINTF(CachePort, "%s still has outstanding bus reqs\n", cachePort->name()); + this->schedule(curTick + 1); } } - else if (!cachePort->isCpuSide) - { //MSHR - DPRINTF(CachePort, "%s trying to send a MSHR request\n", cachePort->name()); - if (!cachePort->cache->doMasterRequest()) { - //This can happen if I am the owner of a block and see an upgrade - //while the block was in my WB Buffers. I just remove the - //wb and de-assert the masterRequest - return; - } + else { + cachePort->waitingOnRetry = true; + DPRINTF(CachePort, "%s now waiting on a retry\n", cachePort->name()); + } + } + else if (!cachePort->isCpuSide) + { //MSHR + DPRINTF(CachePort, "%s trying to send a MSHR request\n", cachePort->name()); + if (!cachePort->cache->doMasterRequest()) { + //This can happen if I am the owner of a block and see an upgrade + //while the block was in my WB Buffers. I just remove the + //wb and de-assert the masterRequest + return; + } - pkt = cachePort->cache->getPacket(); - MSHR* mshr = (MSHR*) pkt->senderState; - //Copy the packet, it may be modified/destroyed elsewhere - PacketPtr copyPkt = new Packet(*pkt); - copyPkt->dataStatic<uint8_t>(pkt->getPtr<uint8_t>()); - mshr->pkt = copyPkt; + PacketPtr pkt = cachePort->cache->getPacket(); + MSHR* mshr = (MSHR*) pkt->senderState; + //Copy the packet, it may be modified/destroyed elsewhere + PacketPtr copyPkt = new Packet(*pkt); + copyPkt->dataStatic<uint8_t>(pkt->getPtr<uint8_t>()); + mshr->pkt = copyPkt; - bool success = cachePort->sendTiming(pkt); - DPRINTF(Cache, "Address %x was %s in sending the timing request\n", - pkt->getAddr(), success ? "succesful" : "unsuccesful"); + bool success = cachePort->sendTiming(pkt); + DPRINTF(Cache, "Address %x was %s in sending the timing request\n", + pkt->getAddr(), success ? 
"succesful" : "unsuccesful"); - cachePort->waitingOnRetry = !success; - if (cachePort->waitingOnRetry) { - DPRINTF(CachePort, "%s now waiting on a retry\n", cachePort->name()); - } + cachePort->waitingOnRetry = !success; + if (cachePort->waitingOnRetry) { + DPRINTF(CachePort, "%s now waiting on a retry\n", cachePort->name()); + } - cachePort->cache->sendResult(pkt, mshr, success); - if (success && cachePort->cache->doMasterRequest()) - { - DPRINTF(CachePort, "%s still more MSHR requests to send\n", - cachePort->name()); - //Still more to issue, rerequest in 1 cycle - pkt = NULL; - this->schedule(curTick+1); - } + cachePort->cache->sendResult(pkt, mshr, success); + if (success && cachePort->cache->doMasterRequest()) + { + DPRINTF(CachePort, "%s still more MSHR requests to send\n", + cachePort->name()); + //Still more to issue, rerequest in 1 cycle + this->schedule(curTick+1); } - else + } + else + { + //CSHR + assert(cachePort->cache->doSlaveRequest()); + PacketPtr pkt = cachePort->cache->getCoherencePacket(); + MSHR* cshr = (MSHR*) pkt->senderState; + bool success = cachePort->sendTiming(pkt); + cachePort->cache->sendCoherenceResult(pkt, cshr, success); + cachePort->waitingOnRetry = !success; + if (cachePort->waitingOnRetry) + DPRINTF(CachePort, "%s now waiting on a retry\n", cachePort->name()); + if (success && cachePort->cache->doSlaveRequest()) { - //CSHR - assert(cachePort->cache->doSlaveRequest()); - pkt = cachePort->cache->getCoherencePacket(); - MSHR* cshr = (MSHR*) pkt->senderState; - bool success = cachePort->sendTiming(pkt); - cachePort->cache->sendCoherenceResult(pkt, cshr, success); - cachePort->waitingOnRetry = !success; - if (cachePort->waitingOnRetry) - DPRINTF(CachePort, "%s now waiting on a retry\n", cachePort->name()); - if (success && cachePort->cache->doSlaveRequest()) - { - DPRINTF(CachePort, "%s still more CSHR requests to send\n", - cachePort->name()); - //Still more to issue, rerequest in 1 cycle - pkt = NULL; - this->schedule(curTick+1); - } + DPRINTF(CachePort, "%s still more CSHR requests to send\n", + cachePort->name()); + //Still more to issue, rerequest in 1 cycle + this->schedule(curTick+1); } - return; } - //Else it's a response +} + +const char * +BaseCache::RequestEvent::description() +{ + return "Cache request event"; +} + +BaseCache::ResponseEvent::ResponseEvent(CachePort *_cachePort) + : Event(&mainEventQueue, CPU_Tick_Pri), cachePort(_cachePort) +{ +} + +void +BaseCache::ResponseEvent::process() +{ assert(cachePort->transmitList.size()); assert(cachePort->transmitList.front().first <= curTick); - pkt = cachePort->transmitList.front().second; + PacketPtr pkt = cachePort->transmitList.front().second; cachePort->transmitList.pop_front(); if (!cachePort->transmitList.empty()) { Tick time = cachePort->transmitList.front().first; @@ -354,9 +358,9 @@ BaseCache::CacheEvent::process() } const char * -BaseCache::CacheEvent::description() +BaseCache::ResponseEvent::description() { - return "BaseCache timing event"; + return "Cache response event"; } void diff --git a/src/mem/cache/base_cache.hh b/src/mem/cache/base_cache.hh index ee871c1c4..e45e36fa0 100644 --- a/src/mem/cache/base_cache.hh +++ b/src/mem/cache/base_cache.hh @@ -117,13 +117,20 @@ class BaseCache : public MemObject std::list<std::pair<Tick,PacketPtr> > transmitList; }; - struct CacheEvent : public Event + struct RequestEvent : public Event { CachePort *cachePort; - PacketPtr pkt; - bool newResponse; - CacheEvent(CachePort *_cachePort, bool response); + RequestEvent(CachePort *_cachePort, Tick 
when); + void process(); + const char *description(); + }; + + struct ResponseEvent : public Event + { + CachePort *cachePort; + + ResponseEvent(CachePort *_cachePort); void process(); const char *description(); }; @@ -132,8 +139,8 @@ class BaseCache : public MemObject CachePort *cpuSidePort; CachePort *memSidePort; - CacheEvent *sendEvent; - CacheEvent *memSendEvent; + ResponseEvent *sendEvent; + ResponseEvent *memSendEvent; private: void recvStatusChange(Port::Status status, bool isCpuSide) @@ -432,9 +439,7 @@ class BaseCache : public MemObject { if (!doMasterRequest() && !memSidePort->waitingOnRetry) { - BaseCache::CacheEvent * reqCpu = - new BaseCache::CacheEvent(memSidePort, false); - reqCpu->schedule(time); + new RequestEvent(memSidePort, time); } uint8_t flag = 1<<cause; masterRequests |= flag; @@ -469,9 +474,7 @@ class BaseCache : public MemObject { if (!doSlaveRequest() && !cpuSidePort->waitingOnRetry) { - BaseCache::CacheEvent * reqCpu = - new BaseCache::CacheEvent(cpuSidePort, false); - reqCpu->schedule(time); + new RequestEvent(cpuSidePort, time); } uint8_t flag = 1<<cause; slaveRequests |= flag; diff --git a/src/mem/cache/cache_builder.cc b/src/mem/cache/cache_builder.cc index 318b57d50..e887f711e 100644 --- a/src/mem/cache/cache_builder.cc +++ b/src/mem/cache/cache_builder.cc @@ -134,7 +134,6 @@ BEGIN_DECLARE_SIM_OBJECT_PARAMS(BaseCache) Param<bool> prefetch_cache_check_push; Param<bool> prefetch_use_cpu_id; Param<bool> prefetch_data_accesses_only; - Param<int> hit_latency; END_DECLARE_SIM_OBJECT_PARAMS(BaseCache) @@ -190,8 +189,7 @@ BEGIN_INIT_SIM_OBJECT_PARAMS(BaseCache) INIT_PARAM_DFLT(prefetch_policy, "Type of prefetcher to use", "none"), INIT_PARAM_DFLT(prefetch_cache_check_push, "Check if in cash on push or pop of prefetch queue", true), INIT_PARAM_DFLT(prefetch_use_cpu_id, "Use the CPU ID to seperate calculations of prefetches", true), - INIT_PARAM_DFLT(prefetch_data_accesses_only, "Only prefetch on data not on instruction accesses", false), - INIT_PARAM_DFLT(hit_latency, "Hit Latecny for a succesful access", 1) + INIT_PARAM_DFLT(prefetch_data_accesses_only, "Only prefetch on data not on instruction accesses", false) END_INIT_SIM_OBJECT_PARAMS(BaseCache) @@ -211,7 +209,7 @@ END_INIT_SIM_OBJECT_PARAMS(BaseCache) BUILD_NULL_PREFETCHER(TAGS); \ } \ Cache<TAGS, c>::Params params(tags, mq, coh, base_params, \ - pf, prefetch_access, hit_latency, \ + pf, prefetch_access, latency, \ true, \ store_compressed, \ adaptive_compression, \ diff --git a/src/mem/cache/cache_impl.hh b/src/mem/cache/cache_impl.hh index 0a528aa5d..9b094c1e3 100644 --- a/src/mem/cache/cache_impl.hh +++ b/src/mem/cache/cache_impl.hh @@ -1146,11 +1146,11 @@ template<class TagStore, class Coherence> Port * Cache<TagStore,Coherence>::getPort(const std::string &if_name, int idx) { - if (if_name == "") + if (if_name == "" || if_name == "cpu_side") { if (cpuSidePort == NULL) { cpuSidePort = new CpuSidePort(name() + "-cpu_side_port", this); - sendEvent = new CacheEvent(cpuSidePort, true); + sendEvent = new ResponseEvent(cpuSidePort); } return cpuSidePort; } @@ -1158,20 +1158,12 @@ Cache<TagStore,Coherence>::getPort(const std::string &if_name, int idx) { return new CpuSidePort(name() + "-cpu_side_funcport", this); } - else if (if_name == "cpu_side") - { - if (cpuSidePort == NULL) { - cpuSidePort = new CpuSidePort(name() + "-cpu_side_port", this); - sendEvent = new CacheEvent(cpuSidePort, true); - } - return cpuSidePort; - } else if (if_name == "mem_side") { if (memSidePort != NULL) panic("Already have a mem side 
for this cache\n"); memSidePort = new MemSidePort(name() + "-mem_side_port", this); - memSendEvent = new CacheEvent(memSidePort, true); + memSendEvent = new ResponseEvent(memSidePort); return memSidePort; } else panic("Port name %s unrecognized\n", if_name); @@ -1192,6 +1184,8 @@ template<class TagStore, class Coherence> bool Cache<TagStore,Coherence>::CpuSidePort::recvTiming(PacketPtr pkt) { + assert(pkt->result != Packet::Nacked); + if (!pkt->req->isUncacheable() && pkt->isInvalidate() && !pkt->isRead() && !pkt->isWrite()) { @@ -1249,6 +1243,12 @@ template<class TagStore, class Coherence> bool Cache<TagStore,Coherence>::MemSidePort::recvTiming(PacketPtr pkt) { + // this needs to be fixed so that the cache updates the mshr and sends the + // packet back out on the link, but it probably won't happen so until this + // gets fixed, just panic when it does + if (pkt->result == Packet::Nacked) + panic("Need to implement cache resending nacked packets!\n"); + if (pkt->isRequest() && blocked) { DPRINTF(Cache,"Scheduling a retry while blocked\n"); @@ -1282,9 +1282,9 @@ template<class TagStore, class Coherence> void Cache<TagStore,Coherence>::MemSidePort::recvFunctional(PacketPtr pkt) { - if (checkFunctional(pkt)) { - myCache()->probe(pkt, false, cache->cpuSidePort); - } + myCache()->probe(pkt, false, cache->cpuSidePort); + if (pkt->result != Packet::Success) + checkFunctional(pkt); } diff --git a/src/mem/packet.cc b/src/mem/packet.cc index 14d08db1b..2463a19ba 100644 --- a/src/mem/packet.cc +++ b/src/mem/packet.cc @@ -85,7 +85,7 @@ MemCmd::commandInfo[] = { SET5(IsWrite, IsInvalidate, IsRequest, HasData, NeedsResponse), WriteInvalidateResp, "WriteInvalidateReq" }, /* WriteInvalidateResp */ - { SET5(IsWrite, IsInvalidate, IsRequest, NeedsResponse, IsResponse), + { SET3(IsWrite, IsInvalidate, IsResponse), InvalidCmd, "WriteInvalidateResp" }, /* UpgradeReq */ { SET3(IsInvalidate, IsRequest, IsUpgrade), InvalidCmd, "UpgradeReq" }, diff --git a/src/mem/port.hh b/src/mem/port.hh index 6296b42ca..877e00293 100644 --- a/src/mem/port.hh +++ b/src/mem/port.hh @@ -161,10 +161,10 @@ class Port /** Called by a peer port in order to determine the block size of the device connected to this port. It sometimes doesn't make sense for - this function to be called, a DMA interface doesn't really have a - block size, so it is defaulted to a panic. + this function to be called, so it just returns 0. Anytthing that is + concerned with the size should just ignore that. */ - virtual int deviceBlockSize() { panic("??"); M5_DUMMY_RETURN } + virtual int deviceBlockSize() { return 0; } /** The peer port is requesting us to reply with a list of the ranges we are responsible for. 
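
The port.hh hunk above makes deviceBlockSize() return 0 when a port has no meaningful block size, and the earlier bus.cc hunk adds Bus::findBlockSize(), which takes the maximum of the sizes reported by the bus's peers, falls back to a configured default, and caches the result. The following is a stand-alone sketch of that discovery pattern, not part of the patch: the Port and Bus types here are simplified stand-ins for the gem5 classes, and the real code also walks the snoop list and warns on unusual sizes.

// Illustrative sketch only: simplified stand-ins for the gem5 Port/Bus classes.
#include <algorithm>
#include <iostream>
#include <vector>

struct Port {
    int blockSize;                       // 0 means "no meaningful block size"
    explicit Port(int bs) : blockSize(bs) {}
    int deviceBlockSize() const { return blockSize; }
};

class Bus {
    std::vector<Port *> interfaces;
    int defaultBlockSize;
    int cachedBlockSize;
    bool cachedBlockSizeValid;

  public:
    explicit Bus(int dflt)
        : defaultBlockSize(dflt), cachedBlockSize(0),
          cachedBlockSizeValid(false) {}

    void connect(Port *p)
    {
        interfaces.push_back(p);
        cachedBlockSizeValid = false;    // a new peer may change the answer
    }

    int findBlockSize()
    {
        if (cachedBlockSizeValid)
            return cachedBlockSize;

        int max_bs = 0;
        for (Port *p : interfaces)
            max_bs = std::max(max_bs, p->deviceBlockSize());
        if (max_bs <= 0)                 // nobody reported a size
            max_bs = defaultBlockSize;

        cachedBlockSize = max_bs;
        cachedBlockSizeValid = true;
        return max_bs;
    }
};

int main()
{
    Bus bus(64);
    Port dma(0), cache(64);
    bus.connect(&dma);
    bus.connect(&cache);
    std::cout << "bus block size: " << bus.findBlockSize() << "\n";  // prints 64
}
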
diff --git a/src/mem/tport.cc b/src/mem/tport.cc index b384a0444..9a4bd7967 100644 --- a/src/mem/tport.cc +++ b/src/mem/tport.cc @@ -128,6 +128,7 @@ SimpleTimingPort::sendTiming(PacketPtr pkt, Tick time) } i++; } + assert(done); } void diff --git a/src/mem/translating_port.cc b/src/mem/translating_port.cc index d2c854086..54de6625e 100644 --- a/src/mem/translating_port.cc +++ b/src/mem/translating_port.cc @@ -34,12 +34,14 @@ #include "mem/port.hh" #include "mem/translating_port.hh" #include "mem/page_table.hh" +#include "sim/process.hh" using namespace TheISA; TranslatingPort::TranslatingPort(const std::string &_name, - PageTable *p_table, bool alloc) - : FunctionalPort(_name), pTable(p_table), allocating(alloc) + Process *p, AllocType alloc) + : FunctionalPort(_name), pTable(p->pTable), process(p), + allocating(alloc) { } TranslatingPort::~TranslatingPort() @@ -81,13 +83,18 @@ TranslatingPort::tryWriteBlob(Addr addr, uint8_t *p, int size) for (ChunkGenerator gen(addr, size, VMPageSize); !gen.done(); gen.next()) { if (!pTable->translate(gen.addr(), paddr)) { - if (allocating) { + if (allocating == Always) { pTable->allocate(roundDown(gen.addr(), VMPageSize), VMPageSize); - pTable->translate(gen.addr(), paddr); + } else if (allocating == NextPage) { + // check if we've accessed the next page on the stack + if (!process->checkAndAllocNextPage(gen.addr())) + panic("Page table fault when accessing virtual address %#x " + "during functional write\n", gen.addr()); } else { return false; } + pTable->translate(gen.addr(), paddr); } Port::writeBlob(paddr, p + prevSize, gen.size()); @@ -113,7 +120,7 @@ TranslatingPort::tryMemsetBlob(Addr addr, uint8_t val, int size) for (ChunkGenerator gen(addr, size, VMPageSize); !gen.done(); gen.next()) { if (!pTable->translate(gen.addr(), paddr)) { - if (allocating) { + if (allocating == Always) { pTable->allocate(roundDown(gen.addr(), VMPageSize), VMPageSize); pTable->translate(gen.addr(), paddr); diff --git a/src/mem/translating_port.hh b/src/mem/translating_port.hh index 7354278ba..76c7947be 100644 --- a/src/mem/translating_port.hh +++ b/src/mem/translating_port.hh @@ -35,16 +35,25 @@ #include "mem/port.hh" class PageTable; +class Process; class TranslatingPort : public FunctionalPort { + public: + enum AllocType { + Always, + Never, + NextPage + }; + private: PageTable *pTable; - bool allocating; + Process *process; + AllocType allocating; public: TranslatingPort(const std::string &_name, - PageTable *p_table, bool alloc = false); + Process *p, AllocType alloc); virtual ~TranslatingPort(); bool tryReadBlob(Addr addr, uint8_t *p, int size); |
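
The translating_port.cc/.hh hunks above replace the old boolean allocating flag with a three-way AllocType policy (Always, Never, NextPage). Below is a stand-alone sketch, not part of the patch, of how the tryWriteBlob translation fallback uses that policy; the PageTable and Process types here are toy stand-ins modeled loosely on the calls that appear in the diff (translate, allocate, checkAndAllocNextPage), not the real gem5 API.

// Sketch only: toy PageTable/Process types mimicking the calls used in the diff.
#include <cstdint>
#include <cstdio>
#include <unordered_map>

using Addr = uint64_t;
static const Addr VMPageSize = 4096;

struct PageTable {
    std::unordered_map<Addr, Addr> map;              // vpage -> ppage
    bool translate(Addr vaddr, Addr &paddr) const {
        auto it = map.find(vaddr & ~(VMPageSize - 1));
        if (it == map.end())
            return false;
        paddr = it->second | (vaddr & (VMPageSize - 1));
        return true;
    }
    void allocate(Addr vpage) {
        Addr ppage = map.size() * VMPageSize;         // hand out pages in order
        map[vpage] = ppage;
    }
};

struct Process {
    PageTable pTable;
    Addr stackMin = 0x7000000;
    // Grow the stack by one page if the faulting address is just below it.
    bool checkAndAllocNextPage(Addr vaddr) {
        if (vaddr >= stackMin - VMPageSize && vaddr < stackMin) {
            stackMin -= VMPageSize;
            pTable.allocate(stackMin);
            return true;
        }
        return false;
    }
};

enum AllocType { Always, Never, NextPage };

// Returns false only when the allocation policy forbids mapping the page.
bool tryWritePage(Process &proc, AllocType allocating, Addr vaddr)
{
    Addr paddr;
    if (!proc.pTable.translate(vaddr, paddr)) {
        if (allocating == Always) {
            proc.pTable.allocate(vaddr & ~(VMPageSize - 1));
        } else if (allocating == NextPage) {
            if (!proc.checkAndAllocNextPage(vaddr))
                return false;                         // the real code panics here
        } else {
            return false;                             // Never: report the failure
        }
        proc.pTable.translate(vaddr, paddr);          // must succeed now
    }
    std::printf("write to paddr %#llx\n", (unsigned long long)paddr);
    return true;
}

int main()
{
    Process proc;
    tryWritePage(proc, Always, 0x1000);               // maps a fresh page
    tryWritePage(proc, NextPage, proc.stackMin - 8);  // grows the stack
    tryWritePage(proc, Never, 0x900000);              // fails: not mapped
}
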