/*
* Copyright (c) 2012-2016 ARM Limited
* All rights reserved.
*
* The license below extends only to copyright in the software and shall
* not be construed as granting a license to any other intellectual
* property including but not limited to intellectual property relating
* to a hardware implementation of the functionality of the software
* licensed hereunder. You may use the software subject to the license
* terms below provided that you ensure that this notice is replicated
* unmodified and in its entirety in all distributions of the software,
* modified or unmodified, in source code or in binary form.
*
* Copyright (c) 2002-2005 The Regents of The University of Michigan
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Authors: Erik Hallnor
* Dave Greene
* Steve Reinhardt
* Ron Dreslinski
* Andreas Hansson
*/
/**
* @file
* Describes a cache based on template policies.
*/
#ifndef __MEM_CACHE_CACHE_HH__
#define __MEM_CACHE_CACHE_HH__
#include <unordered_set>
#include "base/misc.hh" // fatal, panic, and warn
#include "enums/Clusivity.hh"
#include "mem/cache/base.hh"
#include "mem/cache/blk.hh"
#include "mem/cache/mshr.hh"
#include "mem/cache/tags/base.hh"
#include "params/Cache.hh"
#include "sim/eventq.hh"
// Forward declaration
class BasePrefetcher;
/**
 * A template-policy based cache. The behavior of the cache can be altered by
 * supplying different template policies. The tags object (see BaseTags)
 * handles all tag and data storage.
 * @sa BaseTags, \ref gem5MemorySystem "gem5 Memory System"
 */
class Cache : public BaseCache
{
public:
/** A typedef for a list of CacheBlk pointers. */
typedef std::list<CacheBlk*> BlkList;
protected:
/**
* The CPU-side port extends the base cache slave port with access
* functions for functional, atomic and timing requests.
*/
class CpuSidePort : public CacheSlavePort
{
private:
// a pointer to our specific cache implementation
Cache *cache;
protected:
virtual bool recvTimingSnoopResp(PacketPtr pkt);
virtual bool recvTimingReq(PacketPtr pkt);
virtual Tick recvAtomic(PacketPtr pkt);
virtual void recvFunctional(PacketPtr pkt);
virtual AddrRangeList getAddrRanges() const;
public:
CpuSidePort(const std::string &_name, Cache *_cache,
const std::string &_label);
};
/**
* Override the default behaviour of sendDeferredPacket to enable
* the memory-side cache port to also send requests based on the
* current MSHR status. This queue has a pointer to our specific
* cache implementation and is used by the MemSidePort.
*/
class CacheReqPacketQueue : public ReqPacketQueue
{
protected:
Cache &cache;
SnoopRespPacketQueue &snoopRespQueue;
public:
CacheReqPacketQueue(Cache &cache, MasterPort &port,
SnoopRespPacketQueue &snoop_resp_queue,
const std::string &label) :
ReqPacketQueue(cache, port, label), cache(cache),
snoopRespQueue(snoop_resp_queue) { }
/**
 * Override the normal sendDeferredPacket and consider not only
 * the transmit list (used for responses), but also requests.
 */
virtual void sendDeferredPacket();
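/*
 * A sketch of the intended flow (illustrative, not the exact
 * implementation): ask the cache for the next MSHR or write-queue
 * entry, but let a pending snoop response to the same address go
 * first:
 *
 *     QueueEntry* entry = cache.getNextQueueEntry();
 *     if (entry && !checkConflictingSnoop(entry->blkAddr))
 *         waitingOnRetry = entry->sendPacket(cache);
 */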
/**
 * Check if there is a conflicting snoop response about to be
 * sent out, and if so stall any requests, scheduling a send
 * event at the same time as the next snoop response is being
 * sent out.
 */
bool checkConflictingSnoop(Addr addr)
{
if (snoopRespQueue.hasAddr(addr)) {
DPRINTF(CachePort, "Waiting for snoop response to be "
"sent\n");
Tick when = snoopRespQueue.deferredPacketReadyTime();
schedSendEvent(when);
return true;
}
return false;
}
};
/**
* The memory-side port extends the base cache master port with
* access functions for functional, atomic and timing snoops.
*/
class MemSidePort : public CacheMasterPort
{
private:
/** The cache-specific queue. */
CacheReqPacketQueue _reqQueue;
SnoopRespPacketQueue _snoopRespQueue;
// a pointer to our specific cache implementation
Cache *cache;
protected:
virtual void recvTimingSnoopReq(PacketPtr pkt);
virtual bool recvTimingResp(PacketPtr pkt);
virtual Tick recvAtomicSnoop(PacketPtr pkt);
virtual void recvFunctionalSnoop(PacketPtr pkt);
public:
MemSidePort(const std::string &_name, Cache *_cache,
const std::string &_label);
};
/** Tag and data storage */
BaseTags *tags;
/** Prefetcher */
BasePrefetcher *prefetcher;
/** Temporary cache block for occasional transitory use */
CacheBlk *tempBlock;
/**
* This cache should allocate a block on a line-sized write miss.
*/
const bool doFastWrites;
/**
 * Turn line-sized writes into WriteLineReq transactions.
*/
void promoteWholeLineWrites(PacketPtr pkt);
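/*
 * A sketch of the promotion check (illustrative, not the exact
 * implementation): a line-sized, line-aligned WriteReq overwrites the
 * whole block and therefore needs no fetch of the old data:
 *
 *     if (doFastWrites && pkt->cmd == MemCmd::WriteReq &&
 *         pkt->getSize() == blkSize && pkt->getOffset(blkSize) == 0)
 *         pkt->cmd = MemCmd::WriteLineReq;
 */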
/**
* Notify the prefetcher on every access, not just misses.
*/
const bool prefetchOnAccess;
/**
* Clusivity with respect to the upstream cache, determining if we
* fill into both this cache and the cache above on a miss. Note
* that we currently do not support strict clusivity policies.
*/
const Enums::Clusivity clusivity;
/**
* Determine if clean lines should be written back or not. In
* cases where a downstream cache is mostly inclusive we likely
* want it to act as a victim cache also for lines that have not
* been modified. Hence, we cannot simply drop the line (or send a
* clean evict), but rather need to send the actual data.
*/
const bool writebackClean;
/**
* Upstream caches need this packet until true is returned, so
 * hold it for deletion until a subsequent call.
*/
std::unique_ptr<Packet> pendingDelete;
/**
 * Writebacks from the tempBlock, created on the response path
 * in atomic mode, must happen after the call to recvAtomic has
* finished (for the right ordering of the packets). We therefore
* need to hold on to the packets, and have a method and an event
* to send them.
*/
PacketPtr tempBlockWriteback;
/**
* Send the outstanding tempBlock writeback. To be called after
* recvAtomic finishes in cases where the block we filled is in
* fact the tempBlock, and now needs to be written back.
*/
void writebackTempBlockAtomic() {
assert(tempBlockWriteback != nullptr);
PacketList writebacks{tempBlockWriteback};
doWritebacksAtomic(writebacks);
tempBlockWriteback = nullptr;
}
/**
 * An event to write back the tempBlock after recvAtomic
* finishes. To avoid other calls to recvAtomic getting in
* between, we create this event with a higher priority.
*/
EventWrapper<Cache, &Cache::writebackTempBlockAtomic>
    writebackTempBlockAtomicEvent;
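/*
 * A sketch of how recvAtomic is expected to use the event when a fill
 * ends up in the tempBlock (illustrative, not the exact
 * implementation):
 *
 *     if (tempBlockWriteback) {
 *         // a previous writeback is still pending; send it now
 *         writebackTempBlockAtomic();
 *     } else {
 *         schedule(writebackTempBlockAtomicEvent, curTick());
 *     }
 *     tempBlockWriteback = (blk->isDirty() || writebackClean) ?
 *         writebackBlk(blk) : cleanEvictBlk(blk);
 */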
/**
* Store the outstanding requests that we are expecting snoop
* responses from so we can determine which snoop responses we
* generated and which ones were merely forwarded.
*/
std::unordered_set<RequestPtr> outstandingSnoop;
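/*
 * A sketch of the intended use (illustrative): remember the request
 * when we send our own upward snoop, so that when the snoop response
 * arrives we can distinguish it from responses we merely forwarded:
 *
 *     outstandingSnoop.insert(pkt->req);  // when issuing a snoop
 *     ...
 *     bool forward_as_snoop =             // when handling the response
 *         outstandingSnoop.find(pkt->req) == outstandingSnoop.end();
 */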
/**
* Does all the processing necessary to perform the provided request.
* @param pkt The memory request to perform.
* @param blk The cache block to be updated.
* @param lat The latency of the access.
* @param writebacks List for any writebacks that need to be performed.
* @return Boolean indicating whether the request was satisfied.
*/
bool access(PacketPtr pkt, CacheBlk *&blk,
Cycles &lat, PacketList &writebacks);
/**
 * Handle the Compare and Swap function for SPARC.
*/
void cmpAndSwap(CacheBlk *blk, PacketPtr pkt);
/**
 * Find a block frame for a new block at address addr targeting the
 * given security space, assuming that the block is not currently
 * in the cache. Append any writebacks to the provided packet
 * list. Return the free block frame. May return nullptr if there
 * are no replaceable blocks at the moment.
*/
CacheBlk *allocateBlock(Addr addr, bool is_secure, PacketList &writebacks);
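/*
 * A sketch of a typical fill-time use (illustrative): fall back to
 * the tempBlock when no frame can be freed up:
 *
 *     CacheBlk *blk = allocateBlock(addr, is_secure, writebacks);
 *     if (!blk) {
 *         blk = tempBlock;
 *         tempBlock->set = tags->extractSet(addr);
 *         tempBlock->tag = tags->extractTag(addr);
 *     }
 */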
/**
* Invalidate a cache block.
*
* @param blk Block to invalidate
*/
void invalidateBlock(CacheBlk *blk);
/**
* Maintain the clusivity of this cache by potentially
* invalidating a block. This method works in conjunction with
* satisfyRequest, but is separate to allow us to handle all MSHR
* targets before potentially dropping a block.
*
* @param from_cache Whether we have dealt with a packet from a cache
* @param blk The block that should potentially be dropped
*/
void maintainClusivity(bool from_cache, CacheBlk *blk);
/**
 * Populates a cache block and handles all outstanding requests for
 * the satisfied fill request.
* @param pkt The memory request with the fill data.
* @param blk The cache block if it already exists.
* @param writebacks List for any writebacks that need to be performed.
* @param allocate Whether to allocate a block or use the temp block
* @return Pointer to the new cache block.
*/
CacheBlk *handleFill(PacketPtr pkt, CacheBlk *blk,
PacketList &writebacks, bool allocate);
/**
* Determine whether we should allocate on a fill or not. If this
* cache is mostly inclusive with regards to the upstream cache(s)
* we always allocate (for any non-forwarded and cacheable
* requests). In the case of a mostly exclusive cache, we allocate
 * on fill if the packet did not come from a cache, that is if we
 * are dealing with a whole-line write (which behaves much like a
 * writeback), the original target packet came from a non-caching
 * source, or we are performing a prefetch or LLSC.
*
* @param cmd Command of the incoming requesting packet
* @return Whether we should allocate on the fill
*/
inline bool allocOnFill(MemCmd cmd) const override
{
return clusivity == Enums::mostly_incl ||
cmd == MemCmd::WriteLineReq ||
cmd == MemCmd::ReadReq ||
cmd == MemCmd::WriteReq ||
cmd.isPrefetch() ||
cmd.isLLSC();
}
/**
* Performs the access specified by the request.
* @param pkt The request to perform.
* @return The result of the access.
*/
bool recvTimingReq(PacketPtr pkt);
/**
* Insert writebacks into the write buffer
*/
void doWritebacks(PacketList& writebacks, Tick forward_time);
/**
* Send writebacks down the memory hierarchy in atomic mode
*/
void doWritebacksAtomic(PacketList& writebacks);
/**
 * Handles the special case of uncacheable write responses to
* make recvTimingResp less cluttered.
*/
void handleUncacheableWriteResp(PacketPtr pkt);
/**
* Handles a response (cache line fill/write ack) from the bus.
* @param pkt The response packet
*/
void recvTimingResp(PacketPtr pkt);
/**
* Snoops bus transactions to maintain coherence.
* @param pkt The current bus transaction.
*/
void recvTimingSnoopReq(PacketPtr pkt);
/**
* Handle a snoop response.
* @param pkt Snoop response packet
*/
void recvTimingSnoopResp(PacketPtr pkt);
/**
* Performs the access specified by the request.
* @param pkt The request to perform.
* @return The number of ticks required for the access.
*/
Tick recvAtomic(PacketPtr pkt);
/**
* Snoop for the provided request in the cache and return the estimated
* time taken.
* @param pkt The memory request to snoop
* @return The number of ticks required for the snoop.
*/
Tick recvAtomicSnoop(PacketPtr pkt);
/**
* Performs the access specified by the request.
* @param pkt The request to perform.
 * @param fromCpuSide Whether the access comes from the CPU-side port
*/
void functionalAccess(PacketPtr pkt, bool fromCpuSide);
/**
* Perform any necessary updates to the block and perform any data
* exchange between the packet and the block. The flags of the
* packet are also set accordingly.
*
* @param pkt Request packet from upstream that hit a block
* @param blk Cache block that the packet hit
 * @param deferred_response Whether this hit is to a block that
 * originally missed
 * @param pending_downgrade Whether the writable flag is to be removed
 */
void satisfyRequest(PacketPtr pkt, CacheBlk *blk,
bool deferred_response = false,
bool pending_downgrade = false);
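/**
 * Turn the given snoop request into a timing snoop response and
 * send it, supplying the snooped block's data where needed.
 *
 * @param req_pkt The snoop request being responded to
 * @param blk_data Data of the snooped cache block
 * @param already_copied Whether req_pkt is already a copy that can
 *                       be turned into the response directly
 * @param pending_inval Whether an invalidation of the block is
 *                      still pending
 */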
void doTimingSupplyResponse(PacketPtr req_pkt, const uint8_t *blk_data,
bool already_copied, bool pending_inval);
/**
* Perform an upward snoop if needed, and update the block state
* (possibly invalidating the block). Also create a response if required.
*
* @param pkt Snoop packet
* @param blk Cache block being snooped
* @param is_timing Timing or atomic for the response
* @param is_deferred Is this a deferred snoop or not?
* @param pending_inval Do we have a pending invalidation?
*
* @return The snoop delay incurred by the upwards snoop
*/
uint32_t handleSnoop(PacketPtr pkt, CacheBlk *blk,
bool is_timing, bool is_deferred, bool pending_inval);
/**
* Create a writeback request for the given block.
* @param blk The block to writeback.
* @return The writeback request for the block.
*/
PacketPtr writebackBlk(CacheBlk *blk);
/**
* Create a CleanEvict request for the given block.
* @param blk The block to evict.
* @return The CleanEvict request for the block.
*/
PacketPtr cleanEvictBlk(CacheBlk *blk);
void memWriteback() override;
void memInvalidate() override;
bool isDirty() const override;
/**
* Cache block visitor that writes back dirty cache blocks using
* functional writes.
*
 * @return Always returns true.
*/
bool writebackVisitor(CacheBlk &blk);
/**
* Cache block visitor that invalidates all blocks in the cache.
*
 * @warning Dirty cache lines will not be written back to memory.
 *
 * @return Always returns true.
*/
bool invalidateVisitor(CacheBlk &blk);
/**
* Create an appropriate downstream bus request packet for the
* given parameters.
* @param cpu_pkt The miss that needs to be satisfied.
* @param blk The block currently in the cache corresponding to
* cpu_pkt (nullptr if none).
* @param needsWritable Indicates that the block must be writable
* even if the request in cpu_pkt doesn't indicate that.
* @return A new Packet containing the request, or nullptr if the
* current request in cpu_pkt should just be forwarded on.
*/
PacketPtr createMissPacket(PacketPtr cpu_pkt, CacheBlk *blk,
bool needsWritable) const;
/**
* Return the next queue entry to service, either a pending miss
* from the MSHR queue, a buffered write from the write buffer, or
* something from the prefetcher. This function is responsible
* for prioritizing among those sources on the fly.
*/
QueueEntry* getNextQueueEntry();
/**
* Send up a snoop request and find cached copies. If cached copies are
* found, set the BLOCK_CACHED flag in pkt.
*/
bool isCachedAbove(PacketPtr pkt, bool is_timing = true) const;
/**
* Return whether there are any outstanding misses.
*/
bool outstandingMisses() const
{
return !mshrQueue.isEmpty();
}
CacheBlk *findBlock(Addr addr, bool is_secure) const {
return tags->findBlock(addr, is_secure);
}
bool inCache(Addr addr, bool is_secure) const override {
    return tags->findBlock(addr, is_secure) != nullptr;
}
bool inMissQueue(Addr addr, bool is_secure) const override {
    return mshrQueue.findMatch(addr, is_secure) != nullptr;
}
/**
* Find next request ready time from among possible sources.
*/
Tick nextQueueReadyTime() const;
public:
/** Instantiates a basic cache object. */
Cache(const CacheParams *p);
/** Non-default destructor is needed to deallocate memory. */
virtual ~Cache();
void regStats() override;
/**
* Take an MSHR, turn it into a suitable downstream packet, and
* send it out. This construct allows a queue entry to choose a suitable
* approach based on its type.
*
* @param mshr The MSHR to turn into a packet and send
* @return True if the port is waiting for a retry
*/
bool sendMSHRQueuePacket(MSHR* mshr);
/**
 * Similar to sendMSHRQueuePacket, but for a write-queue entry
* instead. Create the packet, and send it, and if successful also
* mark the entry in service.
*
* @param wq_entry The write-queue entry to turn into a packet and send
* @return True if the port is waiting for a retry
*/
bool sendWriteQueuePacket(WriteQueueEntry* wq_entry);
/**
 * Serialize the state of the caches. We currently don't support
 * checkpointing cache state, so this panics.
 */
void serialize(CheckpointOut &cp) const override;
void unserialize(CheckpointIn &cp) override;
};
/**
* Wrap a method and present it as a cache block visitor.
*
 * For example the forEachBlk method in the tag arrays expects a
 * callable object/function as its parameter. This class wraps a
 * method in an object and presents a callable object that adheres
 * to the cache block visitor protocol.
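 *
 * For example, memWriteback() can be implemented as (a sketch):
 * @code
 * CacheBlkVisitorWrapper visitor(*this, &Cache::writebackVisitor);
 * tags->forEachBlk(visitor);
 * @endcode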
*/
class CacheBlkVisitorWrapper : public CacheBlkVisitor
{
public:
typedef bool (Cache::*VisitorPtr)(CacheBlk &blk);
CacheBlkVisitorWrapper(Cache &_cache, VisitorPtr _visitor)
: cache(_cache), visitor(_visitor) {}
bool operator()(CacheBlk &blk) override {
return (cache.*visitor)(blk);
}
private:
Cache &cache;
VisitorPtr visitor;
};
/**
* Cache block visitor that determines if there are dirty blocks in a
* cache.
*
* Use with the forEachBlk method in the tag array to determine if the
* array contains dirty blocks.
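 *
 * For example, Cache::isDirty() can be implemented as (a sketch):
 * @code
 * CacheBlkIsDirtyVisitor visitor;
 * tags->forEachBlk(visitor);
 * return visitor.isDirty();
 * @endcode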
*/
class CacheBlkIsDirtyVisitor : public CacheBlkVisitor
{
public:
CacheBlkIsDirtyVisitor()
: _isDirty(false) {}
bool operator()(CacheBlk &blk) override {
if (blk.isDirty()) {
_isDirty = true;
return false;
} else {
return true;
}
}
/**
* Does the array contain a dirty line?
*
* \return true if yes, false otherwise.
*/
bool isDirty() const { return _isDirty; }
private:
bool _isDirty;
};
#endif // __MEM_CACHE_CACHE_HH__