/*
* Copyright (c) 2012-2013, 2015-2016, 2018 ARM Limited
* All rights reserved.
*
* The license below extends only to copyright in the software and shall
* not be construed as granting a license to any other intellectual
* property including but not limited to intellectual property relating
* to a hardware implementation of the functionality of the software
* licensed hereunder. You may use the software subject to the license
* terms below provided that you ensure that this notice is replicated
* unmodified and in its entirety in all distributions of the software,
* modified or unmodified, in source code or in binary form.
*
* Copyright (c) 2003-2005 The Regents of The University of Michigan
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Authors: Erik Hallnor
* Andreas Sandberg
* Andreas Hansson
*/
/** @file
* Declaration of a high-level queue structure
*/
#ifndef __MEM_CACHE_QUEUE_HH__
#define __MEM_CACHE_QUEUE_HH__
#include <cassert>
#include <string>
#include <vector>
#include "base/trace.hh"
#include "base/types.hh"
#include "debug/Drain.hh"
#include "mem/cache/queue_entry.hh"
#include "mem/packet.hh"
#include "sim/core.hh"
#include "sim/drain.hh"
/**
* A high-level queue interface, to be used by both the MSHR queue and
* the write buffer.
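 *
 * Entries that have not yet been sent downstream are kept on a ready
 * list ordered by ready time. A minimal usage sketch (queue,
 * send_downstream and schedule_at are hypothetical names, not part of
 * this file):
 *
 * @code
 * if (Entry *entry = queue.getNext()) {
 *     // Head of the ready list whose ready time has passed.
 *     send_downstream(entry);
 *     // ... later, once the entry is no longer needed:
 *     queue.deallocate(entry);
 * } else {
 *     // Nothing is ready yet; check again at the next ready time.
 *     schedule_at(queue.nextReadyTime());
 * }
 * @endcode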
*/
template<class Entry>
class Queue : public Drainable
{
protected:
/** Local label (for functional print requests) */
const std::string label;
/**
* The total number of entries in this queue. This number is set
* as the number of entries requested plus any reserve. This
* allows for the same number of effective entries while still
* maintaining an overflow reserve.
*/
const int numEntries;
    /**
     * The number of entries to hold as a temporary overflow
     * space. This allows the number of allocated entries to
     * temporarily exceed the nominal capacity, since the full
     * condition is only checked at certain points.
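     *
     * For example, a queue created with num_entries = 4 and
     * reserve = 1 has numEntries = 5 slots in total, but isFull()
     * already returns true once 4 entries are allocated, keeping one
     * slot available for temporary overflow.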
*/
const int numReserve;
/** Actual storage. */
std::vector<Entry> entries;
/** Holds pointers to all allocated entries. */
typename Entry::List allocatedList;
/** Holds pointers to entries that haven't been sent downstream. */
typename Entry::List readyList;
    /** Holds pointers to entries that are not currently allocated. */
typename Entry::List freeList;
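    /**
     * Insert an entry into the ready list, keeping the list sorted by
     * ascending ready time; entries with equal ready times retain
     * their insertion order.
     *
     * @param entry The entry to add.
     * @return An iterator to the entry's position in the ready list.
     */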
typename Entry::Iterator addToReadyList(Entry* entry)
{
if (readyList.empty() ||
readyList.back()->readyTime <= entry->readyTime) {
return readyList.insert(readyList.end(), entry);
}
for (auto i = readyList.begin(); i != readyList.end(); ++i) {
if ((*i)->readyTime > entry->readyTime) {
return readyList.insert(i, entry);
}
}
        assert(false);
        return readyList.end(); // unreachable; silences compiler warnings
}
/** The number of entries that are in service. */
int _numInService;
/** The number of currently allocated entries. */
int allocated;
public:
    /**
     * Create a queue with a given number of entries.
     *
     * @param _label Label for this queue (used in functional print
     *               requests).
     * @param num_entries The number of regular entries requested,
     *                    excluding the overflow reserve.
     * @param reserve The number of extra overflow entries needed.
     */
Queue(const std::string &_label, int num_entries, int reserve) :
label(_label), numEntries(num_entries + reserve),
numReserve(reserve), entries(numEntries), _numInService(0),
allocated(0)
{
for (int i = 0; i < numEntries; ++i) {
freeList.push_back(&entries[i]);
}
}
bool isEmpty() const
{
return allocated == 0;
}
bool isFull() const
{
return (allocated >= numEntries - numReserve);
}
int numInService() const
{
return _numInService;
}
    /**
     * Find the first entry that matches the provided address.
     *
     * @param blk_addr The block address to find.
     * @param is_secure True if the target memory space is secure.
     * @param ignore_uncacheable Whether entries allocated for
     *        uncacheable accesses should be skipped when matching.
     * @return Pointer to the matching entry, or nullptr if not found.
     */
Entry* findMatch(Addr blk_addr, bool is_secure,
bool ignore_uncacheable = true) const
{
for (const auto& entry : allocatedList) {
            // By default we skip entries allocated for uncacheable
            // accesses when matching: the cache never checks for
            // matches when adding new uncacheable entries, and normal
            // cacheable accesses must not be attached to an entry
            // serving an uncacheable access.
if (!(ignore_uncacheable && entry->isUncacheable()) &&
entry->blkAddr == blk_addr && entry->isSecure == is_secure) {
return entry;
}
}
return nullptr;
}
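    /**
     * Try to satisfy a functional (debug) packet from any allocated
     * entry for the given block address.
     *
     * @param pkt The functional packet to satisfy.
     * @param blk_addr Block address of the access.
     * @return True if the packet was satisfied by one of the entries.
     */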
bool trySatisfyFunctional(PacketPtr pkt, Addr blk_addr)
{
pkt->pushLabel(label);
for (const auto& entry : allocatedList) {
if (entry->blkAddr == blk_addr && entry->trySatisfyFunctional(pkt)) {
pkt->popLabel();
return true;
}
}
pkt->popLabel();
return false;
}
    /**
     * Find the first pending (not in-service) entry that matches the
     * given block address.
     *
     * @param blk_addr Block address.
     * @param is_secure True if the target memory space is secure.
     * @return Pointer to the earliest matching entry, or nullptr if
     *         none is found.
     */
Entry* findPending(Addr blk_addr, bool is_secure) const
{
for (const auto& entry : readyList) {
if (entry->blkAddr == blk_addr && entry->isSecure == is_secure) {
return entry;
}
}
return nullptr;
}
    /**
     * Returns the entry at the head of the readyList, provided its
     * ready time has been reached.
     *
     * @return The next request to service, or nullptr if none is ready.
     */
Entry* getNext() const
{
if (readyList.empty() || readyList.front()->readyTime > curTick()) {
return nullptr;
}
return readyList.front();
}
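    /**
     * Find the ready time of the entry at the head of the readyList,
     * i.e. the earliest tick at which getNext() can return an entry.
     *
     * @return The head entry's ready time, or MaxTick if the queue
     *         has no pending entries.
     */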
Tick nextReadyTime() const
{
return readyList.empty() ? MaxTick : readyList.front()->readyTime;
}
    /**
     * Removes the given entry from the queue. This places the entry
     * on the free list.
     *
     * @param entry The entry to deallocate; it must currently be
     *              allocated in this queue.
     */
void deallocate(Entry *entry)
{
allocatedList.erase(entry->allocIter);
freeList.push_front(entry);
allocated--;
if (entry->inService) {
_numInService--;
} else {
readyList.erase(entry->readyIter);
}
entry->deallocate();
if (drainState() == DrainState::Draining && allocated == 0) {
// Notify the drain manager that we have completed
// draining if there are no other outstanding requests in
// this queue.
DPRINTF(Drain, "Queue now empty, signalling drained\n");
signalDrainDone();
}
}
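    /**
     * Drainable interface: the queue is drained once no entries are
     * allocated. If entries are still outstanding, draining completes
     * when the last one is deallocated (see deallocate()).
     */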
DrainState drain() override
{
return allocated == 0 ? DrainState::Drained : DrainState::Draining;
}
};
#endif //__MEM_CACHE_QUEUE_HH__