/*
* This file is part of the coreboot project.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/

#ifndef _REGION_H_
#define _REGION_H_

#include <sys/types.h>
#include <stdint.h>
#include <stddef.h>

#include <commonlib/mem_pool.h>

/*
 * Region support.
 *
 * Regions are intended to abstract away the access mechanisms for blocks of
 * data. This could be SPI, eMMC, or a memory region as the backing store.
 * They are accessed through a region_device. Subregions can be made by
 * chaining together multiple region_devices.
 */

struct region_device;

/*
 * Returns NULL on error, otherwise a buffer is returned with the contents of
 * the requested data at the given offset and size.
 */
void *rdev_mmap(const struct region_device *rd, size_t offset, size_t size);

/* Unmap a previously mapped area. Returns 0 on success, < 0 on error. */
int rdev_munmap(const struct region_device *rd, void *mapping);
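
/*
 * Example (illustrative sketch, not part of this header's API): map a range,
 * use the mapping, then release it. `rdev` is assumed to be an
 * already-initialized region_device and use_buffer() is a hypothetical
 * consumer of the mapped data.
 *
 *        void *map = rdev_mmap(rdev, 0x1000, 0x100);
 *        if (map == NULL)
 *                return -1;
 *        use_buffer(map, 0x100);
 *        rdev_munmap(rdev, map);
 */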

/*
 * Returns < 0 on error, otherwise returns the size of the data read at the
 * provided offset, filling in the buffer passed.
 */
ssize_t rdev_readat(const struct region_device *rd, void *b, size_t offset,
                        size_t size);

/*
 * Returns < 0 on error, otherwise returns the size of the data written at the
 * provided offset from the buffer passed.
 */
ssize_t rdev_writeat(const struct region_device *rd, const void *b,
                        size_t offset, size_t size);

/*
 * Returns < 0 on error, otherwise returns the size of the data erased.
 * If the eraseat op is not defined, it returns size, which indicates
 * that the operation was successful.
 */
ssize_t rdev_eraseat(const struct region_device *rd, size_t offset,
                        size_t size);
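
/*
 * Example (illustrative sketch): read a block into a local buffer and write
 * it back at a different offset. `rdev` is assumed to be an initialized,
 * writable region_device; the offsets and sizes are hypothetical.
 *
 *        uint8_t buf[64];
 *        if (rdev_readat(rdev, buf, 0, sizeof(buf)) != sizeof(buf))
 *                return -1;
 *        if (rdev_writeat(rdev, buf, 0x1000, sizeof(buf)) != sizeof(buf))
 *                return -1;
 *
 * rdev_eraseat() follows the same pattern; depending on the backing store it
 * may additionally require erase-block-aligned offsets and sizes.
 */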

/****************************************
 *  Implementation of a region device   *
 ****************************************/

/*
 * Create a child region of the parent provided the sub-region is within
 * the parent's region. Returns < 0 on error, otherwise 0 on success. Note
 * that the child device only calls through the parent's operations.
 */
int rdev_chain(struct region_device *child, const struct region_device *parent,
                size_t offset, size_t size);
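
/*
 * Example (illustrative sketch): carve a 4 KiB child region out of a parent
 * device at offset 64 KiB and read through it. Offsets passed to the child
 * are relative to the child's start. `parent` is assumed to be initialized.
 *
 *        struct region_device child;
 *        uint8_t hdr[16];
 *        if (rdev_chain(&child, parent, 0x10000, 0x1000) < 0)
 *                return -1;
 *        if (rdev_readat(&child, hdr, 0, sizeof(hdr)) < 0)
 *                return -1;
 */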

/* Operations on a region_device. */
struct region_device_ops {
        void *(*mmap)(const struct region_device *, size_t, size_t);
        int (*munmap)(const struct region_device *, void *);
        ssize_t (*readat)(const struct region_device *, void *, size_t, size_t);
        ssize_t (*writeat)(const struct region_device *, const void *, size_t,
                        size_t);
        ssize_t (*eraseat)(const struct region_device *, size_t, size_t);
};

struct region {
        size_t offset;
        size_t size;
};

struct region_device {
        const struct region_device *root;
        const struct region_device_ops *ops;
        struct region region;
};

#define REGION_DEV_INIT(ops_, offset_, size_)           \
        {                                               \
                .root = NULL,                           \
                .ops = (ops_),                          \
                .region = {                             \
                        .offset = (offset_),            \
                        .size = (size_),                \
                },                                      \
        }

/* Helper to dynamically initialize a region device. */
void region_device_init(struct region_device *rdev,
                        const struct region_device_ops *ops, size_t offset,
                        size_t size);

/* Return 1 if the child is a subregion of the parent, else 0. */
int region_is_subregion(const struct region *p, const struct region *c);

static inline size_t region_offset(const struct region *r)
{
        return r->offset;
}

static inline size_t region_sz(const struct region *r)
{
        return r->size;
}

static inline size_t region_end(const struct region *r)
{
        return region_offset(r) + region_sz(r);
}

static inline const struct region *region_device_region(
                                        const struct region_device *rdev)
{
        return &rdev->region;
}

static inline size_t region_device_sz(const struct region_device *rdev)
{
        return region_sz(region_device_region(rdev));
}

static inline size_t region_device_offset(const struct region_device *rdev)
{
        return region_offset(region_device_region(rdev));
}

static inline size_t region_device_end(const struct region_device *rdev)
{
        return region_end(region_device_region(rdev));
}

/* Memory map entire region device. Same semantics as rdev_mmap() above. */
static inline void *rdev_mmap_full(const struct region_device *rd)
{
        return rdev_mmap(rd, 0, region_device_sz(rd));
}

static inline int rdev_chain_full(struct region_device *child,
                                const struct region_device *parent)
{
        /* Chain full size of parent. */
        return rdev_chain(child, parent, 0, region_device_sz(parent));
}

/*
 * Compute relative offset of the child (c) w.r.t. the parent (p). Returns < 0
 * when the child is not within the parent's region.
 */
ssize_t rdev_relative_offset(const struct region_device *p,
                                const struct region_device *c);
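
/*
 * Worked example (hypothetical values): if `child` was chained from `parent`
 * at offset 0x2000, then rdev_relative_offset(parent, child) returns 0x2000;
 * for a device that does not fall within `parent` it returns < 0.
 */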

struct mem_region_device {
        char *base;
        struct region_device rdev;
};

/* Initialize a mem_region_device at runtime. This is used when the base
 * and size are dynamic or can't be known during linking. There are two
 * variants: read-only and read-write. */
void mem_region_device_ro_init(struct mem_region_device *mdev, void *base,
                                size_t size);
void mem_region_device_rw_init(struct mem_region_device *mdev, void *base,
                                size_t size);

extern const struct region_device_ops mem_rdev_ro_ops;
extern const struct region_device_ops mem_rdev_rw_ops;

/* Statically initialize a mem_region_device. */
#define MEM_REGION_DEV_INIT(base_, size_, ops_)                 \
        {                                                       \
                .base = (void *)(base_),                        \
                .rdev = REGION_DEV_INIT((ops_), 0, (size_)),    \
        }

#define MEM_REGION_DEV_RO_INIT(base_, size_)                    \
        MEM_REGION_DEV_INIT(base_, size_, &mem_rdev_ro_ops)

#define MEM_REGION_DEV_RW_INIT(base_, size_)                    \
        MEM_REGION_DEV_INIT(base_, size_, &mem_rdev_rw_ops)
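
/*
 * Example (illustrative sketch): expose a RAM buffer as a read-only region
 * device and map part of it. The buffer name and size are hypothetical.
 *
 *        static uint8_t blob[4096];
 *        struct mem_region_device mdev;
 *        mem_region_device_ro_init(&mdev, blob, sizeof(blob));
 *        void *p = rdev_mmap(&mdev.rdev, 0, 256);
 *
 * The same device can instead be defined statically with
 * MEM_REGION_DEV_RO_INIT(blob, sizeof(blob)).
 */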

struct mmap_helper_region_device {
        struct mem_pool pool;
        struct region_device rdev;
};

#define MMAP_HELPER_REGION_INIT(ops_, offset_, size_)                   \
        {                                                               \
                .rdev = REGION_DEV_INIT((ops_), (offset_), (size_)),    \
        }

void mmap_helper_device_init(struct mmap_helper_region_device *mdev,
                                void *cache, size_t cache_size);

void *mmap_helper_rdev_mmap(const struct region_device *, size_t, size_t);
int mmap_helper_rdev_munmap(const struct region_device *, void *);
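
/*
 * Example (illustrative sketch): a driver whose hardware only supports reads
 * can reuse the helpers above to provide mmap()/munmap() backed by a RAM
 * cache. `my_readat`, the device size, and the cache size are hypothetical.
 *
 *        static const struct region_device_ops my_ops = {
 *                .mmap = mmap_helper_rdev_mmap,
 *                .munmap = mmap_helper_rdev_munmap,
 *                .readat = my_readat,
 *        };
 *        static struct mmap_helper_region_device my_dev =
 *                MMAP_HELPER_REGION_INIT(&my_ops, 0, 16 * 1024);
 *        static uint8_t cache[1024];
 *
 *        mmap_helper_device_init(&my_dev, cache, sizeof(cache));
 */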

/* A translated region device provides the ability to publish a region device
 * in one address space and use an access mechanism within another address
 * space. The sub-region is the window within the first address space, and
 * the request is translated prior to accessing the second address space
 * provided by access_dev. */
struct xlate_region_device {
        const struct region_device *access_dev;
        struct region sub_region;
        struct region_device rdev;
};

extern const struct region_device_ops xlate_rdev_ro_ops;
extern const struct region_device_ops xlate_rdev_rw_ops;

#define XLATE_REGION_DEV_INIT(access_dev_, sub_offset_, sub_size_,      \
                parent_sz_, ops_)                                       \
        {                                                               \
                .access_dev = access_dev_,                              \
                .sub_region = {                                         \
                        .offset = (sub_offset_),                        \
                        .size = (sub_size_),                            \
                },                                                      \
                .rdev = REGION_DEV_INIT((ops_), 0, (parent_sz_)),       \
        }

#define XLATE_REGION_DEV_RO_INIT(access_dev_, sub_offset_, sub_size_,   \
                parent_sz_)                                             \
        XLATE_REGION_DEV_INIT(access_dev_, sub_offset_,                 \
                sub_size_, parent_sz_, &xlate_rdev_ro_ops)

#define XLATE_REGION_DEV_RW_INIT(access_dev_, sub_offset_, sub_size_,   \
                parent_sz_)                                             \
        XLATE_REGION_DEV_INIT(access_dev_, sub_offset_,                 \
                sub_size_, parent_sz_, &xlate_rdev_rw_ops)

/* Helpers to dynamically initialize an xlate region device. */
void xlate_region_device_ro_init(struct xlate_region_device *xdev,
                                const struct region_device *access_dev,
                                size_t sub_offset, size_t sub_size,
                                size_t parent_size);

void xlate_region_device_rw_init(struct xlate_region_device *xdev,
                                const struct region_device *access_dev,
                                size_t sub_offset, size_t sub_size,
                                size_t parent_size);
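
/*
 * Example (illustrative sketch, hypothetical sizes and address): a 16 MiB
 * device whose top 4 MiB is memory mapped. The xlate device publishes the
 * full 16 MiB address space, and accesses falling inside the 4 MiB window at
 * offset 12 MiB are forwarded to the memory-mapped access device.
 *
 *        struct mem_region_device window;
 *        struct xlate_region_device xdev;
 *
 *        mem_region_device_ro_init(&window, (void *)0xffc00000, 4 << 20);
 *        xlate_region_device_ro_init(&xdev, &window.rdev,
 *                                        12 << 20, 4 << 20, 16 << 20);
 *        void *p = rdev_mmap(&xdev.rdev, 12 << 20, 4096);
 *
 * Here offset 12 MiB of xdev.rdev corresponds to offset 0 of the window
 * device.
 */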

/* This type can be used for incoherent access where the read and write
 * operations are backed by separate drivers. An example is an x86 system
 * with memory-mapped media for reading that uses a SPI flash driver for
 * writing. One needs to ensure that using this object is appropriate in
 * context. */
struct incoherent_rdev {
        struct region_device rdev;
        const struct region_device *read;
        const struct region_device *write;
};

/* Initialize an incoherent_rdev based on the region as well as the read and
 * write rdevs. The read and write rdevs should match the passed-in region in
 * size; if not, the initialization fails and NULL is returned. Otherwise the
 * function returns a pointer to the containing region_device to be used for
 * region operations. Therefore, the lifetime of the returned pointer matches
 * the lifetime of the incoherent_rdev object. Likewise, the lifetimes of the
 * read and write rdevs need to match the lifetime of the incoherent_rdev
 * object. */
const struct region_device *incoherent_rdev_init(struct incoherent_rdev *irdev,
                                const struct region *r,
                                const struct region_device *read,
                                const struct region_device *write);
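
/*
 * Example (illustrative sketch): combine a memory-mapped read path with a
 * driver-backed write path of the same size. `flash_write_rdev` is a
 * hypothetical, already-initialized writable region_device, and the mapping
 * address and size are hypothetical.
 *
 *        struct mem_region_device ro;
 *        struct incoherent_rdev irdev;
 *        struct region r = { .offset = 0, .size = 16 << 20 };
 *        const struct region_device *rdev;
 *
 *        mem_region_device_ro_init(&ro, (void *)0xff000000, 16 << 20);
 *        rdev = incoherent_rdev_init(&irdev, &r, &ro.rdev, flash_write_rdev);
 *        if (rdev == NULL)
 *                return -1;
 */
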
#endif /* _REGION_H_ */