author     Furquan Shaikh <furquan@google.com>     2020-11-11 23:23:13 -0800
committer  Furquan Shaikh <furquan@google.com>     2020-12-08 18:59:18 +0000
commit     f5b30eda1fa02ecd7b23bbb9c0bf40932f3cde9b (patch)
tree       de6eae6dac5933edbb26250cabf7bdd72c143906 /src/commonlib
parent     73982edadd83319339839fec026d29abd24034de (diff)
download   coreboot-f5b30eda1fa02ecd7b23bbb9c0bf40932f3cde9b.tar.xz
commonlib/region: Allow multiple windows for xlate_region_dev
This change updates the translated region device (xlate_region_dev) to support multiple translation windows from the first address space into the second address space. The address spaces described by the translation windows can be non-contiguous in both spaces. This is required so that newer x86 platforms can describe the memory mapping of SPI flash into multiple decode windows in order to support more than 16MiB of memory-mapped space.

Since the windows can be non-contiguous, this introduces new restrictions on the region device ops: any operation performed on the translated region device is limited to a single window at a time. This restriction exists primarily because of the mmap operation. The caller expects the memory-mapped space to be contiguous, which is no longer guaranteed. Even though the other operations (readat, writeat, eraseat) could be translated into multiple operations, one per access device, all operations across multiple windows are prohibited for the sake of consistency. It is the responsibility of the platform to ensure that any section operated on through the translated region device does not span multiple windows in the fmap description.

One additional difference in behavior is that xlate_region_device does not perform any action on a munmap call. This is because it does not keep track of the access device that was used to service the mmap request. Currently, xlate_region_device is only used for memory-mapped boot media on the backend, so skipping the unmap is fine. If this needs to change in the future, xlate_region_device will have to accept pre-allocated space from the caller to keep track of all mapping requests.

BUG=b:171534504

Change-Id: Id5b21ffca2c8d6a9dfc37a878429aed4a8301651
Signed-off-by: Furquan Shaikh <furquan@google.com>
Reviewed-on: https://review.coreboot.org/c/coreboot/+/47658
Reviewed-by: Duncan Laurie <dlaurie@chromium.org>
Reviewed-by: Tim Wawrzynczak <twawrzynczak@chromium.org>
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
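As a sketch of how the new dynamic-init helpers introduced here could be used, consider two hypothetical decode windows; the window sizes and the win0_rdev/win1_rdev access devices are invented purely for illustration:

  #include <commonlib/region.h>

  static struct xlate_window windows[2];
  static struct xlate_region_device xdev;

  static void setup_translated_boot_media(const struct region_device *win0_rdev,
                                          const struct region_device *win1_rdev)
  {
          /* Window 0 covers the first 16MiB of the translated (parent) space;
           * accesses inside it are forwarded to win0_rdev at window-relative offsets. */
          xlate_window_init(&windows[0], win0_rdev, 0, 16 * MiB);

          /* Window 1 covers the next 16MiB and is backed by a separate access device. */
          xlate_window_init(&windows[1], win1_rdev, 16 * MiB, 16 * MiB);

          /* The parent size spans both windows: 32MiB of translated address space. */
          xlate_region_device_ro_init(&xdev, ARRAY_SIZE(windows), windows, 32 * MiB);
  }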
Diffstat (limited to 'src/commonlib')
-rw-r--r--  src/commonlib/include/commonlib/region.h  |  63
-rw-r--r--  src/commonlib/region.c                     |  92
2 files changed, 99 insertions(+), 56 deletions(-)
diff --git a/src/commonlib/include/commonlib/region.h b/src/commonlib/include/commonlib/region.h
index b9a984f171..4d095b731d 100644
--- a/src/commonlib/include/commonlib/region.h
+++ b/src/commonlib/include/commonlib/region.h
@@ -6,6 +6,7 @@
#include <sys/types.h>
#include <stddef.h>
#include <stdbool.h>
+#include <commonlib/bsd/helpers.h>
#include <commonlib/mem_pool.h>
/*
@@ -210,14 +211,33 @@ void mmap_helper_device_init(struct mmap_helper_region_device *mdev,
void *mmap_helper_rdev_mmap(const struct region_device *, size_t, size_t);
int mmap_helper_rdev_munmap(const struct region_device *, void *);
-/* A translated region device provides the ability to publish a region device
- * in one address space and use an access mechanism within another address
- * space. The sub region is the window within the 1st address space and
- * the request is modified prior to accessing the second address space
- * provided by access_dev. */
-struct xlate_region_device {
+/*
+ * A translated region device provides the ability to publish a region device in one address
+ * space and use an access mechanism within another address space. The sub region is the window
+ * within the 1st address space and the request is modified prior to accessing the second
+ * address space provided by access_dev.
+ *
+ * Each xlate_region_device can support multiple translation windows described using
+ * xlate_window structure. The windows need not be contiguous in either address space. However,
+ * this poses restrictions on the operations being performed i.e. callers cannot perform
+ * operations across multiple windows of a translated region device. It is possible to support
+ * readat/writeat/eraseat by translating them into multiple calls, one to the access device in each
+ * window. However, mmap support is tricky because the caller expects that the memory mapped
+ * region is contiguous in both address spaces. Thus, to keep the semantics consistent for all
+ * region ops, xlate_region_device does not support any operations across the window
+ * boundary.
+ *
+ * Note: The platform is expected to ensure that the fmap description does not place any
+ * section (that will be operated using the translated region device) across multiple windows.
+ */
+struct xlate_window {
const struct region_device *access_dev;
struct region sub_region;
+};
+
+struct xlate_region_device {
+ size_t window_count;
+ const struct xlate_window *window_arr;
struct region_device rdev;
};
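To make the single-window restriction concrete, consider the hypothetical layout sketched above: two 16MiB windows at parent offsets 0 and 16MiB. A request contained entirely within one window is translated and forwarded to that window's access device; a request that straddles the boundary matches no window, so the operation fails:

  static void demo_single_window_restriction(void)
  {
          char buf[64];

          /* Entirely inside window 0: forwarded to that window's access device
           * at offset 0x100. */
          ssize_t ok = rdev_readat(&xdev.rdev, buf, 0x100, sizeof(buf));

          /* Straddles the 16MiB window boundary: no single window contains the
           * whole request, so the translated device returns -1. */
          ssize_t bad = rdev_readat(&xdev.rdev, buf, 16 * MiB - 32, sizeof(buf));

          (void)ok;
          (void)bad;
  }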
@@ -225,38 +245,31 @@ extern const struct region_device_ops xlate_rdev_ro_ops;
extern const struct region_device_ops xlate_rdev_rw_ops;
-#define XLATE_REGION_DEV_INIT(access_dev_, sub_offset_, sub_size_, \
- parent_sz_, ops_) \
+#define XLATE_REGION_DEV_INIT(window_arr_, parent_sz_, ops_) \
{ \
- .access_dev = access_dev_, \
- .sub_region = { \
- .offset = (sub_offset_), \
- .size = (sub_size_), \
- }, \
+ .window_count = ARRAY_SIZE(window_arr_), \
+ .window_arr = window_arr_, \
.rdev = REGION_DEV_INIT((ops_), 0, (parent_sz_)), \
}
-#define XLATE_REGION_DEV_RO_INIT(access_dev_, sub_offset_, sub_size_, \
- parent_sz_) \
- XLATE_REGION_DEV_INIT(access_dev_, sub_offset_, \
- sub_size_, parent_sz_, &xlate_rdev_ro_ops), \
+#define XLATE_REGION_DEV_RO_INIT(window_arr_, parent_sz_) \
+ XLATE_REGION_DEV_INIT(window_arr_, parent_sz_, &xlate_rdev_ro_ops)
-#define XLATE_REGION_DEV_RW_INIT(access_dev_, sub_offset_, sub_size_, \
- parent_sz_) \
- XLATE_REGION_DEV_INIT(access_dev_, sub_offset_, \
- sub_size_, parent_sz_, &xlate_rdev_rw_ops), \
+#define XLATE_REGION_DEV_RW_INIT(window_count_, window_arr_, parent_sz_) \
+ XLATE_REGION_DEV_INIT(window_arr_, parent_sz_, &xlate_rdev_rw_ops)
/* Helper to dynamically initialize xlate region device. */
void xlate_region_device_ro_init(struct xlate_region_device *xdev,
- const struct region_device *access_dev,
- size_t sub_offset, size_t sub_size,
+ size_t window_count, const struct xlate_window *window_arr,
size_t parent_size);
void xlate_region_device_rw_init(struct xlate_region_device *xdev,
- const struct region_device *access_dev,
- size_t sub_offset, size_t sub_size,
+ size_t window_count, const struct xlate_window *window_arr,
size_t parent_size);
+void xlate_window_init(struct xlate_window *window, const struct region_device *access_dev,
+ size_t sub_region_offset, size_t sub_region_size);
+
/* This type can be used for incoherent access where the read and write
* operations are backed by separate drivers. An example is x86 systems
* with memory mapped media for reading but use a spi flash driver for
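The reworked macros can also be used for static initialization. A minimal sketch, assuming decode_win0_rdev/decode_win1_rdev are access devices defined elsewhere (the names and sizes are hypothetical) and the windows are declared as a real array so that ARRAY_SIZE() inside XLATE_REGION_DEV_INIT() resolves correctly:

  extern const struct region_device decode_win0_rdev;
  extern const struct region_device decode_win1_rdev;

  static const struct xlate_window boot_windows[] = {
          {
                  .access_dev = &decode_win0_rdev,
                  .sub_region = { .offset = 0, .size = 16 * MiB },
          },
          {
                  .access_dev = &decode_win1_rdev,
                  .sub_region = { .offset = 16 * MiB, .size = 16 * MiB },
          },
  };

  /* window_count and window_arr are filled in by the macro via ARRAY_SIZE(). */
  static const struct xlate_region_device boot_xdev =
          XLATE_REGION_DEV_RO_INIT(boot_windows, 32 * MiB);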
diff --git a/src/commonlib/region.c b/src/commonlib/region.c
index 467e8ff629..a10702a6c5 100644
--- a/src/commonlib/region.c
+++ b/src/commonlib/region.c
@@ -188,33 +188,37 @@ void region_device_init(struct region_device *rdev,
static void xlate_region_device_init(struct xlate_region_device *xdev,
const struct region_device_ops *ops,
- const struct region_device *access_dev,
- size_t sub_offset, size_t sub_size,
+ size_t window_count, const struct xlate_window *window_arr,
size_t parent_size)
{
memset(xdev, 0, sizeof(*xdev));
- xdev->access_dev = access_dev;
- xdev->sub_region.offset = sub_offset;
- xdev->sub_region.size = sub_size;
+ xdev->window_count = window_count;
+ xdev->window_arr = window_arr;
region_device_init(&xdev->rdev, ops, 0, parent_size);
}
void xlate_region_device_ro_init(struct xlate_region_device *xdev,
- const struct region_device *access_dev,
- size_t sub_offset, size_t sub_size,
+ size_t window_count, const struct xlate_window *window_arr,
size_t parent_size)
{
- xlate_region_device_init(xdev, &xlate_rdev_ro_ops, access_dev,
- sub_offset, sub_size, parent_size);
+ xlate_region_device_init(xdev, &xlate_rdev_ro_ops, window_count, window_arr,
+ parent_size);
}
void xlate_region_device_rw_init(struct xlate_region_device *xdev,
- const struct region_device *access_dev,
- size_t sub_offset, size_t sub_size,
+ size_t window_count, const struct xlate_window *window_arr,
size_t parent_size)
{
- xlate_region_device_init(xdev, &xlate_rdev_rw_ops, access_dev,
- sub_offset, sub_size, parent_size);
+ xlate_region_device_init(xdev, &xlate_rdev_rw_ops, window_count, window_arr,
+ parent_size);
+}
+
+void xlate_window_init(struct xlate_window *window, const struct region_device *access_dev,
+ size_t sub_region_offset, size_t sub_region_size)
+{
+ window->access_dev = access_dev;
+ window->sub_region.offset = sub_region_offset;
+ window->sub_region.size = sub_region_size;
}
static void *mdev_mmap(const struct region_device *rd, size_t offset,
@@ -321,6 +325,21 @@ int mmap_helper_rdev_munmap(const struct region_device *rd, void *mapping)
return 0;
}
+static const struct xlate_window *xlate_find_window(const struct xlate_region_device *xldev,
+ const struct region *req)
+{
+ size_t i;
+ const struct xlate_window *xlwindow;
+
+ for (i = 0; i < xldev->window_count; i++) {
+ xlwindow = &xldev->window_arr[i];
+ if (region_is_subregion(&xlwindow->sub_region, req))
+ return xlwindow;
+ }
+
+ return NULL;
+}
+
static void *xlate_mmap(const struct region_device *rd, size_t offset,
size_t size)
{
@@ -329,24 +348,29 @@ static void *xlate_mmap(const struct region_device *rd, size_t offset,
.offset = offset,
.size = size,
};
+ const struct xlate_window *xlwindow;
xldev = container_of(rd, __typeof__(*xldev), rdev);
- if (!region_is_subregion(&xldev->sub_region, &req))
+ xlwindow = xlate_find_window(xldev, &req);
+ if (!xlwindow)
return NULL;
- offset -= region_offset(&xldev->sub_region);
+ offset -= region_offset(&xlwindow->sub_region);
- return rdev_mmap(xldev->access_dev, offset, size);
+ return rdev_mmap(xlwindow->access_dev, offset, size);
}
-static int xlate_munmap(const struct region_device *rd, void *mapping)
+static int xlate_munmap(const struct region_device *rd __unused, void *mapping __unused)
{
- const struct xlate_region_device *xldev;
-
- xldev = container_of(rd, __typeof__(*xldev), rdev);
-
- return rdev_munmap(xldev->access_dev, mapping);
+ /*
+ * xlate_region_device does not keep track of the access device that was used to service
+ * a mmap request. So, munmap does not do anything. If munmap functionality is required,
+ * then xlate_region_device will have to be updated to accept some pre-allocated space
+ * from the caller to keep track of the mapping requests. Since xlate_region_device is only
+ * used for memory mapped boot media on the backend right now, skipping munmap is fine.
+ */
+ return 0;
}
static ssize_t xlate_readat(const struct region_device *rd, void *b,
@@ -356,16 +380,18 @@ static ssize_t xlate_readat(const struct region_device *rd, void *b,
.offset = offset,
.size = size,
};
+ const struct xlate_window *xlwindow;
const struct xlate_region_device *xldev;
xldev = container_of(rd, __typeof__(*xldev), rdev);
- if (!region_is_subregion(&xldev->sub_region, &req))
+ xlwindow = xlate_find_window(xldev, &req);
+ if (!xlwindow)
return -1;
- offset -= region_offset(&xldev->sub_region);
+ offset -= region_offset(&xlwindow->sub_region);
- return rdev_readat(xldev->access_dev, b, offset, size);
+ return rdev_readat(xlwindow->access_dev, b, offset, size);
}
static ssize_t xlate_writeat(const struct region_device *rd, const void *b,
@@ -375,16 +401,18 @@ static ssize_t xlate_writeat(const struct region_device *rd, const void *b,
.offset = offset,
.size = size,
};
+ const struct xlate_window *xlwindow;
const struct xlate_region_device *xldev;
xldev = container_of(rd, __typeof__(*xldev), rdev);
- if (!region_is_subregion(&xldev->sub_region, &req))
+ xlwindow = xlate_find_window(xldev, &req);
+ if (!xlwindow)
return -1;
- offset -= region_offset(&xldev->sub_region);
+ offset -= region_offset(&xlwindow->sub_region);
- return rdev_writeat(xldev->access_dev, b, offset, size);
+ return rdev_writeat(xlwindow->access_dev, b, offset, size);
}
static ssize_t xlate_eraseat(const struct region_device *rd,
@@ -394,16 +422,18 @@ static ssize_t xlate_eraseat(const struct region_device *rd,
.offset = offset,
.size = size,
};
+ const struct xlate_window *xlwindow;
const struct xlate_region_device *xldev;
xldev = container_of(rd, __typeof__(*xldev), rdev);
- if (!region_is_subregion(&xldev->sub_region, &req))
+ xlwindow = xlate_find_window(xldev, &req);
+ if (!xlwindow)
return -1;
- offset -= region_offset(&xldev->sub_region);
+ offset -= region_offset(&xlwindow->sub_region);
- return rdev_eraseat(xldev->access_dev, offset, size);
+ return rdev_eraseat(xlwindow->access_dev, offset, size);
}
const struct region_device_ops xlate_rdev_ro_ops = {