author     Ard Biesheuvel <ard.biesheuvel@linaro.org>  2015-03-06 02:54:50 +0000
committer  lgao4 <lgao4@Edk2>                          2015-03-06 02:54:50 +0000
commit     f2c7daf675d246261bd5e034b78e200017057df6 (patch)
tree       ebabaf71cee521059603085d0c0c58f2f5650baf
parent     7970100ccbd48e6f931b078cb8e615eabee26141 (diff)
download   edk2-platforms-f2c7daf675d246261bd5e034b78e200017057df6.tar.xz
MdeModulePkg: improve scalability of memory pools
The existing linear mapping between allocation size and pool index does not
scale when moving to a 64 KB granularity or beyond. With a granularity of
64 KB, 2048 (!) bins will be created for each memory type, each differing
by 32 bytes in size from the next one.

Instead, introduce an exponential scheme where each bin size is the sum of
the two previous ones.

Contributed-under: TianoCore Contribution Agreement 1.0
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Reviewed-by: Liming Gao <liming.gao@intel.com>

git-svn-id: https://svn.code.sf.net/p/edk2/code/trunk/edk2@17012 6f19259b-4bc3-4df7-8a09-765794883524
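For context, a minimal host-side sketch of the bin scheme described above (not
part of the patch or of the DXE core): bin sizes grow as the sum of the two
previous entries, and a request is served from the smallest bin that fits. The
table values and the lookup loop mirror the diff below; the standard-C wrapper,
the printf driver, and the name PoolSizeTable are illustrative assumptions only.

/* Standalone sketch of the exponential pool-bin scheme (illustrative only). */
#include <stdio.h>
#include <stddef.h>

/* Same values as mPoolSizeTable in the patch: each entry is the sum of the
   two previous ones (64 + 128 = 192, 128 + 192 = 320, ...). */
static const unsigned short PoolSizeTable[] = {
  64, 128, 192, 320, 512, 832, 1344, 2176, 3520, 5696, 9216, 14912, 24128
};

#define MAX_POOL_LIST (sizeof (PoolSizeTable) / sizeof (PoolSizeTable[0]))

/* Smallest bin that can hold Size bytes, or MAX_POOL_LIST if the request is
   larger than the biggest bin (the patch then falls back to page allocation).
   Mirrors GetPoolIndexFromSize () in the diff. */
static size_t
GetPoolIndexFromSize (size_t Size)
{
  size_t Index;

  for (Index = 0; Index < MAX_POOL_LIST; Index++) {
    if (PoolSizeTable[Index] >= Size) {
      return Index;
    }
  }
  return MAX_POOL_LIST;
}

int
main (void)
{
  size_t Sizes[] = { 1, 64, 65, 200, 5000, 30000 };
  size_t i;

  for (i = 0; i < sizeof (Sizes) / sizeof (Sizes[0]); i++) {
    size_t Index = GetPoolIndexFromSize (Sizes[i]);

    if (Index < MAX_POOL_LIST) {
      printf ("size %5zu -> bin %2zu (%u bytes)\n",
              Sizes[i], Index, (unsigned) PoolSizeTable[Index]);
    } else {
      printf ("size %5zu -> no bin, page allocation path\n", Sizes[i]);
    }
  }
  return 0;
}

Because consecutive bin sizes grow Fibonacci-style, the ratio between adjacent
bins tends toward the golden ratio (about 1.62), so the worst-case rounding
overhead per allocation stays well below the factor of 2 a strict power-of-2
sequence would allow.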
-rw-r--r--   MdeModulePkg/Core/Dxe/Mem/Pool.c   36
1 file changed, 28 insertions(+), 8 deletions(-)
diff --git a/MdeModulePkg/Core/Dxe/Mem/Pool.c b/MdeModulePkg/Core/Dxe/Mem/Pool.c
index f99293da5c..c7998e80cc 100644
--- a/MdeModulePkg/Core/Dxe/Mem/Pool.c
+++ b/MdeModulePkg/Core/Dxe/Mem/Pool.c
@@ -41,19 +41,24 @@ typedef struct {
UINTN Size;
} POOL_TAIL;
-
-#define POOL_SHIFT 7
-
#define POOL_OVERHEAD (SIZE_OF_POOL_HEAD + sizeof(POOL_TAIL))
#define HEAD_TO_TAIL(a) \
((POOL_TAIL *) (((CHAR8 *) (a)) + (a)->Size - sizeof(POOL_TAIL)));
+//
+// Each element is the sum of the 2 previous ones: this allows us to migrate
+// blocks between bins by splitting them up, while not wasting too much memory
+// as we would in a strict power-of-2 sequence
+//
+STATIC CONST UINT16 mPoolSizeTable[] = {
+ 64, 128, 192, 320, 512, 832, 1344, 2176, 3520, 5696, 9216, 14912, 24128
+};
-#define SIZE_TO_LIST(a) ((a) >> POOL_SHIFT)
-#define LIST_TO_SIZE(a) ((a+1) << POOL_SHIFT)
+#define SIZE_TO_LIST(a) (GetPoolIndexFromSize (a))
+#define LIST_TO_SIZE(a) (mPoolSizeTable [a])
-#define MAX_POOL_LIST SIZE_TO_LIST(DEFAULT_PAGE_ALLOCATION)
+#define MAX_POOL_LIST (sizeof (mPoolSizeTable) / sizeof (mPoolSizeTable[0]))
#define MAX_POOL_SIZE (MAX_ADDRESS - POOL_OVERHEAD)
@@ -80,6 +85,21 @@ POOL mPoolHead[EfiMaxMemoryType];
//
LIST_ENTRY mPoolHeadList = INITIALIZE_LIST_HEAD_VARIABLE (mPoolHeadList);
+STATIC
+UINTN
+GetPoolIndexFromSize (
+ UINTN Size
+ )
+{
+ UINTN Index;
+
+ for (Index = 0; Index < MAX_POOL_LIST; Index++) {
+ if (mPoolSizeTable [Index] >= Size) {
+ return Index;
+ }
+ }
+ return MAX_POOL_LIST;
+}
/**
Called to initialize the pool.
@@ -311,7 +331,7 @@ CoreAllocatePoolI (
// If allocation is over max size, just allocate pages for the request
// (slow)
//
- if (Index >= MAX_POOL_LIST) {
+ if (Index >= SIZE_TO_LIST (Granularity)) {
NoPages = EFI_SIZE_TO_PAGES(Size) + EFI_SIZE_TO_PAGES (Granularity) - 1;
NoPages &= ~(UINTN)(EFI_SIZE_TO_PAGES (Granularity) - 1);
Head = CoreAllocatePoolPages (PoolType, NoPages, Granularity);
@@ -539,7 +559,7 @@ CoreFreePoolI (
//
// If it's not on the list, it must be pool pages
//
- if (Index >= MAX_POOL_LIST) {
+ if (Index >= SIZE_TO_LIST (Granularity)) {
//
// Return the memory pages back to free memory