author     Aaron Durbin <adurbin@chromium.org>             2013-02-27 22:50:12 -0600
committer  Stefan Reinauer <stefan.reinauer@coreboot.org>  2013-03-22 00:13:42 +0100
commit     dd4a6d2357decf0cf505370234b378985c68f97f (patch)
tree       6c656e9d13fbc39a9a88766d9b3f533a98a9f89b
parent     24d1d4b47274eb82893e6726472a991a36fce0aa (diff)
download   coreboot-dd4a6d2357decf0cf505370234b378985c68f97f.tar.xz
coreboot: dynamic cbmem requirement
Dynamic cbmem is now a requirement for relocatable ramstage.
This patch removes the reserve_* fields from the romstage_handoff
structure and tracks those regions with the dynamic cbmem library
instead.
The haswell code is not moved over in this commit, but that should be
safe because DYNAMIC_CBMEM is a hard requirement when using a
relocatable ramstage.
Change-Id: I59ab4552c3ae8c2c3982df458cd81a4a9b712cc2
Signed-off-by: Aaron Durbin <adurbin@chromium.org>
Reviewed-on: http://review.coreboot.org/2849
Tested-by: build bot (Jenkins)
Reviewed-by: Stefan Reinauer <stefan.reinauer@coreboot.org>
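For reviewers unfamiliar with the dynamic cbmem interface this change builds on, the sketch below shows how a region is now obtained from cbmem instead of being advertised through reserve_base/reserve_size in romstage_handoff. It only uses cbmem_entry_add(), cbmem_entry_start() and CBMEM_ID_RAMSTAGE as they appear in this patch; the helper name and the stage_size parameter are illustrative, not part of the change.

#include <stddef.h>
#include <cbmem.h>

/* Minimal sketch: carve a ramstage-sized region out of cbmem.  This replaces
 * the old reserve_base/reserve_size bookkeeping in romstage_handoff. */
static void *reserve_ramstage_region(size_t stage_size)
{
	const struct cbmem_entry *entry;

	/* cbmem_entry_add() does a find() before add(), so calling it again
	 * with the same id returns the already reserved region. */
	entry = cbmem_entry_add(CBMEM_ID_RAMSTAGE, stage_size);
	if (entry == NULL)
		return NULL;

	/* Base and size now live in the cbmem allocator, not in the
	 * romstage_handoff structure. */
	return cbmem_entry_start(entry);
}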
-rw-r--r--   src/Kconfig                                    9
-rw-r--r--   src/arch/x86/boot/coreboot_table.c            20
-rw-r--r--   src/include/cbfs.h                            21
-rw-r--r--   src/include/cbmem.h                            5
-rw-r--r--   src/include/rmodule.h                         16
-rw-r--r--   src/include/romstage_handoff.h                 3
-rw-r--r--   src/lib/cbfs.c                                91
-rw-r--r--   src/lib/hardwaremain.c                         9
-rw-r--r--   src/lib/rmodule.c                             38
-rw-r--r--   src/northbridge/intel/haswell/northbridge.c   15
10 files changed, 94 insertions(+), 133 deletions(-)
diff --git a/src/Kconfig b/src/Kconfig
index 0297970173..18b5bad823 100644
--- a/src/Kconfig
+++ b/src/Kconfig
@@ -315,14 +315,7 @@ config HAVE_INIT_TIMER
 
 config HIGH_SCRATCH_MEMORY_SIZE
 	hex
-	default 0x5000 if RELOCATABLE_RAMSTAGE
 	default 0x0
-	help
-	  The amount of extra memory to reserve from the OS. If
-	  RELOCATABLE_RAMSTAGE is enabled a size of 20KiB is reserved. This is
-	  for the use of a stack in romstage after memory has been initialized.
-	  The stack size required in romstage can be large when needing to
-	  decompress the ramstage.
 
 config USE_OPTION_TABLE
 	bool
@@ -390,7 +383,7 @@ config RELOCATABLE_MODULES
 	  loaded anywhere and all the relocations are handled automatically.
 
 config RELOCATABLE_RAMSTAGE
-	depends on RELOCATABLE_MODULES
+	depends on (RELOCATABLE_MODULES && DYNAMIC_CBMEM)
 	bool "Build the ramstage to be relocatable in 32-bit address space."
 	default n
 	help
diff --git a/src/arch/x86/boot/coreboot_table.c b/src/arch/x86/boot/coreboot_table.c
index 617fab29cc..530849f0b0 100644
--- a/src/arch/x86/boot/coreboot_table.c
+++ b/src/arch/x86/boot/coreboot_table.c
@@ -31,7 +31,6 @@
 #include <stdlib.h>
 #include <cbfs.h>
 #include <cbmem.h>
-#include <romstage_handoff.h>
 #if CONFIG_USE_OPTION_TABLE
 #include <option_table.h>
 #endif
@@ -596,23 +595,6 @@ static void add_lb_reserved(struct lb_memory *mem)
 		lb_add_rsvd_range, mem);
 }
 
-static void add_romstage_resources(struct lb_memory *mem)
-{
-	struct romstage_handoff *handoff;
-
-	/* Reserve memory requested to be reserved from romstage. */
-	handoff = cbmem_find(CBMEM_ID_ROMSTAGE_INFO);
-
-	if (handoff == NULL)
-		return;
-
-	if (handoff->reserve_size == 0)
-		return;
-
-	lb_add_memory_range(mem, LB_MEM_RESERVED, handoff->reserve_base,
-	                    handoff->reserve_size);
-}
-
 unsigned long write_coreboot_table(
 	unsigned long low_table_start, unsigned long low_table_end,
 	unsigned long rom_table_start, unsigned long rom_table_end)
@@ -686,8 +668,6 @@ unsigned long write_coreboot_table(
 	/* Add reserved regions */
 	add_lb_reserved(mem);
 
-	add_romstage_resources(mem);
-
 	lb_dump_memory_ranges(mem);
 
 	/* Note:
diff --git a/src/include/cbfs.h b/src/include/cbfs.h
index 811df88884..ac249aabf5 100644
--- a/src/include/cbfs.h
+++ b/src/include/cbfs.h
@@ -87,21 +87,24 @@ int init_default_cbfs_media(struct cbfs_media *media);
 /* The cache_loaded_ramstage() and load_cached_ramstage() functions are defined
  * to be weak so that board and chipset code may override them. Their job is to
  * cache and load the ramstage for quick S3 resume. By default a copy of the
- * relocated ramstage is saved just below the running ramstage region. These
+ * relocated ramstage is saved using the cbmem infrastructure. These
  * functions are only valid during romstage. */
 
 struct romstage_handoff;
+struct cbmem_entry;
 
-/* The implementer of cache_loaded_ramstage() needs to ensure that the
- * reserve_* fields in in romstage_handoff reflect the memory footprint of the
- * ramstage (including cached region). Note that the handoff variable can be
- * NULL. */
+/* The implementer of cache_loaded_ramstage() may use the romstage_handoff
+ * structure to store information, but note that the handoff variable can be
+ * NULL. The ramstage cbmem_entry represents the region occupied by the loaded
+ * ramstage. */
 void __attribute__((weak))
-cache_loaded_ramstage(struct romstage_handoff *handoff, void *ramstage_base,
-                      uint32_t ramstage_size, void *entry_point);
-/* Return NULL on error or entry point on success. */
+cache_loaded_ramstage(struct romstage_handoff *handoff,
+                      const struct cbmem_entry *ramstage, void *entry_point);
+/* Return NULL on error or entry point on success. The ramstage cbmem_entry is
+ * the region where to load the cached contents to. */
 void * __attribute__((weak))
-load_cached_ramstage(struct romstage_handoff *handoff);
+load_cached_ramstage(struct romstage_handoff *handoff,
+                     const struct cbmem_entry *ramstage);
 
 #endif /* CONFIG_RELOCATABLE_RAMSTAGE */
 
 #endif
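Because both hooks are declared weak, chipset or board code can supply strong definitions to change the caching policy. The sketch below is a hypothetical override (not part of this patch) that opts out of S3 caching entirely; returning NULL from load_cached_ramstage() makes cbfs_load_stage() fall back to a regular CBFS load, as the resume path in src/lib/cbfs.c further down shows.

#include <cbfs.h>
#include <cbmem.h>

/* Hypothetical strong definitions overriding the weak hooks declared above.
 * A board with no room for an S3 copy can simply decline to cache. */
void cache_loaded_ramstage(struct romstage_handoff *handoff,
                           const struct cbmem_entry *ramstage, void *entry_point)
{
	/* Intentionally keep no cached copy. */
}

void *load_cached_ramstage(struct romstage_handoff *handoff,
                           const struct cbmem_entry *ramstage)
{
	/* NULL tells the caller to reload the ramstage from CBFS. */
	return NULL;
}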
diff --git a/src/include/cbmem.h b/src/include/cbmem.h
index 41f5971c80..b3d9f869c8 100644
--- a/src/include/cbmem.h
+++ b/src/include/cbmem.h
@@ -131,11 +131,6 @@ void cbmem_add_lb_mem(struct lb_memory *mem);
 
 #ifndef __PRE_RAM__
 extern uint64_t high_tables_base, high_tables_size;
 
-#if CONFIG_EARLY_CBMEM_INIT
-/* Return 0 on success, < 0 on error. */
-int __attribute__((weak)) cbmem_get_table_location(uint64_t *tables_base,
-                                                   uint64_t *tables_size);
-#endif
 void set_cbmem_toc(struct cbmem_entry *);
 #endif
diff --git a/src/include/rmodule.h b/src/include/rmodule.h
index 2d8fc0fdc8..631c63d219 100644
--- a/src/include/rmodule.h
+++ b/src/include/rmodule.h
@@ -41,13 +41,15 @@ int rmodule_entry_offset(const struct rmodule *m);
 int rmodule_memory_size(const struct rmodule *m);
 int rmodule_load(void *loc, struct rmodule *m);
 int rmodule_load_alignment(const struct rmodule *m);
-/* Returns the an aligned pointer that reflects a region used below addr
- * based on the rmodule_size. i.e. the returned pointer up to addr is memory
- * that may be utilized by the rmodule. program_start and rmodule_start
- * are pointers updated to reflect where the rmodule program starts and where
- * the rmodule (including header) should be placed respectively. */
-void *rmodule_find_region_below(void *addr, size_t rmodule_size,
-                                void **program_start, void **rmodule_start);
+/* rmodule_calc_region() calculates the region size, offset to place an
+ * rmodule in memory, and load address offset based off of a region allocator
+ * with an alignment of region_alignment. This function helps place an rmodule
+ * in the same location in ram it will run from. The offset to place the
+ * rmodule into the region allocated of size region_size is returned. The
+ * load_offset is the address to load and relocate the rmodule.
+ * region_alignment must be a power of 2. */
+int rmodule_calc_region(unsigned int region_alignment, size_t rmodule_size,
+                        size_t *region_size, int *load_offset);
 
 #define FIELD_ENTRY(x_) ((u32)&x_)
 #define RMODULE_HEADER(entry_, type_) \
diff --git a/src/include/romstage_handoff.h b/src/include/romstage_handoff.h
index 4150e8e1cd..3152fb2e9d 100644
--- a/src/include/romstage_handoff.h
+++ b/src/include/romstage_handoff.h
@@ -28,9 +28,6 @@
  * using the CBMEM_ID_ROMSTAGE_INFO id it needs to ensure it doesn't clobber
  * fields it doesn't own. */
 struct romstage_handoff {
-	/* This indicates to the ramstage to reserve a chunk of memory. */
-	uint32_t reserve_base;
-	uint32_t reserve_size;
 	/* Inidicate if the current boot is an S3 resume. If
 	 * CONFIG_RELOCTABLE_RAMSTAGE is enabled the chipset code is
 	 * responsible for initializing this variable. Otherwise, ramstage
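As a caller-side illustration of the rmodule_calc_region() contract documented above: the real user is load_stage_from_cbfs() in src/lib/cbfs.c further down, but a condensed sketch, assuming the dynamic cbmem allocator (DYN_CBMEM_ALIGN_SIZE, cbmem_entry_add()) and an illustrative helper name, looks like this:

#include <cbmem.h>
#include <rmodule.h>

/* Sketch only: reserve a region sized by rmodule_calc_region() and compute
 * where the packed rmodule file and the relocated program go inside it. */
static void *place_rmodule(size_t file_size)
{
	size_t region_size;
	int file_offset;   /* where the packed file (header first) is placed */
	int load_offset;   /* where the program is relocated to run */
	const struct cbmem_entry *entry;
	char *region;

	file_offset = rmodule_calc_region(DYN_CBMEM_ALIGN_SIZE, file_size,
	                                  &region_size, &load_offset);

	entry = cbmem_entry_add(CBMEM_ID_RAMSTAGE, region_size);
	if (entry == NULL)
		return NULL;

	region = cbmem_entry_start(entry);
	/* Decompress the file at &region[file_offset], rmodule_parse() it,
	 * then rmodule_load() at &region[load_offset]. */
	return &region[load_offset];
}

The two offsets differ by exactly sizeof(struct rmodule_header), so the program body that follows the header in the packed file lands on the allocator's alignment boundary, which is the whole point of the new helper.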
diff --git a/src/lib/cbfs.c b/src/lib/cbfs.c
index 7bd43097f8..45ae7c7683 100644
--- a/src/lib/cbfs.c
+++ b/src/lib/cbfs.c
@@ -120,41 +120,48 @@ void *cbfs_load_optionrom(struct cbfs_media *media, uint16_t vendor,
 #include <rmodule.h>
 #include <romstage_handoff.h>
 /* When CONFIG_RELOCATABLE_RAMSTAGE is enabled and this file is being compiled
- * for the romstage, the rmodule loader is used. The ramstage is placed just
- * below the cbmem location. */
-
+ * for the romstage, the rmodule loader is used. */
 void __attribute__((weak))
-cache_loaded_ramstage(struct romstage_handoff *handoff, void *ramstage_base,
-                      uint32_t ramstage_size, void *entry_point)
+cache_loaded_ramstage(struct romstage_handoff *handoff,
+                      const struct cbmem_entry *ramstage, void *entry_point)
 {
+	uint32_t ramstage_size;
+	const struct cbmem_entry *entry;
+
 	if (handoff == NULL)
 		return;
 
-	/* Cache the loaded ramstage just below the to-be-run ramstage. Then
-	 * save the base, size, and entry point in the handoff area. */
-	handoff->reserve_base = (uint32_t)ramstage_base - ramstage_size;
-	handoff->reserve_size = ramstage_size;
-	handoff->ramstage_entry_point = (uint32_t)entry_point;
+	ramstage_size = cbmem_entry_size(ramstage);
+	/* cbmem_entry_add() does a find() before add(). */
+	entry = cbmem_entry_add(CBMEM_ID_RAMSTAGE_CACHE, ramstage_size);
 
-	memcpy((void *)handoff->reserve_base, ramstage_base, ramstage_size);
+	if (entry == NULL)
+		return;
+
+	/* Keep track of the entry point in the handoff structure. */
+	handoff->ramstage_entry_point = (uint32_t)entry_point;
 
-	/* Update the reserve region by 2x in order to store the cached copy. */
-	handoff->reserve_size += handoff->reserve_size;
+	memcpy(cbmem_entry_start(entry), cbmem_entry_start(ramstage),
+	       ramstage_size);
 }
 
 void * __attribute__((weak))
-load_cached_ramstage(struct romstage_handoff *handoff)
+load_cached_ramstage(struct romstage_handoff *handoff,
+                     const struct cbmem_entry *ramstage)
 {
-	uint32_t ramstage_size;
+	const struct cbmem_entry *entry_cache;
 
 	if (handoff == NULL)
 		return NULL;
 
-	/* Load the cached ramstage copy into the to-be-run region. It is just
-	 * above the cached copy. */
-	ramstage_size = handoff->reserve_size / 2;
-	memcpy((void *)(handoff->reserve_base + ramstage_size),
-	       (void *)handoff->reserve_base, ramstage_size);
+	entry_cache = cbmem_entry_find(CBMEM_ID_RAMSTAGE_CACHE);
+
+	if (entry_cache == NULL)
+		return NULL;
+
+	/* Load the cached ramstage copy into the to-be-run region. */
+	memcpy(cbmem_entry_start(ramstage), cbmem_entry_start(entry_cache),
+	       cbmem_entry_size(ramstage));
 
 	return (void *)handoff->ramstage_entry_point;
 }
@@ -164,12 +171,12 @@ static void *load_stage_from_cbfs(struct cbfs_media *media, const char *name,
 {
 	struct cbfs_stage *stage;
 	struct rmodule ramstage;
-	char *cbmem_base;
-	char *ramstage_base;
-	void *decompression_loc;
-	void *ramstage_loc;
 	void *entry_point;
-	uint32_t ramstage_size;
+	size_t region_size;
+	char *ramstage_region;
+	int rmodule_offset;
+	int load_offset;
+	const struct cbmem_entry *ramstage_entry;
 
 	stage = (struct cbfs_stage *)
 		cbfs_get_file_content(media, name, CBFS_TYPE_STAGE);
@@ -177,34 +184,34 @@ static void *load_stage_from_cbfs(struct cbfs_media *media, const char *name,
 	if (stage == NULL)
 		return (void *) -1;
 
-	cbmem_base = (void *)get_cbmem_toc();
-	if (cbmem_base == NULL)
+	rmodule_offset =
+		rmodule_calc_region(DYN_CBMEM_ALIGN_SIZE,
+		                    stage->memlen, &region_size, &load_offset);
+
+	ramstage_entry = cbmem_entry_add(CBMEM_ID_RAMSTAGE, region_size);
+
+	if (ramstage_entry == NULL)
 		return (void *) -1;
 
-	ramstage_base =
-		rmodule_find_region_below(cbmem_base, stage->memlen,
-		                          &ramstage_loc,
-		                          &decompression_loc);
+	ramstage_region = cbmem_entry_start(ramstage_entry);
 
 	LOG("Decompressing stage %s @ 0x%p (%d bytes)\n",
-	    name, decompression_loc, stage->memlen);
+	    name, &ramstage_region[rmodule_offset], stage->memlen);
 
 	if (cbfs_decompress(stage->compression, &stage[1],
-	                    decompression_loc, stage->len))
+	                    &ramstage_region[rmodule_offset], stage->len))
 		return (void *) -1;
 
-	if (rmodule_parse(decompression_loc, &ramstage))
+	if (rmodule_parse(&ramstage_region[rmodule_offset], &ramstage))
 		return (void *) -1;
 
 	/* The ramstage is responsible for clearing its own bss. */
-	if (rmodule_load(ramstage_loc, &ramstage))
+	if (rmodule_load(&ramstage_region[load_offset], &ramstage))
 		return (void *) -1;
 
 	entry_point = rmodule_entry(&ramstage);
 
-	ramstage_size = cbmem_base - ramstage_base;
-	cache_loaded_ramstage(handoff, ramstage_base, ramstage_size,
-	                      entry_point);
+	cache_loaded_ramstage(handoff, ramstage_entry, entry_point);
 
 	return entry_point;
 }
@@ -212,6 +219,7 @@ static void *load_stage_from_cbfs(struct cbfs_media *media, const char *name,
 void * cbfs_load_stage(struct cbfs_media *media, const char *name)
 {
 	struct romstage_handoff *handoff;
+	const struct cbmem_entry *ramstage;
 	void *entry;
 
 	handoff = romstage_handoff_find_or_add();
@@ -222,9 +230,14 @@ void * cbfs_load_stage(struct cbfs_media *media, const char *name)
 	} else if (!handoff->s3_resume)
 		return load_stage_from_cbfs(media, name, handoff);
 
+	ramstage = cbmem_entry_find(CBMEM_ID_RAMSTAGE);
+
+	if (ramstage == NULL)
+		return load_stage_from_cbfs(name, handoff);
+
 	/* S3 resume path. Load a cached copy of the loaded ramstage. If
 	 * return value is NULL load from cbfs. */
-	entry = load_cached_ramstage(handoff);
+	entry = load_cached_ramstage(handoff, ramstage);
 
 	if (entry == NULL)
 		return load_stage_from_cbfs(name, handoff);
diff --git a/src/lib/hardwaremain.c b/src/lib/hardwaremain.c
index bc18989ba0..a3ee10bef2 100644
--- a/src/lib/hardwaremain.c
+++ b/src/lib/hardwaremain.c
@@ -85,15 +85,6 @@ void hardwaremain(int boot_complete)
 	/* FIXME: Is there a better way to handle this? */
 	init_timer();
 
-	/* CONFIG_EARLY_CBMEM_INIT indicates that romstage initialized
-	 * the cbmem area. Therefore the table location can be initialized
-	 * early in ramstage if cbmem_get_table_location() is implemented.
-	 */
-#if CONFIG_EARLY_CBMEM_INIT
-	if (cbmem_get_table_location != NULL &&
-	    !cbmem_get_table_location(&high_tables_base, &high_tables_size))
-		cbmem_initialize();
-#endif
 	init_cbmem_pre_device();
 
 	timestamp_stash(TS_DEVICE_ENUMERATE);
diff --git a/src/lib/rmodule.c b/src/lib/rmodule.c
index 4276ed33e8..b56ec322be 100644
--- a/src/lib/rmodule.c
+++ b/src/lib/rmodule.c
@@ -16,6 +16,7 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
  */
+#include <assert.h>
 #include <stdint.h>
 #include <stdlib.h>
 #include <string.h>
@@ -254,16 +255,22 @@ int rmodule_load(void *base, struct rmodule *module)
 	return 0;
 }
 
-void *rmodule_find_region_below(void *addr, size_t rmodule_size,
-                                void **program_start, void **rmodule_start)
+int rmodule_calc_region(unsigned int region_alignment, size_t rmodule_size,
+                        size_t *region_size, int *load_offset)
 {
-	unsigned long ceiling;
-	unsigned long program_base;
-	unsigned long placement_loc;
-	unsigned long program_begin;
+	/* region_alignment must be a power of 2. */
+	if (region_alignment & (region_alignment - 1))
+		BUG();
 
-	ceiling = (unsigned long)addr;
-	/* Place the rmodule just under the ceiling. The rmodule files
+	if (region_alignment < 4096)
+		region_alignment = 4096;
+
+	/* Sanity check rmodule_header size. The code below assumes it is less
+	 * than the minimum alignment required. */
+	if (region_alignment < sizeof(struct rmodule_header))
+		BUG();
+
+	/* Place the rmodule according to alignment. The rmodule files
 	 * themselves are packed as a header and a payload, however the rmodule
 	 * itself is linked along with the header. The header starts at address
 	 * 0. Immediately following the header in the file is the program,
@@ -273,13 +280,13 @@ void *rmodule_find_region_below(void *addr, size_t rmodule_size,
 	 * to place the rmodule so that the program falls on the aligned
 	 * address with the header just before it. Therefore, we need at least
 	 * a page to account for the size of the header. */
-	program_base = ALIGN((ceiling - (rmodule_size + 4096)), 4096);
+	*region_size = ALIGN(rmodule_size + region_alignment, 4096);
 	/* The program starts immediately after the header. However,
 	 * it needs to be aligned to a 4KiB boundary. Therefore, adjust the
 	 * program location so that the program lands on a page boundary. The
 	 * layout looks like the following:
 	 *
-	 * +--------------------------------+ ceiling
+	 * +--------------------------------+ region_alignment + region_size
	 * |  >= 0 bytes from alignment     |
 	 * +--------------------------------+ program end (4KiB aligned)
 	 * |         program size           |
@@ -287,14 +294,9 @@ void *rmodule_find_region_below(void *addr, size_t rmodule_size,
 	 * |  sizeof(struct rmodule_header) |
 	 * +--------------------------------+ rmodule header start
 	 * |  >= 0 bytes from alignment     |
-	 * +--------------------------------+ program_base (4KiB aligned)
+	 * +--------------------------------+ region_alignment
 	 */
-	placement_loc = ALIGN(program_base + sizeof(struct rmodule_header),
-	                      4096) - sizeof(struct rmodule_header);
-	program_begin = placement_loc + sizeof(struct rmodule_header);
-
-	*program_start = (void *)program_begin;
-	*rmodule_start = (void *)placement_loc;
+	*load_offset = region_alignment;
 
-	return (void *)program_base;
+	return region_alignment - sizeof(struct rmodule_header);
 }
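To make the arithmetic above concrete, here is a worked example of rmodule_calc_region()'s outputs; the 64-byte header size is an illustrative stand-in for sizeof(struct rmodule_header), and the alignment is the 4 KiB minimum the function enforces.

#include <assert.h>
#include <stddef.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

/* Worked example for an rmodule of 0x21000 bytes (132 KiB) with
 * region_alignment = 4096 and an assumed 64-byte rmodule header. */
static void rmodule_region_example(void)
{
	size_t region_alignment = 4096;
	size_t rmodule_size = 0x21000;
	size_t header_size = 64;

	size_t region_size = ALIGN_UP(rmodule_size + region_alignment, 4096);
	size_t load_offset = region_alignment;
	size_t file_offset = region_alignment - header_size;

	assert(region_size == 0x22000); /* slack + header + program */
	assert(load_offset == 4096);    /* program runs at a page boundary */
	assert(file_offset == 4032);    /* header sits just below the program */
}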
diff --git a/src/northbridge/intel/haswell/northbridge.c b/src/northbridge/intel/haswell/northbridge.c
index 87081385b5..53c2f366c2 100644
--- a/src/northbridge/intel/haswell/northbridge.c
+++ b/src/northbridge/intel/haswell/northbridge.c
@@ -543,21 +543,6 @@ static void northbridge_init(struct device *dev)
 	MCHBAR32(0x5500) = 0x00100001;
 }
 
-#if CONFIG_EARLY_CBMEM_INIT
-int cbmem_get_table_location(uint64_t *tables_base, uint64_t *tables_size)
-{
-	uint32_t tseg;
-
-	/* Put the CBMEM location just below TSEG. */
-	*tables_size = HIGH_MEMORY_SIZE;
-	tseg = (pci_read_config32(dev_find_slot(0, PCI_DEVFN(0, 0)),
-	                          TSEG) & ~((1 << 20) - 1)) - HIGH_MEMORY_SIZE;
-	*tables_base = tseg;
-
-	return 0;
-}
-#endif
-
 static void northbridge_enable(device_t dev)
 {
 #if CONFIG_HAVE_ACPI_RESUME