author     Martin Roth <martin.roth@se-eng.com>            2013-07-08 16:23:54 -0600
committer  Stefan Reinauer <stefan.reinauer@coreboot.org>  2013-07-11 22:36:59 +0200
commit     4c3ab7376ebb2e3e18919f1ab663d317dfec9b9c (patch)
tree       6bd8440a05f6ea1184c0a5500d43cc92ab683f01 /src/cpu/x86
parent     0cb07e3476d9408d0935253f9f26c0a8ddc28401 (diff)
cpu: Fix spelling
Change-Id: I69c46648de0689e9bed84c7726906024ad65e769
Signed-off-by: Martin Roth <martin.roth@se-eng.com>
Reviewed-on: http://review.coreboot.org/3729
Tested-by: build bot (Jenkins)
Reviewed-by: Stefan Reinauer <stefan.reinauer@coreboot.org>
Diffstat (limited to 'src/cpu/x86')
-rw-r--r--  src/cpu/x86/car.c                    |  2
-rw-r--r--  src/cpu/x86/lapic/lapic_cpu_init.c   |  2
-rw-r--r--  src/cpu/x86/mtrr/earlymtrr.c         |  4
-rw-r--r--  src/cpu/x86/mtrr/mtrr.c              | 18
-rw-r--r--  src/cpu/x86/smm/smm_module_loader.c  | 14
5 files changed, 20 insertions, 20 deletions
diff --git a/src/cpu/x86/car.c b/src/cpu/x86/car.c
index 88f280796b..87fa98b488 100644
--- a/src/cpu/x86/car.c
+++ b/src/cpu/x86/car.c
@@ -33,7 +33,7 @@ extern char _car_data_end[];
/*
* The car_migrated global variable determines if the cache-as-ram space has
- * been migrated to real RAM. It does this by asumming the following things:
+ * been migrated to real RAM. It does this by assuming the following things:
* 1. cache-as-ram space is zero'd out once it is set up.
* 2. Either the cache-as-ram space is memory-backed after getting torn down
* or the space returns 0xff's for each byte read.
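As an aside on the comment corrected above, here is a minimal sketch (not coreboot's actual implementation) of how those two assumptions let a single flag decide the question: the flag starts at 0 because the cache-as-ram space is zeroed at setup, and both post-migration states read back non-zero.

#include <stdint.h>

/* hypothetical flag living in the CAR data area; it starts at 0 because
 * the cache-as-ram space is zeroed at setup (assumption 1 above) */
static volatile uint32_t car_data_migrated;

static int car_space_is_migrated(void)
{
	/* After migration the RAM-backed copy is written non-zero; a torn
	 * down, unbacked CAR reads back 0xff bytes (assumption 2), which is
	 * also non-zero, so non-zero simply means "use the RAM copy". */
	return car_data_migrated != 0;
}
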
diff --git a/src/cpu/x86/lapic/lapic_cpu_init.c b/src/cpu/x86/lapic/lapic_cpu_init.c
index fbc8aa4e45..a3bab4658a 100644
--- a/src/cpu/x86/lapic/lapic_cpu_init.c
+++ b/src/cpu/x86/lapic/lapic_cpu_init.c
@@ -40,7 +40,7 @@
* will return 0, meaning no CPU.
*
* We actually handling that case by noting which cpus startup
- * and not telling anyone about the ones that dont.
+ * and not telling anyone about the ones that don't.
*/
/* Start-UP IPI vector must be 4kB aligned and below 1MB. */
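The 4kB/1MB constraint follows from the ICR encoding: the start-up IPI vector is an 8-bit field holding the target address shifted right by 12 bits. A quick sketch of the corresponding sanity check (the helper name is illustrative):

#include <stdint.h>

static int sipi_vector_is_valid(uint32_t startup_addr)
{
	/* the vector byte encodes startup_addr >> 12, so the startup code
	 * must sit on a 4KiB boundary somewhere below 1MiB */
	return (startup_addr & 0xfff) == 0 && startup_addr < (1 << 20);
}
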
diff --git a/src/cpu/x86/mtrr/earlymtrr.c b/src/cpu/x86/mtrr/earlymtrr.c
index 55dbd2f4a4..f16da279c6 100644
--- a/src/cpu/x86/mtrr/earlymtrr.c
+++ b/src/cpu/x86/mtrr/earlymtrr.c
@@ -32,7 +32,7 @@ static void cache_ramstage(void)
const int addr_det = 0;
/* the fixed and variable MTTRs are power-up with random values,
- * clear them to MTRR_TYPE_UNCACHEABLE for safty.
+ * clear them to MTRR_TYPE_UNCACHEABLE for safety.
*/
static void do_early_mtrr_init(const unsigned long *mtrr_msrs)
{
@@ -43,7 +43,7 @@ static void do_early_mtrr_init(const unsigned long *mtrr_msrs)
msr_t msr;
const unsigned long *msr_addr;
- /* Inialize all of the relevant msrs to 0 */
+ /* Initialize all of the relevant msrs to 0 */
msr.lo = 0;
msr.hi = 0;
unsigned long msr_nr;
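The routine touched here simply walks a 0-terminated list of MTRR MSR indexes and writes zero to each one. A compilable sketch of that loop, with msr_t and wrmsr stubbed in place of the coreboot versions:

#include <stdint.h>

typedef struct { uint32_t lo, hi; } msr_t;   /* stand-in for coreboot's msr_t */

/* stand-in for the real MSR write wrapper from coreboot's MSR header */
static void wrmsr(unsigned long index, msr_t msr)
{
	(void)index; (void)msr;
}

/* Zero every MTRR MSR named in a 0-terminated list so that no random
 * power-on value leaves a range cached by accident. */
static void clear_mtrr_msrs(const unsigned long *mtrr_msrs)
{
	const msr_t zero = { 0, 0 };
	const unsigned long *addr;

	for (addr = mtrr_msrs; *addr != 0; addr++)
		wrmsr(*addr, zero);
}
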
diff --git a/src/cpu/x86/mtrr/mtrr.c b/src/cpu/x86/mtrr/mtrr.c
index b69787bf4a..8f1c35ec6e 100644
--- a/src/cpu/x86/mtrr/mtrr.c
+++ b/src/cpu/x86/mtrr/mtrr.c
@@ -94,7 +94,7 @@ static inline unsigned int fms(unsigned int x)
return r;
}
-/* fls: find least sigificant bit set */
+/* fls: find least significant bit set */
static inline unsigned int fls(unsigned int x)
{
int r;
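Despite the usual meaning of the name elsewhere, the corrected comment matches this file: fls() here returns the index of the least significant set bit (the in-tree body uses inline assembly). A portable sketch of the same behaviour:

/* Portable equivalent of the behaviour described above: return the bit
 * index of the least significant set bit; x == 0 is left to the caller,
 * matching the usual bit-scan caveat. */
static inline unsigned int fls_portable(unsigned int x)
{
	unsigned int r = 0;

	while (x && !(x & 1)) {
		x >>= 1;
		r++;
	}
	return r;
}
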
@@ -160,8 +160,8 @@ static struct memranges *get_physical_address_space(void)
static struct memranges addr_space_storage;
/* In order to handle some chipsets not being able to pre-determine
- * uncacheable ranges, such as graphics memory, at resource inseration
- * time remove unacheable regions from the cacheable ones. */
+ * uncacheable ranges, such as graphics memory, at resource insertion
+ * time remove uncacheable regions from the cacheable ones. */
if (addr_space == NULL) {
struct range_entry *r;
unsigned long mask;
@@ -216,7 +216,7 @@ static struct memranges *get_physical_address_space(void)
}
/* Fixed MTRR descriptor. This structure defines the step size and begin
- * and end (exclusive) address covered by a set of fixe MTRR MSRs.
+ * and end (exclusive) address covered by a set of fixed MTRR MSRs.
* It also describes the offset in byte intervals to store the calculated MTRR
* type in an array. */
struct fixed_mtrr_desc {
@@ -533,7 +533,7 @@ static void calc_var_mtrrs_with_hole(struct var_mtrr_state *var_state,
struct range_entry *next;
/*
- * Determine MTRRs based on the following algoirthm for the given entry:
+ * Determine MTRRs based on the following algorithm for the given entry:
* +------------------+ b2 = ALIGN_UP(end)
* | 0 or more bytes | <-- hole is carved out between b1 and b2
* +------------------+ a2 = b1 = end
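A worked example of the hole carving pictured in that diagram, using an assumed 64MiB granularity purely for illustration:

#include <stdio.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long long end = 0xcf800000ULL;   /* b1 = a2 = end of the WB range */
	unsigned long long align = 64ULL << 20;   /* assumed MTRR granularity */
	unsigned long long b2 = ALIGN_UP(end, align);

	/* the gap between b1 and b2 is the "hole" that gets carved out */
	printf("b1=0x%llx b2=0x%llx hole=0x%llx bytes\n", end, b2, b2 - end);
	return 0;
}
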
@@ -571,7 +571,7 @@ static void calc_var_mtrrs_with_hole(struct var_mtrr_state *var_state,
b1 = a2;
- /* First check if a1 is >= 4GiB and the current etnry is the last
+ /* First check if a1 is >= 4GiB and the current entry is the last
* entry. If so perform an optimization of covering a larger range
* defined by the base address' alignment. */
if (a1 >= RANGE_4GB && next == NULL) {
@@ -686,10 +686,10 @@ static int calc_var_mtrrs(struct memranges *addr_space,
* 1. UC as default type with no holes at top of range.
* 2. UC as default using holes at top of range.
* 3. WB as default.
- * The lowest count is then used as default after totalling all
- * MTRRs. Note that the optimal algoirthm for UC default is marked in
+ * The lowest count is then used as default after totaling all
+ * MTRRs. Note that the optimal algorithm for UC default is marked in
* the tag of each range regardless of final decision. UC takes
- * precedence in the MTRR archiecture. Therefore, only holes can be
+ * precedence in the MTRR architecture. Therefore, only holes can be
* used when the type of the region is MTRR_TYPE_WRBACK with
* MTRR_TYPE_UNCACHEABLE as the default type.
*/
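The selection step described in that comment boils down to a three-way minimum over the counted MTRR usage of each strategy; a simplified sketch with illustrative names (the in-tree bookkeeping is more involved):

enum default_type { DEF_UC_NO_HOLES, DEF_UC_WITH_HOLES, DEF_WB };

static enum default_type pick_default_type(int uc_no_holes, int uc_with_holes,
					   int wb)
{
	enum default_type best = DEF_UC_NO_HOLES;
	int best_count = uc_no_holes;

	if (uc_with_holes < best_count) {
		best = DEF_UC_WITH_HOLES;
		best_count = uc_with_holes;
	}
	if (wb < best_count)
		best = DEF_WB;
	return best;
}
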
diff --git a/src/cpu/x86/smm/smm_module_loader.c b/src/cpu/x86/smm/smm_module_loader.c
index 5eb4c5a0a2..478ae8c10a 100644
--- a/src/cpu/x86/smm/smm_module_loader.c
+++ b/src/cpu/x86/smm/smm_module_loader.c
@@ -24,16 +24,16 @@
#include <console/console.h>
/*
- * Compoments that make up the SMRAM:
+ * Components that make up the SMRAM:
* 1. Save state - the total save state memory used
* 2. Stack - stacks for the CPUs in the SMM handler
* 3. Stub - SMM stub code for calling into handler
* 4. Handler - C-based SMM handler.
*
- * The compoents are assumed to consist of one consecutive region.
+ * The components are assumed to consist of one consecutive region.
*/
-/* These paramters are used by the SMM stub code. A pointer to the params
+/* These parameters are used by the SMM stub code. A pointer to the params
* is also passed to the C-base handler. */
struct smm_stub_params {
u32 stack_size;
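For orientation, a rough sketch of how the four SMRAM components listed in the corrected comment could be laid out as one consecutive region; the ordering details, sizes, and helper are illustrative rather than the loader's real bookkeeping:

#include <stddef.h>

struct smram_layout {
	size_t save_state_offset;   /* 1. save state */
	size_t stack_offset;        /* 2. stacks */
	size_t stub_offset;         /* 3. stub */
	size_t handler_offset;      /* 4. handler */
	size_t total;
};

static struct smram_layout layout_smram(size_t save_state_size, size_t stack_size,
					size_t stub_size, size_t handler_size)
{
	struct smram_layout l;

	l.save_state_offset = 0;
	l.stack_offset = l.save_state_offset + save_state_size;
	l.stub_offset = l.stack_offset + stack_size;
	l.handler_offset = l.stub_offset + stub_size;
	l.total = l.handler_offset + handler_size;
	return l;
}
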
@@ -80,7 +80,7 @@ static void smm_place_jmp_instructions(void *entry_start, int stride, int num,
/* Each entry point has an IP value of 0x8000. The SMBASE for each
* cpu is different so the effective address of the entry instruction
- * is different. Therefore, the relative displacment for each entry
+ * is different. Therefore, the relative displacement for each entry
* instruction needs to be updated to reflect the current effective
* IP. Additionally, the IP result from the jmp instruction is
* calculated using the next instruction's address so the size of
@@ -140,7 +140,7 @@ static void smm_stub_place_staggered_entry_points(char *base,
stub_entry_offset = rmodule_entry_offset(smm_stub);
/* If there are staggered entry points or the stub is not located
- * at the SMM entry point then jmp instructionss need to be placed. */
+ * at the SMM entry point then jmp instructions need to be placed. */
if (params->num_concurrent_save_states > 1 || stub_entry_offset != 0) {
int num_entries;
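The displacement fix-up described two hunks above is plain arithmetic: a relative jmp encodes the target minus the address of the next instruction, so every staggered entry point needs its own value. A small illustration with assumed numbers:

#include <stdio.h>

int main(void)
{
	unsigned int stub_ip = 0x8000;           /* where the stub code actually sits */
	unsigned int jmp_size = 3;               /* 16-bit jmp rel16: opcode + 2-byte disp */
	unsigned int entry_ip = 0x8000 - 0x400;  /* a staggered entry, one assumed stride lower */
	int disp = (int)(stub_ip - (entry_ip + jmp_size));

	printf("entry at 0x%x needs displacement %d (0x%04x)\n",
	       entry_ip, disp, (unsigned int)disp & 0xffff);
	return 0;
}
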
@@ -297,7 +297,7 @@ int smm_setup_relocation_handler(struct smm_loader_params *params)
return -1;
/* Since the relocation handler always uses stack, adjust the number
- * of conccurent stack users to be CONFIG_MAX_CPUS. */
+ * of concurrent stack users to be CONFIG_MAX_CPUS. */
if (params->num_concurrent_stacks == 0)
params->num_concurrent_stacks = CONFIG_MAX_CPUS;
@@ -318,7 +318,7 @@ int smm_setup_relocation_handler(struct smm_loader_params *params)
*
* It should be noted that this algorithm will not work for
* SMM_DEFAULT_SIZE SMRAM regions such as the A segment. This algorithm
- * expectes a region large enough to encompass the handler and stacks
+ * expects a region large enough to encompass the handler and stacks
* as well as the SMM_DEFAULT_SIZE.
*/
int smm_load_module(void *smram, int size, struct smm_loader_params *params)
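A back-of-the-envelope version of that size expectation, with placeholder component sizes and a stand-in constant for SMM_DEFAULT_SIZE:

#include <stddef.h>

#define SMM_DEFAULT_SIZE_SKETCH 0x10000   /* stand-in value for illustration */

static int smram_region_is_big_enough(size_t smram_size, size_t handler_size,
				      size_t per_cpu_stack, int num_cpus)
{
	size_t needed = SMM_DEFAULT_SIZE_SKETCH + handler_size
			+ per_cpu_stack * (size_t)num_cpus;

	return smram_size >= needed;
}
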