Diffstat (limited to 'src/include/cpu/x86')
-rw-r--r--	src/include/cpu/x86/cache.h	2
-rw-r--r--	src/include/cpu/x86/mp.h	12
-rw-r--r--	src/include/cpu/x86/msr.h	2
-rw-r--r--	src/include/cpu/x86/mtrr.h	2
-rw-r--r--	src/include/cpu/x86/smm.h	8
5 files changed, 13 insertions, 13 deletions
diff --git a/src/include/cpu/x86/cache.h b/src/include/cpu/x86/cache.h
index 9c1af294e8..2c8c44b531 100644
--- a/src/include/cpu/x86/cache.h
+++ b/src/include/cpu/x86/cache.h
@@ -52,7 +52,7 @@ static inline void invd(void)
/* The following functions require the always_inline due to AMD
* function STOP_CAR_AND_CPU that disables cache as
- * ram, the cache as ram stack can no longer be used. Called
+ * RAM, the cache as RAM stack can no longer be used. Called
* functions must be inlined to avoid stack usage. Also, the
* compiler must keep local variables register based and not
* allocated them from the stack. With gcc 4.5.0, some functions
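Editor's note: as a hedged illustration of the constraint this comment describes (the helper name below is hypothetical, not part of cache.h), a routine that still has to run after cache-as-RAM teardown must be forced inline so it never spills locals onto the vanished CAR stack:

/* Hypothetical sketch, not part of cache.h: runs after CAR teardown,
 * so it must be always_inline and keep everything in registers. */
static inline __attribute__((always_inline)) void example_post_car_halt(void)
{
	/* No stack-allocated locals; the CAR stack no longer exists. */
	for (;;)
		__asm__ volatile ("hlt");
}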
diff --git a/src/include/cpu/x86/mp.h b/src/include/cpu/x86/mp.h
index 9742df08c1..cea3139bb3 100644
--- a/src/include/cpu/x86/mp.h
+++ b/src/include/cpu/x86/mp.h
@@ -59,9 +59,9 @@ struct mp_ops {
void (*get_microcode_info)(const void **microcode, int *parallel);
/*
* Optionally provide a function which adjusts the APIC id
- * map to cpu number. By default the cpu number and APIC id
- * are 1:1. To change the APIC id for a given cpu return the
- * new APIC id. It's called for each cpu as indicated by
+ * map to CPU number. By default the CPU number and APIC id
+ * are 1:1. To change the APIC id for a given CPU return the
+ * new APIC id. It's called for each CPU as indicated by
* get_cpu_count().
*/
int (*adjust_cpu_apic_entry)(int cpu, int cur_apic_id);
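Editor's note: a hedged sketch of what an adjust_cpu_apic_entry() implementation might look like, assuming a hypothetical platform whose APIC ids are spaced two apart (everything beyond the callback signature is an assumption):

/* Hypothetical example: APIC ids are 0, 2, 4, ... on this platform, so
 * CPU n is remapped to APIC id 2*n instead of the default 1:1 value. */
static int example_adjust_cpu_apic_entry(int cpu, int cur_apic_id)
{
	(void)cur_apic_id;	/* default 1:1 id, not needed here */
	return cpu * 2;		/* new APIC id for this CPU */
}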
@@ -78,7 +78,7 @@ struct mp_ops {
void (*adjust_smm_params)(struct smm_loader_params *slp, int is_perm);
/*
* Optionally provide a callback prior to the APs starting SMM
- * relocation or cpu driver initialization. However, note that
+ * relocation or CPU driver initialization. However, note that
* this callback is called after SMM handlers have been loaded.
*/
void (*pre_mp_smm_init)(void);
@@ -88,11 +88,11 @@ struct mp_ops {
*/
void (*per_cpu_smm_trigger)(void);
/*
- * This function is called while each cpu is in the SMM relocation
+ * This function is called while each CPU is in the SMM relocation
* handler. Its primary purpose is to adjust the SMBASE for the
* permanent handler. The parameters passed are the current cpu
* running the relocation handler, current SMBASE of relocation handler,
- * and the pre-calculated staggered cpu SMBASE address of the permanent
+ * and the pre-calculated staggered CPU SMBASE address of the permanent
* SMM handler.
*/
void (*relocation_handler)(int cpu, uintptr_t curr_smbase,
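Editor's note: a hedged sketch of a relocation_handler() implementation, assuming the continuation of the declaration above takes the staggered SMBASE as its third argument; the save-state layout, and therefore the 0x7ef8 offset used here, is chipset-specific and purely illustrative:

#include <stdint.h>

/* Illustrative only: the offset of the SMBASE field within the save
 * state differs per chipset; 0x7ef8 is the classic legacy value. */
#define EXAMPLE_SMBASE_OFFSET	0x7ef8

static void example_relocation_handler(int cpu, uintptr_t curr_smbase,
				       uintptr_t staggered_smbase)
{
	uint32_t *smbase_field;

	(void)cpu;	/* CPU number not needed in this sketch */

	/* Point this CPU's next SMM entry at its permanent, staggered base. */
	smbase_field = (uint32_t *)(curr_smbase + 0x8000 + EXAMPLE_SMBASE_OFFSET);
	*smbase_field = (uint32_t)staggered_smbase;
}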
diff --git a/src/include/cpu/x86/msr.h b/src/include/cpu/x86/msr.h
index d644eddc2a..db35ef14eb 100644
--- a/src/include/cpu/x86/msr.h
+++ b/src/include/cpu/x86/msr.h
@@ -48,7 +48,7 @@ static inline __attribute__((always_inline)) void wrmsr(unsigned index,
/* The following functions require the always_inline due to AMD
* function STOP_CAR_AND_CPU that disables cache as
- * ram, the cache as ram stack can no longer be used. Called
+ * RAM, the cache as RAM stack can no longer be used. Called
* functions must be inlined to avoid stack usage. Also, the
* compiler must keep local variables register based and not
* allocated them from the stack. With gcc 4.5.0, some functions
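Editor's note: for reference, the read-modify-write pattern these always_inline helpers support, assuming the msr_t lo/hi layout defined in this header (MSR 0x1b, IA32_APIC_BASE, and its enable bit 11 are used only as an illustration):

#include <cpu/x86/msr.h>

static inline void example_enable_lapic(void)
{
	msr_t msr = rdmsr(0x1b);	/* IA32_APIC_BASE */
	msr.lo |= (1 << 11);		/* APIC global enable */
	wrmsr(0x1b, msr);
}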
diff --git a/src/include/cpu/x86/mtrr.h b/src/include/cpu/x86/mtrr.h
index d09c77e2af..f32bececfd 100644
--- a/src/include/cpu/x86/mtrr.h
+++ b/src/include/cpu/x86/mtrr.h
@@ -91,7 +91,7 @@ int get_free_var_mtrr(void);
(x>>6)|(x>>7)|(x>>8)|((1<<18)-1))
#define _ALIGN_UP_POW2(x) ((x + _POW2_MASK(x)) & ~_POW2_MASK(x))
-/* At the end of romstage, low ram 0..CACHE_TM_RAMTOP may be set
+/* At the end of romstage, low RAM 0..CACHE_TM_RAMTOP may be set
* as write-back cacheable to speed up ramstage decompression.
* Note MTRR boundaries, must be power of two.
*/
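Editor's note: a worked example of the rounding these macros perform, assuming the full _POW2_MASK definition ORs together x>>1 through x>>8 with ((1<<18)-1) (only the tail of the macro is visible in the hunk above):

/*
 * x                 = 0x500000  (5 MiB)
 * _POW2_MASK(x)     = 0x3FFFFF  (4 MiB - 1, the bit-smear of x's top bit)
 * _ALIGN_UP_POW2(x) = (0x500000 + 0x3FFFFF) & ~0x3FFFFF
 *                   = 0x800000  (8 MiB, the next power of two)
 *
 * i.e. the value is rounded up to a power of two, and never below
 * 256 KiB thanks to the ((1<<18)-1) term, so it sits on a valid MTRR
 * boundary.
 */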
diff --git a/src/include/cpu/x86/smm.h b/src/include/cpu/x86/smm.h
index 2b13f8c711..c1051ad6ab 100644
--- a/src/include/cpu/x86/smm.h
+++ b/src/include/cpu/x86/smm.h
@@ -491,8 +491,8 @@ u16 smm_get_pmbase(void);
struct smm_runtime {
u32 smbase;
u32 save_state_size;
- /* The apic_id_to_cpu provides a mapping from APIC id to cpu number.
- * The cpu number is indicated by the index into the array by matching
+ /* The apic_id_to_cpu provides a mapping from APIC id to CPU number.
+ * The CPU number is indicated by the index into the array by matching
* the default APIC id and value at the index. The stub loader
* initializes this array with a 1:1 mapping. If the APIC ids are not
* contiguous like the 1:1 mapping it is up to the caller of the stub
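Editor's note: a hedged sketch of the lookup this mapping enables; the loop bound is passed in here because the array length is not visible in the hunk, and everything beyond the apic_id_to_cpu field name is an assumption:

/* Find the CPU number for a given default APIC id: the matching index
 * into apic_id_to_cpu is the CPU number described above. */
static int example_apic_id_to_cpu_num(const struct smm_runtime *runtime,
				      u32 apic_id, int num_cpus)
{
	int i;

	for (i = 0; i < num_cpus; i++) {
		if (runtime->apic_id_to_cpu[i] == apic_id)
			return i;
	}
	return -1;	/* id not present in the map */
}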
@@ -525,7 +525,7 @@ void *smm_get_save_state(int cpu);
/* The smm_loader_params structure provides direction to the SMM loader:
* - stack_top - optional external stack provided to loader. It must be at
* least per_cpu_stack_size * num_concurrent_stacks in size.
- * - per_cpu_stack_size - stack size per cpu for smm modules.
+ * - per_cpu_stack_size - stack size per CPU for smm modules.
* - num_concurrent_stacks - number of concurrent cpus in handler needing stack
* optional for setting up relocation handler.
* - per_cpu_save_state_size - the smm save state size per cpu
@@ -537,7 +537,7 @@ void *smm_get_save_state(int cpu);
* the address of the module's parameters (if present).
* - runtime - this field is a result only. The SMM runtime location is filled
* into this field so the code doing the loading can manipulate the
- * runtime's assumptions. e.g. updating the apic id to cpu map to
+ * runtime's assumptions. e.g. updating the apic id to CPU map to
* handle sparse apic id space.
*/
struct smm_loader_params {
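Editor's note: finally, a hedged sketch of how a caller might fill in smm_loader_params using only the fields documented above; the CPU count and sizes are illustrative, and stack_top/runtime are left for the loader as the comment describes:

#include <string.h>
#include <cpu/x86/smm.h>

static void example_fill_loader_params(struct smm_loader_params *slp)
{
	memset(slp, 0, sizeof(*slp));

	slp->per_cpu_stack_size = 1024;		/* stack per CPU in SMM */
	slp->num_concurrent_stacks = 4;		/* CPUs needing a stack */
	slp->per_cpu_save_state_size = 1024;	/* chipset save state size */

	/* stack_top stays NULL so the loader places the stacks itself;
	 * runtime is an output the loader fills in after loading. */
}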