Diffstat (limited to 'src/include/cpu/x86')
-rw-r--r--  src/include/cpu/x86/cache.h |  9
-rw-r--r--  src/include/cpu/x86/cr.h    | 15
-rw-r--r--  src/include/cpu/x86/lapic.h | 13
-rw-r--r--  src/include/cpu/x86/msr.h   | 16
4 files changed, 27 insertions, 26 deletions
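Each header gains #include <compiler.h> because the spelled-out __attribute__((always_inline)) qualifier (and the alwaysinline shorthand used in cr.h) is replaced by the shared __always_inline macro. The definition lives in <compiler.h>, which is not part of this diff; a minimal sketch of what it is assumed to expand to:

/* Assumed shape of the shared macro in <compiler.h> (not shown in this
 * diff): a guarded define so the inline attribute is written once and
 * reused by every header below.
 */
#ifndef __always_inline
#define __always_inline inline __attribute__((always_inline))
#endif

With that single definition, the cache-as-RAM helpers below get guaranteed inlining without repeating the attribute at every declaration.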
diff --git a/src/include/cpu/x86/cache.h b/src/include/cpu/x86/cache.h
index e0a335971b..81d2ae7223 100644
--- a/src/include/cpu/x86/cache.h
+++ b/src/include/cpu/x86/cache.h
@@ -16,6 +16,7 @@
#ifndef CPU_X86_CACHE
#define CPU_X86_CACHE
+#include <compiler.h>
#include <cpu/x86/cr.h>
#define CR0_CacheDisable (CR0_CD)
@@ -55,7 +56,7 @@ static inline void clflush(void *addr)
asm volatile ("clflush (%0)"::"r" (addr));
}
-/* The following functions require the always_inline due to AMD
+/* The following functions require the __always_inline due to AMD
* function STOP_CAR_AND_CPU that disables cache as
* RAM, the cache as RAM stack can no longer be used. Called
* functions must be inlined to avoid stack usage. Also, the
@@ -63,9 +64,9 @@ static inline void clflush(void *addr)
* allocated them from the stack. With gcc 4.5.0, some functions
* declared as inline are not being inlined. This patch forces
* these functions to always be inlined by adding the qualifier
- * __attribute__((always_inline)) to their declaration.
+ * __always_inline to their declaration.
*/
-static inline __attribute__((always_inline)) void enable_cache(void)
+static __always_inline void enable_cache(void)
{
unsigned long cr0;
cr0 = read_cr0();
@@ -73,7 +74,7 @@ static inline __attribute__((always_inline)) void enable_cache(void)
write_cr0(cr0);
}
-static inline __attribute__((always_inline)) void disable_cache(void)
+static __always_inline void disable_cache(void)
{
/* Disable and write back the cache */
unsigned long cr0;
diff --git a/src/include/cpu/x86/cr.h b/src/include/cpu/x86/cr.h
index 5183c77f89..229a67d422 100644
--- a/src/include/cpu/x86/cr.h
+++ b/src/include/cpu/x86/cr.h
@@ -18,6 +18,7 @@
#if !defined(__ASSEMBLER__)
+#include <compiler.h>
#include <stdint.h>
#include <arch/cpu.h>
@@ -37,7 +38,7 @@
#define CRx_IN "r"
#define CRx_RET "=r"
#endif
-static alwaysinline CRx_TYPE read_cr0(void)
+static __always_inline CRx_TYPE read_cr0(void)
{
CRx_TYPE value;
__asm__ __volatile__ (
@@ -49,7 +50,7 @@ static alwaysinline CRx_TYPE read_cr0(void)
return value;
}
-static alwaysinline void write_cr0(CRx_TYPE data)
+static __always_inline void write_cr0(CRx_TYPE data)
{
__asm__ __volatile__ (
"mov %0, %%cr0"
@@ -59,7 +60,7 @@ static alwaysinline void write_cr0(CRx_TYPE data)
);
}
-static alwaysinline CRx_TYPE read_cr2(void)
+static __always_inline CRx_TYPE read_cr2(void)
{
CRx_TYPE value;
__asm__ __volatile__ (
@@ -71,7 +72,7 @@ static alwaysinline CRx_TYPE read_cr2(void)
return value;
}
-static alwaysinline CRx_TYPE read_cr3(void)
+static __always_inline CRx_TYPE read_cr3(void)
{
CRx_TYPE value;
__asm__ __volatile__ (
@@ -83,7 +84,7 @@ static alwaysinline CRx_TYPE read_cr3(void)
return value;
}
-static alwaysinline void write_cr3(CRx_TYPE data)
+static __always_inline void write_cr3(CRx_TYPE data)
{
__asm__ __volatile__ (
"mov %0, %%cr3"
@@ -92,7 +93,7 @@ static alwaysinline void write_cr3(CRx_TYPE data)
: COMPILER_BARRIER
);
}
-static alwaysinline CRx_TYPE read_cr4(void)
+static __always_inline CRx_TYPE read_cr4(void)
{
CRx_TYPE value;
__asm__ __volatile__ (
@@ -104,7 +105,7 @@ static alwaysinline CRx_TYPE read_cr4(void)
return value;
}
-static alwaysinline void write_cr4(CRx_TYPE data)
+static __always_inline void write_cr4(CRx_TYPE data)
{
__asm__ __volatile__ (
"mov %0, %%cr4"
diff --git a/src/include/cpu/x86/lapic.h b/src/include/cpu/x86/lapic.h
index 6121230a22..5ac3c5e2e8 100644
--- a/src/include/cpu/x86/lapic.h
+++ b/src/include/cpu/x86/lapic.h
@@ -1,24 +1,23 @@
#ifndef CPU_X86_LAPIC_H
#define CPU_X86_LAPIC_H
+#include <compiler.h>
#include <cpu/x86/lapic_def.h>
#include <cpu/x86/msr.h>
#include <halt.h>
#include <smp/node.h>
-static inline __attribute__((always_inline)) unsigned long lapic_read(
- unsigned long reg)
+static __always_inline unsigned long lapic_read(unsigned long reg)
{
return *((volatile unsigned long *)(LAPIC_DEFAULT_BASE+reg));
}
-static inline __attribute__((always_inline)) void lapic_write(unsigned long reg,
- unsigned long v)
+static __always_inline void lapic_write(unsigned long reg, unsigned long v)
{
*((volatile unsigned long *)(LAPIC_DEFAULT_BASE+reg)) = v;
}
-static inline __attribute__((always_inline)) void lapic_wait_icr_idle(void)
+static __always_inline void lapic_wait_icr_idle(void)
{
do { } while (lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY);
}
@@ -42,7 +41,7 @@ static inline void disable_lapic(void)
wrmsr(LAPIC_BASE_MSR, msr);
}
-static inline __attribute__((always_inline)) unsigned long lapicid(void)
+static __always_inline unsigned long lapicid(void)
{
return lapic_read(LAPIC_ID) >> 24;
}
@@ -51,7 +50,7 @@ static inline __attribute__((always_inline)) unsigned long lapicid(void)
/* If we need to go back to sipi wait, we use the long non-inlined version of
* this function in lapic_cpu_init.c
*/
-static inline __attribute__((always_inline)) void stop_this_cpu(void)
+static __always_inline void stop_this_cpu(void)
{
/* Called by an AP when it is ready to halt and wait for a new task */
halt();
diff --git a/src/include/cpu/x86/msr.h b/src/include/cpu/x86/msr.h
index 74c2521af5..290c54a499 100644
--- a/src/include/cpu/x86/msr.h
+++ b/src/include/cpu/x86/msr.h
@@ -1,6 +1,8 @@
#ifndef CPU_X86_MSR_H
#define CPU_X86_MSR_H
+#include <compiler.h>
+
/* Intel SDM: Table 2-1
* IA-32 architectural MSR: Extended Feature Enable Register
*/
@@ -50,19 +52,18 @@ msr_t soc_msr_read(unsigned int index);
void soc_msr_write(unsigned int index, msr_t msr);
/* Handle MSR references in the other source code */
-static inline __attribute__((always_inline)) msr_t rdmsr(unsigned int index)
+static __always_inline msr_t rdmsr(unsigned int index)
{
return soc_msr_read(index);
}
-static inline __attribute__((always_inline)) void wrmsr(unsigned int index,
- msr_t msr)
+static __always_inline void wrmsr(unsigned int index, msr_t msr)
{
soc_msr_write(index, msr);
}
#else /* CONFIG_SOC_SETS_MSRS */
-/* The following functions require the always_inline due to AMD
+/* The following functions require the __always_inline due to AMD
* function STOP_CAR_AND_CPU that disables cache as
* RAM, the cache as RAM stack can no longer be used. Called
* functions must be inlined to avoid stack usage. Also, the
@@ -70,9 +71,9 @@ static inline __attribute__((always_inline)) void wrmsr(unsigned int index,
* allocated them from the stack. With gcc 4.5.0, some functions
* declared as inline are not being inlined. This patch forces
* these functions to always be inlined by adding the qualifier
- * __attribute__((always_inline)) to their declaration.
+ * __always_inline to their declaration.
*/
-static inline __attribute__((always_inline)) msr_t rdmsr(unsigned int index)
+static __always_inline msr_t rdmsr(unsigned int index)
{
msr_t result;
__asm__ __volatile__ (
@@ -83,8 +84,7 @@ static inline __attribute__((always_inline)) msr_t rdmsr(unsigned int index)
return result;
}
-static inline __attribute__((always_inline)) void wrmsr(unsigned int index,
- msr_t msr)
+static __always_inline void wrmsr(unsigned int index, msr_t msr)
{
__asm__ __volatile__ (
"wrmsr"