author     Scott Duplichan <scott@notabs.org>  2010-09-17 21:38:40 +0000
committer  Marc Jones <marc.jones@amd.com>     2010-09-17 21:38:40 +0000
commit     78301d02b01d01302e6f9ce95db1e59c360a0ba9 (patch)
tree       50760e76bf88020342759aad7f5b452911657e42 /src/include
parent     01d56d4276c800f4aa53acd657aed24676a52f4d (diff)
download   coreboot-78301d02b01d01302e6f9ce95db1e59c360a0ba9.tar.xz
AMD Fam10 code breaks with gcc 4.5.0.
Root cause: after the function STOP_CAR_AND_CPU disables cache-as-RAM, the cache-as-RAM stack can no longer be used. Functions called after that point must be inlined to avoid stack usage, and the compiler must keep their local variables in registers rather than allocating them on the stack. With gcc 4.5.0, some functions declared as inline are not being inlined. This patch forces these functions to always be inlined by adding the qualifier __attribute__((always_inline)) to their declarations.

Signed-off-by: Scott Duplichan <scott@notabs.org>
Acked-by: Stefan Reinauer <stepan@coresystems.de>
Acked-by: Marc Jones <marcj303@gmail.com>

git-svn-id: svn://svn.coreboot.org/coreboot/trunk@5818 2b7e53f0-3cfb-0310-b3e9-8179ed1497e1
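For context, the following is a minimal sketch of the pattern this patch applies (illustrative only: the helper bodies and the CR0 mask are assumptions, not copied from the patch; the real definitions live in src/include/cpu/x86/cache.h, shown in the diff below). Forcing inline expansion means a marked helper contributes no call instruction and no stack frame to its caller, which is exactly what code running after cache-as-RAM teardown requires.

/* Illustrative sketch only -- not the literal contents of this patch. */
static inline __attribute__((always_inline)) unsigned long read_cr0(void)
{
	unsigned long cr0;
	asm volatile ("movl %%cr0, %0" : "=r" (cr0));
	return cr0;
}

static inline __attribute__((always_inline)) void write_cr0(unsigned long cr0)
{
	asm volatile ("movl %0, %%cr0" : : "r" (cr0) : "memory");
}

static inline __attribute__((always_inline)) void enable_cache(void)
{
	/* Clear CD (bit 30) and NW (bit 29) in CR0 to re-enable the cache.
	 * With always_inline, gcc must expand this into the caller even when
	 * its inlining heuristics would otherwise emit a real call and spill
	 * locals onto the (now unusable) stack. */
	unsigned long cr0 = read_cr0();
	cr0 &= ~((1UL << 30) | (1UL << 29));
	write_cr0(cr0);
}

The same reasoning applies to rdmsr() and wrmsr() in msr.h below, whose msr_t result and argument must likewise stay in registers.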
Diffstat (limited to 'src/include')
-rw-r--r--  src/include/cpu/x86/cache.h  14
-rw-r--r--  src/include/cpu/x86/msr.h    14
2 files changed, 24 insertions(+), 4 deletions(-)
diff --git a/src/include/cpu/x86/cache.h b/src/include/cpu/x86/cache.h
index 65979fff77..a473d66c8f 100644
--- a/src/include/cpu/x86/cache.h
+++ b/src/include/cpu/x86/cache.h
@@ -74,7 +74,17 @@ static inline void invd(void)
asm volatile("invd" ::: "memory");
}
-static inline void enable_cache(void)
+/* The following functions require always_inline: after the AMD
+ * function STOP_CAR_AND_CPU disables cache-as-RAM, the
+ * cache-as-RAM stack can no longer be used. Functions called
+ * after that point must be inlined to avoid stack usage, and
+ * the compiler must keep local variables in registers rather
+ * than allocating them on the stack. With gcc 4.5.0, some
+ * functions declared as inline are not being inlined, so the
+ * qualifier __attribute__((always_inline)) is added to their
+ * declarations to force inlining.
+ */
+static inline __attribute__((always_inline)) void enable_cache(void)
{
unsigned long cr0;
cr0 = read_cr0();
@@ -82,7 +92,7 @@ static inline void enable_cache(void)
write_cr0(cr0);
}
-static inline void disable_cache(void)
+static inline __attribute__((always_inline)) void disable_cache(void)
{
/* Disable and write back the cache */
unsigned long cr0;
diff --git a/src/include/cpu/x86/msr.h b/src/include/cpu/x86/msr.h
index a201ef42f2..e8bc195307 100644
--- a/src/include/cpu/x86/msr.h
+++ b/src/include/cpu/x86/msr.h
@@ -29,7 +29,17 @@ typedef struct msrinit_struct
msr_t msr;
} msrinit_t;
-static inline msr_t rdmsr(unsigned index)
+/* The following functions require always_inline: after the AMD
+ * function STOP_CAR_AND_CPU disables cache-as-RAM, the
+ * cache-as-RAM stack can no longer be used. Functions called
+ * after that point must be inlined to avoid stack usage, and
+ * the compiler must keep local variables in registers rather
+ * than allocating them on the stack. With gcc 4.5.0, some
+ * functions declared as inline are not being inlined, so the
+ * qualifier __attribute__((always_inline)) is added to their
+ * declarations to force inlining.
+ */
+static inline __attribute__((always_inline)) msr_t rdmsr(unsigned index)
{
msr_t result;
__asm__ __volatile__ (
@@ -40,7 +50,7 @@ static inline msr_t rdmsr(unsigned index)
return result;
}
-static inline void wrmsr(unsigned index, msr_t msr)
+static inline __attribute__((always_inline)) void wrmsr(unsigned index, msr_t msr)
{
__asm__ __volatile__ (
"wrmsr"