author     arch import user (historical) <svn@openbios.org>    2005-07-06 17:17:25 +0000
committer  arch import user (historical) <svn@openbios.org>    2005-07-06 17:17:25 +0000
commit     6ca7636c8f52560e732cdd5b1c7829cda5aa2bde (patch)
tree       cc45ae7c4dea6e2c5338f52b4314106bf07023be /src/cpu/x86/car
parent     b2ed53dd5669c2c3839633bd2b3b4af709a5b149 (diff)
download   coreboot-6ca7636c8f52560e732cdd5b1c7829cda5aa2bde.tar.xz
Revision: linuxbios@linuxbios.org--devel/freebios--devel--2.0--patch-51
Creator: Yinghai Lu <yhlu@tyan.com>

cache_as_ram for AMD and some intel

git-svn-id: svn://svn.coreboot.org/coreboot/trunk@1967 2b7e53f0-3cfb-0310-b3e9-8179ed1497e1
Diffstat (limited to 'src/cpu/x86/car')
-rw-r--r--  src/cpu/x86/car/cache_as_ram.inc      192
-rw-r--r--  src/cpu/x86/car/cache_as_ram.lds       11
-rw-r--r--  src/cpu/x86/car/cache_as_ram_post.c    82
-rw-r--r--  src/cpu/x86/car/copy_and_run.c        132
4 files changed, 417 insertions, 0 deletions
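
The new cache_as_ram.inc sets up cache-as-RAM (CAR): it clears all MTRRs, marks the DCACHE_RAM_BASE/DCACHE_RAM_SIZE window write-back cacheable with variable MTRR pair 0, reads the window with rep lodsl to allocate cache lines, and then sets CR0.CD/NW so the loaded lines keep serving reads and writes as if they were RAM. Purely as orientation, and not part of the commit, the variable-MTRR write the assembly performs with wrmsr could be sketched in C like this; wrmsr_raw and car_set_var_mtrr are names invented for this note, while the MSR indices 0x200/0x201 and the 0x800 valid bit are the ones the assembly uses:

    #include <stdint.h>

    /* Hypothetical helper: WRMSR takes the MSR index in %ecx and the
     * 64-bit value in %edx:%eax. */
    static inline void wrmsr_raw(uint32_t msr, uint32_t lo, uint32_t hi)
    {
            __asm__ volatile ("wrmsr" : : "c" (msr), "a" (lo), "d" (hi));
    }

    /* Mark 'size' bytes at 'base' write-back cacheable via variable MTRR
     * pair 0 (MTRRphysBase0 = 0x200, MTRRphysMask0 = 0x201). */
    static void car_set_var_mtrr(uint32_t base, uint32_t size)
    {
            wrmsr_raw(0x200, base | 6, 0);           /* 6 = MTRR_TYPE_WRBACK */
            wrmsr_raw(0x201, ~(size - 1) | 0x800,    /* bit 11 = mask valid  */
                      0x0000000f);                   /* 36-bit address mask  */
    }
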
diff --git a/src/cpu/x86/car/cache_as_ram.inc b/src/cpu/x86/car/cache_as_ram.inc
new file mode 100644
index 0000000000..d610efbb3b
--- /dev/null
+++ b/src/cpu/x86/car/cache_as_ram.inc
@@ -0,0 +1,192 @@
+/* We will use 4K bytes only */
+#define CacheSize DCACHE_RAM_SIZE
+#define CacheBase DCACHE_RAM_BASE
+
+#include <cpu/x86/mtrr.h>
+
+ /* Save the BIST result */
+ movl %eax, %ebp
+
+CacheAsRam:
+ /* Hopefully the normal (non-fallback) image can skip this double MTRR setup */
+#if USE_FALLBACK_IMAGE == 1
+
+ /* Clear all MTRRs */
+
+ xorl %edx, %edx
+ movl $fixed_mtrr_msr, %esi
+clear_fixed_var_mtrr:
+ lodsl (%esi), %eax
+ testl %eax, %eax
+ jz clear_fixed_var_mtrr_out
+
+ movl %eax, %ecx
+ xorl %eax, %eax
+ wrmsr
+
+ jmp clear_fixed_var_mtrr
+clear_fixed_var_mtrr_out:
+
+ /* enable caching for CacheSize bytes at CacheBase using a variable MTRR */
+ movl $0x200, %ecx
+ xorl %edx, %edx
+ movl $(CacheBase | MTRR_TYPE_WRBACK), %eax
+ wrmsr
+
+ movl $0x201, %ecx
+ movl $0x0000000f, %edx
+ movl $((~((CacheBase + CacheSize) - 1)) | 0x800), %eax
+ wrmsr
+
+ /* Set the default memory type and enable variable MTRRs */
+ movl $MTRRdefType_MSR, %ecx
+ xorl %edx, %edx
+ /* Enable Variable MTRRs */
+ movl $0x00000800, %eax
+ wrmsr
+
+ /* Disable fast string operation */
+ movl $0x1a0, %ecx
+ rdmsr
+ andl $(~0x1), %eax
+ wrmsr
+#else
+ /* disable cache */
+ movl %cr0, %eax
+ orl $(0x1<<30),%eax
+ movl %eax, %cr0
+
+#endif /* USE_FALLBACK_IMAGE == 1*/
+
+#if 0
+#if defined(XIP_ROM_SIZE) && defined(XIP_ROM_BASE)
+ /* enable write-back caching so we can execute in place
+ * (XIP) from the flash ROM.
+ */
+ movl $0x202, %ecx
+ xorl %edx, %edx
+ movl $(XIP_ROM_BASE | MTRR_TYPE_WRBACK), %eax
+ wrmsr
+
+ movl $0x203, %ecx
+ movl $0x0000000f, %edx
+ movl $(~(XIP_ROM_SIZE - 1) | 0x800), %eax
+ wrmsr
+#endif /* XIP_ROM_SIZE && XIP_ROM_BASE */
+#endif
+
+ /* enable cache */
+ movl %cr0, %eax
+ andl $0x9fffffff,%eax
+ movl %eax, %cr0
+
+#if USE_FALLBACK_IMAGE == 1
+
+// intel_chip_post_macro(0x11) /* post 11 */
+
+ /* Read the range with lodsl */
+ movl $CacheBase, %esi
+ cld
+ movl $(CacheSize>>2), %ecx
+ rep lodsl
+
+ // Disable the cache. This is the trick: processors from the
+ // Pentium Pro onward still respond to cache hits while CD=1 and
+ // NW=1, i.e. read hits are served from the cache and write hits
+ // update the cache, but misses are no longer filled from memory.
+ // With the tags established above and no snoop hits, the cache
+ // therefore behaves as RAM.
+ movl %cr0, %eax
+ orl $0x60000000, %eax
+ movl %eax, %cr0
+
+ /* Clear the range */
+ movl $CacheBase, %edi
+ cld
+ movl $(CacheSize>>2), %ecx
+ xorl %eax, %eax
+ rep stosl
+
+
+#if 1
+ /* check the cache as ram */
+ movl $CacheBase, %esi
+ movl $(CacheSize>>2), %ecx
+.xin1:
+ movl %esi, %eax
+ movl %eax, (%esi)
+ decl %ecx
+ je .xout1
+ add $4, %esi
+ jmp .xin1
+.xout1:
+
+ movl $CacheBase, %esi
+// movl $(CacheSize>>2), %ecx
+ movl $4, %ecx
+.xin1x:
+ movl %esi, %eax
+
+ movl $0x4000, %edx
+ movb %ah, %al
+.testx1:
+ outb %al, $0x80
+ decl %edx
+ jnz .testx1
+
+ movl (%esi), %eax
+ cmpb $0xff, %al
+ je .xin2 /* don't show */
+
+ movl $0x4000, %edx
+.testx2:
+ outb %al, $0x80
+ decl %edx
+ jnz .testx2
+
+.xin2: decl %ecx
+ je .xout1x
+ add $4, %esi
+ jmp .xin1x
+.xout1x:
+
+#endif
+#endif /*USE_FALLBACK_IMAGE == 1*/
+
+// intel_chip_post_macro(0x12) /* post 12 */
+
+ movl $(CacheBase+CacheSize-4), %eax
+ movl %eax, %esp
+
+ /* Load a different set of data segments */
+#if CONFIG_USE_INIT
+ movw $CACHE_RAM_DATA_SEG, %ax
+ movw %ax, %ds
+ movw %ax, %es
+ movw %ax, %ss
+#endif
+
+lout:
+// intel_chip_post_macro(0x13) /* post 13 */
+
+ /* Restore the BIST result */
+ movl %ebp, %eax
+ /* Setting %ebp here should not be needed, but do it anyway */
+ movl %esp, %ebp
+ pushl %eax /* bist */
+ call amd64_main
+ /* We will not go back */
+
+
+fixed_mtrr_msr:
+ .long 0x250, 0x258, 0x259
+ .long 0x268, 0x269, 0x26A
+ .long 0x26B, 0x26C, 0x26D
+ .long 0x26E, 0x26F
+var_mtrr_msr:
+ .long 0x200, 0x201, 0x202, 0x203
+ .long 0x204, 0x205, 0x206, 0x207
+ .long 0x208, 0x209, 0x20A, 0x20B
+ .long 0x20C, 0x20D, 0x20E, 0x20F
+ .long 0x000 /* NULL, end of table */
+.CacheAsRam_out:
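
The clear_fixed_var_mtrr loop above walks the zero-terminated fixed_mtrr_msr/var_mtrr_msr word table with lodsl and writes zero to every MSR it names. A hedged C equivalent of that table walk (wrmsr_raw and clear_all_mtrrs are names invented for this sketch; the MSR list matches the table in the assembly):

    #include <stdint.h>

    static inline void wrmsr_raw(uint32_t msr, uint32_t lo, uint32_t hi)
    {
            __asm__ volatile ("wrmsr" : : "c" (msr), "a" (lo), "d" (hi));
    }

    /* Same MSR indices as fixed_mtrr_msr/var_mtrr_msr: the fixed-range
     * MTRRs first, then the 8 variable base/mask pairs, 0-terminated. */
    static const uint32_t mtrr_msrs[] = {
            0x250, 0x258, 0x259,
            0x268, 0x269, 0x26A, 0x26B, 0x26C, 0x26D, 0x26E, 0x26F,
            0x200, 0x201, 0x202, 0x203, 0x204, 0x205, 0x206, 0x207,
            0x208, 0x209, 0x20A, 0x20B, 0x20C, 0x20D, 0x20E, 0x20F,
            0x000 /* end of table */
    };

    static void clear_all_mtrrs(void)
    {
            const uint32_t *msr;
            for (msr = mtrr_msrs; *msr != 0; msr++)
                    wrmsr_raw(*msr, 0, 0);   /* zero both halves of each MTRR */
    }
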
diff --git a/src/cpu/x86/car/cache_as_ram.lds b/src/cpu/x86/car/cache_as_ram.lds
new file mode 100644
index 0000000000..7f1b373dba
--- /dev/null
+++ b/src/cpu/x86/car/cache_as_ram.lds
@@ -0,0 +1,11 @@
+SECTIONS {
+ .init . : {
+ _init = .;
+ *(.init.text);
+ *(.init.rodata);
+ *(.init.rodata.*);
+ . = ALIGN(16);
+ _einit = .;
+ }
+
+}
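
The cache_as_ram.lds fragment only collects .init.text and .init.rodata between the _init and _einit symbols, 16-byte aligned, so all CAR-time code and its read-only data end up in one contiguous region. As an illustration only (not from this commit), a symbol can be steered into those sections explicitly with a GCC section attribute; early_hello and early_msg are hypothetical names:

    /* Hypothetical example: place a function and a table into the .init
     * sections that cache_as_ram.lds gathers between _init and _einit. */
    __attribute__((section(".init.text")))
    void early_hello(void)
    {
            /* runs while still executing out of the CAR/ROM image */
    }

    __attribute__((section(".init.rodata")))
    static const char early_msg[] = "hello from .init";
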
diff --git a/src/cpu/x86/car/cache_as_ram_post.c b/src/cpu/x86/car/cache_as_ram_post.c
new file mode 100644
index 0000000000..8cbc1e9cba
--- /dev/null
+++ b/src/cpu/x86/car/cache_as_ram_post.c
@@ -0,0 +1,82 @@
+
+ __asm__ volatile (
+ /*
+ FIXME: back up the CACHE_AS_RAM stack into MMX/SSE registers and restore it once the stack in real RAM is up.
+ This is only needed if we want to go back.
+ */
+
+ /* We don't need cache as ram from now on */
+ /* disable cache */
+ "movl %cr0, %eax\n\t"
+ "orl $(0x1<<30),%eax\n\t"
+ "movl %eax, %cr0\n\t"
+
+ /* Clear the variable MTRR pair (MSRs 0x200/0x201) that covered the CAR region */
+ "movl $0x200, %ecx\n\t"
+ "xorl %edx, %edx\n\t"
+ "xorl %eax, %eax\n\t"
+ "wrmsr\n\t"
+ "movl $0x201, %ecx\n\t"
+ "wrmsr\n\t"
+
+ /* enable fast string operation */
+ "movl $0x1a0, %ecx\n\t"
+ "rdmsr\n\t"
+ "orl $1, %eax\n\t"
+ "wrmsr\n\t"
+
+#if defined(CLEAR_FIRST_1M_RAM)
+ /* enable caching for first 1M using variable mtrr */
+ "movl $0x200, %ecx\n\t"
+ "xorl %edx, %edx\n\t"
+ "movl $(0 | 1), %eax\n\t"
+// "movl $(0 | MTRR_TYPE_WRCOMB), %eax\n\t"
+ "wrmsr\n\t"
+
+ "movl $0x201, %ecx\n\t"
+ "movl $0x0000000f, %edx\n\t" /* AMD 40 bit 0xff*/
+ "movl $((~(( 0 + 0x100000) - 1)) | 0x800), %eax\n\t"
+ "wrmsr\n\t"
+#endif
+
+ /* enable cache */
+ "movl %cr0, %eax\n\t"
+ "andl $0x9fffffff,%eax\n\t"
+ "movl %eax, %cr0\n\t"
+#if defined(CLEAR_FIRST_1M_RAM)
+ /* clear the first 1M */
+ "movl $0x0, %edi\n\t"
+ "cld\n\t"
+ "movl $(0x100000>>2), %ecx\n\t"
+ "xorl %eax, %eax\n\t"
+ "rep stosl\n\t"
+
+ /* disable cache */
+ "movl %cr0, %eax\n\t"
+ "orl $(0x1<<30),%eax\n\t"
+ "movl %eax, %cr0\n\t"
+
+ /* enable caching for first 1M using variable mtrr */
+ "movl $0x200, %ecx\n\t"
+ "xorl %edx, %edx\n\t"
+ "movl $(0 | 6), %eax\n\t"
+// "movl $(0 | MTRR_TYPE_WRBACK), %eax\n\t"
+ "wrmsr\n\t"
+
+ "movl $0x201, %ecx\n\t"
+ "movl $0x0000000f, %edx\n\t" /* AMD 40 bit 0xff*/
+ "movl $((~(( 0 + 0x100000) - 1)) | 0x800), %eax\n\t"
+ "wrmsr\n\t"
+
+ /* enable cache */
+ "movl %cr0, %eax\n\t"
+ "andl $0x9fffffff,%eax\n\t"
+ "movl %eax, %cr0\n\t"
+ "invd\n\t"
+
+ /*
+ FIXME: hopefully %esp and %ebp do not need to change here, so the values saved in MMX/SSE could be restored.
+ But part of the range is I/O related, so don't go back.
+ */
+#endif
+ );
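
cache_as_ram_post.c is the teardown: it disables the cache, zeroes the variable MTRR pair that covered the CAR window, re-enables the fast-string bit that cache_as_ram.inc cleared, and (under CLEAR_FIRST_1M_RAM) zeroes the first megabyte of real RAM. The fast-string toggle is a plain read-modify-write of MSR 0x1a0 (IA32_MISC_ENABLE, bit 0); a minimal C sketch, with enable_fast_strings being an invented name:

    #include <stdint.h>

    /* Re-enable fast string operations, mirroring the rdmsr/orl/wrmsr
     * sequence in the inline assembly above. */
    static void enable_fast_strings(void)
    {
            uint32_t lo, hi;

            /* RDMSR: index in %ecx, result in %edx:%eax. */
            __asm__ volatile ("rdmsr" : "=a" (lo), "=d" (hi) : "c" (0x1a0));
            lo |= 1;        /* bit 0: fast-string enable */
            __asm__ volatile ("wrmsr" : : "c" (0x1a0), "a" (lo), "d" (hi));
    }
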
diff --git a/src/cpu/x86/car/copy_and_run.c b/src/cpu/x86/car/copy_and_run.c
new file mode 100644
index 0000000000..89a864d4fc
--- /dev/null
+++ b/src/cpu/x86/car/copy_and_run.c
@@ -0,0 +1,132 @@
+/* by yhlu 6.2005
+ moved from nrv2b.c and some lines from crt0.S
+*/
+#ifndef ENDIAN
+#define ENDIAN 0
+#endif
+#ifndef BITSIZE
+#define BITSIZE 32
+#endif
+
+#define GETBIT_8(bb, src, ilen) \
+ (((bb = bb & 0x7f ? bb*2 : ((unsigned)src[ilen++]*2+1)) >> 8) & 1)
+
+#define GETBIT_LE16(bb, src, ilen) \
+ (bb*=2,bb&0xffff ? (bb>>16)&1 : (ilen+=2,((bb=(src[ilen-2]+src[ilen-1]*256u)*2+1)>>16)&1))
+
+#define GETBIT_LE32(bb, src, ilen) \
+ (bc > 0 ? ((bb>>--bc)&1) : (bc=31,\
+ bb=*(const uint32_t *)((src)+ilen),ilen+=4,(bb>>31)&1))
+
+#if ENDIAN == 0 && BITSIZE == 8
+#define GETBIT(bb, src, ilen) GETBIT_8(bb, src, ilen)
+#endif
+#if ENDIAN == 0 && BITSIZE == 16
+#define GETBIT(bb, src, ilen) GETBIT_LE16(bb, src, ilen)
+#endif
+#if ENDIAN == 0 && BITSIZE == 32
+#define GETBIT(bb, src, ilen) GETBIT_LE32(bb, src, ilen)
+#endif
+
+static void copy_and_run(unsigned cpu_reset)
+{
+ uint8_t *src, *dst;
+ unsigned long dst_len;
+ unsigned long ilen = 0, olen = 0, last_m_off = 1;
+ uint32_t bb = 0;
+ unsigned bc = 0;
+
+ print_debug("Copying LinuxBIOS to ram.\r\n");
+
+#if !CONFIG_COMPRESS
+ __asm__ volatile (
+ "leal _liseg, %0\n\t"
+ "leal _iseg, %1\n\t"
+ "leal _eiseg, %2\n\t"
+ "subl %1, %2\n\t"
+ : "=a" (src), "=b" (dst), "=c" (dst_len)
+ );
+ memcpy(dst, src, dst_len); /* copy from ROM (src) to RAM (dst) */
+#else
+
+ __asm__ volatile (
+ "leal 4+_liseg, %0\n\t"
+ "leal _iseg, %1\n\t"
+ : "=a" (src) , "=b" (dst)
+ );
+
+#if CONFIG_USE_INIT
+ printk_debug("src=%08x\r\n",src);
+ printk_debug("dst=%08x\r\n",dst);
+#else
+ print_debug("src="); print_debug_hex32(src); print_debug("\r\n");
+ print_debug("dst="); print_debug_hex32(dst); print_debug("\r\n");
+#endif
+
+ for(;;) {
+ unsigned int m_off, m_len;
+ while(GETBIT(bb, src, ilen)) {
+ dst[olen++] = src[ilen++];
+ }
+ m_off = 1;
+ do {
+ m_off = m_off*2 + GETBIT(bb, src, ilen);
+ } while (!GETBIT(bb, src, ilen));
+ if (m_off == 2)
+ {
+ m_off = last_m_off;
+ }
+ else
+ {
+ m_off = (m_off - 3)*256 + src[ilen++];
+ if(m_off == 0xffffffffU)
+ break;
+ last_m_off = ++m_off;
+ }
+ m_len = GETBIT(bb, src, ilen);
+ m_len = m_len*2 + GETBIT(bb, src, ilen);
+ if (m_len == 0)
+ {
+ m_len++;
+ do {
+ m_len = m_len*2 + GETBIT(bb, src, ilen);
+ } while(!GETBIT(bb, src, ilen));
+ m_len += 2;
+ }
+ m_len += (m_off > 0xd00);
+ {
+ const uint8_t *m_pos;
+ m_pos = dst + olen - m_off;
+ dst[olen++] = *m_pos++;
+ do {
+ dst[olen++] = *m_pos++;
+ } while(--m_len > 0);
+ }
+ }
+#endif
+// dump_mem(dst, dst+0x100);
+#if CONFIG_USE_INIT
+ printk_debug("linuxbios_ram.bin length = %08x\r\n", olen);
+#else
+ print_debug("linuxbios_ram.bin length = "); print_debug_hex32(olen); print_debug("\r\n");
+#endif
+ print_debug("Jumping to LinuxBIOS.\r\n");
+
+ if(cpu_reset == 1 ) {
+ __asm__ volatile (
+ "movl $0xffffffff, %ebp\n\t"
+ );
+ }
+ else {
+ __asm__ volatile (
+ "xorl %ebp, %ebp\n\t"
+ );
+ }
+
+ __asm__ volatile (
+ "cli\n\t"
+ "leal _iseg, %edi\n\t"
+ "jmp %edi\n\t"
+ );
+
+}
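
copy_and_run() is the NRV2B decompressor for the compressed LinuxBIOS image between _liseg and _iseg, configured here for a 32-bit little-endian bit buffer (ENDIAN 0, BITSIZE 32). GETBIT_LE32 keeps up to 32 bits in bb with a count in bc, refills four source bytes at a time, and hands bits out most-significant-bit first; the decoded output lands at _iseg, where the final jmp transfers control with cpu_reset passed in %ebp. A standalone sketch of that bit reader, with the struct and the getbit_le32 name invented for this note:

    #include <stdint.h>

    /* Bit reader matching GETBIT_LE32: refill a 32-bit buffer from the
     * compressed stream (little-endian words, as on x86) and return one
     * bit at a time, most significant bit first. */
    struct bitreader {
            const uint8_t *src;
            unsigned long ilen;   /* bytes consumed so far */
            uint32_t bb;          /* bit buffer */
            unsigned bc;          /* bits remaining in bb */
    };

    static int getbit_le32(struct bitreader *r)
    {
            if (r->bc == 0) {
                    /* Refill: the macro reads a whole 32-bit word in place;
                     * assemble it byte by byte here to stay alignment-safe. */
                    r->bb = (uint32_t)r->src[r->ilen]
                          | (uint32_t)r->src[r->ilen + 1] << 8
                          | (uint32_t)r->src[r->ilen + 2] << 16
                          | (uint32_t)r->src[r->ilen + 3] << 24;
                    r->ilen += 4;
                    r->bc = 32;
            }
            r->bc--;
            return (r->bb >> r->bc) & 1;
    }
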