author     Yinghai Lu <yinghailu@gmail.com>   2006-10-04 20:46:15 +0000
committer  Yinghai Lu <yinghailu@gmail.com>   2006-10-04 20:46:15 +0000
commit     d4b278c02c1da92219ebeb34204b9768934aeca3 (patch)
tree       488d097cac9744cfc9b8ff7c89ce69bcb21370cb /src/cpu
parent     2e3757d11c565a8fe68dc2a2c34975e98304533c (diff)
download   coreboot-d4b278c02c1da92219ebeb34204b9768934aeca3.tar.xz
AMD Rev F support
git-svn-id: svn://svn.coreboot.org/coreboot/trunk@2435 2b7e53f0-3cfb-0310-b3e9-8179ed1497e1
Diffstat (limited to 'src/cpu')
-rw-r--r--  src/cpu/amd/car/cache_as_ram.inc                     47
-rw-r--r--  src/cpu/amd/car/copy_and_run.c                       60
-rw-r--r--  src/cpu/amd/car/post_cache_as_ram.c                  34
-rw-r--r--  src/cpu/amd/dualcore/amd_sibling.c                   10
-rw-r--r--  src/cpu/amd/dualcore/dualcore.c                       2
-rw-r--r--  src/cpu/amd/dualcore/dualcore_id.c                   10
-rw-r--r--  src/cpu/amd/model_fxx/Config.lb                       3
-rw-r--r--  src/cpu/amd/model_fxx/fidvid.c                       78
-rw-r--r--  src/cpu/amd/model_fxx/init_cpus.c                   107
-rw-r--r--  src/cpu/amd/model_fxx/model_fxx_init.c              154
-rw-r--r--  src/cpu/amd/model_fxx/model_fxx_update_microcode.c   19
-rw-r--r--  src/cpu/amd/model_gx2/vsmsetup.c                      4
-rw-r--r--  src/cpu/amd/model_lx/vsmsetup.c                       4
-rw-r--r--  src/cpu/amd/mtrr/amd_earlymtrr.c                      1
-rw-r--r--  src/cpu/amd/mtrr/amd_mtrr.c                           4
-rw-r--r--  src/cpu/amd/socket_AM2/Config.lb                     19
-rw-r--r--  src/cpu/amd/socket_AM2/chip.h                         4
-rw-r--r--  src/cpu/amd/socket_AM2/socket_AM2.c                   6
-rw-r--r--  src/cpu/amd/socket_F/Config.lb                       19
-rw-r--r--  src/cpu/amd/socket_F/chip.h                           4
-rw-r--r--  src/cpu/amd/socket_F/socket_F.c                       6
-rw-r--r--  src/cpu/x86/car/copy_and_run.c                        2
-rw-r--r--  src/cpu/x86/lapic/lapic.c                             2
-rw-r--r--  src/cpu/x86/lapic/lapic_cpu_init.c                   11
-rw-r--r--  src/cpu/x86/mtrr/earlymtrr.c                         22
-rw-r--r--  src/cpu/x86/mtrr/mtrr.c                              42
26 files changed, 594 insertions, 80 deletions
diff --git a/src/cpu/amd/car/cache_as_ram.inc b/src/cpu/amd/car/cache_as_ram.inc
index 1cf141230b..58570167a9 100644
--- a/src/cpu/amd/car/cache_as_ram.inc
+++ b/src/cpu/amd/car/cache_as_ram.inc
@@ -17,7 +17,7 @@
cache_as_ram_setup:
/* hope we can skip the double set for normal part */
-#if USE_FALLBACK_IMAGE == 1
+#if ((HAVE_FAILOVER_BOOT==1) && (USE_FAILOVER_IMAGE==1)) || ((HAVE_FAILOVER_BOOT==0) && (USE_FALLBACK_IMAGE==1))
/* check if cpu_init_detected */
movl $MTRRdefType_MSR, %ecx
rdmsr
@@ -56,16 +56,32 @@ clear_fixed_var_mtrr_out:
wrmsr
movl $0x269, %ecx
wrmsr
-#else
+#endif
- #if CacheSize == 0x8000
+#if CacheSize == 0xc000
+ /* enable caching for 16K using fixed mtrr */
+ movl $0x268, %ecx /* fix4k_c4000*/
+ movl $0x06060606, %edx /* WB IO type */
+ xorl %eax, %eax
+ wrmsr
+ /* enable caching for 32K using fixed mtrr */
+ movl $0x269, %ecx /* fix4k_c8000*/
+ movl $0x06060606, %eax /* WB IO type */
+ movl %eax, %edx
+ wrmsr
+
+#endif
+
+
+#if CacheSize == 0x8000
/* enable caching for 32K using fixed mtrr */
movl $0x269, %ecx /* fix4k_c8000*/
movl $0x06060606, %eax /* WB IO type */
movl %eax, %edx
wrmsr
- #else
+#endif
+#if CacheSize < 0x8000
/* enable caching for 16K/8K/4K using fixed mtrr */
movl $0x269, %ecx /* fix4k_cc000*/
#if CacheSize == 0x4000
@@ -79,8 +95,6 @@ clear_fixed_var_mtrr_out:
#endif
xorl %eax, %eax
wrmsr
- #endif
-
#endif
/* enable memory access for first MBs using top_mem */
@@ -88,9 +102,10 @@ clear_fixed_var_mtrr_out:
xorl %edx, %edx
movl $(((CONFIG_LB_MEM_TOPK << 10) + TOP_MEM_MASK) & ~TOP_MEM_MASK) , %eax
wrmsr
-#endif /* USE_FALLBACK_IMAGE == 1*/
+#endif /* USE_FAILOVER_IMAGE == 1*/
+
-#if USE_FALLBACK_IMAGE == 0
+#if ((HAVE_FAILOVER_BOOT==1) && (USE_FAILOVER_IMAGE == 0)) || ((HAVE_FAILOVER_BOOT==0) && (USE_FALLBACK_IMAGE==0))
/* disable cache */
movl %cr0, %eax
orl $(0x1<<30),%eax
@@ -108,12 +123,12 @@ clear_fixed_var_mtrr_out:
wrmsr
movl $0x203, %ecx
- movl $0x0000000f, %edx /* AMD 40 bit */
+ movl $((1<<(CPU_ADDR_BITS-32))-1), %edx /* AMD 40 bit */
movl $(~(XIP_ROM_SIZE - 1) | 0x800), %eax
wrmsr
#endif /* XIP_ROM_SIZE && XIP_ROM_BASE */
-#if USE_FALLBACK_IMAGE == 1
+#if ((HAVE_FAILOVER_BOOT==1) && (USE_FAILOVER_IMAGE==1)) || ((HAVE_FAILOVER_BOOT==0) && (USE_FALLBACK_IMAGE==1))
/* Set the default memory type and enable fixed and variable MTRRs */
movl $MTRRdefType_MSR, %ecx
xorl %edx, %edx
@@ -133,23 +148,25 @@ clear_fixed_var_mtrr_out:
andl $0x9fffffff,%eax
movl %eax, %cr0
-#if USE_FALLBACK_IMAGE == 1
+#if ((HAVE_FAILOVER_BOOT==1) && (USE_FAILOVER_IMAGE==1)) || ((HAVE_FAILOVER_BOOT==0) && (USE_FALLBACK_IMAGE==1))
/* Read the range with lodsl*/
cld
movl $CacheBase, %esi
movl $(CacheSize>>2), %ecx
- rep lodsl
+ rep
+ lodsl
/* Clear the range */
movl $CacheBase, %edi
movl $(CacheSize>>2), %ecx
xorl %eax, %eax
- rep stosl
+ rep
+ stosl
-#endif /*USE_FALLBACK_IMAGE == 1*/
+#endif /*USE_FAILOVER_IMAGE == 1*/
/* set up the stack pointer */
- movl $(CacheBase+CacheSize - 4 - GlobalVarSize), %eax
+ movl $(CacheBase+CacheSize - GlobalVarSize), %eax
movl %eax, %esp
/* Restore the BIST result */
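Note on the hunk at MSR 0x203: it replaces the hard-coded mask high dword 0x0000000f with a value computed from CPU_ADDR_BITS. A minimal standalone C sketch of that expression (the function name is ours, not from the tree) shows why the old constant matched a 36-bit part rather than the "AMD 40 bit" the comment claims:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the assembler expression ((1 << (CPU_ADDR_BITS - 32)) - 1) used for %edx above. */
static uint32_t mtrr_mask_hi(unsigned cpu_addr_bits)
{
        return (1u << (cpu_addr_bits - 32u)) - 1u;
}

int main(void)
{
        /* 40-bit parts need 0xff in MTRRphysMask[63:32]; the old 0x0f only covers 36 bits. */
        printf("40-bit: 0x%02x  36-bit: 0x%02x\n", mtrr_mask_hi(40), mtrr_mask_hi(36));
        return 0;
}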
diff --git a/src/cpu/amd/car/copy_and_run.c b/src/cpu/amd/car/copy_and_run.c
index 76b8a8b847..7471a53076 100644
--- a/src/cpu/amd/car/copy_and_run.c
+++ b/src/cpu/amd/car/copy_and_run.c
@@ -2,7 +2,6 @@
moved from nrv2v.c and some lines from crt0.S
2006/05/02 - stepan: move nrv2b to an extra file.
*/
-
static inline void print_debug_cp_run(const char *strval, uint32_t val)
{
#if CONFIG_USE_INIT
@@ -46,8 +45,13 @@ static void copy_and_run(void)
print_debug_cp_run("src=",(uint32_t)src);
print_debug_cp_run("dst=",(uint32_t)dst);
- olen = unrv2b(src, dst);
+// dump_mem(src, src+0x100);
+
+ olen = unrv2b(src, dst, &ilen);
+ print_debug_cp_run("linxbios_ram.nrv2b length = ", ilen);
+
#endif
+// dump_mem(dst, dst+0x100);
print_debug_cp_run("linxbios_ram.bin length = ", olen);
@@ -61,3 +65,55 @@ static void copy_and_run(void)
);
}
+
+#if CONFIG_AP_CODE_IN_CAR == 1
+
+static void copy_and_run_ap_code_in_car(unsigned ret_addr)
+{
+ uint8_t *src, *dst;
+ unsigned long ilen, olen;
+
+// print_debug("Copying LinuxBIOS AP code to CAR.\r\n");
+
+#if !CONFIG_COMPRESS
+ __asm__ volatile (
+ "leal _liseg_apc, %0\n\t"
+ "leal _iseg_apc, %1\n\t"
+ "leal _eiseg_apc, %2\n\t"
+ "subl %1, %2\n\t"
+ : "=a" (src), "=b" (dst), "=c" (olen)
+ );
+ memcpy(dst, src, olen);
+#else
+
+ __asm__ volatile (
+ "leal _liseg_apc, %0\n\t"
+ "leal _iseg_apc, %1\n\t"
+ : "=a" (src) , "=b" (dst)
+ );
+
+// print_debug_cp_run("src=",(uint32_t)src);
+// print_debug_cp_run("dst=",(uint32_t)dst);
+
+// dump_mem(src, src+0x100);
+
+ olen = unrv2b(src, dst, &ilen);
+// print_debug_cp_run("linxbios_apc.nrv2b length = ", ilen);
+
+#endif
+// dump_mem(dst, dst+0x100);
+
+// print_debug_cp_run("linxbios_apc.bin length = ", olen);
+
+// print_debug("Jumping to LinuxBIOS AP code in CAR.\r\n");
+
+ __asm__ volatile (
+ "movl %0, %%ebp\n\t" /* cpu_reset for hardwaremain dummy */
+ "cli\n\t"
+ "leal _iseg_apc, %%edi\n\t"
+ "jmp *%%edi\n\t"
+ :: "a"(ret_addr)
+ );
+
+}
+#endif
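Note: the decompressor interface changes in this file from unrv2b(src, dst) to a three-argument form that also reports how many compressed bytes were consumed. A hedged sketch of the calling pattern implied by the hunk (the prototype is inferred from the call sites, not copied from the nrv2b implementation):

#include <stdint.h>

/* Inferred: return value is the decompressed length, *ilen receives the compressed length read. */
unsigned long unrv2b(uint8_t *src, uint8_t *dst, unsigned long *ilen);

static void example(uint8_t *src, uint8_t *dst)
{
        unsigned long ilen, olen;
        olen = unrv2b(src, dst, &ilen);
        /* olen: size of linxbios_ram.bin after decompression; ilen: size of the .nrv2b payload. */
        (void)olen;
}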
diff --git a/src/cpu/amd/car/post_cache_as_ram.c b/src/cpu/amd/car/post_cache_as_ram.c
index 0a91326b85..813745d66e 100644
--- a/src/cpu/amd/car/post_cache_as_ram.c
+++ b/src/cpu/amd/car/post_cache_as_ram.c
@@ -18,12 +18,13 @@ static void inline __attribute__((always_inline)) memcopy(void *dest, const voi
{
__asm__ volatile(
"cld\n\t"
- "rep movsl\n\t"
+ "rep; movsl\n\t"
: /* No outputs */
: "S" (src), "D" (dest), "c" ((bytes)>>2)
);
}
+
static void post_cache_as_ram(void)
{
@@ -49,24 +50,34 @@ static void post_cache_as_ram(void)
#error "You need to set CONFIG_LB_MEM_TOPK greater than 1024"
#endif
- set_init_ram_access();
+ set_init_ram_access(); /* So we can access RAM from [1M, CONFIG_LB_MEM_TOPK) */
+// dump_mem(DCACHE_RAM_BASE+DCACHE_RAM_SIZE-0x8000, DCACHE_RAM_BASE+DCACHE_RAM_SIZE-0x7c00);
print_debug("Copying data from cache to ram -- switching to use ram as stack... ");
/* from here don't store more data in CAR */
+#if 0
__asm__ volatile (
"pushl %eax\n\t"
);
- memcopy((CONFIG_LB_MEM_TOPK<<10)-DCACHE_RAM_SIZE, DCACHE_RAM_BASE, DCACHE_RAM_SIZE); //inline
+#endif
+
+ memcopy((void *)((CONFIG_LB_MEM_TOPK<<10)-DCACHE_RAM_SIZE), (void *)DCACHE_RAM_BASE, DCACHE_RAM_SIZE); //inline
+// dump_mem((CONFIG_LB_MEM_TOPK<<10) - 0x8000, (CONFIG_LB_MEM_TOPK<<10) - 0x7c00);
+
__asm__ volatile (
/* set new esp */ /* before _RAMBASE */
"subl %0, %%ebp\n\t"
"subl %0, %%esp\n\t"
::"a"( (DCACHE_RAM_BASE + DCACHE_RAM_SIZE)- (CONFIG_LB_MEM_TOPK<<10) )
); // We need to push %eax to the stack (CAR) before copy stack and pop it later after copy stack and change esp
+#if 0
__asm__ volatile (
"popl %eax\n\t"
);
+#endif
+
+
/* We can put data to stack again */
/* only global variable sysinfo in cache need to be offset */
@@ -77,14 +88,27 @@ static void post_cache_as_ram(void)
disable_cache_as_ram_bsp();
print_debug("Clearing initial memory region: ");
- clear_init_ram(); //except the range from [(CONFIG_LB_MEM_TOPK<<10) - DCACHE_RAM_SIZE, (CONFIG_LB_MEM_TOPK<<10)), that is used as stack in ram
+ clear_init_ram(); //except the range from [(CONFIG_LB_MEM_TOPK<<10) - DCACHE_RAM_SIZE, (CONFIG_LB_MEM_TOPK<<10))
print_debug("Done\r\n");
+// dump_mem((CONFIG_LB_MEM_TOPK<<10) - 0x8000, (CONFIG_LB_MEM_TOPK<<10) - 0x7c00);
+
+#ifndef MEM_TRAIN_SEQ
+#define MEM_TRAIN_SEQ 0
+#endif
+ set_sysinfo_in_ram(1); // So other core0 could start to train mem
+
+#if MEM_TRAIN_SEQ == 1
+// struct sys_info *sysinfox = ((CONFIG_LB_MEM_TOPK<<10) - DCACHE_RAM_GLOBAL_VAR_SIZE);
+
+ // wait for ap memory to trained
+// wait_all_core0_mem_trained(sysinfox); // moved to lapic_init_cpus.c
+#endif
/*copy and execute linuxbios_ram */
copy_and_run();
/* We will not return */
- print_debug("should not be here -\r\n");
+ print_debug("should not be here -\r\n");
}
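Note: the inline asm above moves %esp/%ebp from CAR to the RAM copy of the stack by subtracting (DCACHE_RAM_BASE + DCACHE_RAM_SIZE) - (CONFIG_LB_MEM_TOPK<<10). A worked sketch with hypothetical values (these constants vary per board) shows the delta is negative, i.e. the pointers move up into RAM just below CONFIG_LB_MEM_TOPK:

#include <stdio.h>

int main(void)
{
        /* Hypothetical values, for illustration only. */
        long dcache_ram_base = 0x000cc000;   /* CAR base */
        long dcache_ram_size = 0x00004000;   /* CAR size */
        long lb_mem_topk     = 2048;         /* 2 MB of early RAM */
        long delta = (dcache_ram_base + dcache_ram_size) - (lb_mem_topk << 10);
        /* subl with a negative operand moves the stack up by -delta, so it lands
         * DCACHE_RAM_SIZE below CONFIG_LB_MEM_TOPK<<10, matching the memcopy() destination. */
        printf("delta = %ld bytes\n", delta);
        return 0;
}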
diff --git a/src/cpu/amd/dualcore/amd_sibling.c b/src/cpu/amd/dualcore/amd_sibling.c
index f961f50b2c..3bd604a79f 100644
--- a/src/cpu/amd/dualcore/amd_sibling.c
+++ b/src/cpu/amd/dualcore/amd_sibling.c
@@ -11,6 +11,7 @@
#include <cpu/x86/mtrr.h>
#include <cpu/amd/model_fxx_msr.h>
#include <cpu/amd/model_fxx_rev.h>
+#include <cpu/amd/amdk8_sysconf.h>
static int first_time = 1;
static int disable_siblings = !CONFIG_LOGICAL_CPUS;
@@ -168,6 +169,15 @@ void amd_sibling_init(device_t cpu)
/* Build the cpu device path */
cpu_path.type = DEVICE_PATH_APIC;
cpu_path.u.apic.apic_id = cpu->path.u.apic.apic_id + i * (nb_cfg_54?1:8);
+ if(id.nodeid == 0) {
+ // need some special processing, because may the bsp is not lifted, but the core1 is lifted
+ //defined in northbridge.c
+ if(sysconf.enabled_apic_ext_id && (!sysconf.lift_bsp_apicid)) {
+ cpu->path.u.apic.apic_id += sysconf.apicid_offset;
+ }
+
+ }
+
/* See if I can find the cpu */
new = find_dev_path(cpu->bus, &cpu_path);
diff --git a/src/cpu/amd/dualcore/dualcore.c b/src/cpu/amd/dualcore/dualcore.c
index e2158424e6..51ef6034f7 100644
--- a/src/cpu/amd/dualcore/dualcore.c
+++ b/src/cpu/amd/dualcore/dualcore.c
@@ -19,7 +19,9 @@ static inline unsigned get_core_num_in_bsp(unsigned nodeid)
#if SET_NB_CFG_54 == 1
static inline uint8_t set_apicid_cpuid_lo(void)
{
+#if K8_REV_F_SUPPORT == 0
if(is_cpu_pre_e0()) return 0; // pre_e0 can not be set
+#endif
// set the NB_CFG[54]=1; why the OS will be happy with that ???
msr_t msr;
diff --git a/src/cpu/amd/dualcore/dualcore_id.c b/src/cpu/amd/dualcore/dualcore_id.c
index 389969795b..c8c8d8d443 100644
--- a/src/cpu/amd/dualcore/dualcore_id.c
+++ b/src/cpu/amd/dualcore/dualcore_id.c
@@ -20,6 +20,8 @@ static inline unsigned get_initial_apicid(void)
}
//called by amd_siblings too
+#define CORE_ID_BIT 1
+#define NODE_ID_BIT 3
struct node_core_id get_node_core_id(unsigned nb_cfg_54)
{
struct node_core_id id;
@@ -27,15 +29,15 @@ struct node_core_id get_node_core_id(unsigned nb_cfg_54)
if( nb_cfg_54) {
// when NB_CFG[54] is set, nodeid = ebx[27:25], coreid = ebx[24]
id.coreid = (cpuid_ebx(1) >> 24) & 0xf;
- id.nodeid = (id.coreid>>1);
- id.coreid &= 1;
+ id.nodeid = (id.coreid>>CORE_ID_BIT);
+ id.coreid &= ((1<<CORE_ID_BIT)-1);
}
else
{
// when NB_CFG[54] is clear, nodeid = ebx[26:24], coreid = ebx[27]
id.nodeid = (cpuid_ebx(1) >> 24) & 0xf;
- id.coreid = (id.nodeid>>3);
- id.nodeid &= 7;
+ id.coreid = (id.nodeid>>NODE_ID_BIT);
+ id.nodeid &= ((1<<NODE_ID_BIT)-1);
}
return id;
}
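Note: this hunk only parameterizes the bit widths via CORE_ID_BIT/NODE_ID_BIT. A standalone sketch of the same decode (field layout taken from the comments in the hunk; the helper name is ours):

#include <stdio.h>

#define CORE_ID_BIT 1
#define NODE_ID_BIT 3

struct node_core_id { unsigned nodeid, coreid; };

static struct node_core_id decode(unsigned ebx1, int nb_cfg_54)
{
        struct node_core_id id;
        unsigned field = (ebx1 >> 24) & 0xf;   /* initial APIC id field from cpuid(1).ebx */
        if (nb_cfg_54) {                       /* NB_CFG[54]=1: node in the high bits, core in bit 0 */
                id.nodeid = field >> CORE_ID_BIT;
                id.coreid = field & ((1 << CORE_ID_BIT) - 1);
        } else {                               /* NB_CFG[54]=0: core in the high bit, node in bits 2:0 */
                id.coreid = field >> NODE_ID_BIT;
                id.nodeid = field & ((1 << NODE_ID_BIT) - 1);
        }
        return id;
}

int main(void)
{
        struct node_core_id id = decode(0x5u << 24, 1);   /* field 5 -> node 2, core 1 */
        printf("node %u core %u\n", id.nodeid, id.coreid);
        return 0;
}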
diff --git a/src/cpu/amd/model_fxx/Config.lb b/src/cpu/amd/model_fxx/Config.lb
index ca83a1def1..0adfa28d08 100644
--- a/src/cpu/amd/model_fxx/Config.lb
+++ b/src/cpu/amd/model_fxx/Config.lb
@@ -1,7 +1,10 @@
uses HAVE_INIT_TIMER
uses HAVE_MOVNTI
+uses CPU_ADDR_BITS
+
default HAVE_INIT_TIMER=1
default HAVE_MOVNTI=1
+default CPU_ADDR_BITS=40
dir /cpu/x86/tsc
dir /cpu/x86/fpu
dir /cpu/x86/mmx
diff --git a/src/cpu/amd/model_fxx/fidvid.c b/src/cpu/amd/model_fxx/fidvid.c
index 8d1b84c24f..255a6828ed 100644
--- a/src/cpu/amd/model_fxx/fidvid.c
+++ b/src/cpu/amd/model_fxx/fidvid.c
@@ -4,6 +4,12 @@
#define K8_SET_FIDVID_STORE_AP_APICID_AT_FIRST 1
+#ifndef SB_VFSMAF
+#define SB_VFSMAF 1
+#endif
+
+#define FX_SUPPORT 1
+
static inline void print_debug_fv(const char *str, unsigned val)
{
#if K8_SET_FIDVID_DEBUG == 1
@@ -59,7 +65,7 @@ static void enable_fid_change(void)
dword |= (1<<14);// disable the DRAM interface at first, it will be enabled by raminit again
pci_write_config32(PCI_DEV(0, 0x18+i, 2), 0x94, dword);
- dword = 0x23070000; //enable FID/VID change
+ dword = 0x23070700; //enable FID/VID change
// dword = 0x00070000; //enable FID/VID change
pci_write_config32(PCI_DEV(0, 0x18+i, 3), 0x80, dword);
@@ -122,13 +128,27 @@ static unsigned set_fidvid(unsigned apicid, unsigned fidvid, int showmessage)
if((vid_cur==vid) && (fid_cur==fid)) return fidvid;
vid_max = (msr.hi>>(48-32)) & 0x3f;
- fid_max = (msr.lo>>16) & 0x3f;
+ fid_max = ((msr.lo>>16) & 0x3f); //max fid
+#if FX_SUPPORT
+ if(fid_max>=((25-4)*2)) { // FX max fid is 5G
+ fid_max = ((msr.lo>>8) & 0x3f) + 5*2; // max FID is min fid + 1G
+ if(fid_max >= ((25-4)*2)) {
+ fid_max = (10-4)*2; // hard set to 2G
+ }
+ }
+#endif
//set vid to max
msr.hi = 1;
msr.lo = (vid_max<<8) | (fid_cur);
+#if SB_VFSMAF == 1
msr.lo |= (1<<16); // init changes
+#endif
wrmsr(0xc0010041, msr);
+#if SB_VFSMAF == 0
+ ldtstop_sb();
+#endif
+
for(loop=0;loop<100000;loop++){
msr = rdmsr(0xc0010042);
@@ -159,8 +179,13 @@ static unsigned set_fidvid(unsigned apicid, unsigned fidvid, int showmessage)
//set target fid
msr.hi = (100000/5);
msr.lo = (vid_cur<<8) | fid_cur;
+#if SB_VFSMAF == 1
msr.lo |= (1<<16); // init changes
+#endif
wrmsr(0xc0010041, msr);
+#if SB_VFSMAF == 0
+ ldtstop_sb();
+#endif
#if K8_SET_FIDVID_DEBUG == 1
@@ -186,8 +211,13 @@ static unsigned set_fidvid(unsigned apicid, unsigned fidvid, int showmessage)
//set vid to final
msr.hi = 1;
msr.lo = (vid<<8) | (fid_cur);
+#if SB_VFSMAF == 1
msr.lo |= (1<<16); // init changes
+#endif
wrmsr(0xc0010041, msr);
+#if SB_VFSMAF == 0
+ ldtstop_sb();
+#endif
for(loop=0;loop<100000;loop++){
msr = rdmsr(0xc0010042);
@@ -215,10 +245,21 @@ static void init_fidvid_ap(unsigned bsp_apicid, unsigned apicid)
msr_t msr;
uint32_t vid_cur;
uint32_t fid_cur;
+ uint32_t fid_max;
int loop;
msr = rdmsr(0xc0010042);
- send = ((msr.lo>>16) & 0x3f) << 8; //max fid
+ fid_max = ((msr.lo>>16) & 0x3f); //max fid
+#if FX_SUPPORT
+ if(fid_max>=((25-4)*2)) { // FX max fid is 5G
+ fid_max = ((msr.lo>>8) & 0x3f) + 5*2; // max FID is min fid + 1G
+ if(fid_max >= ((25-4)*2)) {
+ fid_max = (10-4)*2; // hard set to 2G
+ }
+ }
+#endif
+ send = fid_max<<8;
+
send |= ((msr.hi>>(48-32)) & 0x3f) << 16; //max vid
send |= (apicid<<24); // ap apicid
@@ -342,6 +383,14 @@ static void init_fidvid_bsp(unsigned bsp_apicid)
msr_t msr;
msr = rdmsr(0xc0010042);
fid_max = ((msr.lo>>16) & 0x3f); //max fid
+#if FX_SUPPORT == 1
+ if(fid_max>=((25-4)*2)) { // FX max fid is 5G
+ fid_max = ((msr.lo>>8) & 0x3f) + 5*2; // max FID is min fid + 1G
+ if(fid_max >= ((25-4)*2)) {
+ fid_max = (10-4)*2; // hard set to 2G
+ }
+ }
+#endif
vid_max = ((msr.hi>>(48-32)) & 0x3f); //max vid
fv.common_fidvid = (fid_max<<8)|(vid_max<<16);
@@ -366,6 +415,29 @@ static void init_fidvid_bsp(unsigned bsp_apicid)
#endif
+#if 0
+ unsigned fid, vid;
+ // Can we use max only? So we can only set fid in one around, otherwise we need to set that to max after raminit
+ // set fid vid to DQS training required
+ fid = (fv.common_fidvid >> 8) & 0x3f;
+ vid = (fv.common_fidvid >> 16) & 0x3f;
+
+ if(fid>(10-4)*2) {
+ fid = (10-4)*2; //x10
+ }
+
+ if(vid>=0x1f) {
+ vid+= 4; //unit is 12.5mV
+ } else {
+ vid+= 2; //unit is 25mV
+ }
+
+ fv.common_fidvid = (fid<<8) | (vid<<16);
+
+ print_debug_fv("common_fidvid=", fv.common_fidvid);
+
+#endif
+
// set BSP fid and vid
print_debug_fv("bsp apicid=", bsp_apicid);
fv.common_fidvid = set_fidvid(bsp_apicid, fv.common_fidvid, 1);
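Note: the FX clamp added in three places above works in K8 FID units. Reading the hunk's own comments ((25-4)*2 is "5G", 5*2 is "1G", (10-4)*2 is "2G"), the implied encoding is freq = (fid/2 + 4) * 200 MHz; a tiny sketch under that assumption:

#include <stdio.h>

/* Assumption (derived from the comments in this hunk): freq_MHz = (fid/2 + 4) * 200. */
static unsigned fid_to_mhz(unsigned fid)
{
        return (fid / 2 + 4) * 200;
}

int main(void)
{
        printf("%u %u\n", fid_to_mhz((25 - 4) * 2), fid_to_mhz((10 - 4) * 2));
        /* prints 5000 2000 -- the "FX max fid is 5G" cap and the "hard set to 2G" fallback;
         * a fid increment of 5*2 is 5 multiplier steps of 200 MHz, i.e. the "+1G" in the comment. */
        return 0;
}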
diff --git a/src/cpu/amd/model_fxx/init_cpus.c b/src/cpu/amd/model_fxx/init_cpus.c
index 50b1532f26..4e38a23be0 100644
--- a/src/cpu/amd/model_fxx/init_cpus.c
+++ b/src/cpu/amd/model_fxx/init_cpus.c
@@ -1,6 +1,12 @@
//it takes the ENABLE_APIC_EXT_ID and APIC_ID_OFFSET and LIFT_BSP_APIC_ID
#ifndef K8_SET_FIDVID
- #define K8_SET_FIDVID 0
+ #if K8_REV_F_SUPPORT == 0
+ #define K8_SET_FIDVID 0
+ #else
+ // for rev F, need to set FID to max
+ #define K8_SET_FIDVID 1
+ #endif
+
#endif
#ifndef K8_SET_FIDVID_CORE0_ONLY
@@ -8,6 +14,43 @@
#define K8_SET_FIDVID_CORE0_ONLY 1
#endif
+static inline void print_initcpu8 (const char *strval, unsigned val)
+{
+#if CONFIG_USE_INIT
+ printk_debug("%s%02x\r\n", strval, val);
+#else
+ print_debug(strval); print_debug_hex8(val); print_debug("\r\n");
+#endif
+}
+
+static inline void print_initcpu8_nocr (const char *strval, unsigned val)
+{
+#if CONFIG_USE_INIT
+ printk_debug("%s%02x", strval, val);
+#else
+ print_debug(strval); print_debug_hex8(val);
+#endif
+}
+
+
+static inline void print_initcpu16 (const char *strval, unsigned val)
+{
+#if CONFIG_USE_INIT
+ printk_debug("%s%04x\r\n", strval, val);
+#else
+ print_debug(strval); print_debug_hex16(val); print_debug("\r\n");
+#endif
+}
+
+static inline void print_initcpu(const char *strval, unsigned val)
+{
+#if CONFIG_USE_INIT
+ printk_debug("%s%08x\r\n", strval, val);
+#else
+ print_debug(strval); print_debug_hex32(val); print_debug("\r\n");
+#endif
+}
+
typedef void (*process_ap_t)(unsigned apicid, void *gp);
//core_range = 0 : all cores
@@ -44,7 +87,11 @@ static void for_each_ap(unsigned bsp_apicid, unsigned core_range, process_ap_t p
j = ((pci_read_config32(PCI_DEV(0, 0x18+i, 3), 0xe8) >> 12) & 3);
if(nb_cfg_54) {
if(j == 0 ){ // if it is single core, we need to increase siblings for apic calculation
- e0_later_single_core = is_e0_later_in_bsp(i); // single core
+ #if K8_REV_F_SUPPORT == 0
+ e0_later_single_core = is_e0_later_in_bsp(i); // single core
+ #else
+ e0_later_single_core = is_cpu_f0_in_bsp(i); // We can read cpuid(1) from Func3
+ #endif
}
if(e0_later_single_core) {
j=1;
@@ -57,14 +104,17 @@ static void for_each_ap(unsigned bsp_apicid, unsigned core_range, process_ap_t p
if(core_range == 2) {
jstart = 1;
}
+ else {
+ jstart = 0;
+ }
if(e0_later_single_core || disable_siblings || (core_range==1)) {
jend = 0;
} else {
jend = siblings;
- }
-
-
+ }
+
+
for(j=jstart; j<=jend; j++) {
ap_apicid = i * (nb_cfg_54?(siblings+1):1) + j * (nb_cfg_54?1:8);
@@ -141,10 +191,10 @@ static void wait_cpu_state(unsigned apicid, unsigned state)
if((readback & 0xff) == state) break; //target cpu is in stage started
}
}
-
static void wait_ap_started(unsigned ap_apicid, void *gp )
{
wait_cpu_state(ap_apicid, 0x33); // started
+ print_initcpu8_nocr(" ", ap_apicid);
}
static void wait_all_aps_started(unsigned bsp_apicid)
@@ -152,9 +202,11 @@ static void wait_all_aps_started(unsigned bsp_apicid)
for_each_ap(bsp_apicid, 0 , wait_ap_started, (void *)0);
}
-static void wait_all_other_cores_started(unsigned bsp_apicid)
+static void wait_all_other_cores_started(unsigned bsp_apicid) // all aps other than core0
{
+ print_debug("started ap apicid: ");
for_each_ap(bsp_apicid, 2 , wait_ap_started, (void *)0);
+ print_debug("\r\n");
}
static void allow_all_aps_stop(unsigned bsp_apicid)
@@ -162,8 +214,23 @@ static void allow_all_aps_stop(unsigned bsp_apicid)
lapic_write(LAPIC_MSG_REG, (bsp_apicid<<24) | 0x44); // allow aps to stop
}
+static void STOP_CAR_AND_CPU(void)
+{
+ disable_cache_as_ram(); // inline
+ stop_this_cpu(); // inline, it will stop all cores except node0/core0 the bsp ....
+}
+#if RAMINIT_SYSINFO == 1
+
+#if MEM_TRAIN_SEQ != 1
+static inline void train_ram_on_node(unsigned nodeid, unsigned coreid, struct sys_info *sysinfo, unsigned retcall) {}
+#else
+static inline void train_ram_on_node(unsigned nodeid, unsigned coreid, struct sys_info *sysinfo, unsigned retcall);
+#endif
+
+#endif
#if RAMINIT_SYSINFO == 1
+
static unsigned init_cpus(unsigned cpu_init_detectedx ,struct sys_info *sysinfo)
#else
static unsigned init_cpus(unsigned cpu_init_detectedx)
@@ -251,14 +318,16 @@ static unsigned init_cpus(unsigned cpu_init_detectedx)
init_fidvid_ap(bsp_apicid, apicid);
#endif
- // We need to stop the CACHE as RAM for this CPU, really?
- wait_cpu_state(bsp_apicid, 0x44);
- lapic_write(LAPIC_MSG_REG, (apicid<<24) | 0x44); // bsp can not check it before stop_this_cpu
+ // We need to stop the CACHE as RAM for this CPU, really?
+ wait_cpu_state(bsp_apicid, 0x44);
+ lapic_write(LAPIC_MSG_REG, (apicid<<24) | 0x44); // bsp can not check it before stop_this_cpu
+ set_init_ram_access();
+ #if RAMINIT_SYSINFO == 1
+ train_ram_on_node(id.nodeid, id.coreid, sysinfo, STOP_CAR_AND_CPU);
+ #endif
- set_init_ram_access();
- disable_cache_as_ram(); // inline
- stop_this_cpu(); // inline, it will stop all cores except node0/core0 the bsp ....
- }
+ STOP_CAR_AND_CPU();
+ }
return bsp_apicid;
}
@@ -281,9 +350,13 @@ static void wait_all_core0_started(void)
unsigned i;
unsigned nodes = get_nodes();
- for(i=1;i<nodes;i++) { // skip bsp, because it is running on bsp
- while(!is_core0_started(i)) {}
- }
+ print_debug("core0 started: ");
+ for(i=1;i<nodes;i++) { // skip bsp, because it is running on bsp
+ while(!is_core0_started(i)) {}
+ print_initcpu8_nocr(" ", i);
+ }
+ print_debug("\r\n");
+
}
#endif
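Note: for_each_ap() above derives each AP's APIC id as i * (nb_cfg_54 ? (siblings+1) : 1) + j * (nb_cfg_54 ? 1 : 8). A standalone sketch of that arithmetic for a hypothetical two-node, dual-core system with NB_CFG[54] set:

#include <stdio.h>

int main(void)
{
        unsigned nb_cfg_54 = 1, siblings = 1;   /* example: dual-core parts, NB_CFG[54]=1 */
        unsigned i, j;
        for (i = 0; i < 2; i++)                 /* node */
                for (j = 0; j <= siblings; j++) /* core */
                        printf("node %u core %u -> apicid %u\n", i, j,
                               i * (nb_cfg_54 ? (siblings + 1) : 1) + j * (nb_cfg_54 ? 1 : 8));
        return 0;                               /* prints apicids 0, 1, 2, 3 */
}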
diff --git a/src/cpu/amd/model_fxx/model_fxx_init.c b/src/cpu/amd/model_fxx/model_fxx_init.c
index 16d6509430..9e0ba84802 100644
--- a/src/cpu/amd/model_fxx/model_fxx_init.c
+++ b/src/cpu/amd/model_fxx/model_fxx_init.c
@@ -13,6 +13,7 @@
#include <device/device.h>
#include <device/pci.h>
#include <string.h>
+#include <cpu/x86/msr.h>
#include <cpu/x86/pae.h>
#include <pc80/mc146818rtc.h>
#include <cpu/x86/lapic.h>
@@ -29,6 +30,17 @@
#include <cpu/amd/model_fxx_msr.h>
+void cpus_ready_for_init(void)
+{
+#if MEM_TRAIN_SEQ == 1
+ struct sys_info *sysinfox = (struct sys_info *)((CONFIG_LB_MEM_TOPK<<10) - DCACHE_RAM_GLOBAL_VAR_SIZE);
+ // wait for ap memory to trained
+ wait_all_core0_mem_trained(sysinfox);
+#endif
+}
+
+
+#if K8_REV_F_SUPPORT == 0
int is_e0_later_in_bsp(int nodeid)
{
uint32_t val;
@@ -53,6 +65,18 @@ int is_e0_later_in_bsp(int nodeid)
return e0_later;
}
+#endif
+
+#if K8_REV_F_SUPPORT == 1
+int is_cpu_f0_in_bsp(int nodeid)
+{
+ uint32_t dword;
+ device_t dev;
+ dev = dev_find_slot(0, PCI_DEVFN(0x18+nodeid, 3));
+ dword = pci_read_config32(dev, 0xfc);
+ return (dword & 0xfff00) == 0x40f00;
+}
+#endif
#define MCI_STATUS 0x401
@@ -265,16 +289,20 @@ static void init_ecc_memory(unsigned node_id)
startk = (pci_read_config32(f1_dev, 0x40 + (node_id*8)) & 0xffff0000) >> 2;
endk = ((pci_read_config32(f1_dev, 0x44 + (node_id*8)) & 0xffff0000) >> 2) + 0x4000;
-#if K8_HW_MEM_HOLE_SIZEK != 0
+#if HW_MEM_HOLE_SIZEK != 0
+ #if K8_REV_F_SUPPORT == 0
if (!is_cpu_pre_e0())
{
+ #endif
uint32_t val;
val = pci_read_config32(f1_dev, 0xf0);
if(val & 1) {
hole_startk = ((val & (0xff<<24)) >> 10);
}
+ #if K8_REV_F_SUPPORT == 0
}
+ #endif
#endif
@@ -294,7 +322,7 @@ static void init_ecc_memory(unsigned node_id)
disable_lapic();
/* Walk through 2M chunks and zero them */
-#if K8_HW_MEM_HOLE_SIZEK != 0
+#if HW_MEM_HOLE_SIZEK != 0
/* here hole_startk can not be equal to begink, never. Also hole_startk is in 2M boundary, 64M? */
if ( (hole_startk != 0) && ((begink < hole_startk) && (endk>(4*1024*1024)))) {
for(basek = begink; basek < hole_startk;
@@ -336,9 +364,11 @@ static void init_ecc_memory(unsigned node_id)
printk_debug(" done\n");
}
+
static inline void k8_errata(void)
{
msr_t msr;
+#if K8_REV_F_SUPPORT == 0
if (is_cpu_pre_c0()) {
/* Erratum 63... */
msr = rdmsr(HWCR_MSR);
@@ -406,8 +436,11 @@ static inline void k8_errata(void)
msr.hi |=1;
wrmsr_amd(CPU_ID_HYPER_EXT_FEATURES, msr);
}
+#endif
+#if K8_REV_F_SUPPORT == 0
if (!is_cpu_pre_e0())
+#endif
{
/* Erratum 110 ... */
msr = rdmsr_amd(CPU_ID_EXT_FEATURES_MSR);
@@ -420,8 +453,95 @@ static inline void k8_errata(void)
msr.lo |= 1 << 6;
wrmsr(HWCR_MSR, msr);
+#if K8_REV_F_SUPPORT == 1
+ /* Erratum 131... */
+ msr = rdmsr(NB_CFG_MSR);
+ msr.lo |= 1 << 20;
+ wrmsr(NB_CFG_MSR, msr);
+#endif
+
}
+#if K8_REV_F_SUPPORT == 1
+static void amd_set_name_string_f(device_t dev)
+{
+ unsigned socket;
+ unsigned cmpCap;
+ unsigned pwrLmt;
+ unsigned brandId;
+ unsigned brandTableIndex;
+ unsigned nN;
+ unsigned unknown = 1;
+
+ uint8_t str[48];
+ uint32_t *p;
+
+ msr_t msr;
+ unsigned i;
+
+ brandId = cpuid_ebx(0x80000001) & 0xffff;
+
+ printk_debug("brandId=%04x\n", brandId);
+ pwrLmt = ((brandId>>14) & 1) | ((brandId>>5) & 0x0e);
+ brandTableIndex = (brandId>>9) & 0x1f;
+ nN = (brandId & 0x3f) | ((brandId>>(15-6)) &(1<<6));
+
+ socket = (dev->device >> 4) & 0x3;
+
+ cmpCap = cpuid_ecx(0x80000008) & 0xff;
+
+
+ if((brandTableIndex == 0) && (pwrLmt == 0)) {
+ memset(str, 0, 48);
+ sprintf(str, "AMD Engineering Sample");
+ unknown = 0;
+ } else {
+
+ memset(str, 0, 48);
+ sprintf(str, "AMD Processor model unknown");
+
+ #if CPU_SOCKET_TYPE == 0x10
+ if(socket == 0x01) { // socket F
+ if ((cmpCap == 1) && ((brandTableIndex==0) ||(brandTableIndex ==1) ||(brandTableIndex == 4)) ) {
+ uint8_t pc[2];
+ unknown = 0;
+ switch (pwrLmt) {
+ case 2: pc[0]= 'E'; pc[1] = 'E'; break;
+ case 6: pc[0]= 'H'; pc[1] = 'E'; break;
+ case 0xa: pc[0]= ' '; pc[1] = ' '; break;
+ case 0xc: pc[0]= 'S'; pc[1] = 'E'; break;
+ default: unknown = 1;
+
+ }
+ if(!unknown) {
+ memset(str, 0, 48);
+ sprintf(str, "Dual-Core AMD Opteron(tm) Processor %1d2%2d %c%c", brandTableIndex<<1, (nN-1)&0x3f, pc[0], pc[1]);
+ }
+ }
+ }
+ #else
+ #if CPU_SOCKET_TYPE == 0x11
+ if(socket == 0x00) { // socket AM2
+ if(cmpCap == 0) {
+ sprintf(str, "Athlon 64");
+ } else {
+ sprintf(str, "Athlon 64 Dual Core");
+ }
+
+ }
+ #endif
+ #endif
+ }
+
+ p = str;
+ for(i=0;i<6;i++) {
+ msr.lo = *p; p++; msr.hi = *p; p++;
+ wrmsr(0xc0010030+i, msr);
+ }
+
+
+}
+#endif
extern void model_fxx_update_microcode(unsigned cpu_deviceid);
int init_processor_name(void);
@@ -435,6 +555,16 @@ void model_fxx_init(device_t dev)
unsigned siblings;
#endif
+#if K8_REV_F_SUPPORT == 1
+ struct cpuinfo_x86 c;
+
+ get_fms(&c, dev->device);
+
+ if((c.x86_model & 0xf0) == 0x40) {
+ amd_set_name_string_f(dev);
+ }
+#endif
+
/* Turn on caching if we haven't already */
x86_enable_cache();
amd_setup_mtrrs();
@@ -504,6 +634,7 @@ static struct device_operations cpu_dev_ops = {
.init = model_fxx_init,
};
static struct cpu_device_id cpu_table[] = {
+#if K8_REV_F_SUPPORT == 0
{ X86_VENDOR_AMD, 0xf50 }, /* B3 */
{ X86_VENDOR_AMD, 0xf51 }, /* SH7-B3 */
{ X86_VENDOR_AMD, 0xf58 }, /* SH7-C0 */
@@ -540,6 +671,25 @@ static struct cpu_device_id cpu_table[] = {
{ X86_VENDOR_AMD, 0x20fc2 },
{ X86_VENDOR_AMD, 0x20f12 }, /* JH-E6 */
{ X86_VENDOR_AMD, 0x20f32 },
+#endif
+
+#if K8_REV_F_SUPPORT == 1
+//AMD_F0_SUPPORT
+ { X86_VENDOR_AMD, 0x40f50 }, /* SH-F0 Socket F (1207): Opteron */
+ { X86_VENDOR_AMD, 0x40f70 }, /* AM2: Athlon64/Athlon64 FX */
+ { X86_VENDOR_AMD, 0x40f40 }, /* S1g1: Mobile Athlon64 */
+ { X86_VENDOR_AMD, 0x40f11 }, /* JH-F1 Socket F (1207): Opteron Dual Core */
+ { X86_VENDOR_AMD, 0x40f31 }, /* AM2: Athlon64 x2/Athlon64 FX Dual Core */
+ { X86_VENDOR_AMD, 0x40f01 }, /* S1g1: Mobile Athlon64 */
+ { X86_VENDOR_AMD, 0x40f12 }, /* JH-F2 Socket F (1207): Opteron Dual Core */
+ { X86_VENDOR_AMD, 0x40f32 }, /* AM2 : Opteron Dual Core/Athlon64 x2/ Athlon64 FX Dual Core */
+ { X86_VENDOR_AMD, 0x40fb2 }, /* BH-F2 Socket AM2:Athlon64 x2/ Mobile Athlon64 x2 */
+ { X86_VENDOR_AMD, 0x40f82 }, /* S1g1:Turion64 x2 */
+ { X86_VENDOR_AMD, 0x40ff2 }, /* DH-F2 Socket AM2: Athlon64 */
+ { X86_VENDOR_AMD, 0x40fc2 }, /* S1g1:Turion64 */
+ { X86_VENDOR_AMD, 0x40f13 }, /* JH-F3 Socket F (1207): Opteron Dual Core */
+ { X86_VENDOR_AMD, 0x40f33 }, /* AM2 : Opteron Dual Core/Athlon64 x2/ Athlon64 FX Dual Core */
+#endif
{ 0, 0 },
};
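Note: amd_set_name_string_f() above ends by writing the 48-byte name string into the six 64-bit name MSRs starting at 0xc0010030, low dword first. A standalone sketch of just that packing step (the string and the printf stand in for the real wrmsr() calls):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        uint8_t str[48];
        uint32_t *p = (uint32_t *)str;
        unsigned i;

        memset(str, 0, sizeof(str));
        snprintf((char *)str, sizeof(str), "AMD Engineering Sample");   /* example text */

        for (i = 0; i < 6; i++) {
                uint32_t lo = *p++;
                uint32_t hi = *p++;
                /* the patch does wrmsr(0xc0010030 + i, msr) here */
                printf("MSR 0x%08x <- hi=0x%08x lo=0x%08x\n", 0xc0010030 + i, hi, lo);
        }
        return 0;
}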
diff --git a/src/cpu/amd/model_fxx/model_fxx_update_microcode.c b/src/cpu/amd/model_fxx/model_fxx_update_microcode.c
index 63820f5bed..e210846479 100644
--- a/src/cpu/amd/model_fxx/model_fxx_update_microcode.c
+++ b/src/cpu/amd/model_fxx/model_fxx_update_microcode.c
@@ -1,5 +1,5 @@
/* Copyright 2005 AMD
- * 2005.08 yhlu add microcode support
+ * 2005.08 yhlu add microcode support
*/
/*============================================================================
Copyright 2005 ADVANCED MICRO DEVICES, INC. All Rights Reserved.
@@ -52,10 +52,15 @@ $1.0$
static uint8_t microcode_updates[] __attribute__ ((aligned(16))) = {
-#include "microcode_rev_c.h"
-#include "microcode_rev_d.h"
-#include "microcode_rev_e.h"
+#if K8_REV_F_SUPPORT == 0
+ #include "microcode_rev_c.h"
+ #include "microcode_rev_d.h"
+ #include "microcode_rev_e.h"
+#endif
+#if K8_REV_F_SUPPORT == 1
+// #include "microcode_rev_f.h"
+#endif
/* Dummy terminator */
0x0, 0x0, 0x0, 0x0,
0x0, 0x0, 0x0, 0x0,
@@ -65,6 +70,7 @@ static uint8_t microcode_updates[] __attribute__ ((aligned(16))) = {
static unsigned get_equivalent_processor_rev_id(unsigned orig_id) {
static unsigned id_mapping_table[] = {
+ #if K8_REV_F_SUPPORT == 0
0x0f48, 0x0048,
0x0f58, 0x0048,
@@ -85,6 +91,11 @@ static unsigned get_equivalent_processor_rev_id(unsigned orig_id) {
0x20f12, 0x0210,
0x20f32, 0x0210,
0x20fb1, 0x0210,
+ #endif
+
+ #if K8_REV_F_SUPPORT == 1
+
+ #endif
};
diff --git a/src/cpu/amd/model_gx2/vsmsetup.c b/src/cpu/amd/model_gx2/vsmsetup.c
index 61cc915488..add010b790 100644
--- a/src/cpu/amd/model_gx2/vsmsetup.c
+++ b/src/cpu/amd/model_gx2/vsmsetup.c
@@ -254,6 +254,7 @@ void do_vsmbios(void)
unsigned char *buf;
unsigned int size = SMM_SIZE*1024;
int i;
+ unsigned long ilen, olen;
printk_err("do_vsmbios\n");
/* clear vsm bios data area */
@@ -273,7 +274,8 @@ void do_vsmbios(void)
rom = ((unsigned long) 0) - (ROM_SIZE + 64*1024);
buf = (unsigned char *) 0x60000;
- unrv2b((uint8_t *)rom, buf);
+ olen = unrv2b((uint8_t *)rom, buf, &ilen);
+ printk_debug("buf ilen %d olen%d\n", ilen, olen);
printk_debug("buf %p *buf %d buf[256k] %d\n",
buf, buf[0], buf[SMM_SIZE*1024]);
printk_debug("buf[0x20] signature is %x:%x:%x:%x\n",
diff --git a/src/cpu/amd/model_lx/vsmsetup.c b/src/cpu/amd/model_lx/vsmsetup.c
index 3edced36e3..0a5f792b49 100644
--- a/src/cpu/amd/model_lx/vsmsetup.c
+++ b/src/cpu/amd/model_lx/vsmsetup.c
@@ -295,6 +295,7 @@ void do_vsmbios(void)
unsigned char *buf;
unsigned int size = SMM_SIZE*1024;
int i;
+ unsigned long ilen, olen;
printk_err("do_vsmbios\n");
/* clear vsm bios data area */
@@ -316,7 +317,8 @@ void do_vsmbios(void)
rom = 0xfffc8000;
buf = (unsigned char *) VSA2_BUFFER;
- unrv2b((uint8_t *)rom, buf);
+ olen = unrv2b((uint8_t *)rom, buf, &ilen);
+ printk_debug("buf ilen %d olen%d\n", ilen, olen);
printk_debug("buf %p *buf %d buf[256k] %d\n",
buf, buf[0], buf[SMM_SIZE*1024]);
printk_debug("buf[0x20] signature is %x:%x:%x:%x\n",
diff --git a/src/cpu/amd/mtrr/amd_earlymtrr.c b/src/cpu/amd/mtrr/amd_earlymtrr.c
index f2de79102b..948d4ac83b 100644
--- a/src/cpu/amd/mtrr/amd_earlymtrr.c
+++ b/src/cpu/amd/mtrr/amd_earlymtrr.c
@@ -15,7 +15,6 @@ static void do_amd_early_mtrr_init(const unsigned long *mtrr_msrs)
*/
msr_t msr;
const unsigned long *msr_addr;
- unsigned long cr0;
#if 0
/* Enable the access to AMD RdDram and WrDram extension bits */
msr = rdmsr(SYSCFG_MSR);
diff --git a/src/cpu/amd/mtrr/amd_mtrr.c b/src/cpu/amd/mtrr/amd_mtrr.c
index b422f9f425..e8e9273868 100644
--- a/src/cpu/amd/mtrr/amd_mtrr.c
+++ b/src/cpu/amd/mtrr/amd_mtrr.c
@@ -149,7 +149,7 @@ void amd_setup_mtrrs(void)
msr.lo = state.mmio_basek << 10;
wrmsr(TOP_MEM, msr);
- if(state.tomk>(4*1024*1024)) {
+ if(state.tomk > (4*1024*1024)) {
/* Setup TOP_MEM2 */
msr.hi = state.tomk >> 22;
msr.lo = state.tomk << 10;
@@ -180,7 +180,7 @@ void amd_setup_mtrrs(void)
/* FIXME we should probably query the cpu for this
* but so far this is all any recent AMD cpu has supported.
*/
- address_bits = 40;
+ address_bits = CPU_ADDR_BITS; //K8 could be 40, and GH could be 48
/* Now that I have mapped what is memory and what is not
* Setup the mtrrs so we can cache the memory.
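Note: amd_setup_mtrrs() converts state.tomk (in KB) into the 64-bit byte address the TOP_MEM/TOP_MEM2 MSRs expect with hi = tomk >> 22 and lo = tomk << 10. A small worked example for a hypothetical 8 GB top of memory:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t tomk = 8u * 1024 * 1024;   /* example: 8 GB expressed in KB */
        uint32_t hi = tomk >> 22;           /* 0x2        */
        uint32_t lo = tomk << 10;           /* 0x00000000 */
        printf("TOP_MEM2 = 0x%llx\n", ((unsigned long long)hi << 32) | lo);   /* 0x200000000 */
        return 0;
}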
diff --git a/src/cpu/amd/socket_AM2/Config.lb b/src/cpu/amd/socket_AM2/Config.lb
new file mode 100644
index 0000000000..4b12629ad7
--- /dev/null
+++ b/src/cpu/amd/socket_AM2/Config.lb
@@ -0,0 +1,19 @@
+uses CONFIG_CHIP_NAME
+uses K8_REV_F_SUPPORT
+uses K8_HT_FREQ_1G_SUPPORT
+uses DIMM_SUPPORT
+uses CPU_SOCKET_TYPE
+
+if CONFIG_CHIP_NAME
+ config chip.h
+end
+
+default K8_REV_F_SUPPORT=1
+#Opteron K8 1G HT Support
+default K8_HT_FREQ_1G_SUPPORT=1
+default DIMM_SUPPORT=0x0004 #DDR2 unbuffered
+default CPU_SOCKET_TYPE=0x11
+
+object socket_AM2.o
+
+dir /cpu/amd/model_fxx
diff --git a/src/cpu/amd/socket_AM2/chip.h b/src/cpu/amd/socket_AM2/chip.h
new file mode 100644
index 0000000000..6b3290c90e
--- /dev/null
+++ b/src/cpu/amd/socket_AM2/chip.h
@@ -0,0 +1,4 @@
+extern struct chip_operations cpu_amd_socket_AM2_ops;
+
+struct cpu_amd_socket_AM2_config {
+};
diff --git a/src/cpu/amd/socket_AM2/socket_AM2.c b/src/cpu/amd/socket_AM2/socket_AM2.c
new file mode 100644
index 0000000000..0fd649d984
--- /dev/null
+++ b/src/cpu/amd/socket_AM2/socket_AM2.c
@@ -0,0 +1,6 @@
+#include <device/device.h>
+#include "chip.h"
+
+struct chip_operations cpu_amd_socket_AM2_ops = {
+ CHIP_NAME("socket AM2")
+};
diff --git a/src/cpu/amd/socket_F/Config.lb b/src/cpu/amd/socket_F/Config.lb
new file mode 100644
index 0000000000..7406391a52
--- /dev/null
+++ b/src/cpu/amd/socket_F/Config.lb
@@ -0,0 +1,19 @@
+uses CONFIG_CHIP_NAME
+uses K8_REV_F_SUPPORT
+uses K8_HT_FREQ_1G_SUPPORT
+uses DIMM_SUPPORT
+uses CPU_SOCKET_TYPE
+
+if CONFIG_CHIP_NAME
+ config chip.h
+end
+
+default K8_REV_F_SUPPORT=1
+#Opteron K8 1G HT Support
+default K8_HT_FREQ_1G_SUPPORT=1
+default DIMM_SUPPORT=0x0104 #DDR2 and REG
+default CPU_SOCKET_TYPE=0x10
+
+object socket_F.o
+
+dir /cpu/amd/model_fxx
diff --git a/src/cpu/amd/socket_F/chip.h b/src/cpu/amd/socket_F/chip.h
new file mode 100644
index 0000000000..ce2fde0bff
--- /dev/null
+++ b/src/cpu/amd/socket_F/chip.h
@@ -0,0 +1,4 @@
+extern struct chip_operations cpu_amd_socket_F_ops;
+
+struct cpu_amd_socket_F_config {
+};
diff --git a/src/cpu/amd/socket_F/socket_F.c b/src/cpu/amd/socket_F/socket_F.c
new file mode 100644
index 0000000000..a4cbefe3ac
--- /dev/null
+++ b/src/cpu/amd/socket_F/socket_F.c
@@ -0,0 +1,6 @@
+#include <device/device.h>
+#include "chip.h"
+
+struct chip_operations cpu_amd_socket_F_ops = {
+ CHIP_NAME("socket F")
+};
diff --git a/src/cpu/x86/car/copy_and_run.c b/src/cpu/x86/car/copy_and_run.c
index 5c61177a10..4000a09e18 100644
--- a/src/cpu/x86/car/copy_and_run.c
+++ b/src/cpu/x86/car/copy_and_run.c
@@ -44,7 +44,7 @@ static void copy_and_run(unsigned cpu_reset)
// dump_mem(src, src+0x100);
- olen=unrv2b(src, dst);
+ olen = unrv2b(src, dst, &ilen);
#endif
// dump_mem(dst, dst+0x100);
diff --git a/src/cpu/x86/lapic/lapic.c b/src/cpu/x86/lapic/lapic.c
index 8282890bf7..8c111e8d71 100644
--- a/src/cpu/x86/lapic/lapic.c
+++ b/src/cpu/x86/lapic/lapic.c
@@ -55,7 +55,7 @@ void setup_lapic(void)
LAPIC_DELIVERY_MODE_NMI)
);
- printk_debug(" apic_id: %d ", lapicid());
+ printk_debug(" apic_id: 0x%02x ", lapicid());
#else /* !NEED_LLAPIC */
/* Only Pentium Pro and later have those MSR stuff */
diff --git a/src/cpu/x86/lapic/lapic_cpu_init.c b/src/cpu/x86/lapic/lapic_cpu_init.c
index 4b5acad2c4..1adafc8d18 100644
--- a/src/cpu/x86/lapic/lapic_cpu_init.c
+++ b/src/cpu/x86/lapic/lapic_cpu_init.c
@@ -322,7 +322,7 @@ static void start_other_cpus(struct bus *cpu_bus, device_t bsp_cpu)
if (!start_cpu(cpu)) {
/* Record the error in cpu? */
- printk_err("CPU %u would not start!\n",
+ printk_err("CPU 0x%02x would not start!\n",
cpu->path.u.apic.apic_id);
}
#if SERIAL_CPU_INIT == 1
@@ -354,7 +354,7 @@ static void wait_other_cpus_stop(struct bus *cpu_bus)
continue;
}
if (!cpu->initialized) {
- printk_err("CPU %u did not initialize!\n",
+ printk_err("CPU 0x%02x did not initialize!\n",
cpu->path.u.apic.apic_id);
#warning "FIXME do I need a mainboard_cpu_fixup function?"
}
@@ -366,6 +366,10 @@ static void wait_other_cpus_stop(struct bus *cpu_bus)
#define initialize_other_cpus(root) do {} while(0)
#endif /* CONFIG_SMP */
+#if WAIT_BEFORE_CPUS_INIT==0
+ #define cpus_ready_for_init() do {} while(0)
+#endif
+
void initialize_cpus(struct bus *cpu_bus)
{
struct device_path cpu_path;
@@ -394,6 +398,8 @@ void initialize_cpus(struct bus *cpu_bus)
copy_secondary_start_to_1m_below(); // why here? In case some day we can start core1 in amd_sibling_init
#endif
+ cpus_ready_for_init();
+
#if CONFIG_SMP == 1
#if SERIAL_CPU_INIT == 0
/* start all aps at first, so we can init ECC all together */
@@ -407,7 +413,6 @@ void initialize_cpus(struct bus *cpu_bus)
#if CONFIG_SMP == 1
#if SERIAL_CPU_INIT == 1
- /* start all aps */
start_other_cpus(cpu_bus, info->cpu);
#endif
diff --git a/src/cpu/x86/mtrr/earlymtrr.c b/src/cpu/x86/mtrr/earlymtrr.c
index aea8e258d4..1c00bd7dcc 100644
--- a/src/cpu/x86/mtrr/earlymtrr.c
+++ b/src/cpu/x86/mtrr/earlymtrr.c
@@ -47,10 +47,29 @@ static void set_var_mtrr(
basem.hi = 0;
wrmsr(MTRRphysBase_MSR(reg), basem);
maskm.lo = ~(size - 1) | 0x800;
- maskm.hi = 0x0f;
+ maskm.hi = (1<<(CPU_ADDR_BITS-32))-1;
wrmsr(MTRRphysMask_MSR(reg), maskm);
}
+static void set_var_mtrr_x(
+ unsigned reg, uint32_t base_lo, uint32_t base_hi, uint32_t size_lo, uint32_t size_hi, unsigned type)
+
+{
+ /* Bit Bit 32-35 of MTRRphysMask should be set to 1 */
+ msr_t basem, maskm;
+ basem.lo = (base_lo & 0xfffff000) | type;
+ basem.hi = base_hi & ((1<<(CPU_ADDR_BITS-32))-1);
+ wrmsr(MTRRphysBase_MSR(reg), basem);
+ maskm.hi = (1<<(CPU_ADDR_BITS-32))-1;
+ if(size_lo) {
+ maskm.lo = ~(size_lo - 1) | 0x800;
+ } else {
+ maskm.lo = 0x800;
+ maskm.hi &= ~(size_hi - 1);
+ }
+ wrmsr(MTRRphysMask_MSR(reg), maskm);
+}
+
static void cache_lbmem(int type)
{
/* Enable caching for 0 - 1MB using variable mtrr */
@@ -70,7 +89,6 @@ static void do_early_mtrr_init(const unsigned long *mtrr_msrs)
*/
msr_t msr;
const unsigned long *msr_addr;
- unsigned long cr0;
/* Inialize all of the relevant msrs to 0 */
msr.lo = 0;
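Note: the new set_var_mtrr_x() above handles ranges of 4 GB and larger by passing the size split into size_lo/size_hi, and it widens the mask high dword to match CPU_ADDR_BITS. A worked sketch for a hypothetical 8 GB range on a 40-bit part:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        unsigned cpu_addr_bits = 40;   /* the Config.lb default added in this patch */
        uint32_t size_hi = 2;          /* example: an 8 GB range, so size = 2 << 32 and size_lo = 0 */
        uint32_t mask_hi = ((1u << (cpu_addr_bits - 32)) - 1) & ~(size_hi - 1);
        uint32_t mask_lo = 0x800;      /* only the valid bit; the low 32 size bits are zero */
        printf("MTRRphysMask = %08x:%08x\n", mask_hi, mask_lo);   /* 000000fe:00000800 */
        return 0;
}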
diff --git a/src/cpu/x86/mtrr/mtrr.c b/src/cpu/x86/mtrr/mtrr.c
index 1226713cf5..101d11d5e2 100644
--- a/src/cpu/x86/mtrr/mtrr.c
+++ b/src/cpu/x86/mtrr/mtrr.c
@@ -70,6 +70,25 @@ static void set_var_mtrr(
msr_t base, mask;
unsigned address_mask_high;
+ if (reg >= 8)
+ return;
+
+ // it is recommended that we disable and enable cache when we
+ // do this.
+ if (sizek == 0) {
+ disable_cache();
+
+ msr_t zero;
+ zero.lo = zero.hi = 0;
+ /* The invalid bit is kept in the mask, so we simply clear the
+ relevant mask register to disable a range. */
+ wrmsr (MTRRphysMask_MSR(reg), zero);
+
+ enable_cache();
+ return;
+ }
+
+
address_mask_high = ((1u << (address_bits - 32u)) - 1u);
base.hi = basek >> 22;
@@ -86,25 +105,16 @@ static void set_var_mtrr(
mask.lo = 0;
}
- if (reg >= 8)
- return;
-
// it is recommended that we disable and enable cache when we
// do this.
disable_cache();
- if (sizek == 0) {
- msr_t zero;
- zero.lo = zero.hi = 0;
- /* The invalid bit is kept in the mask, so we simply clear the
- relevant mask register to disable a range. */
- wrmsr (MTRRphysMask_MSR(reg), zero);
- } else {
- /* Bit 32-35 of MTRRphysMask should be set to 1 */
- base.lo |= type;
- mask.lo |= 0x800;
- wrmsr (MTRRphysBase_MSR(reg), base);
- wrmsr (MTRRphysMask_MSR(reg), mask);
- }
+
+ /* Bit 32-35 of MTRRphysMask should be set to 1 */
+ base.lo |= type;
+ mask.lo |= 0x800;
+ wrmsr (MTRRphysBase_MSR(reg), base);
+ wrmsr (MTRRphysMask_MSR(reg), mask);
+
enable_cache();
}
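Note: the reordering above hoists the sizek == 0 case into an early return. Because the MTRR valid bit lives in MTRRphysMask (bit 11, the 0x800 written below it), clearing the whole mask register is enough to disable a range and MTRRphysBase can be left alone. A minimal sketch of that disable path, reusing the helpers already visible in this file (msr_t, wrmsr, MTRRphysMask_MSR, disable_cache/enable_cache) rather than a standalone program:

/* Sketch only -- same effect as the sizek == 0 branch above. */
static void disable_var_mtrr(unsigned reg)
{
        msr_t zero;

        zero.lo = zero.hi = 0;
        disable_cache();
        wrmsr(MTRRphysMask_MSR(reg), zero);   /* valid bit cleared -> the range is ignored */
        enable_cache();
}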