summaryrefslogtreecommitdiff
path: root/src/northbridge
diff options
context:
space:
mode:
authorJason Schildt <jschildt@gmail.com>2005-10-25 21:24:23 +0000
committerJason Schildt <jschildt@gmail.com>2005-10-25 21:24:23 +0000
commit8b26cab08f11ff1d5daa517ee04bdf9ceb5ddc60 (patch)
tree3725f853a5e56a503ecc8f2ae0605c3c4e3c1d1a /src/northbridge
parent6a2c09a386970257188824f8901706c5579d5b50 (diff)
downloadcoreboot-8b26cab08f11ff1d5daa517ee04bdf9ceb5ddc60.tar.xz
- See Issue Tracker id-4 "lnxi-patch-4"
- In addition: modified apic_id lifting to always lift all CPUs. This may cause problems with older kernels. git-svn-id: svn://svn.coreboot.org/coreboot/trunk@2068 2b7e53f0-3cfb-0310-b3e9-8179ed1497e1
Diffstat (limited to 'src/northbridge')
-rw-r--r--src/northbridge/amd/amdk8/amdk8.h1
-rw-r--r--src/northbridge/amd/amdk8/coherent_ht.c89
-rw-r--r--src/northbridge/amd/amdk8/northbridge.c211
-rw-r--r--src/northbridge/amd/amdk8/raminit.c147
4 files changed, 184 insertions, 264 deletions
diff --git a/src/northbridge/amd/amdk8/amdk8.h b/src/northbridge/amd/amdk8/amdk8.h
index ca8e8dc3d2..89c03fc16b 100644
--- a/src/northbridge/amd/amdk8/amdk8.h
+++ b/src/northbridge/amd/amdk8/amdk8.h
@@ -136,6 +136,7 @@
#define DCL_DisInRcvrs (1<<24)
#define DCL_BypMax_SHIFT 25
#define DCL_En2T (1<<28)
+#define DCL_UpperCSMap (1<<29)
#define DRAM_CONFIG_HIGH 0x94
#define DCH_ASYNC_LAT_SHIFT 0
#define DCH_ASYNC_LAT_MASK 0xf
diff --git a/src/northbridge/amd/amdk8/coherent_ht.c b/src/northbridge/amd/amdk8/coherent_ht.c
index c79a432ab5..db646a5265 100644
--- a/src/northbridge/amd/amdk8/coherent_ht.c
+++ b/src/northbridge/amd/amdk8/coherent_ht.c
@@ -155,23 +155,6 @@ static void disable_probes(void)
}
-#ifndef ENABLE_APIC_EXT_ID
-#define ENABLE_APIC_EXT_ID 0
-#endif
-
-static void enable_apic_ext_id(u8 node)
-{
-#if ENABLE_APIC_EXT_ID==1
-#warning "FIXME Is the right place to enable apic ext id here?"
-
- u32 val;
-
- val = pci_read_config32(NODE_HT(node), 0x68);
- val |= (HTTC_APIC_EXT_SPUR | HTTC_APIC_EXT_ID | HTTC_APIC_EXT_BRD_CST);
- pci_write_config32(NODE_HT(node), 0x68, val);
-#endif
-}
-
static void enable_routing(u8 node)
{
u32 val;
@@ -292,20 +275,18 @@ static int verify_connection(u8 dest)
return 1;
}
-static uint16_t read_freq_cap(device_t dev, uint8_t pos)
+static unsigned read_freq_cap(device_t dev, unsigned pos)
{
/* Handle bugs in valid hypertransport frequency reporting */
- uint16_t freq_cap;
+ unsigned freq_cap;
uint32_t id;
freq_cap = pci_read_config16(dev, pos);
freq_cap &= ~(1 << HT_FREQ_VENDOR); /* Ignore Vendor HT frequencies */
-#if K8_HT_FREQ_1G_SUPPORT == 1
if (!is_cpu_pre_e0()) {
return freq_cap;
}
-#endif
id = pci_read_config32(dev, 0);
@@ -339,8 +320,10 @@ static int optimize_connection(device_t node1, uint8_t link1, device_t node2, ui
/* See if I am changing the link frequency */
old_freq = pci_read_config8(node1, link1 + PCI_HT_CAP_HOST_FREQ);
+ old_freq &= 0x0f;
needs_reset |= old_freq != freq;
old_freq = pci_read_config8(node2, link2 + PCI_HT_CAP_HOST_FREQ);
+ old_freq &= 0x0f;
needs_reset |= old_freq != freq;
/* Set the Calculated link frequency */
@@ -382,7 +365,6 @@ static int optimize_connection(device_t node1, uint8_t link1, device_t node2, ui
/* Set node2's widths */
pci_write_config8(node2, link2 + PCI_HT_CAP_HOST_WIDTH + 1, width);
-
return needs_reset;
}
@@ -1625,9 +1607,9 @@ static void clear_dead_routes(unsigned nodes)
}
#endif /* CONFIG_MAX_PHYSICAL_CPUS > 1 */
-#if CONFIG_LOGICAL_CPUS==1
-static unsigned verify_dualcore(unsigned nodes)
+static unsigned count_cpus(unsigned nodes)
{
+#if CONFIG_LOGICAL_CPUS==1
unsigned node, totalcpus, tmp;
totalcpus = 0;
@@ -1637,25 +1619,21 @@ static unsigned verify_dualcore(unsigned nodes)
}
return totalcpus;
+#else
+ return nodes;
+#endif
}
-#endif
static void coherent_ht_finalize(unsigned nodes)
{
+ unsigned total_cpus;
+ unsigned cpu_node_count;
unsigned node;
int rev_a0;
-#if CONFIG_LOGICAL_CPUS==1
- unsigned total_cpus;
+ total_cpus = count_cpus(nodes);
+ cpu_node_count = ((total_cpus -1)<<16)|((nodes - 1) << 4);
- if(read_option(CMOS_VSTART_dual_core, CMOS_VLEN_dual_core, 0) == 0) { /* dual_core */
- total_cpus = verify_dualcore(nodes);
- }
- else {
- total_cpus = nodes;
- }
-#endif
-
/* set up cpu count and node count and enable Limit
* Config Space Range for all available CPUs.
* Also clear non coherent hypertransport bus range
@@ -1672,11 +1650,7 @@ static void coherent_ht_finalize(unsigned nodes)
/* Set the Total CPU and Node count in the system */
val = pci_read_config32(dev, 0x60);
val &= (~0x000F0070);
-#if CONFIG_LOGICAL_CPUS==1
- val |= ((total_cpus-1)<<16)|((nodes-1)<<4);
-#else
- val |= ((nodes-1)<<16)|((nodes-1)<<4);
-#endif
+ val |= cpu_node_count;
pci_write_config32(dev, 0x60, val);
/* Only respond to real cpu pci configuration cycles
@@ -1786,6 +1760,33 @@ static int optimize_link_read_pointers(unsigned nodes, int needs_reset)
return needs_reset;
}
+static void startup_other_cores(unsigned nodes)
+{
+ unsigned node;
+ for(node = 0; node < nodes; node++) {
+ device_t dev;
+ unsigned siblings;
+ dev = NODE_MC(node);
+ siblings = (pci_read_config32(dev, 0xe8) >> 12) & 0x3;
+
+ if (siblings) {
+ device_t dev_f0;
+ unsigned val;
+ /* Redirect all MC4 accesses and error logging to core0 */
+ val = pci_read_config32(dev, 0x44);
+ val |= (1 << 27); //NbMcaToMstCpuEn bit
+ pci_write_config32(dev, 0x44, val);
+
+ /* Enable the second core */
+ dev_f0 = NODE_HT(node);
+ val = pci_read_config32(dev_f0, 0x68);
+ val |= ( 1 << 5);
+ pci_write_config32(dev_f0, 0x68, val);
+ }
+ }
+}
+
+
static int setup_coherent_ht_domain(void)
{
struct setup_smp_result result;
@@ -1799,15 +1800,15 @@ static int setup_coherent_ht_domain(void)
enable_bsp_routing();
#if CONFIG_MAX_PHYSICAL_CPUS > 1
- result = setup_smp();
- result.nodes = verify_mp_capabilities(result.nodes);
- clear_dead_routes(result.nodes);
+ result = setup_smp();
#endif
-
+ result.nodes = verify_mp_capabilities(result.nodes);
+ clear_dead_routes(result.nodes);
if (result.nodes == 1) {
setup_uniprocessor();
}
coherent_ht_finalize(result.nodes);
+ startup_other_cores(result.nodes);
result.needs_reset = apply_cpu_errata_fixes(result.nodes, result.needs_reset);
result.needs_reset = optimize_link_read_pointers(result.nodes, result.needs_reset);
return result.needs_reset;
diff --git a/src/northbridge/amd/amdk8/northbridge.c b/src/northbridge/amd/amdk8/northbridge.c
index e45aff8242..7ef1266130 100644
--- a/src/northbridge/amd/amdk8/northbridge.c
+++ b/src/northbridge/amd/amdk8/northbridge.c
@@ -17,9 +17,9 @@
#include <cpu/cpu.h>
#include <cpu/x86/lapic.h>
+#include <cpu/amd/dualcore.h>
#if CONFIG_LOGICAL_CPUS==1
-#include <cpu/amd/dualcore.h>
#include <pc80/mc146818rtc.h>
#endif
@@ -27,10 +27,7 @@
#include "root_complex/chip.h"
#include "northbridge.h"
#include "amdk8.h"
-
-#if K8_E0_MEM_HOLE_SIZEK != 0
-#include "./cpu_rev.c"
-#endif
+#include "cpu_rev.c"
#define FX_DEVS 8
static device_t __f0_dev[FX_DEVS];
@@ -640,6 +637,41 @@ static uint32_t find_pci_tolm(struct bus *bus)
return tolm;
}
+static uint32_t hoist_memory(unsigned long mmio_basek, int i)
+{
+ int ii;
+ uint32_t carry_over;
+ device_t dev;
+ uint32_t base, limit;
+ uint32_t basek;
+ uint32_t hoist;
+
+ carry_over = (4*1024*1024) - mmio_basek;
+ for(ii=7;ii>i;ii--) {
+
+ base = f1_read_config32(0x40 + (ii << 3));
+ limit = f1_read_config32(0x44 + (ii << 3));
+ if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
+ continue;
+ }
+ f1_write_config32(0x44 + (ii << 3),limit + (carry_over << 2));
+ f1_write_config32(0x40 + (ii << 3),base + (carry_over << 2));
+ }
+ limit = f1_read_config32(0x44 + (i << 3));
+ f1_write_config32(0x44 + (i << 3),limit + (carry_over << 2));
+ dev = __f1_dev[i];
+ base = pci_read_config32(dev, 0x40 + (i << 3));
+ basek = (pci_read_config32(dev, 0x40 + (i << 3)) & 0xffff0000) >> 2;
+ hoist = /* hole start address */
+ ((mmio_basek << 10) & 0xff000000) +
+ /* hole address to memory controller address */
+ (((basek + carry_over) >> 6) & 0x0000ff00) +
+ /* enable */
+ 1;
+ pci_write_config32(dev, 0xf0, hoist);
+ return carry_over;
+}
+
static void pci_domain_set_resources(device_t dev)
{
unsigned long mmio_basek;
@@ -648,41 +680,23 @@ static void pci_domain_set_resources(device_t dev)
pci_tolm = find_pci_tolm(&dev->link[0]);
+ /* Work around for NUMA bug in all kernels before 2.6.13.
+ If pci memory hole is too small, the kernel memory to NUMA
+ node mapping will fail to initialize and system will run in
+ non-NUMA mode.
+ */
+ if(pci_tolm > 0xf8000000) pci_tolm = 0xf8000000;
+
#warning "FIXME handle interleaved nodes"
mmio_basek = pci_tolm >> 10;
/* Round mmio_basek to something the processor can support */
mmio_basek &= ~((1 << 6) -1);
-#if 1
-#warning "FIXME improve mtrr.c so we don't use up all of the mtrrs with a 64M MMIO hole"
- /* Round the mmio hold to 64M */
- mmio_basek &= ~((64*1024) - 1);
-#endif
-
-#if K8_E0_MEM_HOLE_SIZEK != 0
- if (!is_cpu_pre_e0())
- for (i = 0; i < 8; i++) {
- uint32_t base;
- base = f1_read_config32(0x40 + (i << 3));
- if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
- continue;
- }
-
- base = pci_read_config32(__f1_dev[i], 0xf0);
- if((base & 1)==0) continue;
- base &= 0xff<<24;
- base >>= 10;
- if (mmio_basek > base) {
- mmio_basek = base;
- }
- break; // only one hole
- }
-#endif
-
idx = 10;
for(i = 0; i < 8; i++) {
uint32_t base, limit;
unsigned basek, limitk, sizek;
+
base = f1_read_config32(0x40 + (i << 3));
limit = f1_read_config32(0x44 + (i << 3));
if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
@@ -708,6 +722,9 @@ static void pci_domain_set_resources(device_t dev)
pre_sizek = mmio_basek - basek;
ram_resource(dev, idx++, basek, pre_sizek);
sizek -= pre_sizek;
+ if(! is_cpu_pre_e0() ) {
+ sizek += hoist_memory(mmio_basek,i);
+ }
basek = mmio_basek;
}
if ((basek + sizek) <= 4*1024*1024) {
@@ -767,54 +784,16 @@ static struct device_operations pci_domain_ops = {
.ops_pci_bus = &pci_cf8_conf1,
};
-#define APIC_ID_OFFSET 0x10
-
static unsigned int cpu_bus_scan(device_t dev, unsigned int max)
{
struct bus *cpu_bus;
device_t dev_mc;
- int bsp_apic_id;
- int apic_id_offset;
int i,j;
- unsigned nb_cfg_54;
- int enable_apic_ext_id;
- unsigned siblings;
-#if CONFIG_LOGICAL_CPUS == 1
- int e0_later_single_core;
- int disable_siblings;
-#endif
-
- nb_cfg_54 = 0;
- enable_apic_ext_id = 0;
- siblings = 0;
-
- /* Find the bootstrap processors apicid */
- bsp_apic_id = lapicid();
-
- /* See if I will enable extended ids' */
- apic_id_offset = bsp_apic_id;
-
-#if CONFIG_LOGICAL_CPUS == 1
- disable_siblings = !CONFIG_LOGICAL_CPUS;
- get_option(&disable_siblings, "dual_core");
- // for pre_e0, nb_cfg_54 can not be set, ( even set, when you read it still be 0)
- // How can I get the nb_cfg_54 of every node' nb_cfg_54 in bsp??? and differ d0 and e0 single core
-
- nb_cfg_54 = read_nb_cfg_54();
-#endif
dev_mc = dev_find_slot(0, PCI_DEVFN(0x18, 0));
if (!dev_mc) {
die("0:18.0 not found?");
}
- if (pci_read_config32(dev_mc, 0x68) & (HTTC_APIC_EXT_ID|HTTC_APIC_EXT_BRD_CST))
- {
- enable_apic_ext_id = 1;
- if (apic_id_offset == 0) {
- /* bsp apic id is not changed */
- apic_id_offset = APIC_ID_OFFSET;
- }
- }
/* Find which cpus are present */
cpu_bus = &dev->link[0];
@@ -834,82 +813,36 @@ static unsigned int cpu_bus_scan(device_t dev, unsigned int max)
PCI_DEVFN(0x18 + i, j));
}
}
+
+ /* Build the cpu device path */
+ cpu_path.type = DEVICE_PATH_APIC;
+ cpu_path.u.apic.apic_id = 0x10 + i;
-#if CONFIG_LOGICAL_CPUS == 1
- e0_later_single_core = 0;
- if ((!disable_siblings) && dev && dev->enabled) {
- j = (pci_read_config32(dev, 0xe8) >> 12) & 3; // dev is func 3
- printk_debug(" %s siblings=%d\r\n", dev_path(dev), j);
-
- if(nb_cfg_54) {
- // For e0 single core if nb_cfg_54 is set, apicid will be 0, 2, 4....
- // ----> you can mixed single core e0 and dual core e0 at any sequence
- // That is the typical case
-
- if(j == 0 ){
- e0_later_single_core = is_e0_later_in_bsp(i); // single core
- } else {
- e0_later_single_core = 0;
- }
- if(e0_later_single_core) {
- printk_debug("\tFound e0 single core\r\n");
- j=1;
- }
-
- if(siblings > j ) {
- //actually we can't be here, because d0 nb_cfg_54 can not be set
- //even worse is_e0_later_in_bsp() can not find out if it is d0 or e0
+ /* See if I can find the cpu */
+ cpu = find_dev_path(cpu_bus, &cpu_path);
- die("When NB_CFG_54 is set, if you want to mix e0 (single core and dual core) and single core(pre e0) CPUs, you need to put all the single core (pre e0) CPUs before all the (e0 single or dual core) CPUs\r\n");
- }
- else {
- siblings = j;
- }
- } else {
- siblings = j;
- }
- }
-#endif
-#if CONFIG_LOGICAL_CPUS==1
- for (j = 0; j <= (e0_later_single_core?0:siblings); j++ ) {
-#else
- for (j = 0; j <= siblings; j++ ) {
-#endif
- /* Build the cpu device path */
- cpu_path.type = DEVICE_PATH_APIC;
- cpu_path.u.apic.apic_id = i * (nb_cfg_54?(siblings+1):1) + j * (nb_cfg_54?1:8);
-
- /* See if I can find the cpu */
- cpu = find_dev_path(cpu_bus, &cpu_path);
-
- /* Enable the cpu if I have the processor */
- if (dev && dev->enabled) {
- if (!cpu) {
- cpu = alloc_dev(cpu_bus, &cpu_path);
- }
- if (cpu) {
- cpu->enabled = 1;
- }
- }
-
- /* Disable the cpu if I don't have the processor */
- if (cpu && (!dev || !dev->enabled)) {
- cpu->enabled = 0;
+ /* Enable the cpu if I have the processor */
+ if (dev && dev->enabled) {
+ if (!cpu) {
+ cpu = alloc_dev(cpu_bus, &cpu_path);
}
-
- /* Report what I have done */
if (cpu) {
- if(enable_apic_ext_id) {
- if(cpu->path.u.apic.apic_id<apic_id_offset) { //all add offset except bsp core0
- if( (cpu->path.u.apic.apic_id > siblings) || (bsp_apic_id!=0) )
- cpu->path.u.apic.apic_id += apic_id_offset;
- }
- }
- printk_debug("CPU: %s %s\n",
- dev_path(cpu), cpu->enabled?"enabled":"disabled");
+ cpu->enabled = 1;
}
- } //j
+ }
+
+ /* Disable the cpu if I don't have the processor */
+ if (cpu && (!dev || !dev->enabled)) {
+ cpu->enabled = 0;
+ }
+
+ /* Report what I have done */
+ if (cpu) {
+ printk_debug("CPU: %s %s\n",
+ dev_path(cpu), cpu->enabled?"enabled":"disabled");
+ }
}
+
return max;
}
diff --git a/src/northbridge/amd/amdk8/raminit.c b/src/northbridge/amd/amdk8/raminit.c
index 5d9c320637..74e432d084 100644
--- a/src/northbridge/amd/amdk8/raminit.c
+++ b/src/northbridge/amd/amdk8/raminit.c
@@ -585,6 +585,16 @@ static void hw_enable_ecc(const struct mem_controller *ctrl)
}
+static void e_step_cpu(const struct mem_controller *ctrl)
+{
+ uint32_t dcl,data32;
+
+ /* set bit 29 (upper cs map) of function 2 offset 0x90 */
+ dcl = pci_read_config32(ctrl->f2, DRAM_CONFIG_LOW);
+ dcl |= DCL_UpperCSMap;
+ pci_write_config32(ctrl->f2, DRAM_CONFIG_LOW, dcl);
+}
+
static int is_dual_channel(const struct mem_controller *ctrl)
{
uint32_t dcl;
@@ -714,28 +724,14 @@ hw_err:
return sz;
}
-static const unsigned cs_map_aa[15] = {
- /* (row=12, col=8)(14, 12) ---> (0, 0) (2, 4) */
- 0, 1, 3, 6, 0,
- 0, 2, 4, 7, 9,
- 0, 0, 5, 8,10,
-};
-
static void set_dimm_size(const struct mem_controller *ctrl, struct dimm_size sz, unsigned index)
{
- uint32_t base0, base1, map;
+ uint32_t base0, base1;
uint32_t dch;
if (sz.side1 != sz.side2) {
sz.side2 = 0;
}
- map = pci_read_config32(ctrl->f2, DRAM_BANK_ADDR_MAP);
- map &= ~(0xf << (index * 4));
-#if K8_4RANK_DIMM_SUPPORT == 1
- if(sz.rank == 4) {
- map &= ~(0xf << ( (index + 2) * 4));
- }
-#endif
/* For each base register.
* Place the dimm size in 32 MB quantities in the bits 31 - 21.
@@ -747,22 +743,6 @@ static void set_dimm_size(const struct mem_controller *ctrl, struct dimm_size sz
/* Make certain side1 of the dimm is at least 32MB */
if (sz.side1 >= (25 +3)) {
- if(is_cpu_pre_d0()) {
- map |= (sz.side1 - (25 + 3)) << (index *4);
-#if K8_4RANK_DIMM_SUPPORT == 1
- if(sz.rank == 4) {
- map |= (sz.side1 - (25 + 3)) << ( (index + 2) * 4);
- }
-#endif
- }
- else {
- map |= cs_map_aa[(sz.rows - 12) * 5 + (sz.col - 8) ] << (index*4);
-#if K8_4RANK_DIMM_SUPPORT == 1
- if(sz.rank == 4) {
- map |= cs_map_aa[(sz.rows - 12) * 5 + (sz.col - 8) ] << ( (index + 2) * 4);
- }
-#endif
- }
base0 = (1 << ((sz.side1 - (25 + 3)) + 21)) | 1;
}
@@ -791,8 +771,6 @@ static void set_dimm_size(const struct mem_controller *ctrl, struct dimm_size sz
}
#endif
- pci_write_config32(ctrl->f2, DRAM_BANK_ADDR_MAP, map);
-
/* Enable the memory clocks for this DIMM */
if (base0) {
dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
@@ -806,6 +784,52 @@ static void set_dimm_size(const struct mem_controller *ctrl, struct dimm_size sz
}
}
+
+static void set_dimm_map(const struct mem_controller *ctrl,
+ struct dimm_size sz, unsigned index)
+{
+ static const unsigned cs_map_aa[15] = {
+ /* (row=12, col=8)(14, 12) ---> (0, 0) (2, 4) */
+ 0, 1, 3, 6, 0,
+ 0, 2, 4, 7, 9,
+ 0, 0, 5, 8,10,
+ };
+ uint32_t map;
+ int row,col;
+
+ map = pci_read_config32(ctrl->f2, DRAM_BANK_ADDR_MAP);
+ map &= ~(0xf << (index * 4));
+
+#if K8_4RANK_DIMM_SUPPORT == 1
+ if(sz.rank == 4) {
+ map &= ~(0xf << ( (index + 2) * 4));
+ }
+#endif
+
+ if (is_cpu_pre_d0()) {
+ map |= (sz.side1 - (25 + 3)) << (index *4);
+#if K8_4RANK_DIMM_SUPPORT == 1
+ if(sz.rank == 4) {
+ map |= (sz.side1 - (25 + 3)) << ( (index + 2) * 4);
+ }
+#endif
+ } else {
+ unsigned val;
+ val = cs_map_aa[(sz.rows - 12) * 5 + (sz.col - 8) ];
+ if(val == 0) {
+ print_err("Invalid Column or Row count\r\n");
+ val = 7;
+ }
+ map |= val << (index*4);
+#if K8_4RANK_DIMM_SUPPORT == 1
+ if(sz.rank == 4) {
+ map |= val << ( (index + 2) * 4);
+ }
+#endif
+ }
+ pci_write_config32(ctrl->f2, DRAM_BANK_ADDR_MAP, map);
+}
+
static long spd_set_ram_size(const struct mem_controller *ctrl, long dimm_mask)
{
int i;
@@ -820,6 +844,7 @@ static long spd_set_ram_size(const struct mem_controller *ctrl, long dimm_mask)
return -1; /* Report SPD error */
}
set_dimm_size(ctrl, sz, i);
+ set_dimm_map(ctrl, sz, i);
}
return dimm_mask;
}
@@ -971,7 +996,7 @@ static unsigned long interleave_chip_selects(const struct mem_controller *ctrl)
if(is_dual_channel(ctrl)) {
/* Also we run out of address mask bits if we try and interleave 8 4GB dimms */
if ((bits == 3) && (common_size == (1 << (32 - 3)))) {
-// print_debug("8 4GB chip selects cannot be interleaved\r\n");
+ print_spew("8 4GB chip selects cannot be interleaved\r\n");
return 0;
}
csbase_inc <<=1;
@@ -981,7 +1006,7 @@ static unsigned long interleave_chip_selects(const struct mem_controller *ctrl)
csbase_inc = csbase_low_d0[common_cs_mode];
if(is_dual_channel(ctrl)) {
if( (bits==3) && (common_cs_mode > 8)) {
-// print_debug("8 cs_mode>8 chip selects cannot be interleaved\r\n");
+ print_spew("8 cs_mode>8 chip selects cannot be interleaved\r\n");
return 0;
}
csbase_inc <<=1;
@@ -1100,25 +1125,6 @@ unsigned long memory_end_k(const struct mem_controller *ctrl, int max_node_id)
return end_k;
}
-#if K8_E0_MEM_HOLE_SIZEK != 0
-#define K8_E0_MEM_HOLE_LIMITK 4*1024*1024
-#define K8_E0_MEM_HOLE_BASEK (K8_E0_MEM_HOLE_LIMITK - K8_E0_MEM_HOLE_SIZEK )
-
-static void set_e0_mem_hole(const struct mem_controller *ctrl, unsigned base_k)
-{
- /* Route the addresses to the controller node */
- unsigned val;
-
- val = pci_read_config32(ctrl->f1,0xf0);
-
- val &= 0x00ff00fe;
- val = (K8_E0_MEM_HOLE_BASEK << 10) | ((K8_E0_MEM_HOLE_SIZEK+base_k)>>(16-10)) | 1;
-
- pci_write_config32(ctrl->f1, 0xf0, val);
-}
-
-#endif
-
static void order_dimms(const struct mem_controller *ctrl)
{
unsigned long tom_k, base_k;
@@ -1135,14 +1141,6 @@ static void order_dimms(const struct mem_controller *ctrl)
/* Compute the memory base address */
base_k = memory_end_k(ctrl, ctrl->node_id);
tom_k += base_k;
-#if K8_E0_MEM_HOLE_SIZEK != 0
- if(!is_cpu_pre_e0()) {
- /* See if I need to check the range cover hole */
- if ((base_k <= K8_E0_MEM_HOLE_BASEK) && (tom_k > K8_E0_MEM_HOLE_BASEK)) {
- tom_k += K8_E0_MEM_HOLE_SIZEK;
- }
- }
-#endif
route_dram_accesses(ctrl, base_k, tom_k);
set_top_mem(tom_k);
}
@@ -2145,12 +2143,11 @@ static void sdram_set_spd_registers(const struct mem_controller *ctrl)
struct spd_set_memclk_result result;
const struct mem_param *param;
long dimm_mask;
-#if 1
+
if (!controller_present(ctrl)) {
-// print_debug("No memory controller present\r\n");
+ print_debug("No memory controller present\r\n");
return;
}
-#endif
hw_enable_ecc(ctrl);
activate_spd_rom(ctrl);
dimm_mask = spd_detect_dimms(ctrl);
@@ -2176,6 +2173,10 @@ static void sdram_set_spd_registers(const struct mem_controller *ctrl)
if (dimm_mask < 0)
goto hw_spd_err;
order_dimms(ctrl);
+ if( !is_cpu_pre_e0() ) {
+ print_debug("E step CPU\r\n");
+ // e_step_cpu(ctrl); // Socket 939 only.
+ }
return;
hw_spd_err:
/* Unrecoverable error reading SPD data */
@@ -2280,22 +2281,6 @@ static void sdram_enable(int controllers, const struct mem_controller *ctrl)
} while(((dcl & DCL_MemClrStatus) == 0) || ((dcl & DCL_DramEnable) == 0) );
}
- // init e0 mem hole here
-#if K8_E0_MEM_HOLE_SIZEK != 0
- if (!is_cpu_pre_e0()) {
- uint32_t base, limit;
- unsigned base_k, limit_k;
- base = pci_read_config32(ctrl->f1, 0x40 + (i << 3));
- limit = pci_read_config32(ctrl->f1, 0x44 + (i << 3));
- base_k = (base & 0xffff0000) >> 2;
- limit_k = ((limit + 0x00010000) & 0xffff0000) >> 2;
- if ((base_k <= K8_E0_MEM_HOLE_BASEK) && (limit_k > K8_E0_MEM_HOLE_BASEK)) {
- set_e0_mem_hole(ctrl+i, base_k);
- }
- }
-
-#endif
-
print_debug(" done\r\n");
}