-rw-r--r--  src/northbridge/amd/amdk8/acpi.c               |  26
-rw-r--r--  src/northbridge/amd/amdk8/coherent_ht.c        | 124
-rw-r--r--  src/northbridge/amd/amdk8/debug.c              |  10
-rw-r--r--  src/northbridge/amd/amdk8/early_ht.c           |   4
-rw-r--r--  src/northbridge/amd/amdk8/exit_from_self.c     |   2
-rw-r--r--  src/northbridge/amd/amdk8/f.h                  |   6
-rw-r--r--  src/northbridge/amd/amdk8/get_sblk_pci1234.c   |   8
-rw-r--r--  src/northbridge/amd/amdk8/incoherent_ht.c      |  72
-rw-r--r--  src/northbridge/amd/amdk8/misc_control.c       |  12
-rw-r--r--  src/northbridge/amd/amdk8/northbridge.c        |  30
-rw-r--r--  src/northbridge/amd/amdk8/raminit.c            |  40
-rw-r--r--  src/northbridge/amd/amdk8/raminit_f.c          | 182
-rw-r--r--  src/northbridge/amd/amdk8/raminit_f_dqs.c      | 160
-rw-r--r--  src/northbridge/amd/amdk8/setup_resource_map.c |   8
-rw-r--r--  src/northbridge/amd/amdk8/util.asl             |  14
15 files changed, 349 insertions(+), 349 deletions(-)
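The hunks below are whitespace-only coding-style fixes: spaces around binary operators and after the semicolons in for-loop headers, and no stray spaces inside parentheses or before closing parentheses. A minimal sketch of the pattern being normalized, using illustrative identifiers rather than code taken from the patch:

/*
 * Illustrative only -- not from the coreboot sources.
 * old style:  for (i=0;i<nodes;i++) { if ((val[i]&1) !=1 ) continue; ... }
 */
static int count_enabled_nodes(unsigned int nodes, const unsigned int *val)
{
	unsigned int i;
	int count = 0;

	for (i = 0; i < nodes; i++) {		/* spaces around '=' and '<', and after each ';' */
		if ((val[i] & 1) != 1)		/* spaces around '&' and '!=', none before ')' */
			continue;
		count++;
	}
	return count;
}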
diff --git a/src/northbridge/amd/amdk8/acpi.c b/src/northbridge/amd/amdk8/acpi.c
index 992a85ed46..906dc0b54a 100644
--- a/src/northbridge/amd/amdk8/acpi.c
+++ b/src/northbridge/amd/amdk8/acpi.c
@@ -101,7 +101,7 @@ static void set_srat_mem(void *gp, struct device *dev, struct resource *res)
*/
if ((basek+sizek)<1024) return;
- if (basek<1024) {
+ if (basek < 1024) {
sizek -= 1024 - basek;
basek = 1024;
}
@@ -158,29 +158,29 @@ static unsigned long acpi_fill_slit(unsigned long current)
p += 8;
#if 0
- for (i=0;i<sysconf.hc_possible_num;i++) {
- if ((sysconf.pci1234[i]&1) !=1 ) continue;
+ for (i = 0; i < sysconf.hc_possible_num; i++) {
+ if ((sysconf.pci1234[i]&1) !=1) continue;
outer_node[(sysconf.pci1234[i] >> 4) & 0xf] = 1; // mark the outer node
}
#endif
- for (i=0;i<nodes;i++) {
- for (j=0;j<nodes; j++) {
- if (i==j) {
+ for (i = 0; i < nodes; i++) {
+ for (j = 0; j < nodes; j++) {
+ if (i == j) {
p[i*nodes+j] = 10;
} else {
#if 0
int k;
u8 latency_factor = 0;
int k_start, k_end;
- if (i<j) {
+ if (i < j) {
k_start = i;
k_end = j;
} else {
k_start = j;
k_end = i;
}
- for (k=k_start;k<=k_end; k++) {
+ for (k = k_start; k <= k_end; k++) {
if (outer_node[k]) {
latency_factor = 1;
break;
@@ -238,10 +238,10 @@ static void k8acpi_write_HT(void) {
acpigen_write_name("HCLK");
acpigen_write_package(HC_POSSIBLE_NUM);
- for (i=0;i<sysconf.hc_possible_num;i++) {
+ for (i = 0; i < sysconf.hc_possible_num; i++) {
acpigen_write_dword(sysconf.pci1234[i]);
}
- for (i=sysconf.hc_possible_num; i<HC_POSSIBLE_NUM; i++) { // in case we set array size to other than 8
+ for (i = sysconf.hc_possible_num; i < HC_POSSIBLE_NUM; i++) { // in case we set array size to other than 8
acpigen_write_dword(0x0);
}
@@ -250,10 +250,10 @@ static void k8acpi_write_HT(void) {
acpigen_write_name("HCDN");
acpigen_write_package(HC_POSSIBLE_NUM);
- for (i=0;i<sysconf.hc_possible_num;i++) {
+ for (i = 0; i < sysconf.hc_possible_num; i++) {
acpigen_write_dword(sysconf.hcdn[i]);
}
- for (i=sysconf.hc_possible_num; i<HC_POSSIBLE_NUM; i++) { // in case we set array size to other than 8
+ for (i = sysconf.hc_possible_num; i < HC_POSSIBLE_NUM; i++) { // in case we set array size to other than 8
acpigen_write_dword(0x20202020);
}
acpigen_pop_len();
@@ -268,7 +268,7 @@ static void k8acpi_write_pci_data(int dlen, const char *name, int offset) {
acpigen_write_name(name);
acpigen_write_package(dlen);
- for (i=0; i<dlen; i++) {
+ for (i = 0; i < dlen; i++) {
dword = pci_read_config32(dev, offset+i*4);
acpigen_write_dword(dword);
}
diff --git a/src/northbridge/amd/amdk8/coherent_ht.c b/src/northbridge/amd/amdk8/coherent_ht.c
index 7e33feb61a..887a23b1f0 100644
--- a/src/northbridge/amd/amdk8/coherent_ht.c
+++ b/src/northbridge/amd/amdk8/coherent_ht.c
@@ -139,7 +139,7 @@ static void disable_probes(void)
printk(BIOS_SPEW, "Disabling read/write/fill probes for UP... ");
- val=pci_read_config32(NODE_HT(0), HT_TRANSACTION_CONTROL);
+ val = pci_read_config32(NODE_HT(0), HT_TRANSACTION_CONTROL);
val |= HTTC_DIS_FILL_P | HTTC_DIS_RMT_MEM_C | HTTC_DIS_P_MEM_C |
HTTC_DIS_MTS | HTTC_DIS_WR_DW_P | HTTC_DIS_WR_B_P |
HTTC_DIS_RD_DW_P | HTTC_DIS_RD_B_P;
@@ -193,7 +193,7 @@ static void enable_routing(u8 node)
/* Enable routing table */
printk(BIOS_SPEW, "Enabling routing table for node %d", node);
- val=pci_read_config32(NODE_HT(node), 0x6c);
+ val = pci_read_config32(NODE_HT(node), 0x6c);
val &= ~((1<<1)|(1<<0));
pci_write_config32(NODE_HT(node), 0x6c, val);
@@ -241,7 +241,7 @@ static void rename_temp_node(u8 node)
printk(BIOS_SPEW, "Renaming current temporary node to %d", node);
- val=pci_read_config32(NODE_HT(7), 0x60);
+ val = pci_read_config32(NODE_HT(7), 0x60);
val &= (~7); /* clear low bits. */
val |= node; /* new node */
pci_write_config32(NODE_HT(7), 0x60, val);
@@ -401,7 +401,7 @@ static void setup_row_local(u8 source, u8 row) /* source will be 7 when it is fo
uint8_t linkn;
uint32_t val;
val = 1;
- for (linkn = 0; linkn<3; linkn++) {
+ for (linkn = 0; linkn < 3; linkn++) {
uint8_t regpos;
uint32_t reg;
regpos = 0x98 + 0x20 * linkn;
@@ -423,10 +423,10 @@ static void setup_row_direct_x(u8 temp, u8 source, u8 dest, u8 linkn)
if (((source &1)!=(dest &1))
#if CROSS_BAR_47_56
- && ( (source<4)||(source>5) ) //(6,7) (7,6) should still be here
+ && ((source < 4)||(source > 5)) //(6,7) (7,6) should still be here
//(6,5) (7,4) should be here
#endif
- ){
+ ) {
val |= (1<<16);
} else {
/*for CROSS_BAR_47_56 47, 56, should be here too
@@ -437,7 +437,7 @@ static void setup_row_direct_x(u8 temp, u8 source, u8 dest, u8 linkn)
val |= ((val_s>>16) - (1<<(linkn+1)))<<16;
}
- fill_row(temp,dest, val );
+ fill_row(temp,dest, val);
}
#if CROSS_BAR_47_56
@@ -453,7 +453,7 @@ static void opt_broadcast_rt_group(const u8 *conn, int num)
{
int i;
- for (i=0; i<num; i+=3) {
+ for (i = 0; i < num; i+=3) {
opt_broadcast_rt(conn[i], conn[i+1],conn[i+2]);
}
}
@@ -470,7 +470,7 @@ static void opt_broadcast_rt_plus_group(const u8 *conn, int num)
{
int i;
- for (i=0; i<num; i+=3) {
+ for (i = 0; i < num; i+=3) {
opt_broadcast_rt_plus(conn[i], conn[i+1],conn[i+2]);
}
}
@@ -535,7 +535,7 @@ static void setup_row_indirect_x(u8 temp, u8 source, u8 dest, u8 gateway, u8 dif
#if !CROSS_BAR_47_56
u8 gateway;
u8 diff;
- if (source<dest) {
+ if (source < dest) {
gateway = source + 2;
} else {
gateway = source - 2;
@@ -552,7 +552,7 @@ static void setup_row_indirect_x(u8 temp, u8 source, u8 dest, u8 gateway, u8 dif
diff = ((source&1)!=(dest &1));
#endif
- if (diff && (val_s!=(val&0xff)) ) { /* use another connect as response*/
+ if (diff && (val_s!=(val&0xff))) { /* use another connect as response*/
val_s -= val & 0xff;
#if (CONFIG_MAX_PHYSICAL_CPUS > 4) || CONFIG_MAX_PHYSICAL_CPUS_4_BUT_MORE_INSTALLED
uint8_t byte;
@@ -562,14 +562,14 @@ static void setup_row_indirect_x(u8 temp, u8 source, u8 dest, u8 gateway, u8 dif
byte = val_s;
byte = get_linkn_last_count(byte);
if ((byte>>2)>1) { /* make sure not the corner*/
- if (source<dest) {
+ if (source < dest) {
val_s-=link_connection(temp, source-2); /* -down*/
} else {
#if CROSS_BAR_47_56
#if 0
- if (source==7) {
+ if (source == 7) {
val_s-=link_connection(temp, 6); // for 7,2 via 5
- } else if (source==6){
+ } else if (source == 6) {
val_s-=link_connection(temp, 7); // for 6,3 via 4
} else
#endif
@@ -614,10 +614,10 @@ static void setup_row_indirect_group(const u8 *conn, int num)
int i;
#if !CROSS_BAR_47_56
- for (i=0; i<num; i+=2) {
+ for (i = 0; i < num; i+=2) {
setup_row_indirect(conn[i], conn[i+1]);
#else
- for (i=0; i<num; i+=4) {
+ for (i = 0; i < num; i+=4) {
setup_row_indirect(conn[i], conn[i+1],conn[i+2], conn[i+3]);
#endif
@@ -641,10 +641,10 @@ static void setup_remote_row_indirect_group(const u8 *conn, int num)
int i;
#if !CROSS_BAR_47_56
- for (i=0; i<num; i+=2) {
+ for (i = 0; i < num; i+=2) {
setup_remote_row_indirect(conn[i], conn[i+1]);
#else
- for (i=0; i<num; i+=4) {
+ for (i = 0; i < num; i+=4) {
setup_remote_row_indirect(conn[i], conn[i+1],conn[i+2], conn[i+3]);
#endif
}
@@ -658,7 +658,7 @@ static void setup_uniprocessor(void)
printk(BIOS_SPEW, "Enabling UP settings\n");
#if CONFIG_LOGICAL_CPUS
unsigned tmp = (pci_read_config32(NODE_MC(0), 0xe8) >> 12) & 3;
- if (tmp>0) return;
+ if (tmp > 0) return;
#endif
disable_probes();
}
@@ -668,10 +668,10 @@ static int optimize_connection_group(const u8 *opt_conn, int num)
{
int needs_reset = 0;
int i;
- for (i=0; i<num; i+=2) {
+ for (i = 0; i < num; i+=2) {
needs_reset = optimize_connection(
NODE_HT(opt_conn[i]), 0x80 + link_to_register(link_connection(opt_conn[i],opt_conn[i+1])),
- NODE_HT(opt_conn[i+1]), 0x80 + link_to_register(link_connection(opt_conn[i+1],opt_conn[i])) );
+ NODE_HT(opt_conn[i+1]), 0x80 + link_to_register(link_connection(opt_conn[i+1],opt_conn[i])));
}
return needs_reset;
}
@@ -689,7 +689,7 @@ static unsigned setup_smp2(void)
val = get_row(0,0);
byte = (val>>16) & 0xfe;
- if (byte<0x2) { /* no coherent connection so get out.*/
+ if (byte < 0x2) { /* no coherent connection so get out.*/
nodes = 1;
return nodes;
}
@@ -717,7 +717,7 @@ static unsigned setup_smp2(void)
val = get_row(7,1);
byte = (val>>16) & 0xfe;
byte = get_linkn_last_count(byte);
- if ((byte>>2)==3) { /* Oh! we need to treat it as node2. So use another link*/
+ if ((byte>>2) == 3) { /* Oh! we need to treat it as node2. So use another link*/
val = get_row(0,0);
byte = (val>>16) & 0xfe;
#if TRY_HIGH_FIRST == 1
@@ -760,14 +760,14 @@ static unsigned setup_smp4(void)
u8 byte;
uint32_t val;
- nodes=4;
+ nodes = 4;
/* Setup and check temporary connection from Node 0 to Node 2 */
val = get_row(0,0);
byte = ((val>>16) & 0xfe) - link_connection(0,1);
byte = get_linkn_last_count(byte);
- if ((byte>>2)==0) { /* We should have two coherent for 4p and above*/
+ if ((byte>>2) == 0) { /* We should have two coherent for 4p and above*/
nodes = 2;
return nodes;
}
@@ -841,7 +841,7 @@ static unsigned setup_smp4(void)
#if (CONFIG_MAX_PHYSICAL_CPUS > 4) || CONFIG_MAX_PHYSICAL_CPUS_4_BUT_MORE_INSTALLED
/* We need to find out which link is to node3 */
- if ((byte>>2)==2) { /* one to node3, one to node0, one to node4*/
+ if ((byte>>2) == 2) { /* one to node3, one to node0, one to node4*/
val = get_row(7,3);
if ((val>>16) == 1) { /* that link is to node4, because via node1 it has been set, recompute it*/
val = get_row(2,2);
@@ -865,7 +865,7 @@ static unsigned setup_smp4(void)
val = get_row(7,3);
byte = ((val>>16) & 0xfe) - link_connection(7,2) - link_connection(7,1);
byte = get_linkn_last_count(byte);
- if ((byte>>2)==1) { /* We should have three coherent links on node 3 for 6p and above*/
+ if ((byte>>2) == 1) { /* We should have three coherent links on node 3 for 6p and above*/
byte &= 3; /*bit [3,2] is count-2*/
print_linkn("(3,5) link=", byte);
setup_remote_row_direct(3, 5, byte);
@@ -875,7 +875,7 @@ static unsigned setup_smp4(void)
byte = ((val>>16) & 0xfe) - link_connection(2,3) - link_connection(2,0);
byte = get_linkn_last_count(byte);
- if ((byte>>2)==1) { /* We should have three coherent link on node 2 for 6p and above*/
+ if ((byte>>2) == 1) { /* We should have three coherent link on node 2 for 6p and above*/
byte &= 3; /* bit [3,2] is count-2*/
print_linkn("(2,4) link=", byte);
setup_row_direct(2, 4, byte);
@@ -931,14 +931,14 @@ static unsigned setup_smp6(void)
u8 byte;
uint32_t val;
- nodes=6;
+ nodes = 6;
/* Setup and check temporary connection from Node 0 to Node 4 through 2*/
val = get_row(2,2);
byte = ((val>>16) & 0xfe) - link_connection(2,3) - link_connection(2,0);
byte = get_linkn_last_count(byte);
- if ((byte>>2)==0) { /* We should have three coherent link on node 2 for 6p and above*/
+ if ((byte>>2) == 0) { /* We should have three coherent link on node 2 for 6p and above*/
nodes = 4;
return nodes;
}
@@ -948,7 +948,7 @@ static unsigned setup_smp6(void)
val = get_row(3,3);
byte = ((val>>16) & 0xfe) - link_connection(3,2) - link_connection(3,1);
byte = get_linkn_last_count(byte);
- if ((byte>>2)==0) { /* We should have three coherent links on node 3 for 6p and above*/
+ if ((byte>>2) == 0) { /* We should have three coherent links on node 3 for 6p and above*/
nodes = 4;
return nodes;
}
@@ -975,7 +975,7 @@ static unsigned setup_smp6(void)
setup_row_indirect_group(conn6_1, ARRAY_SIZE(conn6_1));
- for (byte=0; byte<4; byte+=2) {
+ for (byte = 0; byte < 4; byte+=2) {
setup_temp_row(byte,byte+2);
}
verify_connection(7);
@@ -1003,7 +1003,7 @@ static unsigned setup_smp6(void)
enable_routing(4);
setup_temp_row(0,1);
- for (byte=0; byte<4; byte+=2) {
+ for (byte = 0; byte < 4; byte+=2) {
setup_temp_row(byte+1,byte+3);
}
verify_connection(7);
@@ -1031,7 +1031,7 @@ static unsigned setup_smp6(void)
#if CONFIG_MAX_PHYSICAL_CPUS > 6
/* We need to find out which link is to node5 */
- if ((byte>>2)==2) { /* one to node5, one to node2, one to node6*/
+ if ((byte>>2) == 2) { /* one to node5, one to node2, one to node6*/
val = get_row(7,5);
if ((val>>16) == 1) { /* that link is to node6, because via node 3 node 5 has been set*/
val = get_row(4,4);
@@ -1054,7 +1054,7 @@ static unsigned setup_smp6(void)
val = get_row(7,5);
byte = ((val>>16) & 0xfe) - link_connection(7,4) - link_connection(7,3);
byte = get_linkn_last_count(byte);
- if ((byte>>2)==1) { /* We should have three coherent links on node 5 for 6p and above*/
+ if ((byte>>2) == 1) { /* We should have three coherent links on node 5 for 6p and above*/
byte &= 3; /*bit [3,2] is count-2*/
print_linkn("(5,7) link=", byte);
setup_remote_row_direct(5, 7, byte);
@@ -1065,7 +1065,7 @@ static unsigned setup_smp6(void)
byte = ((val>>16) & 0xfe) - link_connection(4,5) - link_connection(4,2);
byte = get_linkn_last_count(byte);
- if ((byte>>2)==1) { /* We should have three coherent link on node 4 for 6p and above*/
+ if ((byte>>2) == 1) { /* We should have three coherent link on node 4 for 6p and above*/
byte &= 3; /* bit [3,2] is count-2*/
print_linkn("(4,6) link=", byte);
setup_row_direct(4, 6, byte);
@@ -1115,7 +1115,7 @@ static unsigned setup_smp6(void)
/* We need to do sth about reverse about setup_temp_row (0,1), (2,4), (1, 3), (3,5)
* It will be done by clear_dead_links
*/
- for (byte=0; byte<4; byte++) {
+ for (byte = 0; byte < 4; byte++) {
clear_temp_row(byte);
}
#endif
@@ -1134,7 +1134,7 @@ static unsigned setup_smp8(void)
u8 byte;
uint32_t val;
- nodes=8;
+ nodes = 8;
/* Setup and check temporary connection from Node 0 to Node 6 via 2 and 4 to 7 */
val = get_row(4,4);
@@ -1143,7 +1143,7 @@ static unsigned setup_smp8(void)
#else
byte = ((val>>16) & 0xfe) - link_connection(4,5) - link_connection(4,2);
byte = get_linkn_last_count(byte); /* Max link to 6*/
- if ((byte>>2)==0) { /* We should have two or three coherent links on node 4 for 8p*/
+ if ((byte>>2) == 0) { /* We should have two or three coherent links on node 4 for 8p*/
nodes = 6;
return nodes;
}
@@ -1170,7 +1170,7 @@ static unsigned setup_smp8(void)
val = get_row(5,5);
byte = ((val>>16) & 0xfe) - link_connection(5,4) - link_connection(5,3);
byte = get_linkn_last_count(byte);
- if ((byte>>2)==0) { /* We should have three coherent links on node 5 for 6p and above*/
+ if ((byte>>2) == 0) { /* We should have three coherent links on node 5 for 6p and above*/
nodes = 6;
return nodes;
}
@@ -1203,7 +1203,7 @@ static unsigned setup_smp8(void)
setup_row_indirect_group(conn8_1,ARRAY_SIZE(conn8_1));
- for (byte=0; byte<6; byte+=2) {
+ for (byte = 0; byte < 6; byte+=2) {
setup_temp_row(byte,byte+2);
}
verify_connection(7);
@@ -1241,7 +1241,7 @@ static unsigned setup_smp8(void)
setup_row_direct(5, 6, byte);
setup_temp_row(0,1); /* temp. link between nodes 0 and 1 */
- for (byte=0; byte<4; byte+=2) {
+ for (byte = 0; byte < 4; byte+=2) {
setup_temp_row(byte+1,byte+3);
}
setup_temp_row(5,6);
@@ -1249,7 +1249,7 @@ static unsigned setup_smp8(void)
verify_connection(7);
val = get_row(7,6); // to chect it if it is node6 before renaming
- if ( (val>>16) == 1) { // it is real node 7 so swap it
+ if ((val>>16) == 1) { // it is real node 7 so swap it
/* We need to recompute link to 6 */
val = get_row(5,5);
byte = ((val>>16) & 0xfe) - link_connection(5,3);
@@ -1262,7 +1262,7 @@ static unsigned setup_smp8(void)
setup_row_direct(5, 6, byte);
#if 0
setup_temp_row(0,1); /* temp. link between nodes 0 and 1 */
- for (byte=0; byte<4; byte+=2) {
+ for (byte = 0; byte < 4; byte+=2) {
setup_temp_row(byte+1,byte+3);
}
#endif
@@ -1282,7 +1282,7 @@ static unsigned setup_smp8(void)
#if !CROSS_BAR_47_56
setup_temp_row(0,1);
- for (byte=0; byte<6; byte+=2) {
+ for (byte = 0; byte < 6; byte+=2) {
setup_temp_row(byte+1,byte+3);
}
@@ -1302,7 +1302,7 @@ static unsigned setup_smp8(void)
setup_row_direct(4, 7, byte);
/* Setup and check temporary connection from Node 0 to Node 7 through 2, and 4*/
- for (byte=0; byte<4; byte+=2) {
+ for (byte = 0; byte < 4; byte+=2) {
setup_temp_row(byte,byte+2);
}
@@ -1327,7 +1327,7 @@ static unsigned setup_smp8(void)
setup_row_direct(5, 7, byte);
setup_temp_row(0,1); /* temp. link between nodes 0 and 1 */
- for (byte=0; byte<4; byte+=2) {
+ for (byte = 0; byte < 4; byte+=2) {
setup_temp_row(byte+1,byte+3);
}
@@ -1511,7 +1511,7 @@ static unsigned verify_mp_capabilities(unsigned nodes)
mask = 0x06; /* BigMPCap */
- for (node=0; node<nodes; node++) {
+ for (node = 0; node < nodes; node++) {
mask &= pci_read_config32(NODE_MC(node), 0xe8);
}
@@ -1525,7 +1525,7 @@ static unsigned verify_mp_capabilities(unsigned nodes)
break;
#endif
case 0x00: /* Non SMP */
- if (nodes >1 ) {
+ if (nodes >1) {
printk(BIOS_ERR, "Going back to UP\n");
return 1;
}
@@ -1542,7 +1542,7 @@ static void clear_dead_routes(unsigned nodes)
int last_row;
int node, row;
#if CONFIG_MAX_PHYSICAL_CPUS == 8
- if (nodes==8) return;/* don't touch (7,7)*/
+ if (nodes == 8) return;/* don't touch (7,7)*/
#endif
last_row = nodes;
if (nodes == 1) {
@@ -1555,9 +1555,9 @@ static void clear_dead_routes(unsigned nodes)
}
/* Update the local row */
- for ( node=0; node<nodes; node++) {
+ for (node = 0; node < nodes; node++) {
uint32_t val = 0;
- for (row =0; row<nodes; row++) {
+ for (row =0; row < nodes; row++) {
val |= get_row(node, row);
}
fill_row(node, node, (((val & 0xff) | ((val >> 8) & 0xff)) << 16) | 0x0101);
@@ -1571,7 +1571,7 @@ static unsigned verify_dualcore(unsigned nodes)
unsigned node, totalcpus, tmp;
totalcpus = 0;
- for (node=0; node<nodes; node++) {
+ for (node = 0; node < nodes; node++) {
tmp = (pci_read_config32(NODE_MC(node), 0xe8) >> 12) & 3 ;
totalcpus += (tmp + 1);
}
@@ -1626,7 +1626,7 @@ static void coherent_ht_finalize(unsigned nodes)
/* Only respond to real CPU pci configuration cycles
* and optimize the HT settings
*/
- val=pci_read_config32(dev, HT_TRANSACTION_CONTROL);
+ val = pci_read_config32(dev, HT_TRANSACTION_CONTROL);
val &= ~((HTTC_BUF_REL_PRI_MASK << HTTC_BUF_REL_PRI_SHIFT) |
(HTTC_MED_PRI_BYP_CNT_MASK << HTTC_MED_PRI_BYP_CNT_SHIFT) |
(HTTC_HI_PRI_BYP_CNT_MASK << HTTC_HI_PRI_BYP_CNT_SHIFT));
@@ -1666,14 +1666,14 @@ static int apply_cpu_errata_fixes(unsigned nodes)
if ((cmd & (3 << 0)) != 2) {
cmd &= ~(3<<0);
cmd |= (2<<0);
- pci_write_config32(dev, 0x70, cmd );
+ pci_write_config32(dev, 0x70, cmd);
needs_reset = 1;
}
cmd = pci_read_config32(dev, 0x7c);
if ((cmd & (3 << 4)) != 0) {
cmd &= ~(3<<4);
cmd |= (0<<4);
- pci_write_config32(dev, 0x7c, cmd );
+ pci_write_config32(dev, 0x7c, cmd);
needs_reset = 1;
}
/* Clock Power/Timing Low */
@@ -1694,7 +1694,7 @@ static int apply_cpu_errata_fixes(unsigned nodes)
cmd_ref = 0x04e20707; /* Registered */
cmd = pci_read_config32(dev, 0xd4);
if (cmd != cmd_ref) {
- pci_write_config32(dev, 0xd4, cmd_ref );
+ pci_write_config32(dev, 0xd4, cmd_ref);
needs_reset = 1; /* Needed? */
}
}
@@ -1765,14 +1765,14 @@ static int optimize_link_coherent_ht(void)
nodes = get_nodes();
#if CONFIG_MAX_PHYSICAL_CPUS > 1
- if (nodes>1) {
+ if (nodes > 1) {
needs_reset |= optimize_connection(
NODE_HT(0), 0x80 + link_to_register(link_connection(0,1)),
- NODE_HT(1), 0x80 + link_to_register(link_connection(1,0)) );
+ NODE_HT(1), 0x80 + link_to_register(link_connection(1,0)));
}
#if CONFIG_MAX_PHYSICAL_CPUS > 2
- if (nodes>2) {
+ if (nodes > 2) {
/* optimize physical connections - by LYH */
static const u8 opt_conn4[] = {
0,2,
@@ -1784,7 +1784,7 @@ static int optimize_link_coherent_ht(void)
#endif
#if CONFIG_MAX_PHYSICAL_CPUS > 4
- if (nodes>4) {
+ if (nodes > 4) {
static const uint8_t opt_conn6[] ={
2, 4,
3, 5,
@@ -1797,7 +1797,7 @@ static int optimize_link_coherent_ht(void)
#endif
#if CONFIG_MAX_PHYSICAL_CPUS > 6
- if (nodes>6) {
+ if (nodes > 6) {
static const uint8_t opt_conn8[] ={
4, 6,
#if CROSS_BAR_47_56
diff --git a/src/northbridge/amd/amdk8/debug.c b/src/northbridge/amd/amdk8/debug.c
index 18fc85866d..a8431f4df2 100644
--- a/src/northbridge/amd/amdk8/debug.c
+++ b/src/northbridge/amd/amdk8/debug.c
@@ -71,7 +71,7 @@ static inline void dump_pci_device_index_wait(unsigned dev, uint32_t index_reg)
int j;
printk(BIOS_DEBUG, "\n%02x:",i);
val = pci_read_config32_index_wait(dev, index_reg, i);
- for (j=0;j<4;j++) {
+ for (j = 0; j < 4; j++) {
printk(BIOS_DEBUG, " %02x", val & 0xff);
val >>= 8;
}
@@ -185,7 +185,7 @@ static void dump_smbus_registers(void)
printk(BIOS_DEBUG, "\n");
for (device = 1; device < 0x80; device++) {
int j;
- if ( smbus_read_byte(device, 0) < 0 ) continue;
+ if (smbus_read_byte(device, 0) < 0) continue;
printk(BIOS_DEBUG, "smbus: %02x", device);
for (j = 0; j < 256; j++) {
int status;
@@ -211,7 +211,7 @@ static inline void dump_io_resources(unsigned port)
int i;
udelay(2000);
printk(BIOS_DEBUG, "%04x:\n", port);
- for (i=0;i<256;i++) {
+ for (i = 0; i < 256; i++) {
uint8_t val;
if ((i & 0x0f) == 0) {
printk(BIOS_DEBUG, "%02x:", i);
@@ -229,8 +229,8 @@ static inline void dump_mem(unsigned start, unsigned end)
{
unsigned i;
printk(BIOS_DEBUG, "dump_mem:");
- for (i=start;i<end;i++) {
- if ((i & 0xf)==0) {
+ for (i = start; i < end; i++) {
+ if ((i & 0xf) == 0) {
printk(BIOS_DEBUG, "\n%08x:", i);
}
printk(BIOS_DEBUG, " %02x", (unsigned char)*((unsigned char *)i));
diff --git a/src/northbridge/amd/amdk8/early_ht.c b/src/northbridge/amd/amdk8/early_ht.c
index d07da2aea1..638511ad52 100644
--- a/src/northbridge/amd/amdk8/early_ht.c
+++ b/src/northbridge/amd/amdk8/early_ht.c
@@ -64,7 +64,7 @@ static void enumerate_ht_chain(void)
pci_devfn_t devx;
#if CONFIG_HT_CHAIN_END_UNITID_BASE != 0x20
- if (next_unitid>=0x18) { // don't get mask out by k8, at this time BSP, RT is not enabled, it will response from 0x18,0--0x1f.
+ if (next_unitid >= 0x18) { // don't get mask out by k8, at this time BSP, RT is not enabled, it will response from 0x18,0--0x1f.
if (!end_used) {
next_unitid = CONFIG_HT_CHAIN_END_UNITID_BASE;
end_used = 1;
@@ -127,7 +127,7 @@ out:
;
#if CONFIG_HT_CHAIN_END_UNITID_BASE != 0x20
- if ((ht_dev_num>1) && (real_last_unitid != CONFIG_HT_CHAIN_END_UNITID_BASE) && !end_used) {
+ if ((ht_dev_num > 1) && (real_last_unitid != CONFIG_HT_CHAIN_END_UNITID_BASE) && !end_used) {
uint16_t flags;
dev = PCI_DEV(0,real_last_unitid, 0);
flags = pci_read_config16(dev, real_last_pos + PCI_CAP_FLAGS);
diff --git a/src/northbridge/amd/amdk8/exit_from_self.c b/src/northbridge/amd/amdk8/exit_from_self.c
index 858a0c0a3f..6da042f673 100644
--- a/src/northbridge/amd/amdk8/exit_from_self.c
+++ b/src/northbridge/amd/amdk8/exit_from_self.c
@@ -144,7 +144,7 @@ void exit_from_self(int controllers, const struct mem_controller *ctrl,
dcm =
pci_read_config32(ctrl[i].f2, DRAM_CTRL_MISC);
} while (((dcm & DCM_MemClrStatus) ==
- 0) /* || ((dcm & DCM_DramEnabled) == 0) */ );
+ 0) /* || ((dcm & DCM_DramEnabled) == 0) */);
if (loops >= TIMEOUT_LOOPS) {
printk(BIOS_DEBUG, "timeout with with cntrl[%d]\n", i);
diff --git a/src/northbridge/amd/amdk8/f.h b/src/northbridge/amd/amdk8/f.h
index ce039af4df..a67915071c 100644
--- a/src/northbridge/amd/amdk8/f.h
+++ b/src/northbridge/amd/amdk8/f.h
@@ -537,9 +537,9 @@ static inline void wait_all_core0_mem_trained(struct sys_info *sysinfo)
if (sysinfo->nodes == 1) return; // in case only one CPU installed
- for (i=1; i<sysinfo->nodes; i++) {
+ for (i = 1; i < sysinfo->nodes; i++) {
/* Skip everything if I don't have any memory on this controller */
- if (sysinfo->mem_trained[i]==0x00) continue;
+ if (sysinfo->mem_trained[i]== 0x00) continue;
mask |= (1<<i);
@@ -564,7 +564,7 @@ static inline void wait_all_core0_mem_trained(struct sys_info *sysinfo)
i%=sysinfo->nodes;
}
- for (i=0; i<sysinfo->nodes; i++) {
+ for (i = 0; i < sysinfo->nodes; i++) {
printk(BIOS_DEBUG, "mem_trained[%02x]=%02x\n", i, sysinfo->mem_trained[i]);
switch(sysinfo->mem_trained[i]) {
case 0: //don't need train
diff --git a/src/northbridge/amd/amdk8/get_sblk_pci1234.c b/src/northbridge/amd/amdk8/get_sblk_pci1234.c
index 9cf40830e4..764b6d19a5 100644
--- a/src/northbridge/amd/amdk8/get_sblk_pci1234.c
+++ b/src/northbridge/amd/amdk8/get_sblk_pci1234.c
@@ -211,7 +211,7 @@ void get_sblk_pci1234(void)
dev = dev_find_slot(0, PCI_DEVFN(0x18, 1));
- for (j=0;j<4;j++) {
+ for (j = 0; j < 4; j++) {
uint32_t dwordx;
dwordx = pci_read_config32(dev, 0xe0 + j*4);
dwordx &=0xffff0ff1; /* keep bus num, node_id, link_num, enable bits */
@@ -225,7 +225,7 @@ void get_sblk_pci1234(void)
/* We need to find out the number of HC
* for exact match
*/
- for (i=1;i<sysconf.hc_possible_num;i++) {
+ for (i = 1; i < sysconf.hc_possible_num; i++) {
if ((dwordx & 0xff0) == (sysconf.pci1234[i] & 0xff0)) {
sysconf.pci1234[i] = dwordx;
sysconf.hcdn[i] = sysconf.hcdn_reg[j];
@@ -234,7 +234,7 @@ void get_sblk_pci1234(void)
}
/* For 0xff0 match or same node */
- for (i=1;i<sysconf.hc_possible_num;i++) {
+ for (i = 1; i < sysconf.hc_possible_num; i++) {
if ((dwordx & 0xff0) == (dwordx & sysconf.pci1234[i] & 0xff0)) {
sysconf.pci1234[i] = dwordx;
sysconf.hcdn[i] = sysconf.hcdn_reg[j];
@@ -244,7 +244,7 @@ void get_sblk_pci1234(void)
}
}
- for (i=1;i<sysconf.hc_possible_num;i++) {
+ for (i = 1; i < sysconf.hc_possible_num; i++) {
if ((sysconf.pci1234[i] & 1) != 1) {
sysconf.pci1234[i] = 0;
sysconf.hcdn[i] = 0x20202020;
diff --git a/src/northbridge/amd/amdk8/incoherent_ht.c b/src/northbridge/amd/amdk8/incoherent_ht.c
index bc50b66a24..9fa0034848 100644
--- a/src/northbridge/amd/amdk8/incoherent_ht.c
+++ b/src/northbridge/amd/amdk8/incoherent_ht.c
@@ -374,7 +374,7 @@ static int ht_setup_chainx(pci_devfn_t udev, uint8_t upos, uint8_t bus,
#if CONFIG_HT_CHAIN_END_UNITID_BASE != 0x20
if (offset_unitid) {
- if (next_unitid>= (bus ? 0x20:0x18) ) {
+ if (next_unitid >= (bus ? 0x20:0x18)) {
if (!end_used) {
next_unitid = CONFIG_HT_CHAIN_END_UNITID_BASE;
end_used = 1;
@@ -428,9 +428,9 @@ static int ht_setup_chainx(pci_devfn_t udev, uint8_t upos, uint8_t bus,
/* Remeber the location of the last device */
udev = dev;
upos = pos;
- uoffs = ( offs != PCI_HT_SLAVE0_OFFS ) ? PCI_HT_SLAVE0_OFFS : PCI_HT_SLAVE1_OFFS;
+ uoffs = (offs != PCI_HT_SLAVE0_OFFS) ? PCI_HT_SLAVE0_OFFS : PCI_HT_SLAVE1_OFFS;
- } while (last_unitid != next_unitid );
+ } while (last_unitid != next_unitid);
#if CONFIG_HT_CHAIN_END_UNITID_BASE != 0x20
out:
@@ -438,7 +438,7 @@ out:
end_of_chain: ;
#if CONFIG_HT_CHAIN_END_UNITID_BASE != 0x20
- if (offset_unitid && (ht_dev_num>1) && (real_last_unitid != CONFIG_HT_CHAIN_END_UNITID_BASE) && !end_used ) {
+ if (offset_unitid && (ht_dev_num > 1) && (real_last_unitid != CONFIG_HT_CHAIN_END_UNITID_BASE) && !end_used) {
uint16_t flags;
flags = pci_read_config16(PCI_DEV(bus,real_last_unitid,0), real_last_pos + PCI_CAP_FLAGS);
flags &= ~0x1f;
@@ -448,7 +448,7 @@ end_of_chain: ;
#if CONFIG_RAMINIT_SYSINFO
// Here need to change the dev in the array
int i;
- for (i=0;i<sysinfo->link_pair_num;i++)
+ for (i = 0; i < sysinfo->link_pair_num; i++)
{
struct link_pair_st *link_pair = &sysinfo->link_pair[i];
if (link_pair->udev == PCI_DEV(bus, real_last_unitid, 0)) {
@@ -514,9 +514,9 @@ static int optimize_link_read_pointer(uint8_t node, uint8_t linkn, uint8_t linkt
link_type = dword & 0xff;
- if ( (link_type & 7) == linkt ) { /* Coherent Link only linkt = 3, ncoherent = 7*/
+ if ((link_type & 7) == linkt) { /* Coherent Link only linkt = 3, ncoherent = 7*/
dword_old = dword = pci_read_config32(PCI_DEV(0,0x18+node,3), 0xdc);
- dword &= ~( 0xff<<(linkn *8) );
+ dword &= ~(0xff<<(linkn *8));
dword |= val << (linkn *8);
if (dword != dword_old) {
@@ -551,10 +551,10 @@ static int optimize_link_read_pointers_chain(uint8_t ht_c_num)
devn = offset_unit_id(i == 0) ? CONFIG_HT_CHAIN_UNITID_BASE : 1;
- reg = pci_read_config32( PCI_DEV(busn, devn, 0), PCI_VENDOR_ID); // ? the chain dev maybe offseted
- if ( (reg & 0xffff) == PCI_VENDOR_ID_AMD) {
+ reg = pci_read_config32(PCI_DEV(busn, devn, 0), PCI_VENDOR_ID); // ? the chain dev maybe offseted
+ if ((reg & 0xffff) == PCI_VENDOR_ID_AMD) {
val = 0x25;
- } else if ( (reg & 0xffff) == PCI_VENDOR_ID_NVIDIA ) {
+ } else if ((reg & 0xffff) == PCI_VENDOR_ID_NVIDIA) {
val = 0x25;//???
} else {
continue;
@@ -581,9 +581,9 @@ static int set_ht_link_buffer_count(uint8_t node, uint8_t linkn, uint8_t linkt,
dword = pci_read_config32(dev, regpos);
link_type = dword & 0xff;
- if ( (link_type & 0x7) == linkt ) { /* Coherent Link only linkt = 3, ncoherent = 7*/
+ if ((link_type & 0x7) == linkt) { /* Coherent Link only linkt = 3, ncoherent = 7*/
regpos = 0x90 + (linkn * 0x20);
- dword = pci_read_config32(dev, regpos );
+ dword = pci_read_config32(dev, regpos);
if (dword != val) {
pci_write_config32(dev, regpos, val);
@@ -615,8 +615,8 @@ static int set_ht_link_buffer_counts_chain(uint8_t ht_c_num, unsigned vendorid,
busn = (reg & 0xff0000)>>16; //busn
for (devn = 0; devn < 0x20; devn++) {
- reg = pci_read_config32( PCI_DEV(busn, devn, 0), PCI_VENDOR_ID); //1?
- if ( (reg & 0xffff) == vendorid ) {
+ reg = pci_read_config32(PCI_DEV(busn, devn, 0), PCI_VENDOR_ID); //1?
+ if ((reg & 0xffff) == vendorid) {
reset_needed |= set_ht_link_buffer_count(nodeid, linkn, 0x07,val);
break;
}
@@ -659,14 +659,14 @@ static int ht_setup_chains(uint8_t ht_c_num)
reg = pci_read_config32(PCI_DEV(0,0x18,1), 0xe0 + i * 4);
//We need setup 0x94, 0xb4, and 0xd4 according to the reg
- devpos = ((reg & 0xf0)>>4)+0x18; // nodeid; it will decide 0x18 or 0x19
- regpos = ((reg & 0xf00)>>8) * 0x20 + 0x94; // link n; it will decide 0x94 or 0xb4, 0x0xd4;
+ devpos = ((reg & 0xf0)>>4)+0x18; // nodeid;it will decide 0x18 or 0x19
+ regpos = ((reg & 0xf00)>>8) * 0x20 + 0x94; // link n;it will decide 0x94 or 0xb4, 0x0xd4;
busn = (reg & 0xff0000)>>16;
- dword = pci_read_config32( PCI_DEV(0, devpos, 0), regpos) ;
+ dword = pci_read_config32(PCI_DEV(0, devpos, 0), regpos) ;
dword &= ~(0xffff<<8);
dword |= (reg & 0xffff0000)>>8;
- pci_write_config32( PCI_DEV(0, devpos,0), regpos , dword);
+ pci_write_config32(PCI_DEV(0, devpos,0), regpos , dword);
/* Make certain the HT bus is not enumerated */
ht_collapse_previous_enumeration(busn, offset_unit_id(i == 0));
@@ -712,29 +712,29 @@ static int ht_setup_chains_x(void)
/* read PCI_DEV(0,0x18,0) 0x64 bit [8:9] to find out SbLink m */
reg = pci_read_config32(PCI_DEV(0, 0x18, 0), 0x64);
- /* update PCI_DEV(0, 0x18, 1) 0xe0 to 0x05000m03, and next_busn=0x3f+1 */
- print_linkn_in("SBLink=", ((reg>>8) & 3) );
+ /* update PCI_DEV(0, 0x18, 1) 0xe0 to 0x05000m03, and next_busn = 0x3f+1 */
+ print_linkn_in("SBLink=", ((reg>>8) & 3));
#if CONFIG_RAMINIT_SYSINFO
sysinfo->sblk = (reg>>8) & 3;
sysinfo->sbbusn = 0;
sysinfo->nodes = nodes;
#endif
- tempreg = 3 | ( 0<<4) | (((reg>>8) & 3)<<8) | (0<<16)| (0x3f<<24);
+ tempreg = 3 | (0<<4) | (((reg>>8) & 3)<<8) | (0<<16)| (0x3f<<24);
pci_write_config32(PCI_DEV(0, 0x18, 1), 0xe0, tempreg);
- next_busn=0x3f+1; /* 0 will be used ht chain with SB we need to keep SB in bus0 in auto stage*/
+ next_busn = 0x3f+1; /* 0 will be used ht chain with SB we need to keep SB in bus0 in auto stage*/
#if CONFIG_K8_ALLOCATE_IO_RANGE
/* io range allocation */
- tempreg = 0 | (((reg>>8) & 0x3) << 4 )| (0x3<<12); //limit
+ tempreg = 0 | (((reg>>8) & 0x3) << 4)| (0x3<<12); //limit
pci_write_config32(PCI_DEV(0, 0x18, 1), 0xC4, tempreg);
- tempreg = 3 | ( 3<<4) | (0<<12); //base
+ tempreg = 3 | (3<<4) | (0<<12); //base
pci_write_config32(PCI_DEV(0, 0x18, 1), 0xC0, tempreg);
next_io_base = 0x3+0x1;
#endif
/* clean others */
- for (ht_c_num=1;ht_c_num<4; ht_c_num++) {
+ for (ht_c_num = 1;ht_c_num < 4; ht_c_num++) {
pci_write_config32(PCI_DEV(0, 0x18, 1), 0xe0 + ht_c_num * 4, 0);
#if CONFIG_K8_ALLOCATE_IO_RANGE
@@ -744,11 +744,11 @@ static int ht_setup_chains_x(void)
#endif
}
- for (nodeid=0; nodeid<nodes; nodeid++) {
+ for (nodeid = 0; nodeid < nodes; nodeid++) {
pci_devfn_t dev;
uint8_t linkn;
dev = PCI_DEV(0, 0x18+nodeid,0);
- for (linkn = 0; linkn<3; linkn++) {
+ for (linkn = 0; linkn < 3; linkn++) {
unsigned regpos;
regpos = 0x98 + 0x20 * linkn;
reg = pci_read_config32(dev, regpos);
@@ -756,7 +756,7 @@ static int ht_setup_chains_x(void)
print_linkn_in("NC node|link=", ((nodeid & 0xf)<<4)|(linkn & 0xf));
tempreg = 3 | (nodeid <<4) | (linkn<<8);
/*compare (temp & 0xffff), with (PCI(0, 0x18, 1) 0xe0 to 0xec & 0xfffff) */
- for (ht_c_num=0;ht_c_num<4; ht_c_num++) {
+ for (ht_c_num = 0;ht_c_num < 4; ht_c_num++) {
reg = pci_read_config32(PCI_DEV(0, 0x18, 1), 0xe0 + ht_c_num * 4);
if (((reg & 0xffff) == (tempreg & 0xffff)) || ((reg & 0xffff) == 0x0000)) { /*we got it*/
break;
@@ -774,7 +774,7 @@ static int ht_setup_chains_x(void)
/* io range allocation */
tempreg = nodeid | (linkn<<4) | ((next_io_base+0x3)<<12); //limit
pci_write_config32(PCI_DEV(0, 0x18, 1), 0xC4 + ht_c_num * 8, tempreg);
- tempreg = 3 /*| ( 3<<4)*/ | (next_io_base<<12); //base :ISA and VGA ?
+ tempreg = 3 /*| (3<<4)*/ | (next_io_base<<12); //base :ISA and VGA ?
pci_write_config32(PCI_DEV(0, 0x18, 1), 0xC0 + ht_c_num * 8, tempreg);
next_io_base += 0x3+0x1;
#endif
@@ -783,11 +783,11 @@ static int ht_setup_chains_x(void)
}
/*update 0xe0, 0xe4, 0xe8, 0xec from PCI_DEV(0, 0x18,1) to PCI_DEV(0, 0x19,1) to PCI_DEV(0, 0x1f,1);*/
- for (nodeid = 1; nodeid<nodes; nodeid++) {
+ for (nodeid = 1; nodeid < nodes; nodeid++) {
int i;
pci_devfn_t dev;
dev = PCI_DEV(0, 0x18+nodeid,1);
- for (i = 0; i< 4; i++) {
+ for (i = 0; i < 4; i++) {
unsigned regpos;
regpos = 0xe0 + i * 4;
reg = pci_read_config32(PCI_DEV(0, 0x18, 1), regpos);
@@ -796,13 +796,13 @@ static int ht_setup_chains_x(void)
#if CONFIG_K8_ALLOCATE_IO_RANGE
/* io range allocation */
- for (i = 0; i< 4; i++) {
+ for (i = 0; i < 4; i++) {
unsigned regpos;
regpos = 0xc4 + i * 8;
reg = pci_read_config32(PCI_DEV(0, 0x18, 1), regpos);
pci_write_config32(dev, regpos, reg);
}
- for (i = 0; i< 4; i++) {
+ for (i = 0; i < 4; i++) {
unsigned regpos;
regpos = 0xc0 + i * 8;
reg = pci_read_config32(PCI_DEV(0, 0x18, 1), regpos);
@@ -812,8 +812,8 @@ static int ht_setup_chains_x(void)
}
/* recount ht_c_num*/
- uint8_t i=0;
- for (ht_c_num=0;ht_c_num<4; ht_c_num++) {
+ uint8_t i = 0;
+ for (ht_c_num = 0;ht_c_num < 4; ht_c_num++) {
reg = pci_read_config32(PCI_DEV(0, 0x18, 1), 0xe0 + ht_c_num * 4);
if (((reg & 0xf) != 0x0)) {
i++;
@@ -841,7 +841,7 @@ static int optimize_link_incoherent_ht(struct sys_info *sysinfo)
printk(BIOS_SPEW, "entering optimize_link_incoherent_ht\n");
printk(BIOS_SPEW, "sysinfo->link_pair_num=0x%x\n", link_pair_num);
- for (i=0; i< link_pair_num; i++) {
+ for (i = 0; i < link_pair_num; i++) {
struct link_pair_st *link_pair= &sysinfo->link_pair[i];
reset_needed |= ht_optimize_link(link_pair->udev, link_pair->upos, link_pair->uoffs, link_pair->dev, link_pair->pos, link_pair->offs);
printk(BIOS_SPEW, "after ht_optimize_link for link pair %d, reset_needed=0x%x\n", i, reset_needed);
diff --git a/src/northbridge/amd/amdk8/misc_control.c b/src/northbridge/amd/amdk8/misc_control.c
index a2b4651a4a..3cbeb042dd 100644
--- a/src/northbridge/amd/amdk8/misc_control.c
+++ b/src/northbridge/amd/amdk8/misc_control.c
@@ -120,7 +120,7 @@ static void misc_control_init(struct device *dev)
*/
cmd = pci_read_config32(dev, 0x44);
cmd |= (1<<6) | (1<<25);
- pci_write_config32(dev, 0x44, cmd );
+ pci_write_config32(dev, 0x44, cmd);
#if !CONFIG_K8_REV_F_SUPPORT
if (is_cpu_pre_c0()) {
@@ -129,11 +129,11 @@ static void misc_control_init(struct device *dev)
*/
cmd = pci_read_config32(dev, 0x80);
cmd &= ~(1<<0);
- pci_write_config32(dev, 0x80, cmd );
+ pci_write_config32(dev, 0x80, cmd);
cmd = pci_read_config32(dev, 0x84);
cmd &= ~(1<<24);
cmd &= ~(1<<8);
- pci_write_config32(dev, 0x84, cmd );
+ pci_write_config32(dev, 0x84, cmd);
/* Errata 66
* Limit the number of downstream posted requests to 1
@@ -142,14 +142,14 @@ static void misc_control_init(struct device *dev)
if ((cmd & (3 << 0)) != 2) {
cmd &= ~(3<<0);
cmd |= (2<<0);
- pci_write_config32(dev, 0x70, cmd );
+ pci_write_config32(dev, 0x70, cmd);
needs_reset = 1;
}
cmd = pci_read_config32(dev, 0x7c);
if ((cmd & (3 << 4)) != 0) {
cmd &= ~(3<<4);
cmd |= (0<<4);
- pci_write_config32(dev, 0x7c, cmd );
+ pci_write_config32(dev, 0x7c, cmd);
needs_reset = 1;
}
/* Clock Power/Timing Low */
@@ -175,7 +175,7 @@ static void misc_control_init(struct device *dev)
}
cmd = pci_read_config32(dev, 0xd4);
if (cmd != cmd_ref) {
- pci_write_config32(dev, 0xd4, cmd_ref );
+ pci_write_config32(dev, 0xd4, cmd_ref);
needs_reset = 1; /* Needed? */
}
}
diff --git a/src/northbridge/amd/amdk8/northbridge.c b/src/northbridge/amd/amdk8/northbridge.c
index d80c565d78..fbd4da83b0 100644
--- a/src/northbridge/amd/amdk8/northbridge.c
+++ b/src/northbridge/amd/amdk8/northbridge.c
@@ -42,7 +42,7 @@ struct amdk8_sysconf_t sysconf;
#define MAX_FX_DEVS 8
static device_t __f0_dev[MAX_FX_DEVS];
static device_t __f1_dev[MAX_FX_DEVS];
-static unsigned fx_devs=0;
+static unsigned fx_devs = 0;
static void get_fx_devs(void)
{
@@ -178,7 +178,7 @@ static void amdk8_scan_chain(struct bus *link)
config_busses &= 0x000fc88;
config_busses |=
(3 << 0) | /* rw enable, no device compare */
- (( nodeid & 7) << 4) |
+ ((nodeid & 7) << 4) |
((link->link_num & 3) << 8) |
((link->secondary) << 16) |
(0xff << 24);
@@ -489,8 +489,8 @@ static void amdk8_create_vga_resource(device_t dev, unsigned nodeid)
printk(BIOS_DEBUG, "VGA: vga_pri bus num = %d link bus range [%d,%d]\n", vga_pri->bus->secondary,
link->secondary,link->subordinate);
/* We need to make sure the vga_pri is under the link */
- if ((vga_pri->bus->secondary >= link->secondary ) &&
- (vga_pri->bus->secondary <= link->subordinate )
+ if ((vga_pri->bus->secondary >= link->secondary) &&
+ (vga_pri->bus->secondary <= link->subordinate)
)
#endif
break;
@@ -505,7 +505,7 @@ static void amdk8_create_vga_resource(device_t dev, unsigned nodeid)
/* allocate a temp resource for the legacy VGA buffer */
resource = new_resource(dev, IOINDEX(4, link->link_num));
- if (!resource){
+ if (!resource) {
printk(BIOS_DEBUG, "VGA: %s out of resources.\n", dev_path(dev));
return;
}
@@ -697,7 +697,7 @@ static struct hw_mem_hole_info get_hw_mem_hole_info(void)
*/
if (mem_hole.node_id==-1) {
u32 limitk_pri = 0;
- for (i=0; i<8; i++) {
+ for (i = 0; i < 8; i++) {
u32 base, limit;
unsigned base_k, limit_k;
base = f1_read_config32(0x40 + (i << 3));
@@ -738,7 +738,7 @@ static void disable_hoist_memory(unsigned long hole_startk, int node_id)
hole_sizek = (4*1024*1024) - hole_startk;
- for (i=7;i>node_id;i--) {
+ for (i = 7; i > node_id; i--) {
base = f1_read_config32(0x40 + (i << 3));
if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
@@ -775,7 +775,7 @@ static u32 hoist_memory(unsigned long hole_startk, int node_id)
carry_over = (4*1024*1024) - hole_startk;
- for (i=7;i>node_id;i--) {
+ for (i = 7; i > node_id; i--) {
base = f1_read_config32(0x40 + (i << 3));
if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
@@ -970,11 +970,11 @@ static void amdk8_domain_set_resources(device_t dev)
#endif
/* See if I need to split the region to accommodate pci memory space */
- if ( (basek < 4*1024*1024 ) && (limitk > mmio_basek) ) {
+ if ((basek < 4*1024*1024) && (limitk > mmio_basek)) {
if (basek <= mmio_basek) {
unsigned pre_sizek;
pre_sizek = mmio_basek - basek;
- if (pre_sizek>0) {
+ if (pre_sizek > 0) {
ram_resource(dev, (idx | i), basek, pre_sizek);
idx += 0x10;
sizek -= pre_sizek;
@@ -984,7 +984,7 @@ static void amdk8_domain_set_resources(device_t dev)
#if CONFIG_HW_MEM_HOLE_SIZEK != 0
if (reset_memhole)
#if !CONFIG_K8_REV_F_SUPPORT
- if (!is_cpu_pre_e0() )
+ if (!is_cpu_pre_e0())
#endif
sizek += hoist_memory(mmio_basek,i);
#endif
@@ -1210,7 +1210,7 @@ static void cpu_bus_scan(device_t dev)
// ----> you can mixed single core e0 and dual core e0 at any sequence
// That is the typical case
- if (j == 0 ){
+ if (j == 0) {
#if !CONFIG_K8_REV_F_SUPPORT
e0_later_single_core = is_e0_later_in_bsp(i); // single core
#else
@@ -1222,10 +1222,10 @@ static void cpu_bus_scan(device_t dev)
if (e0_later_single_core) {
printk(BIOS_DEBUG, "\tFound Rev E or Rev F later single core\n");
- j=1;
+ j = 1;
}
- if (siblings > j ) {
+ if (siblings > j) {
}
else {
siblings = j;
@@ -1243,7 +1243,7 @@ static void cpu_bus_scan(device_t dev)
jj = siblings;
}
- for (j = 0; j <=jj; j++ ) {
+ for (j = 0; j <= jj; j++) {
u32 apic_id = i * (nb_cfg_54?(siblings+1):1) + j * (nb_cfg_54?1:8);
if (sysconf.enabled_apic_ext_id) {
if (apic_id != 0 || sysconf.lift_bsp_apicid) {
diff --git a/src/northbridge/amd/amdk8/raminit.c b/src/northbridge/amd/amdk8/raminit.c
index aab9fa7620..7fbe4ec20b 100644
--- a/src/northbridge/amd/amdk8/raminit.c
+++ b/src/northbridge/amd/amdk8/raminit.c
@@ -645,7 +645,7 @@ static struct dimm_size spd_get_dimm_size(unsigned device)
value = spd_read_byte(device, 5); /* number of physical banks */
if (value < 0) goto hw_err;
if (value == 1) goto out;
- if ((value != 2) && (value != 4 )) {
+ if ((value != 2) && (value != 4)) {
goto val_err;
}
#if CONFIG_QRANK_DIMM_SUPPORT
@@ -760,7 +760,7 @@ static void set_dimm_map(const struct mem_controller *ctrl, struct dimm_size sz,
map &= ~(0xf << (index * 4));
#if CONFIG_QRANK_DIMM_SUPPORT
if (sz.rank == 4) {
- map &= ~(0xf << ( (index + 2) * 4));
+ map &= ~(0xf << ((index + 2) * 4));
}
#endif
@@ -771,7 +771,7 @@ static void set_dimm_map(const struct mem_controller *ctrl, struct dimm_size sz,
map |= (sz.side1 - (25 + 3)) << (index *4);
#if CONFIG_QRANK_DIMM_SUPPORT
if (sz.rank == 4) {
- map |= (sz.side1 - (25 + 3)) << ( (index + 2) * 4);
+ map |= (sz.side1 - (25 + 3)) << ((index + 2) * 4);
}
#endif
}
@@ -779,7 +779,7 @@ static void set_dimm_map(const struct mem_controller *ctrl, struct dimm_size sz,
map |= cs_map_aa[(sz.rows - 12) * 5 + (sz.col - 8) ] << (index*4);
#if CONFIG_QRANK_DIMM_SUPPORT
if (sz.rank == 4) {
- map |= cs_map_aa[(sz.rows - 12) * 5 + (sz.col - 8) ] << ( (index + 2) * 4);
+ map |= cs_map_aa[(sz.rows - 12) * 5 + (sz.col - 8) ] << ((index + 2) * 4);
}
#endif
}
@@ -824,7 +824,7 @@ static void route_dram_accesses(const struct mem_controller *ctrl,
limit = (limit_k << 2);
limit &= 0xffff0000;
limit -= 0x00010000;
- limit |= ( 0 << 8) | (node_id << 0);
+ limit |= (0 << 8) | (node_id << 0);
base = (base_k << 2);
base &= 0xffff0000;
base |= (0 << 8) | (1<<1) | (1<<0);
@@ -940,8 +940,8 @@ static unsigned long interleave_chip_selects(const struct mem_controller *ctrl)
}
value = pci_read_config32(ctrl->f2, DRAM_BANK_ADDR_MAP);
- cs_mode =( value >> ((index>>1)*4)) & 0xf;
- if (cs_mode == 0 ) continue;
+ cs_mode =(value >> ((index>>1)*4)) & 0xf;
+ if (cs_mode == 0) continue;
if (common_cs_mode == 0) {
common_cs_mode = cs_mode;
}
@@ -960,7 +960,7 @@ static unsigned long interleave_chip_selects(const struct mem_controller *ctrl)
}
/* Find the bits of csbase that we need to interleave on */
- if (is_cpu_pre_d0()){
+ if (is_cpu_pre_d0()) {
csbase_inc = 1 << csbase_low_shift[common_cs_mode];
if (is_dual_channel(ctrl)) {
/* Also we run out of address mask bits if we try and interleave 8 4GB dimms */
@@ -974,8 +974,8 @@ static unsigned long interleave_chip_selects(const struct mem_controller *ctrl)
else {
csbase_inc = 1 << csbase_low_d0_shift[common_cs_mode];
if (is_dual_channel(ctrl)) {
- if ( (bits==3) && (common_cs_mode > 8)) {
-// printk(BIOS_DEBUG, "8 cs_mode>8 chip selects cannot be interleaved\n");
+ if ((bits == 3) && (common_cs_mode > 8)) {
+// printk(BIOS_DEBUG, "8 cs_mode > 8 chip selects cannot be interleaved\n");
return 0;
}
csbase_inc <<=1;
@@ -1223,7 +1223,7 @@ static long spd_enable_2channels(const struct mem_controller *ctrl, long dimm_ma
5, /* *Physical Banks */
6, /* *Module Data Width low */
7, /* *Module Data Width high */
- 9, /* *Cycle time at highest CAS Latency CL=X */
+ 9, /* *Cycle time at highest CAS Latency CL = X */
11, /* *SDRAM Type */
13, /* *SDRAM Width */
17, /* *Logical Banks */
@@ -1390,7 +1390,7 @@ static int spd_dimm_loading_socket(const struct mem_controller *ctrl, long dimm_
/*
Following table comes directly from BKDG (unbuffered DIMM support)
- [Y][X] Y = ch0_0, ch1_0, ch0_1, ch1_1 1=present 0=empty
+ [Y][X] Y = ch0_0, ch1_0, ch0_1, ch1_1 1 = present 0 = empty
X uses same layout but 1 means double rank 0 is single rank/empty
Following tables come from BKDG the ch{0_0,1_0,0_1,1_1} maps to
@@ -1674,7 +1674,7 @@ static struct spd_set_memclk_result spd_set_memclk(const struct mem_controller *
#if 0
/* Improves DQS centering by correcting for case when core speed multiplier and MEMCLK speed result in odd clock divisor, by selecting the next lowest memory speed, required only at DDR400 and higher speeds with certain DIMM loadings ---- cheating???*/
if (!is_cpu_pre_e0()) {
- if (min_cycle_time==0x50) {
+ if (min_cycle_time == 0x50) {
value |= 1<<31;
}
}
@@ -1927,7 +1927,7 @@ static int update_dimm_x4(const struct mem_controller *ctrl, const struct mem_pa
dimm = 1<<(DCL_x4DIMM_SHIFT+i);
#if CONFIG_QRANK_DIMM_SUPPORT
- if (rank==4) {
+ if (rank == 4) {
dimm |= 1<<(DCL_x4DIMM_SHIFT+i+2);
}
#endif
@@ -2239,7 +2239,7 @@ static uint32_t hoist_memory(int controllers, const struct mem_controller *ctrl,
carry_over = (4*1024*1024) - hole_startk;
- for (ii=controllers - 1;ii>i;ii--) {
+ for (ii = controllers - 1; ii > i; ii--) {
base = pci_read_config32(ctrl[0].f1, 0x40 + (ii << 3));
if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
continue;
@@ -2294,7 +2294,7 @@ static void set_hw_mem_hole(int controllers, const struct mem_controller *ctrl)
* we need to decrease it.
*/
uint32_t basek_pri;
- for (i=0; i<controllers; i++) {
+ for (i = 0; i < controllers; i++) {
uint32_t base;
unsigned base_k;
base = pci_read_config32(ctrl[0].f1, 0x40 + (i << 3));
@@ -2315,7 +2315,7 @@ static void set_hw_mem_hole(int controllers, const struct mem_controller *ctrl)
printk(BIOS_SPEW, "Handling memory hole at 0x%08x (adjusted)\n", hole_startk);
#endif
/* Find node number that needs the memory hole configured */
- for (i=0; i<controllers; i++) {
+ for (i = 0; i < controllers; i++) {
uint32_t base, limit;
unsigned base_k, limit_k;
base = pci_read_config32(ctrl[0].f1, 0x40 + (i << 3));
@@ -2480,7 +2480,7 @@ static void sdram_enable(int controllers, const struct mem_controller *ctrl)
2. check cs_base lo is 0, node 0 f2 0x40,,,,, if any one is not using lo is CS_BASE, get out
3. check if other node is the same as node 0 about f2 0x40,,,,, otherwise get out
4. if all ready enable node_interleaving in f1 0x40..... of every node
- 5. for node interleaving we need to set mem hole to every node ( need recalcute hole offset in f0 for every node)
+ 5. for node interleaving we need to set mem hole to every node (need recalcute hole offset in f0 for every node)
*/
}
@@ -2495,7 +2495,7 @@ void fill_mem_ctrl(int controllers, struct mem_controller *ctrl_a,
int i;
int j;
struct mem_controller *ctrl;
- for (i=0;i<controllers; i++) {
+ for (i = 0; i < controllers; i++) {
ctrl = &ctrl_a[i];
ctrl->node_id = i;
ctrl->f0 = PCI_DEV(0, 0x18+i, 0);
@@ -2505,7 +2505,7 @@ void fill_mem_ctrl(int controllers, struct mem_controller *ctrl_a,
if (spd_addr == (void *)0) continue;
- for (j=0;j<DIMM_SOCKETS;j++) {
+ for (j = 0; j < DIMM_SOCKETS; j++) {
ctrl->channel0[j] = spd_addr[(i*2+0)*DIMM_SOCKETS + j];
ctrl->channel1[j] = spd_addr[(i*2+1)*DIMM_SOCKETS + j];
}
diff --git a/src/northbridge/amd/amdk8/raminit_f.c b/src/northbridge/amd/amdk8/raminit_f.c
index 67f3433ae8..4ef621c96d 100644
--- a/src/northbridge/amd/amdk8/raminit_f.c
+++ b/src/northbridge/amd/amdk8/raminit_f.c
@@ -222,7 +222,7 @@ static void sdram_set_registers(const struct mem_controller *ctrl, struct sys_in
/* DRAM Control Register
* F2:0x78
- * [ 3: 0] RdPtrInit ( Read Pointer Initial Value)
+ * [ 3: 0] RdPtrInit (Read Pointer Initial Value)
* 0x03-0x00: reserved
* [ 6: 4] RdPadRcvFifoDly (Read Delay from Pad Receive FIFO)
* 000 = reserved
@@ -361,7 +361,7 @@ static void sdram_set_registers(const struct mem_controller *ctrl, struct sys_in
* 01 = 3 bus clocks
* 10 = 4 bus clocks
* 11 = 5 bus clocks
- * [31:24] MemClkDis ( Disable the MEMCLK outputs for DRAM channel A,
+ * [31:24] MemClkDis (Disable the MEMCLK outputs for DRAM channel A,
* BIOS should set it to reduce the power consumption)
* Bit F(1207) M2 Package S1g1 Package
* 0 N/A MA1_CLK1 N/A
@@ -400,22 +400,22 @@ static void sdram_set_registers(const struct mem_controller *ctrl, struct sys_in
* 10 = 2 bus clocks
* 11 = 3 bus clocks
* [13:12] Twrwr (Write to Write Timing)
- * 00 = 1 bus clocks ( 0 idle cycle on the bus)
- * 01 = 2 bus clocks ( 1 idle cycle on the bus)
- * 10 = 3 bus clocks ( 2 idle cycles on the bus)
+ * 00 = 1 bus clocks (0 idle cycle on the bus)
+ * 01 = 2 bus clocks (1 idle cycle on the bus)
+ * 10 = 3 bus clocks (2 idle cycles on the bus)
* 11 = Reserved
- * [15:14] Trdrd ( Read to Read Timing)
- * 00 = 2 bus clocks ( 1 idle cycle on the bus)
- * 01 = 3 bus clocks ( 2 idle cycles on the bus)
- * 10 = 4 bus clocks ( 3 idle cycles on the bus)
- * 11 = 5 bus clocks ( 4 idle cycles on the bus)
+ * [15:14] Trdrd (Read to Read Timing)
+ * 00 = 2 bus clocks (1 idle cycle on the bus)
+ * 01 = 3 bus clocks (2 idle cycles on the bus)
+ * 10 = 4 bus clocks (3 idle cycles on the bus)
+ * 11 = 5 bus clocks (4 idle cycles on the bus)
* [17:16] Tref (Refresh Rate)
* 00 = Undefined behavior
* 01 = Reserved
* 10 = Refresh interval of 7.8 microseconds
* 11 = Refresh interval of 3.9 microseconds
* [19:18] Reserved
- * [22:20] Trfc0 ( Auto-Refresh Row Cycle Time for the Logical DIMM0,
+ * [22:20] Trfc0 (Auto-Refresh Row Cycle Time for the Logical DIMM0,
* based on DRAM density and speed)
* 000 = 75 ns (all speeds, 256Mbit)
* 001 = 105 ns (all speeds, 512Mbit)
@@ -425,11 +425,11 @@ static void sdram_set_registers(const struct mem_controller *ctrl, struct sys_in
* 101 = reserved
* 110 = reserved
* 111 = reserved
- * [25:23] Trfc1 ( Auto-Refresh Row Cycle Time for the Logical DIMM1,
+ * [25:23] Trfc1 (Auto-Refresh Row Cycle Time for the Logical DIMM1,
* based on DRAM density and speed)
- * [28:26] Trfc2 ( Auto-Refresh Row Cycle Time for the Logical DIMM2,
+ * [28:26] Trfc2 (Auto-Refresh Row Cycle Time for the Logical DIMM2,
* based on DRAM density and speed)
- * [31:29] Trfc3 ( Auto-Refresh Row Cycle Time for the Logical DIMM3,
+ * [31:29] Trfc3 (Auto-Refresh Row Cycle Time for the Logical DIMM3,
* based on DRAM density and speed)
*/
PCI_ADDR(0, 0x18, 2, 0x8c), 0x000c008f, (2 << 16)|(1 << 8),
@@ -438,7 +438,7 @@ static void sdram_set_registers(const struct mem_controller *ctrl, struct sys_in
* [ 0: 0] InitDram (Initialize DRAM)
* 1 = write 1 cause DRAM controller to execute the DRAM
* initialization, when done it read to 0
- * [ 1: 1] ExitSelfRef ( Exit Self Refresh Command )
+ * [ 1: 1] ExitSelfRef (Exit Self Refresh Command)
* 1 = write 1 causes the DRAM controller to bring the DRAMs out
* for self refresh mode
* [ 3: 2] Reserved
@@ -448,19 +448,19 @@ static void sdram_set_registers(const struct mem_controller *ctrl, struct sys_in
* 10 = 150 ohms
* 11 = 50 ohms
* [ 6: 6] Reserved
- * [ 7: 7] DramDrvWeak ( DRAM Drivers Weak Mode)
+ * [ 7: 7] DramDrvWeak (DRAM Drivers Weak Mode)
* 0 = Normal drive strength mode.
* 1 = Weak drive strength mode
* [ 8: 8] ParEn (Parity Enable)
* 1 = Enable address parity computation output, PAR,
* and enables the parity error input, ERR
* [ 9: 9] SelfRefRateEn (Faster Self Refresh Rate Enable)
- * 1 = Enable high temperature ( two times normal )
+ * 1 = Enable high temperature (two times normal)
* self refresh rate
- * [10:10] BurstLength32 ( DRAM Burst Length Set for 32 Bytes)
+ * [10:10] BurstLength32 (DRAM Burst Length Set for 32 Bytes)
* 0 = 64-byte mode
* 1 = 32-byte mode
- * [11:11] Width128 ( Width of DRAM interface)
+ * [11:11] Width128 (Width of DRAM interface)
* 0 = the controller DRAM interface is 64-bits wide
* 1 = the controller DRAM interface is 128-bits wide
* [12:12] X4Dimm (DIMM 0 is x4)
@@ -469,19 +469,19 @@ static void sdram_set_registers(const struct mem_controller *ctrl, struct sys_in
* [15:15] X4Dimm (DIMM 3 is x4)
* 0 = DIMM is not x4
* 1 = x4 DIMM present
- * [16:16] UnBuffDimm ( Unbuffered DIMMs)
+ * [16:16] UnBuffDimm (Unbuffered DIMMs)
* 0 = Buffered DIMMs
* 1 = Unbuffered DIMMs
* [18:17] Reserved
- * [19:19] DimmEccEn ( DIMM ECC Enable )
+ * [19:19] DimmEccEn (DIMM ECC Enable)
* 1 = ECC checking is being enabled for all DIMMs on the DRAM
- * controller ( Through F3 0x44[EccEn])
+ * controller (Through F3 0x44[EccEn])
* [31:20] Reserved
*/
PCI_ADDR(0, 0x18, 2, 0x90), 0xfff6004c, 0x00000010,
/* DRAM Config High Register
* F2:0x94
- * [ 0: 2] MemClkFreq ( Memory Clock Frequency)
+ * [ 0: 2] MemClkFreq (Memory Clock Frequency)
* 000 = 200MHz
* 001 = 266MHz
* 010 = 333MHz
@@ -490,25 +490,25 @@ static void sdram_set_registers(const struct mem_controller *ctrl, struct sys_in
* [ 3: 3] MemClkFreqVal (Memory Clock Frequency Valid)
* 1 = BIOS need to set the bit when setting up MemClkFreq to
* the proper value
- * [ 7: 4] MaxAsyncLat ( Maximum Asynchronous Latency)
+ * [ 7: 4] MaxAsyncLat (Maximum Asynchronous Latency)
* 0000 = 0 ns
* ...
* 1111 = 15 ns
* [11: 8] Reserved
- * [12:12] RDqsEn ( Read DQS Enable) This bit is only be set if x8
+ * [12:12] RDqsEn (Read DQS Enable) This bit is only be set if x8
* registered DIMMs are present in the system
* 0 = DM pins function as data mask pins
* 1 = DM pins function as read DQS pins
* [13:13] Reserved
- * [14:14] DisDramInterface ( Disable the DRAM interface ) When this bit
+ * [14:14] DisDramInterface (Disable the DRAM interface) When this bit
* is set, the DRAM controller is disabled, and interface in low power
* state
* 0 = Enabled (default)
* 1 = Disabled
- * [15:15] PowerDownEn ( Power Down Mode Enable )
+ * [15:15] PowerDownEn (Power Down Mode Enable)
* 0 = Disabled (default)
* 1 = Enabled
- * [16:16] PowerDown ( Power Down Mode )
+ * [16:16] PowerDown (Power Down Mode)
* 0 = Channel CKE Control
* 1 = Chip Select CKE Control
* [17:17] FourRankSODimm (Four Rank SO-DIMM)
@@ -526,17 +526,17 @@ static void sdram_set_registers(const struct mem_controller *ctrl, struct sys_in
* i.e., these signals are driven for two MEMCLK cycles
* rather than one
* [21:21] Reserved
- * [22:22] BankSwizzleMode ( Bank Swizzle Mode),
+ * [22:22] BankSwizzleMode (Bank Swizzle Mode),
* 0 = Disabled (default)
* 1 = Enabled
* [23:23] Reserved
- * [27:24] DcqBypassMax ( DRAM Controller Queue Bypass Maximum)
+ * [27:24] DcqBypassMax (DRAM Controller Queue Bypass Maximum)
* 0000 = No bypass; the oldest request is never bypassed
* 0001 = The oldest request may be bypassed no more than 1 time
* ...
* 1111 = The oldest request may be bypassed no more than 15\
* times
- * [31:28] FourActWindow ( Four Bank Activate Window) , not more than
+ * [31:28] FourActWindow (Four Bank Activate Window) , not more than
* 4 banks in a 8 bank device are activated
* 0000 = No tFAW window restriction
* 0001 = 8 MEMCLK cycles
@@ -552,11 +552,11 @@ static void sdram_set_registers(const struct mem_controller *ctrl, struct sys_in
* when set, this bit indicates that the memory clear function
* is complete. Only clear by reset. BIOS should not write or
* read the DRAM until this bit is set by hardware
- * [ 1: 1] DisableJitter ( Disable Jitter)
+ * [ 1: 1] DisableJitter (Disable Jitter)
* When set the DDR compensation circuit will not change the
* values unless the change is more than one step from the
* current value
- * [ 3: 2] RdWrQByp ( Read/Write Queue Bypass Count)
+ * [ 3: 2] RdWrQByp (Read/Write Queue Bypass Count)
* 00 = 2
* 01 = 4
* 10 = 8
@@ -565,11 +565,11 @@ static void sdram_set_registers(const struct mem_controller *ctrl, struct sys_in
* 1 When bit enables support for mismatched DIMMs when using
* 128-bit DRAM interface, the Width128 no effect, only for
* AM2 and s1g1
- * [ 5: 5] DCC_EN ( Dynamic Idle Cycle Counter Enable)
+ * [ 5: 5] DCC_EN (Dynamic Idle Cycle Counter Enable)
* When set to 1, indicates that each entry in the page tables
* dynamically adjusts the idle cycle limit based on page
* Conflict/Page Miss (PC/PM) traffic
- * [ 8: 6] ILD_lmt ( Idle Cycle Limit)
+ * [ 8: 6] ILD_lmt (Idle Cycle Limit)
* 000 = 0 cycles
* 001 = 4 cycles
* 010 = 8 cycles
@@ -578,16 +578,16 @@ static void sdram_set_registers(const struct mem_controller *ctrl, struct sys_in
* 101 = 64 cycles
* 110 = 128 cycles
* 111 = 256 cycles
- * [ 9: 9] DramEnabled ( DRAM Enabled)
+ * [ 9: 9] DramEnabled (DRAM Enabled)
* When Set, this bit indicates that the DRAM is enabled, this
* bit is set by hardware after DRAM initialization or on an exit
* from self refresh. The DRAM controller is initialized after the
- * hardware-controlled initialization process ( initiated by the
+ * hardware-controlled initialization process (initiated by the
* F2 0x90[DramInit]) completes or when the BIOS-controlled
 * initialization process completes (F2 0x7c[EnDramInit] is
* written from 1 to 0)
* [23:10] Reserved
- * [31:24] MemClkDis ( Disable the MEMCLK outputs for DRAM channel B,
+ * [31:24] MemClkDis (Disable the MEMCLK outputs for DRAM channel B,
* BIOS should set it to reduce the power consumption)
* Bit F(1207) M2 Package S1g1 Package
* 0 N/A MA1_CLK1 N/A
@@ -785,7 +785,7 @@ static void spd_get_dimm_size(unsigned device, struct dimm_size *sz)
 rank == 4 two sides, and four ranks in total
 Some are one side with two ranks, because of stacked devices
*/
- if ((value != 1) && (value != 2) && (value != 4 )) {
+ if ((value != 1) && (value != 2) && (value != 4)) {
goto val_err;
}
sz->rank = value;
@@ -797,9 +797,9 @@ static void spd_get_dimm_size(unsigned device, struct dimm_size *sz)
if (value < 0) goto hw_err;
value &= 0xff;
value = log2(value);
- if (value <=4 ) value += 8; // add back to 1G to high
+ if (value <= 4) value += 8; // encodings 0x01-0x10 wrap around: they mean 1GB and above
 value += (27-5); // convert to log2(bytes): encoding 0x20 is 128MB (2^27)
- if ( value != (sz->per_rank)) {
+ if (value != (sz->per_rank)) {
printk(BIOS_ERR, "Bad RANK Size --\n");
goto val_err;
}
@@ -836,12 +836,12 @@ static void set_dimm_size(const struct mem_controller *ctrl,
/* Make certain side1 of the dimm is at least 128MB */
if (sz->per_rank >= 27) {
- base0 = (1 << ((sz->per_rank - 27 ) + 19)) | 1;
+ base0 = (1 << ((sz->per_rank - 27) + 19)) | 1;
}
/* Make certain side2 of the dimm is at least 128MB */
if (sz->rank > 1) { // 2 ranks or 4 ranks
- base1 = (1 << ((sz->per_rank - 27 ) + 19)) | 1;
+ base1 = (1 << ((sz->per_rank - 27) + 19)) | 1;
}
/* Double the size if we are using dual channel memory */
@@ -954,7 +954,7 @@ static void set_dimm_cs_map(const struct mem_controller *ctrl,
map &= ~(0xf << (index * 4));
#if CONFIG_QRANK_DIMM_SUPPORT
if (sz->rank == 4) {
- map &= ~(0xf << ( (index + 2) * 4));
+ map &= ~(0xf << ((index + 2) * 4));
}
#endif
@@ -965,7 +965,7 @@ static void set_dimm_cs_map(const struct mem_controller *ctrl,
map |= temp_map << (index*4);
#if CONFIG_QRANK_DIMM_SUPPORT
if (sz->rank == 4) {
- map |= temp_map << ( (index + 2) * 4);
+ map |= temp_map << ((index + 2) * 4);
}
#endif
}
@@ -1018,7 +1018,7 @@ static void route_dram_accesses(const struct mem_controller *ctrl,
limit = (limit_k << 2);
limit &= 0xffff0000;
limit -= 0x00010000;
- limit |= ( 0 << 8) | (node_id << 0);
+ limit |= (0 << 8) | (node_id << 0);
base = (base_k << 2);
base &= 0xffff0000;
base |= (0 << 8) | (1<<1) | (1<<0);
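
As an aside, the base/limit arithmetic in the route_dram_accesses() hunk above follows a fixed encoding: addresses are kept in KB, the `<< 2` plus the 0xffff0000 mask leaves address[39:24] in register bits [31:16], the base word carries the read/write enables in bits [1:0], and the limit word carries the destination node in its low bits. A minimal sketch of that encoding (the helper name and prototype are hypothetical, not part of this patch):

#include <stdint.h>

static void encode_dram_range(uint32_t base_k, uint32_t limit_k,
                              unsigned node_id, uint32_t *base, uint32_t *limit)
{
	/* keep address[39:24] in bits [31:16]; the limit register is inclusive */
	*limit = ((limit_k << 2) & 0xffff0000) - 0x00010000;
	*limit |= (0 << 8) | node_id;            /* no interleave, DstNode = node_id */

	*base = (base_k << 2) & 0xffff0000;
	*base |= (0 << 8) | (1 << 1) | (1 << 0); /* WE | RE */
}
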
@@ -1136,7 +1136,7 @@ static unsigned long interleave_chip_selects(const struct mem_controller *ctrl,
}
value = pci_read_config32(ctrl->f2, DRAM_BANK_ADDR_MAP);
- cs_mode =( value >> ((index>>1)*4)) & 0xf;
+ cs_mode = (value >> ((index>>1)*4)) & 0xf;
if (common_cs_mode == 0xff) {
common_cs_mode = cs_mode;
}
@@ -1351,8 +1351,8 @@ static long spd_handle_unbuffered_dimms(const struct mem_controller *ctrl,
if (is_opteron(ctrl)) {
#if 0
- if ( registered != (meminfo->dimm_mask & ((1<<DIMM_SOCKETS)-1)) ) {
- meminfo->dimm_mask &= (registered | (registered << DIMM_SOCKETS) ); //disable unbuffered dimm
+ if (registered != (meminfo->dimm_mask & ((1 << DIMM_SOCKETS)-1))) {
+ meminfo->dimm_mask &= (registered | (registered << DIMM_SOCKETS)); //disable unbuffered dimm
// die("Mixed buffered and registered dimms not supported");
}
 //By yhlu for debug: M2 and s1g1 can do dual channel, but they use unbuffered DIMMs
@@ -1647,7 +1647,7 @@ static uint8_t get_exact_divisor(int i, uint8_t divisor)
/* Check for FID control support */
struct cpuid_result cpuid1;
cpuid1 = cpuid(0x80000007);
- if ( cpuid1.edx & 0x02 ) {
+ if (cpuid1.edx & 0x02) {
/* Use current FID */
unsigned fid_cur;
msr = rdmsr(0xc0010042);
@@ -1663,9 +1663,9 @@ static uint8_t get_exact_divisor(int i, uint8_t divisor)
index = fid_start>>25;
}
- if (index>12) return divisor;
+ if (index > 12) return divisor;
- if (i>3) return divisor;
+ if (i > 3) return divisor;
return dv_a[index * 4+i];
@@ -1748,7 +1748,7 @@ static int find_optimum_spd_latency(u32 spd_device, unsigned *min_latency, unsig
printk_raminit("\t\tvalue2: %08x\n", value);
/* Only increase the latency if we decrease the clock */
- if (value >= *min_cycle_time ) {
+ if (value >= *min_cycle_time) {
if (value < new_cycle_time) {
new_cycle_time = value;
new_latency = latency;
@@ -1763,7 +1763,7 @@ static int find_optimum_spd_latency(u32 spd_device, unsigned *min_latency, unsig
}
- if (new_latency > 6){
+ if (new_latency > 6) {
return 1;
}
@@ -2052,7 +2052,7 @@ static int update_dimm_Trfc(const struct mem_controller *ctrl, const struct mem_
static int update_dimm_TT_1_4(const struct mem_controller *ctrl, const struct mem_param *param, int i, long dimm_mask,
unsigned TT_REG,
- unsigned SPD_TT, unsigned TT_SHIFT, unsigned TT_MASK, unsigned TT_BASE, unsigned TT_MIN, unsigned TT_MAX )
+ unsigned SPD_TT, unsigned TT_SHIFT, unsigned TT_MASK, unsigned TT_BASE, unsigned TT_MIN, unsigned TT_MAX)
{
unsigned clocks, old_clocks;
uint32_t dtl;
@@ -2181,7 +2181,7 @@ static int update_dimm_Tref(const struct mem_controller *ctrl,
value = spd_read_byte(spd_device, SPD_TREF); // 0: 15.625us, 1: 3.9us 2: 7.8 us....
if (value < 0) return -1;
- if (value == 1 ) {
+ if (value == 1) {
value = 3;
} else {
value = 2;
@@ -2267,7 +2267,7 @@ static uint32_t get_extra_dimm_mask(const struct mem_controller *ctrl,
mask_single_rank |= 1<<i;
}
- if (meminfo->sz[i].col==10) {
+ if (meminfo->sz[i].col == 10) {
mask_page_1k |= 1<<i;
}
@@ -2278,17 +2278,17 @@ static uint32_t get_extra_dimm_mask(const struct mem_controller *ctrl,
rank = meminfo->sz[i].rank;
#endif
- if (value==4) {
+ if (value == 4) {
mask_x4 |= (1<<i);
#if CONFIG_QRANK_DIMM_SUPPORT
- if (rank==4) {
+ if (rank == 4) {
mask_x4 |= 1<<(i+2);
}
#endif
- } else if (value==16) {
+ } else if (value == 16) {
mask_x16 |= (1<<i);
#if CONFIG_QRANK_DIMM_SUPPORT
- if (rank==4) {
+ if (rank == 4) {
mask_x16 |= 1<<(i+2);
}
#endif
@@ -2340,7 +2340,7 @@ static void set_DramTerm(const struct mem_controller *ctrl,
if (param->divisor == 100) { //DDR2 800
if (meminfo->is_Width128) {
- if (count_ones(meminfo->dimm_mask & 0x0f)==2) {
+ if (count_ones(meminfo->dimm_mask & 0x0f) == 2) {
odt = 3; //50 ohms
}
}
@@ -2503,7 +2503,7 @@ static void set_max_async_latency(const struct mem_controller *ctrl, const struc
pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
}
-#if (CONFIG_DIMM_SUPPORT & 0x0100)==0x0000 /* 2T mode only used for unbuffered DIMM */
+#if (CONFIG_DIMM_SUPPORT & 0x0100) == 0x0000 /* 2T mode only used for unbuffered DIMM */
static void set_SlowAccessMode(const struct mem_controller *ctrl)
{
uint32_t dch;
@@ -2524,23 +2524,23 @@ static void set_misc_timing(const struct mem_controller *ctrl, struct mem_info *
{
uint32_t dword;
uint32_t dwordx;
-#if (CONFIG_DIMM_SUPPORT & 0x0100)==0x0000 /* 2T mode only used for unbuffered DIMM */
+#if (CONFIG_DIMM_SUPPORT & 0x0100) == 0x0000 /* 2T mode only used for unbuffered DIMM */
unsigned SlowAccessMode = 0;
#endif
-#if CONFIG_DIMM_SUPPORT==0x0104 /* DDR2 and REG */
+#if CONFIG_DIMM_SUPPORT == 0x0104 /* DDR2 and REG */
long dimm_mask = meminfo->dimm_mask & 0x0f;
/* for REG DIMM */
dword = 0x00111222;
dwordx = 0x002f0000;
switch (meminfo->memclk_set) {
case DCH_MemClkFreq_266MHz:
- if ( (dimm_mask == 0x03) || (dimm_mask == 0x02) || (dimm_mask == 0x01)) {
+ if ((dimm_mask == 0x03) || (dimm_mask == 0x02) || (dimm_mask == 0x01)) {
dwordx = 0x002f2700;
}
break;
case DCH_MemClkFreq_333MHz:
- if ( (dimm_mask == 0x03) || (dimm_mask == 0x02) || (dimm_mask == 0x01)) {
+ if ((dimm_mask == 0x03) || (dimm_mask == 0x02) || (dimm_mask == 0x01)) {
if ((meminfo->single_rank_mask & 0x03)!=0x03) { //any double rank there?
dwordx = 0x002f2f00;
}
@@ -2553,7 +2553,7 @@ static void set_misc_timing(const struct mem_controller *ctrl, struct mem_info *
#endif
-#if CONFIG_DIMM_SUPPORT==0x0204 /* DDR2 and SO-DIMM, S1G1 */
+#if CONFIG_DIMM_SUPPORT == 0x0204 /* DDR2 and SO-DIMM, S1G1 */
dword = 0x00111222;
dwordx = 0x002F2F00;
@@ -2593,7 +2593,7 @@ static void set_misc_timing(const struct mem_controller *ctrl, struct mem_info *
}
#endif
-#if CONFIG_DIMM_SUPPORT==0x0004 /* DDR2 and unbuffered */
+#if CONFIG_DIMM_SUPPORT == 0x0004 /* DDR2 and unbuffered */
long dimm_mask = meminfo->dimm_mask & 0x0f;
/* for UNBUF DIMM */
dword = 0x00111222;
@@ -2609,7 +2609,7 @@ static void set_misc_timing(const struct mem_controller *ctrl, struct mem_info *
if (dimm_mask == 0x03) {
SlowAccessMode = 1;
dword = 0x00111322;
- if ((meminfo->x4_mask == 0 ) && (meminfo->x16_mask == 0)) {
+ if ((meminfo->x4_mask == 0) && (meminfo->x16_mask == 0)) {
switch (meminfo->single_rank_mask) {
case 0x03:
dwordx = 0x00002f00; //x8 single Rank
@@ -2620,14 +2620,14 @@ static void set_misc_timing(const struct mem_controller *ctrl, struct mem_info *
default:
dwordx = 0x00372f00; //x8 single Rank and double Rank mixed
}
- } else if ((meminfo->x4_mask == 0 ) && (meminfo->x16_mask == 0x01) && (meminfo->single_rank_mask == 0x01)) {
+ } else if ((meminfo->x4_mask == 0) && (meminfo->x16_mask == 0x01) && (meminfo->single_rank_mask == 0x01)) {
dwordx = 0x00382f00; //x8 Double Rank and x16 single Rank mixed
- } else if ((meminfo->x4_mask == 0 ) && (meminfo->x16_mask == 0x02) && (meminfo->single_rank_mask == 0x02)) {
+ } else if ((meminfo->x4_mask == 0) && (meminfo->x16_mask == 0x02) && (meminfo->single_rank_mask == 0x02)) {
dwordx = 0x00382f00; //x16 single Rank and x8 double Rank mixed
}
} else {
- if ((meminfo->x4_mask == 0 ) && (meminfo->x16_mask == 0x00) && ((meminfo->single_rank_mask == 0x01)||(meminfo->single_rank_mask == 0x02))) { //x8 single rank
+ if ((meminfo->x4_mask == 0) && (meminfo->x16_mask == 0x00) && ((meminfo->single_rank_mask == 0x01)||(meminfo->single_rank_mask == 0x02))) { //x8 single rank
dwordx = 0x002f2f00;
} else {
dwordx = 0x002b2f00;
@@ -2639,7 +2639,7 @@ static void set_misc_timing(const struct mem_controller *ctrl, struct mem_info *
if (dimm_mask == 0x03) {
SlowAccessMode = 1;
dword = 0x00111322;
- if ((meminfo->x4_mask == 0 ) && (meminfo->x16_mask == 0)) {
+ if ((meminfo->x4_mask == 0) && (meminfo->x16_mask == 0)) {
switch (meminfo->single_rank_mask) {
case 0x03:
dwordx = 0x00302220; //x8 single Rank
@@ -2676,7 +2676,7 @@ static void set_misc_timing(const struct mem_controller *ctrl, struct mem_info *
printk_raminit("\tAddr Timing= %08x\n", dwordx);
#endif
-#if (CONFIG_DIMM_SUPPORT & 0x0100)==0x0000 /* 2T mode only used for unbuffered DIMM */
+#if (CONFIG_DIMM_SUPPORT & 0x0100) == 0x0000 /* 2T mode only used for unbuffered DIMM */
if (SlowAccessMode) {
set_SlowAccessMode(ctrl);
}
@@ -2707,7 +2707,7 @@ static void set_misc_timing(const struct mem_controller *ctrl, struct mem_info *
static void set_RDqsEn(const struct mem_controller *ctrl,
const struct mem_param *param, struct mem_info *meminfo)
{
-#if CONFIG_CPU_SOCKET_TYPE==0x10
+#if CONFIG_CPU_SOCKET_TYPE == 0x10
//only need to set for reg and x8
uint32_t dch;
@@ -2749,7 +2749,7 @@ static long spd_set_dram_timing(const struct mem_controller *ctrl,
for (i = 0; i < DIMM_SOCKETS; i++) {
int rc;
if (!(meminfo->dimm_mask & (1 << i)) &&
- !(meminfo->dimm_mask & (1 << (DIMM_SOCKETS + i))) ) {
+ !(meminfo->dimm_mask & (1 << (DIMM_SOCKETS + i)))) {
continue;
}
printk_raminit("spd_set_dram_timing dimm socket: %08x\n", i);
@@ -2911,17 +2911,17 @@ static uint32_t hoist_memory(int controllers, const struct mem_controller *ctrl,
carry_over = (4*1024*1024) - hole_startk;
- for (ii=controllers - 1;ii>i;ii--) {
+ for (ii = controllers - 1; ii > i; ii--) {
base = pci_read_config32(ctrl[0].f1, 0x40 + (ii << 3));
if ((base & ((1<<1)|(1<<0))) != ((1<<1)|(1<<0))) {
continue;
}
limit = pci_read_config32(ctrl[0].f1, 0x44 + (ii << 3));
- limit += (carry_over << 2 );
- base += (carry_over << 2 );
+ limit += (carry_over << 2);
+ base += (carry_over << 2);
for (j = 0; j < controllers; j++) {
pci_write_config32(ctrl[j].f1, 0x44 + (ii << 3), limit);
- pci_write_config32(ctrl[j].f1, 0x40 + (ii << 3), base );
+ pci_write_config32(ctrl[j].f1, 0x40 + (ii << 3), base);
}
}
limit = pci_read_config32(ctrl[0].f1, 0x44 + (i << 3));
@@ -2966,7 +2966,7 @@ static void set_hw_mem_hole(int controllers, const struct mem_controller *ctrl)
 /* We need to double check that hole_startk is valid; if it is equal
 to basek, we need to decrease it a little */
uint32_t basek_pri;
- for (i=0; i<controllers; i++) {
+ for (i = 0; i < controllers; i++) {
uint32_t base;
unsigned base_k;
base = pci_read_config32(ctrl[0].f1, 0x40 + (i << 3));
@@ -2985,7 +2985,7 @@ static void set_hw_mem_hole(int controllers, const struct mem_controller *ctrl)
printk_raminit("Handling memory hole at 0x%08x (adjusted)\n", hole_startk);
#endif
 /* find the node indexes that need the hole set */
- for (i=0; i < controllers; i++) {
+ for (i = 0; i < controllers; i++) {
uint32_t base, limit;
unsigned base_k, limit_k;
base = pci_read_config32(ctrl[0].f1, 0x40 + (i << 3));
@@ -3038,7 +3038,7 @@ static void sdram_enable(int controllers, const struct mem_controller *ctrl,
dch = pci_read_config32(ctrl[i].f2, DRAM_CONFIG_HIGH);
 /* if no memory installed, disable the interface */
- if (sysinfo->meminfo[i].dimm_mask==0x00){
+ if (sysinfo->meminfo[i].dimm_mask == 0x00) {
dch |= DCH_DisDramInterface;
pci_write_config32(ctrl[i].f2, DRAM_CONFIG_HIGH, dch);
@@ -3046,7 +3046,7 @@ static void sdram_enable(int controllers, const struct mem_controller *ctrl,
dch |= DCH_MemClkFreqVal;
pci_write_config32(ctrl[i].f2, DRAM_CONFIG_HIGH, dch);
/* address timing and Output driver comp Control */
- set_misc_timing(ctrl+i, sysinfo->meminfo+i );
+ set_misc_timing(ctrl+i, sysinfo->meminfo+i);
}
}
@@ -3108,7 +3108,7 @@ static void sdram_enable(int controllers, const struct mem_controller *ctrl,
if (!sysinfo->ctrl_present[ i ])
continue;
/* Skip everything if I don't have any memory on this controller */
- if (sysinfo->meminfo[i].dimm_mask==0x00) continue;
+ if (sysinfo->meminfo[i].dimm_mask == 0x00) continue;
printk(BIOS_DEBUG, "Initializing memory: ");
int loops = 0;
@@ -3127,7 +3127,7 @@ static void sdram_enable(int controllers, const struct mem_controller *ctrl,
/* Wait until it is safe to touch memory */
do {
dcm = pci_read_config32(ctrl[i].f2, DRAM_CTRL_MISC);
- } while (((dcm & DCM_MemClrStatus) == 0) /* || ((dcm & DCM_DramEnabled) == 0)*/ );
+ } while (((dcm & DCM_MemClrStatus) == 0) /* || ((dcm & DCM_DramEnabled) == 0)*/);
#if K8_REV_F_SUPPORT_F0_F1_WORKAROUND == 1
if (cpu_f0_f1[i]) {
@@ -3136,7 +3136,7 @@ static void sdram_enable(int controllers, const struct mem_controller *ctrl,
print_debug_dqs_tsc("\nbegin tsc0", i, tsc0[i].hi, tsc0[i].lo, 2);
print_debug_dqs_tsc("end tsc ", i, tsc.hi, tsc.lo, 2);
- if (tsc.lo<tsc0[i].lo) {
+ if (tsc.lo < tsc0[i].lo) {
tsc.hi--;
}
tsc.lo -= tsc0[i].lo;
@@ -3176,7 +3176,7 @@ static void sdram_enable(int controllers, const struct mem_controller *ctrl,
continue;
/* Skip everything if I don't have any memory on this controller */
- if (sysinfo->meminfo[i].dimm_mask==0x00)
+ if (sysinfo->meminfo[i].dimm_mask == 0x00)
continue;
sysinfo->mem_trained[i] = 0x80; // mem need to be trained
@@ -3226,7 +3226,7 @@ void fill_mem_ctrl(int controllers, struct mem_controller *ctrl_a,
int i;
int j;
struct mem_controller *ctrl;
- for (i=0;i<controllers; i++) {
+ for (i = 0; i < controllers; i++) {
ctrl = &ctrl_a[i];
ctrl->node_id = i;
ctrl->f0 = PCI_DEV(0, 0x18+i, 0);
@@ -3236,7 +3236,7 @@ void fill_mem_ctrl(int controllers, struct mem_controller *ctrl_a,
if (spd_addr == (void *)0) continue;
- for (j=0;j<DIMM_SOCKETS;j++) {
+ for (j = 0; j < DIMM_SOCKETS; j++) {
ctrl->channel0[j] = spd_addr[(i*2+0)*DIMM_SOCKETS + j];
ctrl->channel1[j] = spd_addr[(i*2+1)*DIMM_SOCKETS + j];
}
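
Most of the DRAM Config Low/High fields documented in the long comments above are programmed through the same read-modify-write idiom that sdram_enable() uses when it disables an unpopulated controller. A minimal sketch of that idiom, reusing the register and bit names already present in this file (the helper itself is hypothetical, not part of the patch):

/* Set bit [14:14] DisDramInterface for a controller with no DIMMs installed. */
static void disable_dram_interface(const struct mem_controller *ctrl)
{
	uint32_t dch;

	dch = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
	dch |= DCH_DisDramInterface;
	pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, dch);
}
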
diff --git a/src/northbridge/amd/amdk8/raminit_f_dqs.c b/src/northbridge/amd/amdk8/raminit_f_dqs.c
index 9f0b8dba0c..2d05951513 100644
--- a/src/northbridge/amd/amdk8/raminit_f_dqs.c
+++ b/src/northbridge/amd/amdk8/raminit_f_dqs.c
@@ -58,10 +58,10 @@ static void fill_mem_cs_sysinfo(unsigned nodeid, const struct mem_controller *ct
{
int i;
- sysinfo->mem_base[nodeid] = pci_read_config32(ctrl->f1, 0x40 + (nodeid<<3));
+ sysinfo->mem_base[nodeid] = pci_read_config32(ctrl->f1, 0x40 + (nodeid << 3));
- for (i=0;i<8; i++) {
- sysinfo->cs_base[nodeid*8+i] = pci_read_config32(ctrl->f2, 0x40 + (i<<2));
+ for (i = 0; i < 8; i++) {
+ sysinfo->cs_base[nodeid*8+i] = pci_read_config32(ctrl->f2, 0x40 + (i << 2));
}
sysinfo->hole_reg[nodeid] = pci_read_config32(ctrl->f1, 0xf0);
@@ -90,9 +90,9 @@ static unsigned Get_MCTSysAddr(const struct mem_controller *ctrl, unsigned cs_id
hole_reg = sysinfo->hole_reg[nodeid];
if (hole_reg & 1) {
unsigned hole_startk;
- hole_startk = (hole_reg & (0xff<<24)) >> 10;
- if ( (dword >= (hole_startk<<2)) && (dword < ((4*1024*1024)<<2))) {
- dword += ((4*1024*1024 - hole_startk)<<2);
+ hole_startk = (hole_reg & (0xff << 24)) >> 10;
+ if ((dword >= (hole_startk << 2)) && (dword < ((4*1024*1024) << 2))) {
+ dword += ((4*1024*1024 - hole_startk) << 2);
}
}
#endif
@@ -197,12 +197,12 @@ static void WriteLNTestPattern(unsigned addr_lo, uint8_t *buf_a, unsigned line_n
static void Write1LTestPattern(unsigned addr, unsigned p, uint8_t *buf_a, uint8_t *buf_b)
{
uint8_t *buf;
- if (p==1) { buf = buf_b; }
+ if (p == 1) { buf = buf_b; }
else { buf = buf_a; }
- set_FSBASE (addr>>24);
+ set_FSBASE (addr >> 24);
- WriteLNTestPattern(addr<<8, buf, 1);
+ WriteLNTestPattern(addr << 8, buf, 1);
}
static void Read1LTestPattern(unsigned addr)
@@ -214,7 +214,7 @@ static void Read1LTestPattern(unsigned addr)
/* 1st move causes read fill (to exclusive or shared)*/
__asm__ volatile (
"movl %%fs:(%1), %0\n\t"
- :"=b"(value): "a" (addr<<8)
+ :"=b"(value): "a" (addr << 8)
);
}
@@ -243,7 +243,7 @@ static unsigned CompareTestPatternQW0(unsigned channel, unsigned addr, unsigned
unsigned result = DQS_FAIL;
if (Pass == DQS_FIRST_PASS) {
- if (pattern==1) {
+ if (pattern == 1) {
test_buf = (uint32_t *)TestPattern1;
}
else {
@@ -254,9 +254,9 @@ static unsigned CompareTestPatternQW0(unsigned channel, unsigned addr, unsigned
test_buf = (uint32_t *)TestPattern2;
}
- set_FSBASE(addr>>24);
+ set_FSBASE(addr >> 24);
- addr_lo = addr<<8;
+ addr_lo = addr << 8;
if (is_Width128 && (channel == 1)) {
addr_lo += 8; //second channel
@@ -285,13 +285,13 @@ static unsigned CompareTestPatternQW0(unsigned channel, unsigned addr, unsigned
print_debug_dqs_pair("\t\t\t\t\t\tQW0.hi : test_buf= ", (unsigned)test_buf, " value = ", value_test, 4);
print_debug_dqs_pair("\t\t\t\t\t\tQW0.hi : addr_lo = ", addr_lo, " value = ", value, 4);
- if (value == value_test){
+ if (value == value_test) {
result = DQS_PASS;
}
}
if (Pass == DQS_SECOND_PASS) { // second pass need to be inverted
- if (result==DQS_PASS) {
+ if (result == DQS_PASS) {
result = DQS_FAIL;
}
else {
@@ -314,7 +314,7 @@ static void SetMaxAL_RcvrDly(const struct mem_controller *ctrl, unsigned dly)
reg = pci_read_config32(ctrl->f2, DRAM_CONFIG_HIGH);
- reg &= ~(DCH_MaxAsyncLat_MASK <<DCH_MaxAsyncLat_SHIFT);
+ reg &= ~(DCH_MaxAsyncLat_MASK << DCH_MaxAsyncLat_SHIFT);
reg |= ((dly - DCH_MaxAsyncLat_BASE) << DCH_MaxAsyncLat_SHIFT);
pci_write_config32(ctrl->f2, DRAM_CONFIG_HIGH, reg);
@@ -327,8 +327,8 @@ static void SetMaxAL_RcvrDly(const struct mem_controller *ctrl, unsigned dly)
static void SetTargetWTIO(unsigned addr)
{
msr_t msr;
- msr.hi = addr>>24;
- msr.lo = addr<<8;
+ msr.hi = addr >> 24;
+ msr.lo = addr << 8;
wrmsr(0xc0010016, msr); //IORR0 BASE
msr.hi = 0xff;
@@ -354,7 +354,7 @@ static void proc_CLFLUSH(unsigned addr)
__asm__ volatile (
/* clflush fs:[eax] */
"clflush %%fs:(%0)\n\t"
- ::"a" (addr<<8)
+ ::"a" (addr << 8)
);
}
@@ -409,7 +409,7 @@ static uint16_t get_exact_T1000(unsigned i)
/* Check for FID control support */
struct cpuid_result cpuid1;
cpuid1 = cpuid(0x80000007);
- if ( cpuid1.edx & 0x02 ) {
+ if (cpuid1.edx & 0x02) {
/* Use current FID */
unsigned fid_cur;
msr = rdmsr(0xc0010042);
@@ -425,7 +425,7 @@ static uint16_t get_exact_T1000(unsigned i)
index = fid_start>>25;
}
- if (index>12) return T1000_a[i];
+ if (index > 12) return T1000_a[i];
return TT_a[index * 4+i];
@@ -437,14 +437,14 @@ static void InitDQSPos4RcvrEn(const struct mem_controller *ctrl)
uint32_t dword;
dword = 0x00000000;
- for (i=1; i<=3; i++) {
+ for (i = 1; i <= 3; i++) {
/* Program the DQS Write Timing Control Registers (Function 2:Offset 0x9c, index 0x01-0x03, 0x21-0x23) to 0x00 for all bytes */
pci_write_config32_index_wait(ctrl->f2, 0x98, i, dword);
pci_write_config32_index_wait(ctrl->f2, 0x98, i+0x20, dword);
}
dword = 0x2f2f2f2f;
- for (i=5; i<=7; i++) {
+ for (i = 5; i <= 7; i++) {
/* Program the DQS Write Timing Control Registers (Function 2:Offset 0x9c, index 0x05-0x07, 0x25-0x27) to 0x2f for all bytes */
pci_write_config32_index_wait(ctrl->f2, 0x98, i, dword);
pci_write_config32_index_wait(ctrl->f2, 0x98, i+0x20, dword);
@@ -554,14 +554,14 @@ static unsigned TrainRcvrEn(const struct mem_controller *ctrl, unsigned Pass, st
// SetupRcvrPattern
buf_a = (uint8_t *)(((uint32_t)(&pattern_buf_x[0]) + 0x10) & (0xfffffff0));
buf_b = buf_a + 128; //??
- if (Pass==DQS_FIRST_PASS) {
- for (i=0;i<16;i++) {
+ if (Pass == DQS_FIRST_PASS) {
+ for (i = 0; i < 16; i++) {
*((uint32_t *)(buf_a + i*4)) = TestPattern0[i];
*((uint32_t *)(buf_b + i*4)) = TestPattern1[i];
}
}
else {
- for (i=0;i<16;i++) {
+ for (i = 0; i < 16; i++) {
*((uint32_t *)(buf_a + i*4)) = TestPattern2[i];
*((uint32_t *)(buf_b + i*4)) = TestPattern2[i];
}
@@ -581,13 +581,13 @@ static unsigned TrainRcvrEn(const struct mem_controller *ctrl, unsigned Pass, st
channel = 1;
}
- for ( ; (channel < 2) && (!Errors); channel++)
+ for (; (channel < 2) && (!Errors); channel++)
{
print_debug_dqs("\tTrainRcvEn51: channel ",channel, 1);
/* for each rank */
/* there are four receiver pairs, loosely associated with CS */
- for ( receiver = 0; (receiver < 8) && (!Errors); receiver+=2)
+ for (receiver = 0; (receiver < 8) && (!Errors); receiver+=2)
{
unsigned index=(receiver>>1) * 3 + 0x10;
@@ -642,7 +642,7 @@ static unsigned TrainRcvrEn(const struct mem_controller *ctrl, unsigned Pass, st
RcvrEnDly = dqs_rcvr_dly_a[channel * 8 + receiver];
}
- while ( RcvrEnDly < 0xaf) { // Sweep Delay value here
+ while (RcvrEnDly < 0xaf) { // Sweep Delay value here
print_debug_dqs("\t\t\tTrainRcvEn541: RcvrEnDly ", RcvrEnDly, 3);
if (RcvrEnDly & 1) {
@@ -841,7 +841,7 @@ static unsigned TrainRcvrEn(const struct mem_controller *ctrl, unsigned Pass, st
printk(BIOS_DEBUG, " CTLRMaxDelay=%02x\n", CTLRMaxDelay);
#endif
- return (CTLRMaxDelay==0xae)?1:0;
+ return (CTLRMaxDelay == 0xae)?1:0;
}
@@ -859,14 +859,14 @@ static void SetDQSDelayCSR(const struct mem_controller *ctrl, unsigned channel,
index = (bytelane>>2) + 1 + channel * 0x20 + (direction << 2);
shift = bytelane;
- while (shift>3) {
+ while (shift > 3) {
shift-=4;
}
shift <<= 3; // 8 bit
dword = pci_read_config32_index_wait(ctrl->f2, 0x98, index);
- dword &= ~(0x3f<<shift);
- dword |= (dqs_delay<<shift);
+ dword &= ~(0x3f << shift);
+ dword |= (dqs_delay << shift);
pci_write_config32_index_wait(ctrl->f2, 0x98, index, dword);
}
@@ -879,13 +879,13 @@ static void SetDQSDelayAllCSR(const struct mem_controller *ctrl, unsigned channe
dword = 0;
dqs_delay &= 0xff;
- for (i=0;i<4;i++) {
- dword |= dqs_delay<<(i*8);
+ for (i = 0; i < 4; i++) {
+ dword |= dqs_delay << (i*8);
}
index = 1 + channel * 0x20 + direction * 4;
- for (i=0; i<2; i++) {
+ for (i = 0; i < 2; i++) {
pci_write_config32_index_wait(ctrl->f2, 0x98, index + i, dword);
}
@@ -898,7 +898,7 @@ static unsigned MiddleDQS(unsigned min_d, unsigned max_d)
if (size_d & 1) { //need round up
min_d++;
}
- return ( min_d + (size_d>>1));
+ return (min_d + (size_d>>1));
}
static inline void save_dqs_delay(unsigned channel, unsigned bytelane, unsigned direction, uint8_t *dqs_delay_a, uint8_t dqs_delay)
@@ -1026,10 +1026,10 @@ static __attribute__((noinline)) void FlushDQSTestPattern_L18(unsigned addr_lo)
);
}
-static void FlushDQSTestPattern(unsigned addr_lo, unsigned pattern )
+static void FlushDQSTestPattern(unsigned addr_lo, unsigned pattern)
{
- if (pattern == 0){
+ if (pattern == 0) {
FlushDQSTestPattern_L9(addr_lo);
}
else {
@@ -1056,7 +1056,7 @@ static unsigned CompareDQSTestPattern(unsigned channel, unsigned addr_lo, unsign
}
bytelane = 0;
- for (i=0;i<9*64/4;i++) {
+ for (i = 0; i < 9*64/4; i++) {
__asm__ volatile (
"movl %%fs:(%1), %0\n\t"
:"=b"(value): "a" (addr_lo)
@@ -1066,9 +1066,9 @@ static unsigned CompareDQSTestPattern(unsigned channel, unsigned addr_lo, unsign
print_debug_dqs_pair("\t\t\t\t\t\ttest_buf= ", (unsigned)test_buf, " value = ", value_test, 7);
print_debug_dqs_pair("\t\t\t\t\t\ttaddr_lo = ",addr_lo, " value = ", value, 7);
- for (j=0;j<4*8;j+=8) {
+ for (j = 0; j < 4*8; j+=8) {
if (((value>>j)&0xff) != ((value_test>>j)& 0xff)) {
- bitmap &= ~(1<<bytelane);
+ bitmap &= ~(1 << bytelane);
}
bytelane++;
@@ -1116,7 +1116,7 @@ static unsigned TrainDQSPos(const struct mem_controller *ctrl, unsigned channel,
printk(BIOS_DEBUG, "TrainDQSPos: MutualCSPassW[48] :%p\n", MutualCSPassW);
- for (DQSDelay=0; DQSDelay<48; DQSDelay++) {
+ for (DQSDelay = 0; DQSDelay < 48; DQSDelay++) {
MutualCSPassW[DQSDelay] = 0xff; // Bitmapped status per delay setting, 0xff=All positions passing (1= PASS)
}
@@ -1135,25 +1135,25 @@ static unsigned TrainDQSPos(const struct mem_controller *ctrl, unsigned channel,
if (Direction == DQS_READDIR) {
print_debug_dqs("\t\t\t\tTrainDQSPos: 13 for read so write at first", 0, 4);
- WriteDQSTestPattern(TestAddr<<8, Pattern, buf_a);
+ WriteDQSTestPattern(TestAddr << 8, Pattern, buf_a);
}
- for (DQSDelay = 0; DQSDelay < 48; DQSDelay++ ){
+ for (DQSDelay = 0; DQSDelay < 48; DQSDelay++) {
print_debug_dqs("\t\t\t\t\tTrainDQSPos: 141 DQSDelay ", DQSDelay, 5);
if (MutualCSPassW[DQSDelay] == 0) continue; //skip current delay value if other chipselects have failed all 8 bytelanes
SetDQSDelayAllCSR(ctrl, channel, Direction, DQSDelay);
print_debug_dqs("\t\t\t\t\tTrainDQSPos: 142 MutualCSPassW ", MutualCSPassW[DQSDelay], 5);
if (Direction == DQS_WRITEDIR) {
print_debug_dqs("\t\t\t\t\tTrainDQSPos: 143 for write", 0, 5);
- WriteDQSTestPattern(TestAddr<<8, Pattern, buf_a);
+ WriteDQSTestPattern(TestAddr << 8, Pattern, buf_a);
}
print_debug_dqs("\t\t\t\t\tTrainDQSPos: 144 Pattern ", Pattern, 5);
- ReadDQSTestPattern(TestAddr<<8, Pattern);
+ ReadDQSTestPattern(TestAddr << 8, Pattern);
print_debug_dqs("\t\t\t\t\tTrainDQSPos: 145 MutualCSPassW ", MutualCSPassW[DQSDelay], 5);
- MutualCSPassW[DQSDelay] &= CompareDQSTestPattern(channel, TestAddr<<8, Pattern, buf_a); //0: fail, 1=pass
+ MutualCSPassW[DQSDelay] &= CompareDQSTestPattern(channel, TestAddr << 8, Pattern, buf_a); //0: fail, 1=pass
print_debug_dqs("\t\t\t\t\tTrainDQSPos: 146 MutualCSPassW ", MutualCSPassW[DQSDelay], 5);
SetTargetWTIO(TestAddr);
- FlushDQSTestPattern(TestAddr<<8, Pattern);
+ FlushDQSTestPattern(TestAddr << 8, Pattern);
ResetTargetWTIO();
}
}
@@ -1166,8 +1166,8 @@ static unsigned TrainDQSPos(const struct mem_controller *ctrl, unsigned channel,
RnkDlySeqPassMax = 0;
RnkDlyFilterMax = 0;
RnkDlyFilterMin = 0;
- for (DQSDelay=0; DQSDelay<48; DQSDelay++) {
- if (MutualCSPassW[DQSDelay] & (1<<ByteLane)) {
+ for (DQSDelay = 0; DQSDelay < 48; DQSDelay++) {
+ if (MutualCSPassW[DQSDelay] & (1 << ByteLane)) {
print_debug_dqs("\t\t\t\t\tTrainDQSPos: 321 DQSDelay ", DQSDelay, 5);
print_debug_dqs("\t\t\t\t\tTrainDQSPos: 322 MutualCSPassW ", MutualCSPassW[DQSDelay], 5);
@@ -1176,7 +1176,7 @@ static unsigned TrainDQSPos(const struct mem_controller *ctrl, unsigned channel,
if (LastTest == DQS_FAIL) {
RnkDlySeqPassMin = DQSDelay; //start sequential run
}
- if ((RnkDlySeqPassMax - RnkDlySeqPassMin)>(RnkDlyFilterMax-RnkDlyFilterMin)){
+ if ((RnkDlySeqPassMax - RnkDlySeqPassMin) > (RnkDlyFilterMax - RnkDlyFilterMin)) {
RnkDlyFilterMin = RnkDlySeqPassMin;
RnkDlyFilterMax = RnkDlySeqPassMax;
}
@@ -1194,7 +1194,7 @@ static unsigned TrainDQSPos(const struct mem_controller *ctrl, unsigned channel,
else {
print_debug_dqs("\t\t\t\tTrainDQSPos: 34 RnkDlyFilterMax ", RnkDlyFilterMax, 4);
print_debug_dqs("\t\t\t\tTrainDQSPos: 34 RnkDlyFilterMin ", RnkDlyFilterMin, 4);
- if ((RnkDlyFilterMax - RnkDlyFilterMin)< MIN_DQS_WNDW){
+ if ((RnkDlyFilterMax - RnkDlyFilterMin) < MIN_DQS_WNDW) {
Errors |= SB_SMALLDQS;
}
else {
@@ -1371,15 +1371,15 @@ static unsigned TrainDQSRdWrPos(const struct mem_controller *ctrl, struct sys_in
//SetupDqsPattern
buf_a = (uint8_t *)(((uint32_t)(&pattern_buf_x[0]) + 0x10) & (~0xf));
- if (is_Width128){
+ if (is_Width128) {
pattern = 1;
- for (i=0;i<16*18;i++) {
+ for (i = 0; i < 16*18; i++) {
*((uint32_t *)(buf_a + i*4)) = TestPatternJD1b[i];
}
}
else {
pattern = 0;
- for (i=0; i<16*9;i++) {
+ for (i = 0; i < 16*9; i++) {
*((uint32_t *)(buf_a + i*4)) = TestPatternJD1a[i];
}
@@ -1397,7 +1397,7 @@ static unsigned TrainDQSRdWrPos(const struct mem_controller *ctrl, struct sys_in
channel = 1;
}
- while ( (channel<2) && (!Errors)) {
+ while ((channel < 2) && (!Errors)) {
print_debug_dqs("\tTrainDQSRdWrPos: 1 channel ",channel, 1);
for (DQSWrDelay = 0; DQSWrDelay < 48; DQSWrDelay++) {
unsigned err;
@@ -1417,7 +1417,7 @@ static unsigned TrainDQSRdWrPos(const struct mem_controller *ctrl, struct sys_in
}
channel++;
- if (!is_Width128){
+ if (!is_Width128) {
//FIXME: 64MuxMode??
channel++; // skip channel if 64-bit mode
}
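
The TrainDQSPos hunks above sweep all 48 DQS delay settings, record a pass bit per bytelane in MutualCSPassW[], then keep the longest contiguous run of passing delays; MiddleDQS() earlier in the file shows how the midpoint of such a window is computed, and runs narrower than MIN_DQS_WNDW are flagged as SB_SMALLDQS. A condensed sketch of that selection step, with hypothetical names and the error handling left out:

#include <stdint.h>

/* pass[d] has 'lane_bit' set when delay step d passed for this bytelane. */
static unsigned pick_dqs_delay(const uint8_t pass[48], uint8_t lane_bit)
{
	unsigned d, run_min = 0, run_max = 0, best_min = 0, best_max = 0;
	int last_pass = 0;

	for (d = 0; d < 48; d++) {
		if (!(pass[d] & lane_bit)) {
			last_pass = 0;
			continue;
		}
		if (!last_pass)
			run_min = d;            /* start of a new sequential run */
		run_max = d;
		last_pass = 1;
		if ((run_max - run_min) > (best_max - best_min)) {
			best_min = run_min;     /* widest window so far */
			best_max = run_max;
		}
	}

	/* midpoint of the widest window, rounded up as MiddleDQS() does */
	if ((best_max - best_min) & 1)
		return best_min + 1 + ((best_max - best_min) >> 1);
	return best_min + ((best_max - best_min) >> 1);
}
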
@@ -1458,7 +1458,7 @@ static unsigned CalcEccDQSPos(unsigned channel,unsigned ByteLane0, unsigned Byte
DQSDelay0 = get_dqs_delay(channel, ByteLane0, Direction, dqs_delay_a);
DQSDelay1 = get_dqs_delay(channel, ByteLane1, Direction, dqs_delay_a);
- if (DQSDelay0>DQSDelay1) {
+ if (DQSDelay0 > DQSDelay1) {
DQSDelay = DQSDelay0 - DQSDelay1;
InterFactor = 0xff - InterFactor;
}
@@ -1470,7 +1470,7 @@ static unsigned CalcEccDQSPos(unsigned channel,unsigned ByteLane0, unsigned Byte
DQSDelay >>= 8; // /255
- if (DQSDelay0>DQSDelay1) {
+ if (DQSDelay0 > DQSDelay1) {
DQSDelay += DQSDelay1;
}
else {
@@ -1496,11 +1496,11 @@ static void SetEccDQSRdWrPos(const struct mem_controller *ctrl, struct sys_info
ByteLane = 8;
for (channel = 0; channel < 2; channel++) {
- for (i=0;i<2;i++) {
+ for (i = 0; i < 2; i++) {
Direction = direction[i];
lane0 = 4; lane1 = 5; ratio = 0;
dqs_delay = CalcEccDQSPos(channel, lane0, lane1, ratio, Direction, dqs_delay_a);
- print_debug_dqs_pair("\t\tSetEccDQSRdWrPos: channel ", channel, Direction==DQS_READDIR? " R dqs_delay":" W dqs_delay", dqs_delay, 2);
+ print_debug_dqs_pair("\t\tSetEccDQSRdWrPos: channel ", channel, Direction == DQS_READDIR? " R dqs_delay":" W dqs_delay", dqs_delay, 2);
SetDQSDelayCSR(ctrl, channel, ByteLane, Direction, dqs_delay);
save_dqs_delay(channel, ByteLane, Direction, dqs_delay_a, dqs_delay);
}
@@ -1546,7 +1546,7 @@ static void f0_svm_workaround(int controllers, const struct mem_controller *ctrl
continue;
/* Skip everything if I don't have any memory on this controller */
- if (sysinfo->meminfo[i].dimm_mask==0x00) continue;
+ if (sysinfo->meminfo[i].dimm_mask == 0x00) continue;
uint32_t dword;
@@ -1568,7 +1568,7 @@ static void f0_svm_workaround(int controllers, const struct mem_controller *ctrl
print_debug_dqs_tsc("begin: tsc1", i, tsc1[i].hi, tsc1[i].lo, 2);
dword = tsc1[i].lo + tsc0[i].lo;
- if ((dword<tsc1[i].lo) || (dword<tsc0[i].lo)) {
+ if ((dword < tsc1[i].lo) || (dword < tsc0[i].lo)) {
tsc1[i].hi++;
}
tsc1[i].lo = dword;
@@ -1583,7 +1583,7 @@ static void f0_svm_workaround(int controllers, const struct mem_controller *ctrl
continue;
/* Skip everything if I don't have any memory on this controller */
- if (sysinfo->meminfo[i].dimm_mask==0x00) continue;
+ if (sysinfo->meminfo[i].dimm_mask == 0x00) continue;
if (!cpu_f0_f1[i]) continue;
@@ -1591,7 +1591,7 @@ static void f0_svm_workaround(int controllers, const struct mem_controller *ctrl
do {
tsc = rdtsc();
- } while ((tsc1[i].hi>tsc.hi) || ((tsc1[i].hi==tsc.hi) && (tsc1[i].lo>tsc.lo)));
+ } while ((tsc1[i].hi > tsc.hi) || ((tsc1[i].hi == tsc.hi) && (tsc1[i].lo > tsc.lo)));
print_debug_dqs_tsc("end : tsc ", i, tsc.hi, tsc.lo, 2);
}
@@ -1661,8 +1661,8 @@ static unsigned int range_to_mtrr(unsigned int reg,
#if CONFIG_MEM_TRAIN_SEQ != 1
printk(BIOS_DEBUG, "Setting variable MTRR %d, base: %4ldMB, range: %4ldMB, type %s\n",
reg, range_startk >>10, sizek >> 10,
- (type==MTRR_TYPE_UNCACHEABLE)?"UC":
- ((type==MTRR_TYPE_WRBACK)?"WB":"Other")
+ (type == MTRR_TYPE_UNCACHEABLE)?"UC":
+ ((type == MTRR_TYPE_WRBACK)?"WB":"Other")
);
#endif
set_var_mtrr_dqs(reg++, range_startk, sizek, type, address_bits);
@@ -1737,7 +1737,7 @@ static void clear_mtrr_dqs(unsigned tom2_k)
wrmsr(0x258, msr);
//[1M, TOM)
- for (i=0x204;i<0x210;i++) {
+ for (i = 0x204; i < 0x210; i++) {
wrmsr(i, msr);
}
@@ -1755,8 +1755,8 @@ static void set_htic_bit(unsigned i, unsigned val, unsigned bit)
{
uint32_t dword;
dword = pci_read_config32(PCI_DEV(0, 0x18+i, 0), HT_INIT_CONTROL);
- dword &= ~(1<<bit);
- dword |= ((val & 1) <<bit);
+ dword &= ~(1 << bit);
+ dword |= ((val & 1) << bit);
pci_write_config32(PCI_DEV(0, 0x18+i, 0), HT_INIT_CONTROL, dword);
}
@@ -1764,7 +1764,7 @@ static unsigned get_htic_bit(unsigned i, unsigned bit)
{
uint32_t dword;
dword = pci_read_config32(PCI_DEV(0, 0x18+i, 0), HT_INIT_CONTROL);
- dword &= (1<<bit);
+ dword &= (1 << bit);
return dword;
}
@@ -1865,7 +1865,7 @@ static void dqs_restore_MC_NVRAM(unsigned int dev)
pos = dqs_load_MC_NVRAM_ch(dev, 1, pos);
/* load the maxasync lat here */
pos = s3_load_nvram_early(4, &reg, pos);
- reg &= (DCH_MaxAsyncLat_MASK <<DCH_MaxAsyncLat_SHIFT);
+ reg &= (DCH_MaxAsyncLat_MASK << DCH_MaxAsyncLat_SHIFT);
reg |= pci_read_config32(dev, DRAM_CONFIG_HIGH);
pci_write_config32(dev, DRAM_CONFIG_HIGH, reg);
}
@@ -1890,7 +1890,7 @@ static void dqs_timing(int controllers, const struct mem_controller *ctrl, struc
continue;
/* Skip everything if I don't have any memory on this controller */
- if (sysinfo->meminfo[i].dimm_mask==0x00) continue;
+ if (sysinfo->meminfo[i].dimm_mask == 0x00) continue;
fill_mem_cs_sysinfo(i, ctrl+i, sysinfo);
}
@@ -1901,7 +1901,7 @@ static void dqs_timing(int controllers, const struct mem_controller *ctrl, struc
continue;
/* Skip everything if I don't have any memory on this controller */
- if (sysinfo->meminfo[i].dimm_mask==0x00) continue;
+ if (sysinfo->meminfo[i].dimm_mask == 0x00) continue;
printk(BIOS_DEBUG, "DQS Training:RcvrEn:Pass1: %02x\n", i);
if (train_DqsRcvrEn(ctrl+i, 1, sysinfo)) goto out;
@@ -1919,7 +1919,7 @@ static void dqs_timing(int controllers, const struct mem_controller *ctrl, struc
continue;
/* Skip everything if I don't have any memory on this controller */
- if (sysinfo->meminfo[i].dimm_mask==0x00) continue;
+ if (sysinfo->meminfo[i].dimm_mask == 0x00) continue;
printk(BIOS_DEBUG, "DQS Training:DQSPos: %02x\n", i);
if (train_DqsPos(ctrl+i, sysinfo)) goto out;
@@ -1932,7 +1932,7 @@ static void dqs_timing(int controllers, const struct mem_controller *ctrl, struc
continue;
/* Skip everything if I don't have any memory on this controller */
- if (sysinfo->meminfo[i].dimm_mask==0x00) continue;
+ if (sysinfo->meminfo[i].dimm_mask == 0x00) continue;
printk(BIOS_DEBUG, "DQS Training:RcvrEn:Pass2: %02x\n", i);
if (train_DqsRcvrEn(ctrl+i, 2, sysinfo)) goto out;
@@ -1948,7 +1948,7 @@ out:
clear_mtrr_dqs(sysinfo->tom2_k);
- for (i=0;i<5;i++) {
+ for (i = 0; i < 5; i++) {
print_debug_dqs_tsc_x("DQS Training:tsc", i, tsc[i].hi, tsc[i].lo);
}
@@ -2004,7 +2004,7 @@ static void dqs_timing(int i, const struct mem_controller *ctrl, struct sys_info
printk(BIOS_DEBUG, "set DQS timing:RcvrEn:Pass2: %02x\n", i);
}
- if (train_DqsRcvrEn(ctrl, 2, sysinfo)){
+ if (train_DqsRcvrEn(ctrl, 2, sysinfo)) {
sysinfo->mem_trained[i]=0x83; //
goto out;
}
@@ -2021,7 +2021,7 @@ out:
#endif
if (v) {
- for (ii=0;ii<4;ii++) {
+ for (ii = 0; ii < 4; ii++) {
print_debug_dqs_tsc_x("Total DQS Training : tsc ", ii, tsc[ii].hi, tsc[ii].lo);
}
}
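
One pattern worth calling out in the raminit_f_dqs.c hunks: test addresses are carried around as a 32-bit value that appears to hold PhysAddr[39:8], so the top byte (addr >> 24) is what goes into the high half of FS_BASE / IORR0 and addr << 8 is the 32-bit offset used with the fs: segment. A tiny sketch of that mapping, under that assumption (helper names hypothetical):

#include <stdint.h>

static inline uint64_t dqs_cookie_to_phys(uint32_t addr)
{
	return (uint64_t)addr << 8;     /* addr holds PhysAddr[39:8] */
}

static inline uint32_t dqs_cookie_msr_hi(uint32_t addr)
{
	return addr >> 24;              /* bits [39:32], as in SetTargetWTIO() */
}
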
diff --git a/src/northbridge/amd/amdk8/setup_resource_map.c b/src/northbridge/amd/amdk8/setup_resource_map.c
index fa03e4ab93..e6e79c868e 100644
--- a/src/northbridge/amd/amdk8/setup_resource_map.c
+++ b/src/northbridge/amd/amdk8/setup_resource_map.c
@@ -50,9 +50,9 @@ static void setup_resource_map_x_offset(const unsigned int *register_values, int
#if RES_DEBUG
printk(BIOS_DEBUG, "%04x: %02x %08x <- & %08x | %08x\n",
i>>2, register_values[i],
- register_values[i+1] + ( (register_values[i]==RES_PCI_IO) ? offset_pci_dev : 0),
+ register_values[i+1] + ((register_values[i] == RES_PCI_IO) ? offset_pci_dev : 0),
register_values[i+2],
- register_values[i+3] + ( ( (register_values[i] & RES_PORT_IO_32) == RES_PORT_IO_32) ? offset_io_base : 0)
+ register_values[i+3] + (((register_values[i] & RES_PORT_IO_32) == RES_PORT_IO_32) ? offset_io_base : 0)
);
#endif
switch (register_values[i]) {
@@ -103,7 +103,7 @@ static void setup_resource_map_x_offset(const unsigned int *register_values, int
reg = read32(where);
reg &= register_values[i+2];
reg |= register_values[i+3];
- write32( where, reg);
+ write32(where, reg);
}
break;
#endif
@@ -174,7 +174,7 @@ static void setup_mem_resource_map(const unsigned int *register_values, int max)
reg = read32(where);
reg &= register_values[i+1];
reg |= register_values[i+2];
- write32( where, reg);
+ write32(where, reg);
#if 0
reg = read32(where);
 printk(BIOS_DEBUG, " RB %08x\n", reg);
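
Stripped of the table decoding, both resource-map walkers above end in the same mask-then-or step on a memory-mapped register. A sketch of just that step, using a plain volatile access in place of the read32()/write32() accessors seen above (helper name and prototype are assumptions):

#include <stdint.h>

static void res_mem_rmw(volatile uint32_t *where, uint32_t and_mask, uint32_t or_value)
{
	uint32_t reg = *where;
	reg &= and_mask;        /* register_values[i+2] or [i+1], depending on the walker */
	reg |= or_value;        /* register_values[i+3] or [i+2] */
	*where = reg;
}
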
diff --git a/src/northbridge/amd/amdk8/util.asl b/src/northbridge/amd/amdk8/util.asl
index 6a9b69f267..83d741eada 100644
--- a/src/northbridge/amd/amdk8/util.asl
+++ b/src/northbridge/amd/amdk8/util.asl
@@ -44,9 +44,9 @@ Scope (\_SB)
Method (DADD, 2, NotSerialized)
{
- Store( Arg1, Local0)
- Store( Arg0, Local1)
- Add( ShiftLeft(Local1,16), Local0, Local0)
+ Store(Arg1, Local0)
+ Store(Arg0, Local1)
+ Add(ShiftLeft(Local1,16), Local0, Local0)
Return (Local0)
}
@@ -54,7 +54,7 @@ Scope (\_SB)
 Method (GHCE, 1, NotSerialized) // check if the HC is enabled
{
Store (DerefOf (Index (\_SB.PCI0.HCLK, Arg0)), Local1)
- if (LEqual ( And(Local1, 0x01), 0x01)) { Return (0x0F) }
+ if (LEqual (And(Local1, 0x01), 0x01)) { Return (0x0F) }
Else { Return (0x00) }
}
@@ -62,7 +62,7 @@ Scope (\_SB)
{
Store (0x00, Local0)
Store (DerefOf (Index (\_SB.PCI0.HCLK, Arg0)), Local1)
- Store (ShiftRight( And (Local1, 0xf0), 0x04), Local0)
+ Store (ShiftRight(And (Local1, 0xf0), 0x04), Local0)
Return (Local0)
}
@@ -70,7 +70,7 @@ Scope (\_SB)
{
Store (0x00, Local0)
Store (DerefOf (Index (\_SB.PCI0.HCLK, Arg0)), Local1)
- Store (ShiftRight( And (Local1, 0xf00), 0x08), Local0)
+ Store (ShiftRight(And (Local1, 0xf00), 0x08), Local0)
Return (Local0)
}
@@ -80,7 +80,7 @@ Scope (\_SB)
Store (DerefOf (Index (\_SB.PCI0.HCDN, Arg0)), Local1)
Store (Arg1, Local2) // Arg1 could be 3, 2, 1, 0
Multiply (Local2, 0x08, Local2) // change to 24, 16, 8, 0
- Store (And (ShiftRight( Local1, Local2), 0xff), Local0)
+ Store (And (ShiftRight(Local1, Local2), 0xff), Local0)
Return (Local0)
}
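
For readers more comfortable with C than ASL, the two helpers touched above reduce to roughly the following (the lower-case names are purely illustrative):

#include <stdint.h>

static uint32_t dadd(uint16_t hi, uint16_t lo)
{
	return ((uint32_t)hi << 16) + lo;       /* DADD: Add(ShiftLeft(Arg0, 16), Arg1) */
}

static uint8_t ghcd(uint32_t hcdn, unsigned link)
{
	return (hcdn >> (link * 8)) & 0xff;     /* GHCD: byte 'link' (0..3) of the HCDN dword */
}
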