author | Scott Duplichan <scott@notabs.org> | 2010-10-19 04:58:49 +0000 |
---|---|---|
committer | Scott Duplichan <scott@notabs.org> | 2010-10-19 04:58:49 +0000 |
commit | 1ba2eee2b4f38231254525a6a4ffcf1999dea636 (patch) | |
tree | e5a0dcbd155aa0374ef35d9ac5cd85dfc2105abc | |
parent | 236aef238f6e5c735cc9f253746def582fbaf8ff (diff) | |
download | coreboot-1ba2eee2b4f38231254525a6a4ffcf1999dea636.tar.xz |
Revision 5966 changed the end of line style of the 3 modified files. This change restores the original end of line style.
Signed-off-by: Scott Duplichan <scott@notabs.org>
Acked-by: Scott Duplichan <scott@notabs.org>
git-svn-id: svn://svn.coreboot.org/coreboot/trunk@5968 2b7e53f0-3cfb-0310-b3e9-8179ed1497e1
-rw-r--r-- | src/cpu/amd/model_10xxx/fidvid.c | 1676 |
-rw-r--r-- | src/cpu/amd/model_10xxx/init_cpus.c | 1890 |
-rw-r--r-- | src/northbridge/amd/amdfam10/amdfam10.h | 2404 |
3 files changed, 2985 insertions, 2985 deletions
diff --git a/src/cpu/amd/model_10xxx/fidvid.c b/src/cpu/amd/model_10xxx/fidvid.c
index 6104f9c3f6..22295c5654 100644
--- a/src/cpu/amd/model_10xxx/fidvid.c
+++ b/src/cpu/amd/model_10xxx/fidvid.c
@@ -1,838 +1,838 @@
-/*
- * This file is part of the coreboot project.
- *
- * Copyright (C) 2007 Advanced Micro Devices, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#if SET_FIDVID == 1
-#include <northbridge/amd/amdht/AsPsDefs.h>
-
-#define SET_FIDVID_DEBUG 1
-
-// if we are short on CAR stack space, disable it
-#define SET_FIDVID_STORE_AP_APICID_AT_FIRST 1
-
-static inline void print_debug_fv(const char *str, u32 val)
-{
-#if SET_FIDVID_DEBUG == 1
- printk(BIOS_DEBUG, "%s%x\n", str, val);
-#endif
-}
-
-static inline void print_debug_fv_8(const char *str, u8 val)
-{
-#if SET_FIDVID_DEBUG == 1
- printk(BIOS_DEBUG, "%s%02x\n", str, val);
-#endif
-}
-
-static inline void print_debug_fv_64(const char *str, u32 val, u32 val2)
-{
-#if SET_FIDVID_DEBUG == 1
- printk(BIOS_DEBUG, "%s%x%x\n", str, val, val2);
-#endif
-}
-
-struct fidvid_st {
- u32 common_fid;
-};
-
-static void enable_fid_change(u8 fid)
-{
- u32 dword;
- u32 nodes;
- device_t dev;
- int i;
-
- nodes = get_nodes();
-
- for (i = 0; i < nodes; i++) {
- dev = NODE_PCI(i, 3);
- dword = pci_read_config32(dev, 0xd4);
- dword &= ~0x1F;
- dword |= (u32) fid & 0x1F;
- dword |= 1 << 5; // enable
- pci_write_config32(dev, 0xd4, dword);
- printk(BIOS_DEBUG, "FID Change Node:%02x, F3xD4: %08x \n", i,
- dword);
- }
-}
-
-static void recalculateVsSlamTimeSettingOnCorePre(device_t dev)
-{
- u8 pviModeFlag;
- u8 highVoltageVid, lowVoltageVid, bValue;
- u16 minimumSlamTime;
- u16 vSlamTimes[7] = { 1000, 2000, 3000, 4000, 6000, 10000, 20000 }; /* Reg settings scaled by 100 */
- u32 dtemp;
- msr_t msr;
-
- /* This function calculates the VsSlamTime using the range of possible
- * voltages instead of a hardcoded 200us.
- * Note:This function is called from setFidVidRegs and setUserPs after
- * programming a custom Pstate.
- */
-
- /* Calculate Slam Time
- * Vslam = 0.4us/mV * (Vp0 - the lowest of Vpmin or Valt)
- * In our case, we will scale the values by 100 to avoid
- * decimals.
- */
-
- /* Determine if this is a PVI or SVI system */
- dtemp = pci_read_config32(dev, 0xA0);
-
- if (dtemp & PVI_MODE)
- pviModeFlag = 1;
- else
- pviModeFlag = 0;
-
- /* Get P0's voltage */
- msr = rdmsr(0xC0010064);
- highVoltageVid = (u8) ((msr.lo >> PS_CPU_VID_SHFT) & 0x7F);
-
- /* If SVI, we only care about CPU VID.
- * If PVI, determine the higher voltage b/t NB and CPU
- */
- if (pviModeFlag) {
- bValue = (u8) ((msr.lo >> PS_NB_VID_SHFT) & 0x7F);
- if (highVoltageVid > bValue)
- highVoltageVid = bValue;
- }
-
- /* Get Pmin's index */
- msr = rdmsr(0xC0010061);
- bValue = (u8) ((msr.lo >> PS_CUR_LIM_SHFT) & BIT_MASK_3);
-
- /* Get Pmin's VID */
- msr = rdmsr(0xC0010064 + bValue);
- lowVoltageVid = (u8) ((msr.lo >> PS_CPU_VID_SHFT) & 0x7F);
-
- /* If SVI, we only care about CPU VID.
- * If PVI, determine the higher voltage b/t NB and CPU
- */
- if (pviModeFlag) {
- bValue = (u8) ((msr.lo >> PS_NB_VID_SHFT) & 0x7F);
- if (lowVoltageVid > bValue)
- lowVoltageVid = bValue;
- }
-
- /* Get AltVID */
- dtemp = pci_read_config32(dev, 0xDC);
- bValue = (u8) (dtemp & BIT_MASK_7);
-
- /* Use the VID with the lowest voltage (higher VID) */
- if (lowVoltageVid < bValue)
- lowVoltageVid = bValue;
-
- /* If Vids are 7Dh - 7Fh, force 7Ch to keep calculations linear */
- if (lowVoltageVid > 0x7C) {
- lowVoltageVid = 0x7C;
- if (highVoltageVid > 0x7C)
- highVoltageVid = 0x7C;
- }
-
- bValue = (u8) (lowVoltageVid - highVoltageVid);
-
- /* Each Vid increment is 12.5 mV. The minimum slam time is:
- * vidCodeDelta * 12.5mV * 0.4us/mV
- * Scale by 100 to avoid decimals.
- */
- minimumSlamTime = bValue * (125 * 4);
-
- /* Now round up to nearest register setting.
- * Note that if we don't find a value, we
- * will fall through to a value of 7
- */
- for (bValue = 0; bValue < 7; bValue++) {
- if (minimumSlamTime <= vSlamTimes[bValue])
- break;
- }
-
- /* Apply the value */
- dtemp = pci_read_config32(dev, 0xD8);
- dtemp &= VSSLAM_MASK;
- dtemp |= bValue;
- pci_write_config32(dev, 0xd8, dtemp);
-}
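
For readers following the slam-time math, here is a small standalone sketch (not part of the patch; the VID delta is a made-up value) that reproduces the table lookup performed above:

    /* Illustrative only: recompute the F3xD8[VSSlamTime] lookup for a
     * hypothetical VID delta, using the same scaled-by-100 math as above. */
    #include <stdio.h>

    int main(void)
    {
        const unsigned vSlamTimes[7] = { 1000, 2000, 3000, 4000, 6000, 10000, 20000 };
        unsigned vidDelta = 10;                          /* hypothetical P0-to-Pmin VID code delta */
        unsigned minimumSlamTime = vidDelta * (125 * 4); /* 12.5 mV/step * 0.4 us/mV, scaled by 100 */
        unsigned bValue;

        for (bValue = 0; bValue < 7; bValue++)
            if (minimumSlamTime <= vSlamTimes[bValue])
                break;

        /* prints: delta=10 -> scaled time=5000 -> setting=4 (i.e. the 6000 entry, 60 us) */
        printf("delta=%u -> scaled time=%u -> setting=%u\n",
               vidDelta, minimumSlamTime, bValue);
        return 0;
    }
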
-
-static void prep_fid_change(void)
-{
- u32 dword, dtemp;
- u32 nodes;
- device_t dev;
- int i;
-
- /* This needs to be run before any Pstate changes are requested */
-
- nodes = get_nodes();
-
- for (i = 0; i < nodes; i++) {
- printk(BIOS_DEBUG, "Prep FID/VID Node:%02x \n", i);
- dev = NODE_PCI(i, 3);
-
- dword = pci_read_config32(dev, 0xd8);
- dword &= VSRAMP_MASK;
- dword |= VSRAMP_VALUE;
- pci_write_config32(dev, 0xd8, dword);
-
- /* Figure out the value for VsSlamTime and program it */
- recalculateVsSlamTimeSettingOnCorePre(dev);
-
- /* Program fields in Clock Power/Control register0 (F3xD4) */
- /* set F3xD4 Clock Power/Timing Control 0 Register
- * NbClkDidApplyAll=1b
- * NbClkDid=100b
- * PowerStepUp= "platform dependent"
- * PowerStepDown= "platform dependent"
- * LinkPllLink=01b
- * ClkRampHystSel=HW default
- */
- /* check platform type */
- if (!(get_platform_type() & AMD_PTYPE_SVR)) {
- /* For non-server platform
- * PowerStepUp=01000b - 50nS
- * PowerStepDown=01000b - 50ns
- */
- dword = pci_read_config32(dev, 0xd4);
- dword &= CPTC0_MASK;
- dword |= NB_CLKDID_ALL | NB_CLKDID | PW_STP_UP50 | PW_STP_DN50 | LNK_PLL_LOCK; /* per BKDG */
- pci_write_config32(dev, 0xd4, dword);
- } else {
- dword = pci_read_config32(dev, 0xd4);
- dword &= CPTC0_MASK;
- /* get number of cores for PowerStepUp & PowerStepDown in server
- 1 core - 400nS - 0000b
- 2 cores - 200nS - 0010b
- 3 cores - 133nS -> 100nS - 0011b
- 4 cores - 100nS - 0011b
- */
- switch (get_core_num_in_bsp(i)) {
- case 0:
- dword |= PW_STP_UP400 | PW_STP_DN400;
- break;
- case 1:
- case 2:
- dword |= PW_STP_UP200 | PW_STP_DN200;
- break;
- case 3:
- dword |= PW_STP_UP100 | PW_STP_DN100;
- break;
- default:
- dword |= PW_STP_UP100 | PW_STP_DN100;
- break;
- }
- dword |= NB_CLKDID_ALL | NB_CLKDID | LNK_PLL_LOCK;
- pci_write_config32(dev, 0xd4, dword);
- }
-
- /* check PVI/SVI */
- dword = pci_read_config32(dev, 0xA0);
- if (dword & PVI_MODE) { /* PVI */
- /* set slamVidMode to 0 for PVI */
- dword &= VID_SLAM_OFF | PLLLOCK_OFF;
- dword |= PLLLOCK_DFT_L;
- pci_write_config32(dev, 0xA0, dword);
- } else { /* SVI */
- /* set slamVidMode to 1 for SVI */
- dword &= PLLLOCK_OFF;
- dword |= PLLLOCK_DFT_L | VID_SLAM_ON;
- pci_write_config32(dev, 0xA0, dword);
-
- dtemp = dword;
-
- /* Program F3xD8[PwrPlanes] according to F3xA0[DualVdd] */
- dword = pci_read_config32(dev, 0xD8);
-
- if (dtemp & DUAL_VDD_BIT)
- dword |= PWR_PLN_ON;
- else
- dword &= PWR_PLN_OFF;
- pci_write_config32(dev, 0xD8, dword);
- }
-
- /* Note the following settings are additional from the ported
- * function setFidVidRegs()
- */
- dword = pci_read_config32(dev, 0xDc);
- dword |= 0x5 << 12; /* NbsynPtrAdj set to 0x5 per BKDG (needs reset) */
- pci_write_config32(dev, 0xdc, dword);
-
- /* Rev B settings - FIXME: support other revs. */
- dword = 0xA0E641E6;
- pci_write_config32(dev, 0x84, dword);
-
- dword = 0xE600A681;
- pci_write_config32(dev, 0x80, dword);
-
- dword = pci_read_config32(dev, 0x80);
- printk(BIOS_DEBUG, " F3x80: %08x \n", dword);
- dword = pci_read_config32(dev, 0x84);
- printk(BIOS_DEBUG, " F3x84: %08x \n", dword);
- dword = pci_read_config32(dev, 0xD4);
- printk(BIOS_DEBUG, " F3xD4: %08x \n", dword);
- dword = pci_read_config32(dev, 0xD8);
- printk(BIOS_DEBUG, " F3xD8: %08x \n", dword);
- dword = pci_read_config32(dev, 0xDC);
- printk(BIOS_DEBUG, " F3xDC: %08x \n", dword);
-
-
- }
-}
-
-
-static void UpdateSinglePlaneNbVid(void)
-{
- u32 nbVid, cpuVid;
- u8 i;
- msr_t msr;
-
- /* copy higher voltage (lower VID) of NBVID & CPUVID to both */
- for (i = 0; i < 5; i++) {
- msr = rdmsr(PS_REG_BASE + i);
- nbVid = (msr.lo & PS_CPU_VID_M_ON) >> PS_CPU_VID_SHFT;
- cpuVid = (msr.lo & PS_NB_VID_M_ON) >> PS_NB_VID_SHFT;
-
- if (nbVid != cpuVid) {
- if (nbVid > cpuVid)
- nbVid = cpuVid;
-
- msr.lo = msr.lo & PS_BOTH_VID_OFF;
- msr.lo = msr.lo | (u32) ((nbVid) << PS_NB_VID_SHFT);
- msr.lo = msr.lo | (u32) ((nbVid) << PS_CPU_VID_SHFT);
- wrmsr(PS_REG_BASE + i, msr);
- }
- }
-}
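
Because a lower VID code means a higher voltage, the loop above keeps the numerically smaller code and writes it to both fields. A minimal standalone sketch with made-up codes:

    /* Illustrative only: lower VID code = higher voltage, so the smaller code wins. */
    #include <stdio.h>

    int main(void)
    {
        unsigned nbVid = 0x20, cpuVid = 0x18;  /* hypothetical codes read from one P-state MSR */

        if (nbVid != cpuVid) {
            if (nbVid > cpuVid)
                nbVid = cpuVid;                /* keep the higher voltage */
            cpuVid = nbVid;                    /* both fields end up with the same code */
        }
        printf("NbVid = CpuVid = 0x%02x\n", nbVid); /* prints 0x18 */
        return 0;
    }
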
-
-static void fixPsNbVidBeforeWR(u32 newNbVid, u32 coreid)
-{
- msr_t msr;
- u8 startup_pstate;
-
- /* This function sets NbVid before the warm reset.
- * Get StartupPstate from MSRC001_0071.
- * Read the P-state register pointed to by [StartupPstate]
- * and copy its contents to the P0 and P1 registers.
- * Copy newNbVid to P0[NbVid].
- * transition to P1 on all cores,
- * then transition to P0 on core 0.
- * Wait for MSRC001_0063[CurPstate] = 000b on core 0.
- */
-
- msr = rdmsr(0xc0010071);
- startup_pstate = (msr.hi >> (32 - 32)) & 0x07;
-
- /* Copy startup pstate to P1 and P0 MSRs. Set the maxvid for this node in P0.
- * Then transition to P1 for corex and P0 for core0.
- * These settings will be cleared by the warm reset
- */
- msr = rdmsr(0xC0010064 + startup_pstate);
- wrmsr(0xC0010065, msr);
- wrmsr(0xC0010064, msr);
-
- msr.lo &= ~0xFE000000; // clear nbvid
- msr.lo |= newNbVid << 25;
- wrmsr(0xC0010064, msr);
-
- UpdateSinglePlaneNbVid();
-
- // Transition to P1 for all APs and P0 for core0.
- msr = rdmsr(0xC0010062);
- msr.lo = (msr.lo & ~0x07) | 1;
- wrmsr(0xC0010062, msr);
-
- // Wait for P1 to set.
- do {
- msr = rdmsr(0xC0010063);
- } while (msr.lo != 1);
-
- if (coreid == 0) {
- msr.lo = msr.lo & ~0x07;
- wrmsr(0xC0010062, msr);
- // Wait for P0 to set.
- do {
- msr = rdmsr(0xC0010063);
- } while (msr.lo != 0);
- }
-}
-
-static void coreDelay(void)
-{
- u32 saved;
- u32 hi, lo, msr;
- u32 cycles;
-
- /* delay ~40us
- This seems like a hack to me...
- It would be nice to have a central delay function. */
-
- cycles = 8000 << 3; /* x8 (number of 1.25ns ticks) */
-
- msr = 0x10; /* TSC */
- _RDMSR(msr, &lo, &hi);
- saved = lo;
- do {
- _RDMSR(msr, &lo, &hi);
- } while (lo - saved < cycles);
-}
-
-static void transitionVid(u32 targetVid, u8 dev, u8 isNb)
-{
- u32 currentVid, dtemp;
- msr_t msr;
- u8 vsTimecode;
- u16 timeTable[8] = { 10, 20, 30, 40, 60, 100, 200, 500 };
- int vsTime;
-
- /* This function steps or slams the NB VID to the target VID.
- * It uses VSRampTime for [SlamVidMode]=0 ([PviMode]=1)
- * or VSSlamTime for [SlamVidMode]=1 ([PviMode]=0) to time the transition.
- */
-
- /* get the current VID */
- msr = rdmsr(0xC0010071);
- if (isNb)
- currentVid = (msr.lo >> NB_VID_POS) & BIT_MASK_7;
- else
- currentVid = (msr.lo >> CPU_VID_POS) & BIT_MASK_7;
-
- /* Read MSRC001_0070 COFVID Control Register */
- msr = rdmsr(0xC0010070);
-
- /* check PVI/SVI */
- dtemp = pci_read_config32(dev, 0xA0);
- if (dtemp & PVI_MODE) { /* PVI, step VID */
- if (currentVid < targetVid) {
- while (currentVid < targetVid) {
- currentVid++;
- if (isNb)
- msr.lo = (msr.lo & NB_VID_MASK_OFF) | (currentVid << NB_VID_POS);
- else
- msr.lo = (msr.lo & CPU_VID_MASK_OFF) | (currentVid << CPU_VID_POS);
- wrmsr(0xC0010070, msr);
-
- /* read F3xD8[VSRampTime] */
- dtemp = pci_read_config32(dev, 0xD8);
- vsTimecode = (u8) ((dtemp >> VS_RAMP_T) & 0x7);
- vsTime = (int)timeTable[vsTimecode];
- do {
- coreDelay();
- vsTime -= 40;
- } while (vsTime > 0);
- }
- } else if (currentVid > targetVid) {
- while (currentVid > targetVid) {
- currentVid--;
- if (isNb)
- msr.lo = (msr.lo & NB_VID_MASK_OFF) | (currentVid << NB_VID_POS);
- else
- msr.lo = (msr.lo & CPU_VID_MASK_OFF) | (currentVid << CPU_VID_POS);
- wrmsr(0xC0010070, msr);
-
- /* read F3xD8[VSRampTime] */
- dtemp = pci_read_config32(dev, 0xD8);
- vsTimecode = (u8) ((dtemp >> VS_RAMP_T) & 0x7);
- vsTime = (int)timeTable[vsTimecode];
- do {
- coreDelay();
- vsTime -= 40;
- } while (vsTime > 0);
- }
- }
- } else { /* SVI, slam VID */
- if (isNb)
- msr.lo = (msr.lo & NB_VID_MASK_OFF) | (targetVid << NB_VID_POS);
- else
- msr.lo = (msr.lo & CPU_VID_MASK_OFF) | (targetVid << CPU_VID_POS);
- wrmsr(0xC0010070, msr);
-
- /* read F3xD8[VSRampTime] */
- dtemp = pci_read_config32(dev, 0xD8);
- vsTimecode = (u8) ((dtemp >> VS_RAMP_T) & 0x7);
- vsTime = (int)timeTable[vsTimecode];
- do {
- coreDelay();
- vsTime -= 40;
- } while (vsTime > 0);
- }
-}
-
-
-static void init_fidvid_ap(u32 bsp_apicid, u32 apicid, u32 nodeid, u32 coreid)
-{
- device_t dev;
- u32 vid_max;
- u32 fid_max;
- u8 nb_cof_vid_update;
- u8 pvimode;
- u32 reg1fc;
- u32 send;
- u8 nodes;
- u8 i;
-
- printk(BIOS_DEBUG, "FIDVID on AP: %02x\n", apicid);
-
- /* Steps 1-6 of BIOS NB COF and VID Configuration
- * for SVI and Single-Plane PVI Systems.
- */
-
- /* If any node has nb_cof_vid_update set all nodes need an update. */
- nodes = get_nodes();
- nb_cof_vid_update = 0;
- for (i = 0; i < nodes; i++) {
- if (pci_read_config32(NODE_PCI(i, 3), 0x1FC) & 1) {
- nb_cof_vid_update = 1;
- break;
- }
- }
-
- dev = NODE_PCI(nodeid, 3);
- pvimode = (pci_read_config32(dev, 0xA0) >> 8) & 1;
- reg1fc = pci_read_config32(dev, 0x1FC);
-
- if (nb_cof_vid_update) {
- if (pvimode) {
- vid_max = (reg1fc >> 7) & 0x7F;
- fid_max = (reg1fc >> 2) & 0x1F;
-
- /* write newNbVid to P-state Reg's NbVid always if NbVidUpdatedAll=1 */
- fixPsNbVidBeforeWR(vid_max, coreid);
- } else { /* SVI */
- vid_max = ((reg1fc >> 7) & 0x7F) - ((reg1fc >> 17) & 0x1F);
- fid_max = ((reg1fc >> 2) & 0x1F) + ((reg1fc >> 14) & 0x7);
- transitionVid(vid_max, dev, IS_NB);
- }
-
- /* fid setup is handled by the BSP at the end. */
-
- } else { /* ! nb_cof_vid_update */
- /* Use max values */
- if (pvimode)
- UpdateSinglePlaneNbVid();
- }
-
- send = (nb_cof_vid_update << 16) | (fid_max << 8);
- send |= (apicid << 24); // ap apicid
-
- // Send signal to BSP about this AP max fid
- // This also indicates this AP is ready for warm reset (if required).
- lapic_write(LAPIC_MSG_REG, send | F10_APSTATE_RESET);
-}
-
-static u32 calc_common_fid(u32 fid_packed, u32 fid_packed_new)
-{
- u32 fidmax;
- u32 fidmax_new;
-
- fidmax = (fid_packed >> 8) & 0xFF;
-
- fidmax_new = (fid_packed_new >> 8) & 0xFF;
-
- if (fidmax > fidmax_new) {
- fidmax = fidmax_new;
- }
-
- fid_packed &= 0xFF << 16;
- fid_packed |= (fidmax << 8);
- fid_packed |= fid_packed_new & (0xFF << 16); // set nb_cof_vid_update
-
- return fid_packed;
-}
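
As a sketch of the packing used by init_fidvid_ap() and calc_common_fid() above (the sample values below are invented): bits [31:24] carry the AP apicid, bit 16 the nb_cof_vid_update flag, bits [15:8] the max fid, and the low 6 bits the AP state.

    /* Illustrative only: reduce two packed status words to a common fid. */
    #include <stdio.h>
    typedef unsigned int u32;

    static u32 calc_common_fid(u32 fid_packed, u32 fid_packed_new)
    {
        u32 fidmax = (fid_packed >> 8) & 0xFF;
        u32 fidmax_new = (fid_packed_new >> 8) & 0xFF;

        if (fidmax > fidmax_new)
            fidmax = fidmax_new;                     /* keep the smaller max fid */

        fid_packed &= 0xFF << 16;
        fid_packed |= fidmax << 8;
        fid_packed |= fid_packed_new & (0xFF << 16); /* carry nb_cof_vid_update over */
        return fid_packed;
    }

    int main(void)
    {
        u32 bsp = (1 << 16) | (0x12 << 8);                 /* BSP: update needed, fid 0x12 */
        u32 ap  = (0x10u << 24) | (1 << 16) | (0x0F << 8); /* hypothetical AP readback: fid 0x0F */
        u32 common = calc_common_fid(bsp, ap);

        printf("common fid = 0x%02x\n", (common >> 8) & 0x1F); /* prints 0x0f */
        return 0;
    }
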
-
-static void init_fidvid_bsp_stage1(u32 ap_apicid, void *gp)
-{
- u32 readback = 0;
- u32 timeout = 1;
-
- struct fidvid_st *fvp = gp;
- int loop;
-
- print_debug_fv("Wait for AP stage 1: ap_apicid = ", ap_apicid);
-
- loop = 100000;
- while (--loop > 0) {
- if (lapic_remote_read(ap_apicid, LAPIC_MSG_REG, &readback) != 0)
- continue;
- if ((readback & 0x3f) == 1) {
- timeout = 0;
- break; /* target ap is in stage 1 */
- }
- }
-
- if (timeout) {
- printk(BIOS_DEBUG, "%s: timed out reading from ap %02x\n",
- __func__, ap_apicid);
- return;
- }
-
- print_debug_fv("\treadback = ", readback);
-
- fvp->common_fid = calc_common_fid(fvp->common_fid, readback);
-
- print_debug_fv("\tcommon_fid(packed) = ", fvp->common_fid);
-
-}
-
-static void updateSviPsNbVidAfterWR(u32 newNbVid)
-{
- msr_t msr;
- u8 i;
-
- /* This function copies newNbVid to NbVid bits in P-state Registers[4:0]
- * for SVI mode.
- */
-
- for (i = 0; i < 5; i++) {
- msr = rdmsr(0xC0010064 + i);
- if ((msr.hi >> 31) & 1) { /* PstateEn? */
- msr.lo &= ~(0x7F << 25);
- msr.lo |= (newNbVid & 0x7F) << 25;
- wrmsr(0xC0010064 + i, msr);
- }
- }
-}
-
-
-static void fixPsNbVidAfterWR(u32 newNbVid, u8 NbVidUpdatedAll)
-{
- msr_t msr;
- u8 i;
- u8 StartupPstate;
-
- /* This function copies newNbVid to the NbVid bits of P-state
- * Registers[4:0] whose NbDid bit is 0 and PstateEn bit is 1 when
- * NbVidUpdatedAll=0, or to every P-state Register[4:0] whose
- * PstateEn bit is 1 when NbVidUpdatedAll=1. Then it transitions
- * to StartupPstate.
- */
-
- /* write newNbVid to P-state Reg's NbVid if its NbDid=0 */
- for (i = 0; i < 5; i++) {
- msr = rdmsr(0xC0010064 + i);
- /* NbDid (bit 22 of P-state Reg) == 0 or NbVidUpdatedAll = 1 */
- if ((((msr.lo >> 22) & 1) == 0) || NbVidUpdatedAll) {
- msr.lo &= ~(0x7F << 25);
- msr.lo |= (newNbVid & 0x7F) << 25;
- wrmsr(0xC0010064 + i, msr);
- }
- }
-
- UpdateSinglePlaneNbVid();
-
- /* For each core in the system, transition all cores to StartupPstate */
- msr = rdmsr(0xC0010071);
- StartupPstate = msr.hi & 0x07;
- msr = rdmsr(0xC0010062);
- msr.lo = StartupPstate;
- wrmsr(0xC0010062, msr);
-
- /* Wait for StartupPstate to set. */
- do {
- msr = rdmsr(0xC0010063);
- } while (msr.lo != StartupPstate);
-}
-
-static void set_p0(void)
-{
- msr_t msr;
-
- // Transition P0 for calling core.
- msr = rdmsr(0xC0010062);
- msr.lo = (msr.lo & ~0x07);
- wrmsr(0xC0010062, msr);
-
- /* Wait for P0 to set. */
- do {
- msr = rdmsr(0xC0010063);
- } while (msr.lo != 0);
-}
-
-static void finalPstateChange(void)
-{
- /* Enable P0 on all cores for best performance.
- * Linux can slow them down later if need be.
- * It is safe since they will be in C1 halt
- * most of the time anyway.
- */
- set_p0();
-}
-
-static void init_fidvid_stage2(u32 apicid, u32 nodeid)
-{
- msr_t msr;
- device_t dev;
- u32 reg1fc;
- u32 dtemp;
- u32 nbvid;
- u8 nb_cof_vid_update;
- u8 nodes;
- u8 NbVidUpdateAll;
- u8 i;
- u8 pvimode;
-
- /* After warm reset finish the fid/vid setup for all cores. */
-
- /* If any node has nb_cof_vid_update set all nodes need an update. */
- nodes = get_nodes();
- nb_cof_vid_update = 0;
- for (i = 0; i < nodes; i++) {
- if (pci_read_config32(NODE_PCI(i, 3), 0x1FC) & 1) {
- nb_cof_vid_update = 1;
- break;
- }
- }
-
- dev = NODE_PCI(nodeid, 3);
- pvimode = (pci_read_config32(dev, 0xA0) >> 8) & 1;
- reg1fc = pci_read_config32(dev, 0x1FC);
- nbvid = (reg1fc >> 7) & 0x7F;
- NbVidUpdateAll = (reg1fc >> 1) & 1;
-
- if (nb_cof_vid_update) {
- if (pvimode) {
- nbvid = (reg1fc >> 7) & 0x7F;
- /* write newNbVid to P-state Reg's NbVid if its NbDid=0 */
- fixPsNbVidAfterWR(nbvid, NbVidUpdateAll);
- } else { /* SVI */
- nbvid = ((reg1fc >> 7) & 0x7F) - ((reg1fc >> 17) & 0x1F);
- updateSviPsNbVidAfterWR(nbvid);
- }
- } else { /* !nb_cof_vid_update */
- if (pvimode)
- UpdateSinglePlaneNbVid();
- }
- dtemp = pci_read_config32(dev, 0xA0);
- dtemp &= PLLLOCK_OFF;
- dtemp |= PLLLOCK_DFT_L;
- pci_write_config32(dev, 0xA0, dtemp);
-
- finalPstateChange();
-
- /* Set TSC to tick at the P0 frequency */
- msr = rdmsr(HWCR);
- msr.lo |= 1 << 24;
- wrmsr(HWCR, msr);
-}
-
-
-#if SET_FIDVID_STORE_AP_APICID_AT_FIRST == 1
-struct ap_apicid_st {
- u32 num;
- // this could use 256 bytes for a 64-node quad-core system
- u8 apicid[NODE_NUMS * 4];
-};
-
-static void store_ap_apicid(unsigned ap_apicid, void *gp)
-{
- struct ap_apicid_st *p = gp;
-
- p->apicid[p->num++] = ap_apicid;
-
-}
-#endif
-
-
-static int init_fidvid_bsp(u32 bsp_apicid, u32 nodes)
-{
-#if SET_FIDVID_STORE_AP_APICID_AT_FIRST == 1
- struct ap_apicid_st ap_apicidx;
- u32 i;
-#endif
- struct fidvid_st fv;
- device_t dev;
- u32 vid_max;
- u32 fid_max=0;
- u8 nb_cof_vid_update;
- u32 reg1fc;
- u8 pvimode;
-
- printk(BIOS_DEBUG, "FIDVID on BSP, APIC_id: %02x\n", bsp_apicid);
- /* FIXME: The first half of this function is nearly the same as
- * init_fidvid_ap() and the code could be combined.
- */
-
- /* Steps 1-6 of BIOS NB COF and VID Configuration
- * for SVI and Single-Plane PVI Systems.
- */
-
- /* If any node has nb_cof_vid_update set all nodes need an update. */
- nb_cof_vid_update = 0;
- for (i = 0; i < nodes; i++) {
- if (pci_read_config32(NODE_PCI(i, 3), 0x1FC) & 1) {
- nb_cof_vid_update = 1;
- break;
- }
- }
-
- dev = NODE_PCI(0, 3);
- pvimode = (pci_read_config32(dev, 0xA0) >> 8) & 1;
- reg1fc = pci_read_config32(dev, 0x1FC);
-
- if (nb_cof_vid_update) {
- if (pvimode) {
- vid_max = (reg1fc >> 7) & 0x7F;
- fid_max = (reg1fc >> 2) & 0x1F;
-
- /* write newNbVid to P-state Reg's NbVid always if NbVidUpdatedAll=1 */
- fixPsNbVidBeforeWR(vid_max, 0);
- } else { /* SVI */
- vid_max = ((reg1fc >> 7) & 0x7F) - ((reg1fc >> 17) & 0x1F);
- fid_max = ((reg1fc >> 2) & 0x1F) + ((reg1fc >> 14) & 0x7);
- transitionVid(vid_max, dev, IS_NB);
- }
-
- /* fid setup is handled by the BSP at the end. */
-
- } else { /* ! nb_cof_vid_update */
- /* Use max values */
- if (pvimode)
- UpdateSinglePlaneNbVid();
- }
-
- fv.common_fid = (nb_cof_vid_update << 16) | (fid_max << 8);
- print_debug_fv("BSP fid = ", fv.common_fid);
-
-#if SET_FIDVID_STORE_AP_APICID_AT_FIRST == 1 && SET_FIDVID_CORE0_ONLY == 0
- /* For all APs (We know the APIC ID of all APs even when the APIC ID
- is lifted) remote read from AP LAPIC_MSG_REG about max fid.
- Then calculate the common max fid that can be used for all
- APs and BSP */
- ap_apicidx.num = 0;
-
- for_each_ap(bsp_apicid, SET_FIDVID_CORE_RANGE, store_ap_apicid, &ap_apicidx);
-
- for (i = 0; i < ap_apicidx.num; i++) {
- init_fidvid_bsp_stage1(ap_apicidx.apicid[i], &fv);
- }
-#else
- for_each_ap(bsp_apicid, SET_FIDVID_CORE0_ONLY, init_fidvid_bsp_stage1, &fv);
-#endif
-
- print_debug_fv("common_fid = ", fv.common_fid);
-
- if (fv.common_fid & (1 << 16)) { /* check nb_cof_vid_update */
-
- // Enable the common fid and other settings.
- enable_fid_change((fv.common_fid >> 8) & 0x1F);
-
- // An nbfid change needs a warm reset, so request the reset first.
- return 1;
- }
-
- return 0; // No FID/VID changes. Don't reset
-}
-#endif
[The 838 "+" lines re-add the same content as the removed lines above; only the end-of-line style changes.]
diff --git a/src/cpu/amd/model_10xxx/init_cpus.c b/src/cpu/amd/model_10xxx/init_cpus.c
index 9207c2e0ba..b20446f7e2 100644
--- a/src/cpu/amd/model_10xxx/init_cpus.c
+++ b/src/cpu/amd/model_10xxx/init_cpus.c
@@ -1,945 +1,945 @@
-/*
- * This file is part of the coreboot project.
- *
- * Copyright (C) 2007-2008 Advanced Micro Devices, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include "defaults.h"
-#include <stdlib.h>
-#include <cpu/x86/lapic.h>
-#include <cpu/x86/mtrr.h>
-#include <northbridge/amd/amdfam10/amdfam10.h>
-#include <northbridge/amd/amdht/AsPsDefs.h>
-#include <northbridge/amd/amdht/porting.h>
-
-#include <cpu/x86/mtrr/earlymtrr.c>
-#include <northbridge/amd/amdfam10/raminit_amdmct.c>
-
-// This uses CONFIG_ENABLE_APIC_EXT_ID, CONFIG_APIC_ID_OFFSET and CONFIG_LIFT_BSP_APIC_ID
-#ifndef SET_FIDVID
- #define SET_FIDVID 1
-#endif
-
-#ifndef SET_FIDVID_CORE0_ONLY
- /* MSR FIDVID_CTL and FIDVID_STATUS are shared by cores,
- so every AP needs to run this to set a common FID/VID */
- #define SET_FIDVID_CORE0_ONLY 0
-#endif
-
-static void prep_fid_change(void);
-static void init_fidvid_stage2(u32 apicid, u32 nodeid);
-void cpuSetAMDMSR(void);
-
-#if CONFIG_PCI_IO_CFG_EXT == 1
-static void set_EnableCf8ExtCfg(void)
-{
- // set the NB_CFG[46]=1;
- msr_t msr;
- msr = rdmsr(NB_CFG_MSR);
- // EnableCf8ExtCfg: We need that to access CONFIG_PCI_IO_CFG_EXT 4K range
- msr.hi |= (1 << (46 - 32));
- wrmsr(NB_CFG_MSR, msr);
-}
-#else
-static void set_EnableCf8ExtCfg(void) { }
-#endif
-
-
-typedef void (*process_ap_t) (u32 apicid, void *gp);
-
-// core_range = 0 : all cores
-// core_range = 1 : core 0 only
-// core_range = 2 : cores other than core0
-
-static void for_each_ap(u32 bsp_apicid, u32 core_range, process_ap_t process_ap,
- void *gp)
-{
- // here we assume the OS doesn't change our apicid
- u32 ap_apicid;
-
- u32 nodes;
- u32 siblings;
- u32 disable_siblings;
- u32 cores_found;
- u32 nb_cfg_54;
- int i, j;
- u32 ApicIdCoreIdSize;
-
- /* get_nodes define in ht_wrapper.c */
- nodes = get_nodes();
-
- if (!CONFIG_LOGICAL_CPUS ||
- read_option(CMOS_VSTART_multi_core, CMOS_VLEN_multi_core, 0) != 0) { // 0 means multi core
- disable_siblings = 1;
- } else {
- disable_siblings = 0;
- }
-
- /* Assume that all nodes are the same stepping; this lets us use
- nb_cfg_54 from the BSP for all nodes */
- nb_cfg_54 = read_nb_cfg_54();
-
- ApicIdCoreIdSize = (cpuid_ecx(0x80000008) >> 12 & 0xf);
- if (ApicIdCoreIdSize) {
- siblings = ((1 << ApicIdCoreIdSize) - 1);
- } else {
- siblings = 3; //quad core
- }
-
- for (i = 0; i < nodes; i++) {
- cores_found = get_core_num_in_bsp(i);
-
- u32 jstart, jend;
-
- if (core_range == 2) {
- jstart = 1;
- } else {
- jstart = 0;
- }
-
- if (disable_siblings || (core_range == 1)) {
- jend = 0;
- } else {
- jend = cores_found;
- }
-
- for (j = jstart; j <= jend; j++) {
- ap_apicid =
- i * (nb_cfg_54 ? (siblings + 1) : 1) +
- j * (nb_cfg_54 ? 1 : 64);
-
-#if (CONFIG_ENABLE_APIC_EXT_ID == 1) && (CONFIG_APIC_ID_OFFSET > 0)
-#if CONFIG_LIFT_BSP_APIC_ID == 0
- if ((i != 0) || (j != 0)) /* except bsp */
-#endif
- ap_apicid += CONFIG_APIC_ID_OFFSET;
-#endif
-
- if (ap_apicid == bsp_apicid)
- continue;
-
- process_ap(ap_apicid, gp);
-
- }
- }
-}
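
A small standalone sketch of the APIC ID arithmetic used by for_each_ap() above; the node/core pair and sibling count are made-up inputs:

    /* Illustrative only: map a node/core pair to an APIC ID. NB_CFG[54]
     * selects whether the core number sits in the low APIC ID bits or at bit 6. */
    #include <stdio.h>
    typedef unsigned int u32;

    static u32 ap_apicid_for(u32 node, u32 core, u32 nb_cfg_54, u32 siblings)
    {
        return node * (nb_cfg_54 ? (siblings + 1) : 1) +
               core * (nb_cfg_54 ? 1 : 64);
    }

    int main(void)
    {
        /* quad-core parts (siblings = 3), node 2 / core 1 */
        printf("nb_cfg_54=1: apicid %u\n", ap_apicid_for(2, 1, 1, 3)); /* prints 9 */
        printf("nb_cfg_54=0: apicid %u\n", ap_apicid_for(2, 1, 0, 3)); /* prints 66 */
        return 0;
    }
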
-
-static inline int lapic_remote_read(int apicid, int reg, u32 *pvalue)
-{
- int timeout;
- u32 status;
- int result;
- lapic_wait_icr_idle();
- lapic_write(LAPIC_ICR2, SET_LAPIC_DEST_FIELD(apicid));
- lapic_write(LAPIC_ICR, LAPIC_DM_REMRD | (reg >> 4));
-
-/* Extra busy check compared to lapic.h */
- timeout = 0;
- do {
- status = lapic_read(LAPIC_ICR) & LAPIC_ICR_BUSY;
- } while (status == LAPIC_ICR_BUSY && timeout++ < 1000);
-
- timeout = 0;
- do {
- status = lapic_read(LAPIC_ICR) & LAPIC_ICR_RR_MASK;
- } while (status == LAPIC_ICR_RR_INPROG && timeout++ < 1000);
-
- result = -1;
-
- if (status == LAPIC_ICR_RR_VALID) {
- *pvalue = lapic_read(LAPIC_RRR);
- result = 0;
- }
- return result;
-}
-
-#if SET_FIDVID == 1
-static void init_fidvid_ap(u32 bsp_apicid, u32 apicid, u32 nodeid, u32 coreid);
-#endif
-
-static inline __attribute__ ((always_inline))
-void print_apicid_nodeid_coreid(u32 apicid, struct node_core_id id,
- const char *str)
-{
- printk(BIOS_DEBUG,
- "%s --- { APICID = %02x NODEID = %02x COREID = %02x} ---\n", str,
- apicid, id.nodeid, id.coreid);
-}
-
-static u32 wait_cpu_state(u32 apicid, u32 state)
-{
- u32 readback = 0;
- u32 timeout = 1;
- int loop = 4000000;
- while (--loop > 0) {
- if (lapic_remote_read(apicid, LAPIC_MSG_REG, &readback) != 0)
- continue;
- if ((readback & 0x3f) == state || (readback & 0x3f) == F10_APSTATE_RESET) {
- timeout = 0;
- break; // target cpu reached the requested state
- }
- }
- if (timeout) {
- if (readback) {
- timeout = readback;
- }
- }
-
- return timeout;
-}
-
-static void wait_ap_started(u32 ap_apicid, void *gp)
-{
- u32 timeout;
- timeout = wait_cpu_state(ap_apicid, F10_APSTATE_STARTED);
- printk(BIOS_DEBUG, "* AP %02x", ap_apicid);
- if (timeout) {
- printk(BIOS_DEBUG, " timed out:%08x\n", timeout);
- } else {
- printk(BIOS_DEBUG, "started\n");
- }
-}
-
-void wait_all_other_cores_started(u32 bsp_apicid)
-{
- // all aps other than core0
- printk(BIOS_DEBUG, "started ap apicid: ");
- for_each_ap(bsp_apicid, 2, wait_ap_started, (void *)0);
- printk(BIOS_DEBUG, "\n");
-}
-
-void allow_all_aps_stop(u32 bsp_apicid)
-{
- /* Called by the BSP to indicate AP can stop */
-
- /* FIXME Do APs use this? */
-
- // allow APs to stop; use 6 bits for state
- lapic_write(LAPIC_MSG_REG, (bsp_apicid << 24) | F10_APSTATE_STOPPED);
-}
-
-static void enable_apic_ext_id(u32 node)
-{
- u32 val;
-
- val = pci_read_config32(NODE_HT(node), 0x68);
- val |= (HTTC_APIC_EXT_SPUR | HTTC_APIC_EXT_ID | HTTC_APIC_EXT_BRD_CST);
- pci_write_config32(NODE_HT(node), 0x68, val);
-}
-
-static void STOP_CAR_AND_CPU(void)
-{
- msr_t msr;
-
- /* Disable L2 IC to L3 connection (Only for CAR) */
- msr = rdmsr(BU_CFG2);
- msr.lo &= ~(1 << ClLinesToNbDis);
- wrmsr(BU_CFG2, msr);
-
- disable_cache_as_ram(); // inline
- /* stop all cores except node0/core0 the bsp .... */
- stop_this_cpu();
-}
-
-#if CONFIG_RAMINIT_SYSINFO
-static u32 init_cpus(u32 cpu_init_detectedx, struct sys_info *sysinfo)
-#else
-static u32 init_cpus(u32 cpu_init_detectedx)
-#endif
-{
- u32 bsp_apicid = 0;
- u32 apicid;
- struct node_core_id id;
-
- /*
- * already set early mtrr in cache_as_ram.inc
- */
-
- /* this is derived from the initial apicid; we need nodeid and coreid
- later */
- id = get_node_core_id_x();
-
- /* The NB_CFG MSR is shared between cores, so we need to make sure
- core0 is done first --- use wait_all_core0_started */
- if (id.coreid == 0) {
- set_apicid_cpuid_lo(); /* only set it on core0 */
- set_EnableCf8ExtCfg(); /* only set it on core0 */
-#if (CONFIG_ENABLE_APIC_EXT_ID == 1)
- enable_apic_ext_id(id.nodeid);
-#endif
- }
-
- enable_lapic();
-
-#if (CONFIG_ENABLE_APIC_EXT_ID == 1) && (CONFIG_APIC_ID_OFFSET > 0)
- u32 initial_apicid = get_initial_apicid();
-
-#if CONFIG_LIFT_BSP_APIC_ID == 0
- if (initial_apicid != 0) // other than bsp
-#endif
- {
- /* use initial apic id to lift it */
- u32 dword = lapic_read(LAPIC_ID);
- dword &= ~(0xff << 24);
- dword |=
- (((initial_apicid + CONFIG_APIC_ID_OFFSET) & 0xff) << 24);
-
- lapic_write(LAPIC_ID, dword);
- }
-#if CONFIG_LIFT_BSP_APIC_ID == 1
- bsp_apicid += CONFIG_APIC_ID_OFFSET;
-#endif
-
-#endif
-
- /* get the apicid, it may be lifted already */
- apicid = lapicid();
-
- // show our apicid, nodeid, and coreid
- if (id.coreid == 0) {
- if (id.nodeid != 0) //all core0 except bsp
- print_apicid_nodeid_coreid(apicid, id, " core0: ");
- } else { //all other cores
- print_apicid_nodeid_coreid(apicid, id, " corex: ");
- }
-
- if (cpu_init_detectedx) {
- print_apicid_nodeid_coreid(apicid, id,
- "\n\n\nINIT detected from ");
- printk(BIOS_DEBUG, "\nIssuing SOFT_RESET...\n");
- soft_reset();
- }
-
- if (id.coreid == 0) {
- if (!(warm_reset_detect(id.nodeid))) //FIXME: INIT is checked above but check for more resets?
- distinguish_cpu_resets(id.nodeid); // Also indicates we are started
- }
- // Mark the core as started.
- lapic_write(LAPIC_MSG_REG, (apicid << 24) | F10_APSTATE_STARTED);
-
- if (apicid != bsp_apicid) {
- /* Setup each AP's cores MSRs.
- * This happens after HTinit.
- * The BSP runs this code in its own path.
- */
- update_microcode(cpuid_eax(1));
- cpuSetAMDMSR();
-
-#if SET_FIDVID == 1
-#if (CONFIG_LOGICAL_CPUS == 1) && (SET_FIDVID_CORE0_ONLY == 1)
- // Run on all AP for proper FID/VID setup.
- if (id.coreid == 0) // only need set fid for core0
-#endif
- {
- // check for a warm (BIOS) reset: if so run stage2, otherwise stage1
- if (warm_reset_detect(id.nodeid)) {
- printk(BIOS_DEBUG,
- "init_fidvid_stage2 apicid: %02x\n",
- apicid);
- init_fidvid_stage2(apicid, id.nodeid);
- } else {
- printk(BIOS_DEBUG,
- "init_fidvid_ap(stage1) apicid: %02x\n",
- apicid);
- init_fidvid_ap(bsp_apicid, apicid, id.nodeid,
- id.coreid);
- }
- }
-#endif
-
- /* AP is ready, configure MTRRs and go to sleep */
- set_var_mtrr(0, 0x00000000, CONFIG_RAMTOP, MTRR_TYPE_WRBACK);
-
- STOP_CAR_AND_CPU();
-
- printk(BIOS_DEBUG,
- "\nAP %02x should be halted but you are reading this....\n",
- apicid);
- }
-
- return bsp_apicid;
-}
-
-static u32 is_core0_started(u32 nodeid)
-{
- u32 htic;
- device_t device;
- device = NODE_PCI(nodeid, 0);
- htic = pci_read_config32(device, HT_INIT_CONTROL);
- htic &= HTIC_ColdR_Detect;
- return htic;
-}
-
-void wait_all_core0_started(void)
-{
- /* When core0 is started, it will call distinguish_cpu_resets,
- * so wait for that to finish */
- u32 i;
- u32 nodes = get_nodes();
-
- printk(BIOS_DEBUG, "core0 started: ");
- for (i = 1; i < nodes; i++) { // skip bsp, because it is running on bsp
- while (!is_core0_started(i)) {
- }
- printk(BIOS_DEBUG, " %02x", i);
- }
- printk(BIOS_DEBUG, "\n");
-}
-
-#if CONFIG_MAX_PHYSICAL_CPUS > 1
-/**
- * void start_node(u32 node)
- *
- * Start core0 in the given node so it can generate HT packets for the
- * code that follows.
- *
- * This function starts the AP nodes' core0s. wait_all_core0_started() in
- * romstage.c waits for all the APs to finish before continuing
- * system init.
- */
-static void start_node(u8 node)
-{
- u32 val;
-
- /* Enable routing table */
- printk(BIOS_DEBUG, "Start node %02x", node);
-
-#if CONFIG_NORTHBRIDGE_AMD_AMDFAM10
- /* For FAM10 support, we need to set Dram base/limit for the new node */
- pci_write_config32(NODE_MP(node), 0x44, 0);
- pci_write_config32(NODE_MP(node), 0x40, 3);
-#endif
-
- /* Allow APs to make requests (ROM fetch) */
- val = pci_read_config32(NODE_HT(node), 0x6c);
- val &= ~(1 << 1);
- pci_write_config32(NODE_HT(node), 0x6c, val);
-
- printk(BIOS_DEBUG, " done.\n");
-}
-
-/**
- * static void setup_remote_node(u32 node)
- *
- * Copy the BSP Address Map to each AP.
- */
-static void setup_remote_node(u8 node)
-{
- /* These registers can be used together with the F1x114_x Address Map
- registers, so they must be set even on a 32-node system */
- static const u16 pci_reg[] = {
- /* DRAM Base/Limits Registers */
- 0x44, 0x4c, 0x54, 0x5c, 0x64, 0x6c, 0x74, 0x7c,
- 0x40, 0x48, 0x50, 0x58, 0x60, 0x68, 0x70, 0x78,
- 0x144, 0x14c, 0x154, 0x15c, 0x164, 0x16c, 0x174, 0x17c,
- 0x140, 0x148, 0x150, 0x158, 0x160, 0x168, 0x170, 0x178,
- /* MMIO Base/Limits Registers */
- 0x84, 0x8c, 0x94, 0x9c, 0xa4, 0xac, 0xb4, 0xbc,
- 0x80, 0x88, 0x90, 0x98, 0xa0, 0xa8, 0xb0, 0xb8,
- /* IO Base/Limits Registers */
- 0xc4, 0xcc, 0xd4, 0xdc,
- 0xc0, 0xc8, 0xd0, 0xd8,
- /* Configuration Map Registers */
- 0xe0, 0xe4, 0xe8, 0xec,
- };
- u16 i;
-
- printk(BIOS_DEBUG, "setup_remote_node: %02x", node);
-
- /* copy the default resource map from node 0 */
- for (i = 0; i < ARRAY_SIZE(pci_reg); i++) {
- u32 value;
- u16 reg;
- reg = pci_reg[i];
- value = pci_read_config32(NODE_MP(0), reg);
- pci_write_config32(NODE_MP(node), reg, value);
-
- }
- printk(BIOS_DEBUG, " done\n");
-}
-#endif /* CONFIG_MAX_PHYSICAL_CPUS > 1 */
-
-static void AMD_Errata281(u8 node, u32 revision, u32 platform)
-{
- /* Workaround for Transaction Scheduling Conflict in
- * Northbridge Cross Bar. Implement XCS Token adjustment
- * for ganged links. Also, perform fix up for the mixed
- * revision case.
- */
-
- u32 reg, val;
- u8 i;
- u8 mixed = 0;
- u8 nodes = get_nodes();
-
- if (platform & AMD_PTYPE_SVR) {
- /* For each node we need to check for a "broken" node */
- if (!(revision & (AMD_DR_B0 | AMD_DR_B1))) {
- for (i = 0; i < nodes; i++) {
- if (mctGetLogicalCPUID(i) &
- (AMD_DR_B0 | AMD_DR_B1)) {
- mixed = 1;
- break;
- }
- }
- }
-
- if ((revision & (AMD_DR_B0 | AMD_DR_B1)) || mixed) {
-
- /* F0X68[22:21] DsNpReqLmt0 = 01b */
- val = pci_read_config32(NODE_PCI(node, 0), 0x68);
- val &= ~0x00600000;
- val |= 0x00200000;
- pci_write_config32(NODE_PCI(node, 0), 0x68, val);
-
- /* F3X6C */
- val = pci_read_config32(NODE_PCI(node, 3), 0x6C);
- val &= ~0x700780F7;
- val |= 0x00010094;
- pci_write_config32(NODE_PCI(node, 3), 0x6C, val);
-
- /* F3X7C */
- val = pci_read_config32(NODE_PCI(node, 3), 0x7C);
- val &= ~0x707FFF1F;
- val |= 0x00144514;
- pci_write_config32(NODE_PCI(node, 3), 0x7C, val);
-
- /* F3X144[3:0] RspTok = 0001b */
- val = pci_read_config32(NODE_PCI(node, 3), 0x144);
- val &= ~0x0000000F;
- val |= 0x00000001;
- pci_write_config32(NODE_PCI(node, 3), 0x144, val);
-
- for (i = 0; i < 3; i++) {
- reg = 0x148 + (i * 4);
- val = pci_read_config32(NODE_PCI(node, 3), reg);
- val &= ~0x000000FF;
- val |= 0x000000DB;
- pci_write_config32(NODE_PCI(node, 3), reg, val);
- }
- }
- }
-}
-
-static void AMD_Errata298(void)
-{
- /* Workaround for erratum 298: an L2 eviction may occur during an
- * operation that sets the Accessed or Dirty bit.
- */
-
- msr_t msr;
- u8 i;
- u8 affectedRev = 0;
- u8 nodes = get_nodes();
-
- /* For each node we need to check for a "broken" revision */
- for (i = 0; i < nodes; i++) {
- if (mctGetLogicalCPUID(i) & (AMD_DR_B0 | AMD_DR_B1 | AMD_DR_B2)) {
- affectedRev = 1;
- break;
- }
- }
-
- if (affectedRev) {
- msr = rdmsr(HWCR);
- msr.lo |= 0x08; /* Set TlbCacheDis bit[3] */
- wrmsr(HWCR, msr);
-
- msr = rdmsr(BU_CFG);
- msr.lo |= 0x02; /* Set TlbForceMemTypeUc bit[1] */
- wrmsr(BU_CFG, msr);
-
- msr = rdmsr(OSVW_ID_Length);
- msr.lo |= 0x01; /* OS Visible Workaround - MSR */
- wrmsr(OSVW_ID_Length, msr);
-
- msr = rdmsr(OSVW_Status);
- msr.lo |= 0x01; /* OS Visible Workaround - MSR */
- wrmsr(OSVW_Status, msr);
- }
-
- if (!affectedRev && (mctGetLogicalCPUID(0xFF) & AMD_DR_B3)) {
- msr = rdmsr(OSVW_ID_Length);
- msr.lo |= 0x01; /* OS Visible Workaround - MSR */
- wrmsr(OSVW_ID_Length, msr);
-
- }
-}
-
-static u32 get_platform_type(void)
-{
- u32 ret = 0;
-
- switch (SYSTEM_TYPE) {
- case 1:
- ret |= AMD_PTYPE_DSK;
- break;
- case 2:
- ret |= AMD_PTYPE_MOB;
- break;
- case 0:
- ret |= AMD_PTYPE_SVR;
- break;
- default:
- break;
- }
-
- /* FIXME: add UMA support. */
-
- /* All Fam10 are multi core */
- ret |= AMD_PTYPE_MC;
-
- return ret;
-}
-
-static void AMD_SetupPSIVID_d(u32 platform_type, u8 node)
-{
- u32 dword;
- int i;
- msr_t msr;
-
- if (platform_type & (AMD_PTYPE_MOB | AMD_PTYPE_DSK)) {
-
- /* The following code sets the PSIVID to the lowest supported P-state
- * assuming that the VID for the lowest power state is below
- * the VDD voltage regulator threshold. (This also assumes that there
- * is a Pstate lower than P0)
- */
-
- for (i = 4; i >= 0; i--) {
- msr = rdmsr(PS_REG_BASE + i);
- /* Pstate valid? */
- if (msr.hi & PS_EN_MASK) {
- dword = pci_read_config32(NODE_PCI(node, 3), 0xA0);
- dword &= ~0x7F;
- dword |= (msr.lo >> 9) & 0x7F;
- pci_write_config32(NODE_PCI(node, 3), 0xA0, dword);
- break;
- }
- }
- }
-}
-
-/**
- * AMD_CpuFindCapability - Traverse PCI capability list to find host HT links.
- * HT Phy operations are not valid on links that aren't present, so this
- * prevents invalid accesses.
- *
- * Returns TRUE and stores the offset of the matching HT Host capability in *offset.
- */
-static BOOL AMD_CpuFindCapability(u8 node, u8 cap_count, u8 * offset)
-{
- u32 reg;
- u32 val;
-
- /* get start of CPU HT Host Capabilities */
- val = pci_read_config32(NODE_PCI(node, 0), 0x34);
- val &= 0xFF; //reg offset of first link
-
- cap_count++;
-
- /* Traverse through the capabilities. */
- do {
- reg = pci_read_config32(NODE_PCI(node, 0), val);
- /* Is the capability block a HyperTransport capability block? */
- if ((reg & 0xFF) == 0x08) {
- /* Is the HT capability block an HT Host Capability? */
- if ((reg & 0xE0000000) == (1 << 29))
- cap_count--;
- }
-
- if (cap_count)
- val = (reg >> 8) & 0xFF; //update reg offset
- } while (cap_count && val);
-
- *offset = (u8) val;
-
- /* If requested capability found val != 0 */
- if (!cap_count)
- return TRUE;
- else
- return FALSE;
-}
-
-/**
- * AMD_checkLinkType - Compare desired link characteristics using a logical
- * link type mask.
- *
- * Returns the link characteristic mask.
- */
-static u32 AMD_checkLinkType(u8 node, u8 link, u8 regoff)
-{
- u32 val;
- u32 linktype = 0;
-
- /* Check connect, init and coherency */
- val = pci_read_config32(NODE_PCI(node, 0), regoff + 0x18);
- val &= 0x1F;
-
- if (val == 3)
- linktype |= HTPHY_LINKTYPE_COHERENT;
-
- if (val == 7)
- linktype |= HTPHY_LINKTYPE_NONCOHERENT;
-
- if (linktype) {
- /* Check gen3 */
- val = pci_read_config32(NODE_PCI(node, 0), regoff + 0x08);
-
- if (((val >> 8) & 0x0F) > 6)
- linktype |= HTPHY_LINKTYPE_HT3;
- else
- linktype |= HTPHY_LINKTYPE_HT1;
-
- /* Check ganged */
- val = pci_read_config32(NODE_PCI(node, 0), (link << 2) + 0x170);
-
- if (val & 1)
- linktype |= HTPHY_LINKTYPE_GANGED;
- else
- linktype |= HTPHY_LINKTYPE_UNGANGED;
- }
- return linktype;
-}
-
-/**
- * AMD_SetHtPhyRegister - Use the HT link's HT Phy portal registers to update
- * a phy setting for that link.
- */
-static void AMD_SetHtPhyRegister(u8 node, u8 link, u8 entry)
-{
- u32 phyReg;
- u32 phyBase;
- u32 val;
-
- /* Determine this link's portal */
- if (link > 3)
- link -= 4;
-
- phyBase = ((u32) link << 3) | 0x180;
-
- /* Get the portal control register's initial value
- * and update it to access the desired phy register
- */
- phyReg = pci_read_config32(NODE_PCI(node, 4), phyBase);
-
- if (fam10_htphy_default[entry].htreg > 0x1FF) {
- phyReg &= ~HTPHY_DIRECT_OFFSET_MASK;
- phyReg |= HTPHY_DIRECT_MAP;
- } else {
- phyReg &= ~HTPHY_OFFSET_MASK;
- }
-
- /* Now get the current phy register data
- * LinkPhyDone = 0, LinkPhyWrite = 0 is a read
- */
- phyReg |= fam10_htphy_default[entry].htreg;
- pci_write_config32(NODE_PCI(node, 4), phyBase, phyReg);
-
- do {
- val = pci_read_config32(NODE_PCI(node, 4), phyBase);
- } while (!(val & HTPHY_IS_COMPLETE_MASK));
-
- /* Now we have the phy register data, apply the change */
- val = pci_read_config32(NODE_PCI(node, 4), phyBase + 4);
- val &= ~fam10_htphy_default[entry].mask;
- val |= fam10_htphy_default[entry].data;
- pci_write_config32(NODE_PCI(node, 4), phyBase + 4, val);
-
- /* write it through the portal to the phy
- * LinkPhyDone = 0, LinkPhyWrite = 1 is a write
- */
- phyReg |= HTPHY_WRITE_CMD;
- pci_write_config32(NODE_PCI(node, 4), phyBase, phyReg);
-
- do {
- val = pci_read_config32(NODE_PCI(node, 4), phyBase);
- } while (!(val & HTPHY_IS_COMPLETE_MASK));
-}
-
-void cpuSetAMDMSR(void)
-{
- /* This routine loads the CPU with the default settings in the fam10_msr_default
- * table. It must be run after Cache-As-RAM has been enabled and
- * HyperTransport initialization has taken place. Also note
- * that it is run on the current processor only, and only for the current
- * processor core.
- */
- msr_t msr;
- u8 i;
- u32 revision, platform;
-
- printk(BIOS_DEBUG, "cpuSetAMDMSR ");
-
- revision = mctGetLogicalCPUID(0xFF);
- platform = get_platform_type();
-
- for (i = 0; i < ARRAY_SIZE(fam10_msr_default); i++) {
- if ((fam10_msr_default[i].revision & revision) &&
- (fam10_msr_default[i].platform & platform)) {
- msr = rdmsr(fam10_msr_default[i].msr);
- msr.hi &= ~fam10_msr_default[i].mask_hi;
- msr.hi |= fam10_msr_default[i].data_hi;
- msr.lo &= ~fam10_msr_default[i].mask_lo;
- msr.lo |= fam10_msr_default[i].data_lo;
- wrmsr(fam10_msr_default[i].msr, msr);
- }
- }
- AMD_Errata298();
-
- printk(BIOS_DEBUG, " done\n");
-}
-
-static void cpuSetAMDPCI(u8 node)
-{
- /* This routine loads the CPU with the default settings in the fam10_pci_default
- * table. It must be run after Cache-As-RAM has been enabled and
- * HyperTransport initialization has taken place. Also note
- * that it is run only for the first core on each node.
- */
- u8 i, j;
- u32 revision, platform;
- u32 val;
- u8 offset;
-
- printk(BIOS_DEBUG, "cpuSetAMDPCI %02d", node);
-
- revision = mctGetLogicalCPUID(node);
- platform = get_platform_type();
-
- AMD_SetupPSIVID_d(platform, node); /* Set PSIVID offset which is not table driven */
-
- for (i = 0; i < ARRAY_SIZE(fam10_pci_default); i++) {
- if ((fam10_pci_default[i].revision & revision) &&
- (fam10_pci_default[i].platform & platform)) {
- val = pci_read_config32(NODE_PCI(node,
- fam10_pci_default[i].
- function),
- fam10_pci_default[i].offset);
- val &= ~fam10_pci_default[i].mask;
- val |= fam10_pci_default[i].data;
- pci_write_config32(NODE_PCI(node,
- fam10_pci_default[i].
- function),
- fam10_pci_default[i].offset, val);
- }
- }
-
- for (i = 0; i < ARRAY_SIZE(fam10_htphy_default); i++) {
- if ((fam10_htphy_default[i].revision & revision) &&
- (fam10_htphy_default[i].platform & platform)) {
- /* HT Phy settings either apply to both sublinks or have
- * separate registers for sublink zero and one, so there
- * will be two table entries; here we only loop
- * through the sublink zeros in function zero.
- */
- for (j = 0; j < 4; j++) {
- if (AMD_CpuFindCapability(node, j, &offset)) {
- if (AMD_checkLinkType(node, j, offset)
- & fam10_htphy_default[i].linktype) {
- AMD_SetHtPhyRegister(node, j,
- i);
- }
- } else {
- /* No more capabilities,
- * link not present
- */
- break;
- }
- }
- }
- }
-
- /* FIXME: add UMA support and programXbarToSriReg(); */
-
- AMD_Errata281(node, revision, platform);
-
- /* FIXME: if the dct phy doesn't init correctly it needs a reset.
- if (revision & (AMD_DR_B2 | AMD_DR_B3))
- dctPhyDiag(); */
-
- printk(BIOS_DEBUG, " done\n");
-}
-
-#ifdef UNUSED_CODE
-static void cpuInitializeMCA(void)
-{
- /* Clears Machine Check Architecture (MCA) registers, which power on
- * containing unknown data, on currently running processor.
- * This routine should only be executed on initial power on (cold boot),
- * not across a warm reset because valid data is present at that time.
- */
-
- msr_t msr;
- u32 reg;
- u8 i;
-
- if (cpuid_edx(1) & 0x4080) { /* MCE and MCA (edx[7] and edx[14]) */
- msr = rdmsr(MCG_CAP);
- if (msr.lo & MCG_CTL_P) { /* MCG_CTL_P bit is set? */
- msr.lo &= 0xFF;
- msr.lo--;
- msr.lo <<= 2; /* multiply the count by 4 */
- reg = MC0_STA + msr.lo;
- msr.lo = msr.hi = 0;
- for (i = 0; i < 4; i++) {
- wrmsr(reg, msr);
- reg -= 4; /* Touch status regs for each bank */
- }
- }
- }
-}
-#endif
-
-/**
- * finalize_node_setup()
- *
- * Do any additional post HT init
- *
- */
-static void finalize_node_setup(struct sys_info *sysinfo)
-{
- u8 i;
- u8 nodes = get_nodes();
- u32 reg;
-
-#if CONFIG_RAMINIT_SYSINFO
- /* read Node0 F0x64 bits [10:8] to find out the SbLink # */
- reg = pci_read_config32(NODE_HT(0), 0x64);
- sysinfo->sblk = (reg >> 8) & 7;
- sysinfo->sbbusn = 0;
- sysinfo->nodes = nodes;
- sysinfo->sbdn = get_sbdn(sysinfo->sbbusn);
-#endif
-
- for (i = 0; i < nodes; i++) {
- cpuSetAMDPCI(i);
- }
-
-#if SET_FIDVID == 1
- // Prep each node for FID/VID setup.
- prep_fid_change();
-#endif
-
-#if CONFIG_MAX_PHYSICAL_CPUS > 1
- /* Skip the BSP, start at node 1 */
- for (i = 1; i < nodes; i++) {
- setup_remote_node(i);
- start_node(i);
- }
-#endif
-}
-
-#include "fidvid.c"
diff --git a/src/northbridge/amd/amdfam10/amdfam10.h b/src/northbridge/amd/amdfam10/amdfam10.h
index 400ed9a292..b1eaecd049 100644
--- a/src/northbridge/amd/amdfam10/amdfam10.h
+++ b/src/northbridge/amd/amdfam10/amdfam10.h
@@ -1,1202 +1,1202 @@
-/*
- * This file is part of the coreboot project.
- *
- * Copyright (C) 2007 Advanced Micro Devices, Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#ifndef AMDFAM10_H
-
-#define AMDFAM10_H
-/* Definitions of various FAM10 registers */
-/* Function 0 */
-#define HT_TRANSACTION_CONTROL 0x68
-#define HTTC_DIS_RD_B_P (1 << 0)
-#define HTTC_DIS_RD_DW_P (1 << 1)
-#define HTTC_DIS_WR_B_P (1 << 2)
-#define HTTC_DIS_WR_DW_P (1 << 3)
-#define HTTC_DIS_MTS (1 << 4)
-#define HTTC_CPU1_EN (1 << 5)
-#define HTTC_CPU_REQ_PASS_PW (1 << 6)
-#define HTTC_CPU_RD_RSP_PASS_PW (1 << 7)
-#define HTTC_DIS_P_MEM_C (1 << 8)
-#define HTTC_DIS_RMT_MEM_C (1 << 9)
-#define HTTC_DIS_FILL_P (1 << 10)
-#define HTTC_RSP_PASS_PW (1 << 11)
-#define HTTC_BUF_REL_PRI_SHIFT 13
-#define HTTC_BUF_REL_PRI_MASK 3
-#define HTTC_BUF_REL_PRI_64 0
-#define HTTC_BUF_REL_PRI_16 1
-#define HTTC_BUF_REL_PRI_8 2
-#define HTTC_BUF_REL_PRI_2 3
-#define HTTC_LIMIT_CLDT_CFG (1 << 15)
-#define HTTC_LINT_EN (1 << 16)
-#define HTTC_APIC_EXT_BRD_CST (1 << 17)
-#define HTTC_APIC_EXT_ID (1 << 18)
-#define HTTC_APIC_EXT_SPUR (1 << 19)
-#define HTTC_SEQ_ID_SRC_NODE_EN (1 << 20)
-#define HTTC_DS_NP_REQ_LIMIT_SHIFT 21
-#define HTTC_DS_NP_REQ_LIMIT_MASK 3
-#define HTTC_DS_NP_REQ_LIMIT_NONE 0
-#define HTTC_DS_NP_REQ_LIMIT_1 1
-#define HTTC_DS_NP_REQ_LIMIT_4 2
-#define HTTC_DS_NP_REQ_LIMIT_8 3
-
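/* A minimal sketch (hypothetical helper, not part of this header) of how the
 * HT_TRANSACTION_CONTROL (F0x68) fields above are typically programmed with a
 * read-modify-write, using the DsNpReqLmt encoding as an example.
 * set_ds_np_req_limit() is an illustrative name; NODE_PCI() and the
 * pci_*_config32() helpers are assumed from the surrounding coreboot code.
 */
static inline void set_ds_np_req_limit(u8 node, u32 limit)
{
	u32 val = pci_read_config32(NODE_PCI(node, 0), HT_TRANSACTION_CONTROL);

	val &= ~(HTTC_DS_NP_REQ_LIMIT_MASK << HTTC_DS_NP_REQ_LIMIT_SHIFT);
	val |= (limit & HTTC_DS_NP_REQ_LIMIT_MASK) << HTTC_DS_NP_REQ_LIMIT_SHIFT;
	pci_write_config32(NODE_PCI(node, 0), HT_TRANSACTION_CONTROL, val);
}
/* set_ds_np_req_limit(node, HTTC_DS_NP_REQ_LIMIT_1) would match the
 * "F0X68[22:21] DsNpReqLmt0 = 01b" write done with raw constants in
 * AMD_Errata281() above.
 */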
-
-/* Function 1 */
-#define PCI_IO_BASE0 0xc0
-#define PCI_IO_BASE1 0xc8
-#define PCI_IO_BASE2 0xd0
-#define PCI_IO_BASE3 0xd8
-#define PCI_IO_BASE_VGA_EN (1 << 4)
-#define PCI_IO_BASE_NO_ISA (1 << 5)
-
-/* Function 2 */
-// 0x1xx is for DCT1
-#define DRAM_CSBASE 0x40
-#define DRAM_CSMASK 0x60
-#define DRAM_BANK_ADDR_MAP 0x80
-
-#define DRAM_CTRL 0x78
-#define DC_RdPtrInit_SHIFT 0
-#define DC_RdPrtInit_MASK 0xf
-#define DC_Twrrd3_2_SHIFT 8 /*DDR3 */
-#define DC_Twrrd3_2_MASK 3
-#define DC_Twrwr3_2_SHIFT 10 /*DDR3 */
-#define DC_Twrwr3_2_MASK 3
-#define DC_Trdrd3_2_SHIFT 12 /*DDR3 */
-#define DC_Trdrd3_2_MASK 3
-#define DC_AltVidC3MemClkTriEn (1<<16)
-#define DC_DqsRcvEnTrain (1<<18)
-#define DC_MaxRdLatency_SHIFT 22
-#define DC_MaxRdLatency_MASK 0x3ff
-
-#define DRAM_INIT 0x7c
-#define DI_MrsAddress_SHIFT 0
-#define DI_MrsAddress_MASK 0xffff
-#define DI_MrsBank_SHIFT 16
-#define DI_MrsBank_MASK 7
-#define DI_MrsChipSel_SHIFT 20
-#define DI_MrsChipSel_MASK 7
-#define DI_SendRchgAll (1<<24)
-#define DI_SendAutoRefresh (1<<25)
-#define DI_SendMrsCmd (1<<26)
-#define DI_DeassertMemRstX (1<<27)
-#define DI_AssertCke (1<<28)
-#define DI_SendZQCmd (1<<29) /*DDR3 */
-#define DI_EnMrsCmd (1<<30)
-#define DI_EnDramInit (1<<31)
-
-#define DRAM_MRS 0x84
-#define DM_BurstCtrl_SHIFT 0
-#define DM_BurstCtrl_MASK 3
-#define DM_DrvImpCtrl_SHIFT 2 /* DDR3 */
-#define DM_DrvImpCtrl_MASK 3
-#define DM_Twr_SHIFT 4 /* DDR3 */
-#define DM_Twr_MASK 7
-#define DM_Twr_BASE 4
-#define DM_Twr_MIN 5
-#define DM_Twr_MAX 12
-#define DM_DramTerm_SHIFT 7 /*DDR3 */
-#define DM_DramTerm_MASK 7
-#define DM_DramTermDyn_SHIFT 10 /* DDR3 */
-#define DM_DramTermDyn_MASK 3
-#define DM_Ooff (1<<13)
-#define DM_ASR (1<<18)
-#define DM_SRT (1<<19)
-#define DM_Tcwl_SHIFT 20
-#define DM_Tcwl_MASK 7
-#define DM_PchgPDModeSel (1<<23) /* DDR3 */
-#define DM_MPrLoc_SHIFT 24 /* DDR3 */
-#define DM_MPrLoc_MASK 3
-#define DM_MprEn (1<<26) /* DDR3 */
-
-#define DRAM_TIMING_LOW 0x88
-#define DTL_TCL_SHIFT 0
-#define DTL_TCL_MASK 0xf
-#define DTL_TCL_BASE 1 /* DDR3 =4 */
-#define DTL_TCL_MIN 3 /* DDR3 =4 */
-#define DTL_TCL_MAX 6 /* DDR3 =12 */
-#define DTL_TRCD_SHIFT 4
-#define DTL_TRCD_MASK 3 /* DDR3 =7 */
-#define DTL_TRCD_BASE 3 /* DDR3 =5 */
-#define DTL_TRCD_MIN 3 /* DDR3 =5 */
-#define DTL_TRCD_MAX 6 /* DDR3 =12 */
-#define DTL_TRP_SHIFT 8 /* DDR3 =7 */
-#define DTL_TRP_MASK 3 /* DDR3 =7 */
-#define DTL_TRP_BASE 3 /* DDR3 =5 */
-#define DTL_TRP_MIN 3 /* DDR3 =5 */
-#define DTL_TRP_MAX 6 /* DDR3 =12 */
-#define DTL_TRTP_SHIFT 11 /*DDR3 =10 */
-#define DTL_TRTP_MASK 1 /*DDR3 =3 */
-#define DTL_TRTP_BASE 2 /* DDR3 =4 */
-#define DTL_TRTP_MIN 2 /* 4 for 64 bytes*/ /* DDR3 =4 for 32bytes or 64bytes */
-#define DTL_TRTP_MAX 3 /* 5 for 64 bytes */ /* DDR3 =7 for 32Bytes or 64bytes */
-#define DTL_TRAS_SHIFT 12
-#define DTL_TRAS_MASK 0xf
-#define DTL_TRAS_BASE 3 /* DDR3 =15 */
-#define DTL_TRAS_MIN 5 /* DDR3 =15 */
-#define DTL_TRAS_MAX 18 /*DDR3 =30 */
-#define DTL_TRC_SHIFT 16
-#define DTL_TRC_MASK 0xf /* DDR3 =0x1f */
-#define DTL_TRC_BASE 11
-#define DTL_TRC_MIN 11
-#define DTL_TRC_MAX 26 /* DDR3 =43 */
-#define DTL_TWR_SHIFT 20 /* only for DDR2, DDR3's is on DC */
-#define DTL_TWR_MASK 3
-#define DTL_TWR_BASE 3
-#define DTL_TWR_MIN 3
-#define DTL_TWR_MAX 6
-#define DTL_TRRD_SHIFT 22
-#define DTL_TRRD_MASK 3
-#define DTL_TRRD_BASE 2 /* DDR3 =4 */
-#define DTL_TRRD_MIN 2 /* DDR3 =4 */
-#define DTL_TRRD_MAX 5 /* DDR3 =7 */
-#define DTL_MemClkDis_SHIFT 24 /* Channel A */
-#define DTL_MemClkDis3 (1 << 26)
-#define DTL_MemClkDis2 (1 << 27)
-#define DTL_MemClkDis1 (1 << 28)
-#define DTL_MemClkDis0 (1 << 29)
-/* DTL_MemClkDis for m2 and s1g1 is different */
-
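/* A minimal sketch (hypothetical helper, not part of this header) of the usual
 * _BASE/_MIN/_MAX convention used above: the value programmed into a timing
 * field is the timing in MEMCLKs minus its _BASE, clamped to [_MIN, _MAX].
 * Shown for the Tcl field of F2x88; dev is assumed to be NODE_PCI(node, 2).
 */
static void set_tcl(device_t dev, u32 tcl)
{
	u32 dword = pci_read_config32(dev, DRAM_TIMING_LOW);

	if (tcl < DTL_TCL_MIN)
		tcl = DTL_TCL_MIN;
	if (tcl > DTL_TCL_MAX)
		tcl = DTL_TCL_MAX;

	dword &= ~(DTL_TCL_MASK << DTL_TCL_SHIFT);
	dword |= ((tcl - DTL_TCL_BASE) & DTL_TCL_MASK) << DTL_TCL_SHIFT;
	pci_write_config32(dev, DRAM_TIMING_LOW, dword);
}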
-#define DRAM_TIMING_HIGH 0x8c
-#define DTH_TRWTWB_SHIFT 0
-#define DTH_TRWTWB_MASK 3
-#define DTH_TRWTWB_BASE 3 /* DDR3 =4 */
-#define DTH_TRWTWB_MIN 3 /* DDR3 =5 */
-#define DTH_TRWTWB_MAX 10 /* DDR3 =11 */
-#define DTH_TRWTTO_SHIFT 4
-#define DTH_TRWTTO_MASK 7
-#define DTH_TRWTTO_BASE 2 /* DDR3 =3 */
-#define DTH_TRWTTO_MIN 2 /* DDR3 =3 */
-#define DTH_TRWTTO_MAX 9 /* DDR3 =10 */
-#define DTH_TWTR_SHIFT 8
-#define DTH_TWTR_MASK 3
-#define DTH_TWTR_BASE 0 /* DDR3 =4 */
-#define DTH_TWTR_MIN 1 /* DDR3 =4 */
-#define DTH_TWTR_MAX 3 /* DDR3 =7 */
-#define DTH_TWRRD_SHIFT 10
-#define DTH_TWRRD_MASK 3 /* For DDR3 3_2 is at 0x78 DC */
-#define DTH_TWRRD_BASE 0 /* DDR3 =0 */
-#define DTH_TWRRD_MIN 0 /* DDR3 =2 */
-#define DTH_TWRRD_MAX 3 /* DDR3 =12 */
-#define DTH_TWRWR_SHIFT 12
-#define DTH_TWRWR_MASK 3 /* For DDR3 3_2 is at 0x78 DC */
-#define DTH_TWRWR_BASE 1
-#define DTH_TWRWR_MIN 1 /* DDR3 =3 */
-#define DTH_TWRWR_MAX 3 /* DDR3 =12 */
-#define DTH_TRDRD_SHIFT 14
-#define DTH_TRDRD_MASK 3 /* For DDR3 3_2 is at 0x78 DC */
-#define DTH_TRDRD_BASE 2
-#define DTH_TRDRD_MIN 2
-#define DTH_TRDRD_MAX 5 /* DDR3 =10 */
-#define DTH_TREF_SHIFT 16
-#define DTH_TREF_MASK 3
-#define DTH_TREF_7_8_US 2
-#define DTH_TREF_3_9_US 3
-#define DTH_DisAutoRefresh (1<<18)
-#define DTH_TRFC0_SHIFT 20 /* for Logical DIMM0 */
-#define DTH_TRFC_MASK 7
-#define DTH_TRFC_75_256M 0
-#define DTH_TRFC_105_512M 1
-#define DTH_TRFC_127_5_1G 2
-#define DTH_TRFC_195_2G 3
-#define DTH_TRFC_327_5_4G 4
-#if 0
-//DDR3
-#define DTH_TRFC_90_512M 1
-#define DTH_TRFC_110_5_1G 2
-#define DTH_TRFC_160_2G 3
-#define DTH_TRFC_300_4G 4
-#define DTH_TRFC_UNDEFINED_8G 5
-#endif
-#define DTH_TRFC1_SHIFT 23 /*for Logical DIMM1 */
-#define DTH_TRFC2_SHIFT 26 /*for Logical DIMM2 */
-#define DTH_TRFC3_SHIFT 29 /*for Logical DIMM3 */
-
-#define DRAM_CONFIG_LOW 0x90
-#define DCL_InitDram (1<<0)
-#define DCL_ExitSelfRef (1<<1)
-#define DCL_PllLockTime_SHIFT 2
-#define DCL_PllLockTime_MASK 3
-#define DCL_PllLockTime_15US 0
-#define DCL_PllLockTime_6US 1
-#define DCL_DramTerm_SHIFT 4
-#define DCL_DramTerm_MASK 3
-#define DCL_DramTerm_No 0
-#define DCL_DramTerm_75_OH 1
-#define DCL_DramTerm_150_OH 2
-#define DCL_DramTerm_50_OH 3
-#define DCL_DisDqsBar (1<<6) /* only for DDR2 */
-#define DCL_DramDrvWeak (1<<7) /* only for DDR2 */
-#define DCL_ParEn (1<<8)
-#define DCL_SelfRefRateEn (1<<9) /* only for DDR2 */
-#define DCL_BurstLength32 (1<<10) /* only for DDR3 */
-#define DCL_Width128 (1<<11)
-#define DCL_X4Dimm_SHIFT 12
-#define DCL_X4Dimm_MASK 0xf
-#define DCL_UnBuffDimm (1<<16)
-#define DCL_EnPhyDqsRcvEnTr (1<<18)
-#define DCL_DimmEccEn (1<<19)
-#define DCL_DynPageCloseEn (1<<20)
-#define DCL_IdleCycInit_SHIFT 21
-#define DCL_IdleCycInit_MASK 3
-#define DCL_IdleCycInit_16CLK 0
-#define DCL_IdleCycInit_32CLK 1
-#define DCL_IdleCycInit_64CLK 2
-#define DCL_IdleCycInit_96CLK 3
-#define DCL_ForceAutoPchg (1<<23)
-
-#define DRAM_CONFIG_HIGH 0x94
-#define DCH_MemClkFreq_SHIFT 0
-#define DCH_MemClkFreq_MASK 7
-#define DCH_MemClkFreq_200MHz 0 /* DDR2 */
-#define DCH_MemClkFreq_266MHz 1 /* DDR2 */
-#define DCH_MemClkFreq_333MHz 2 /* DDR2 */
-#define DCH_MemClkFreq_400MHz 3 /* DDR2 and DDR 3*/
-#define DCH_MemClkFreq_533MHz 4 /* DDR 3 */
-#define DCH_MemClkFreq_667MHz 5 /* DDR 3 */
-#define DCH_MemClkFreq_800MHz 6 /* DDR 3 */
-#define DCH_MemClkFreqVal (1<<3)
-#define DCH_Ddr3Mode (1<<8)
-#define DCH_LegacyBiosMode (1<<9)
-#define DCH_ZqcsInterval_SHIFT 10
-#define DCH_ZqcsInterval_MASK 3
-#define DCH_ZqcsInterval_DIS 0
-#define DCH_ZqcsInterval_64MS 1
-#define DCH_ZqcsInterval_128MS 2
-#define DCH_ZqcsInterval_256MS 3
-#define DCH_RDqsEn (1<<12) /* only for DDR2 */
-#define DCH_DisSimulRdWr (1<<13)
-#define DCH_DisDramInterface (1<<14)
-#define DCH_PowerDownEn (1<<15)
-#define DCH_PowerDownMode_SHIFT 16
-#define DCH_PowerDownMode_MASK 1
-#define DCH_PowerDownMode_Channel_CKE 0
-#define DCH_PowerDownMode_ChipSelect_CKE 1
-#define DCH_FourRankSODimm (1<<17)
-#define DCH_FourRankRDimm (1<<18)
-#define DCH_SlowAccessMode (1<<20)
-#define DCH_BankSwizzleMode (1<<22)
-#define DCH_DcqBypassMax_SHIFT 24
-#define DCH_DcqBypassMax_MASK 0xf
-#define DCH_DcqBypassMax_BASE 0
-#define DCH_DcqBypassMax_MIN 0
-#define DCH_DcqBypassMax_MAX 15
-#define DCH_FourActWindow_SHIFT 28
-#define DCH_FourActWindow_MASK 0xf
-#define DCH_FourActWindow_BASE 7 /* DDR3 15 */
-#define DCH_FourActWindow_MIN 8 /* DDR3 16 */
-#define DCH_FourActWindow_MAX 20 /* DDR3 30 */
-
-
-// for 0x98 index and 0x9c data for DCT0
-// for 0x198 index and 0x19c data for DCT1
-// even in ganged mode, 0x198/0x19c will be used for channel B
-
-#define DRAM_CTRL_ADDI_DATA_OFFSET 0x98
-#define DCAO_DctOffset_SHIFT 0
-#define DCAO_DctOffset_MASK 0x3fffffff
-#define DCAO_DctAccessWrite (1<<30)
-#define DCAO_DctAccessDone (1<<31)
-
-#define DRAM_CTRL_ADDI_DATA_PORT 0x9c
-
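/* A minimal sketch (hypothetical helpers, not part of this header) of the
 * index/data access described above: the target offset is written to F2x98,
 * DctAccessDone is polled, and the data moves through F2x9C.  dev is assumed
 * to be NODE_PCI(node, 2); add 0x100 to both register offsets (0x198/0x19c)
 * to reach DCT1.
 */
static u32 dct_additional_read(device_t dev, u32 index)
{
	pci_write_config32(dev, DRAM_CTRL_ADDI_DATA_OFFSET,
			   index & DCAO_DctOffset_MASK);
	while (!(pci_read_config32(dev, DRAM_CTRL_ADDI_DATA_OFFSET)
		 & DCAO_DctAccessDone))
		;
	return pci_read_config32(dev, DRAM_CTRL_ADDI_DATA_PORT);
}

static void dct_additional_write(device_t dev, u32 index, u32 data)
{
	/* The data is staged in F2x9C before the write command is issued. */
	pci_write_config32(dev, DRAM_CTRL_ADDI_DATA_PORT, data);
	pci_write_config32(dev, DRAM_CTRL_ADDI_DATA_OFFSET,
			   (index & DCAO_DctOffset_MASK) | DCAO_DctAccessWrite);
	while (!(pci_read_config32(dev, DRAM_CTRL_ADDI_DATA_OFFSET)
		 & DCAO_DctAccessDone))
		;
}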
-#define DRAM_OUTPUT_DRV_COMP_CTRL 0x00
-#define DODCC_CkeDrvStren_SHIFT 0
-#define DODCC_CkeDrvStren_MASK 3
-#define DODCC_CkeDrvStren_1_0X 0
-#define DODCC_CkeDrvStren_1_25X 1
-#define DODCC_CkeDrvStren_1_5X 2
-#define DODCC_CkeDrvStren_2_0X 3
-#define DODCC_CsOdtDrvStren_SHIFT 4
-#define DODCC_CsOdtDrvStren_MASK 3
-#define DODCC_CsOdtDrvStren_1_0X 0
-#define DODCC_CsOdtDrvStren_1_25X 1
-#define DODCC_CsOdtDrvStren_1_5X 2
-#define DODCC_CsOdtDrvStren_2_0X 3
-#define DODCC_AddrCmdDrvStren_SHIFT 8
-#define DODCC_AddrCmdDrvStren_MASK 3
-#define DODCC_AddrCmdDrvStren_1_0X 0
-#define DODCC_AddrCmdDrvStren_1_25X 1
-#define DODCC_AddrCmdDrvStren_1_5X 2
-#define DODCC_AddrCmdDrvStren_2_0X 3
-#define DODCC_ClkDrvStren_SHIFT 12
-#define DODCC_ClkDrvStren_MASK 3
-#define DODCC_ClkDrvStren_0_75X 0
-#define DODCC_ClkDrvStren_1_0X 1
-#define DODCC_ClkDrvStren_1_25X 2
-#define DODCC_ClkDrvStren_1_5X 3
-#define DODCC_DataDrvStren_SHIFT 16
-#define DODCC_DataDrvStren_MASK 3
-#define DODCC_DataDrvStren_0_75X 0
-#define DODCC_DataDrvStren_1_0X 1
-#define DODCC_DataDrvStren_1_25X 2
-#define DODCC_DataDrvStren_1_5X 3
-#define DODCC_DqsDrvStren_SHIFT 20
-#define DODCC_DqsDrvStren_MASK 3
-#define DODCC_DqsDrvStren_0_75X 0
-#define DODCC_DqsDrvStren_1_0X 1
-#define DODCC_DqsDrvStren_1_25X 2
-#define DODCC_DqsDrvStren_1_5X 3
-#define DODCC_ProcOdt_SHIFT 28
-#define DODCC_ProcOdt_MASK 3
-#define DODCC_ProcOdt_300_OHMS 0
-#define DODCC_ProcOdt_150_OHMS 1
-#define DODCC_ProcOdt_75_OHMS 2
-#if 0
-//DDR3
-#define DODCC_ProcOdt_240_OHMS 0
-#define DODCC_ProcOdt_120_OHMS 1
-#define DODCC_ProcOdt_60_OHMS 2
-#endif
-
-/*
- for DDR2 400, 533, 667, F2x[1,0]9C_x[02:01], [03], [06:05], [07] control the timing of all DIMMs
- for DDR2 800, DDR3 800, 1067, 1333, 1600, F2x[1,0]9C_x[02:01], [03], [06:05], [07] control the timing of DIMM0
-	F2x[1,0]9C_x[102:101], [103], [106:105], [107] control the timing of DIMM1
- So will Socket F with four logical DIMMs only support DDR2 800?
-*/
-/* The indexes +0x100 (i.e. 0x101, 0x102, 0x103, 0x105, 0x106, 0x107) are the
-DIMM1 counterparts of 0x01, 0x02, 0x03, 0x05, 0x06, 0x07
-*/
-//02/15/2006 18:37
-#define DRAM_WRITE_DATA_TIMING_CTRL_LOW 0x01
-#define DWDTC_WrDatFineDlyByte0_SHIFT 0
-#define DWDTC_WrDatFineDlyByte_MASK 0x1f
-#define DWDTC_WrDatFineDlyByte_BASE 0
-#define DWDTC_WrDatFineDlyByte_MIN 0
-#define DWDTC_WrDatFineDlyByte_MAX 31 // 1/64 MEMCLK
-#define DWDTC_WrDatGrossDlyByte0_SHIFT 5
-#define DWDTC_WrDatGrossDlyByte_MASK 0x3
-#define DWDTC_WrDatGrossDlyByte_NO_DELAY 0
-#define DWDTC_WrDatGrossDlyByte_0_5_ 1
-#define DWDTC_WrDatGrossDlyByte_1 2
-#define DWDTC_WrDatFineDlyByte1_SHIFT 8
-#define DWDTC_WrDatGrossDlyByte1_SHIFT 13
-#define DWDTC_WrDatFineDlyByte2_SHIFT 16
-#define DWDTC_WrDatGrossDlyByte2_SHIFT 21
-#define DWDTC_WrDatFineDlyByte3_SHIFT 24
-#define DWDTC_WrDatGrossDlyByte3_SHIFT 29
-
-#define DRAM_WRITE_DATA_TIMING_CTRL_HIGH 0x02
-#define DWDTC_WrDatFineDlyByte4_SHIFT 0
-#define DWDTC_WrDatGrossDlyByte4_SHIFT 5
-#define DWDTC_WrDatFineDlyByte5_SHIFT 8
-#define DWDTC_WrDatGrossDlyByte5_SHIFT 13
-#define DWDTC_WrDatFineDlyByte6_SHIFT 16
-#define DWDTC_WrDatGrossDlyByte6_SHIFT 21
-#define DWDTC_WrDatFineDlyByte7_SHIFT 24
-#define DWDTC_WrDatGrossDlyByte7_SHIFT 29
-
-#define DRAM_WRITE_ECC_TIMING_CTRL 0x03
-#define DWETC_WrChkFinDly_SHIFT 0
-#define DWETC_WrChkGrossDly_SHIFT 5
-
-#define DRAM_ADDR_CMD_TIMING_CTRL 0x04
-#define DACTC_CkeFineDelay_SHIFT 0
-#define DACTC_CkeFineDelay_MASK 0x1f
-#define DACTC_CkeFineDelay_BASE 0
-#define DACTC_CkeFineDelay_MIN 0
-#define DACTC_CkeFineDelay_MAX 31
-#define DACTC_CkeSetup (1<<5)
-#define DACTC_CsOdtFineDelay_SHIFT 8
-#define DACTC_CsOdtFineDelay_MASK 0x1f
-#define DACTC_CsOdtFineDelay_BASE 0
-#define DACTC_CsOdtFineDelay_MIN 0
-#define DACTC_CsOdtFineDelay_MAX 31
-#define DACTC_CsOdtSetup (1<<13)
-#define DACTC_AddrCmdFineDelay_SHIFT 16
-#define DACTC_AddrCmdFineDelay_MASK 0x1f
-#define DACTC_AddrCmdFineDelay_BASE 0
-#define DACTC_AddrCmdFineDelay_MIN 0
-#define DACTC_AddrCmdFineDelay_MAX 31
-#define DACTC_AddrCmdSetup (1<<21)
-
-#define DRAM_READ_DQS_TIMING_CTRL_LOW 0x05
-#define DRDTC_RdDqsTimeByte0_SHIFT 0
-#define DRDTC_RdDqsTimeByte_MASK 0x3f
-#define DRDTC_RdDqsTimeByte_BASE 0
-#define DRDTC_RdDqsTimeByte_MIN 0
-#define DRDTC_RdDqsTimeByte_MAX 63 // 1/128 MEMCLK
-#define DRDTC_RdDqsTimeByte1_SHIFT 8
-#define DRDTC_RdDqsTimeByte2_SHIFT 16
-#define DRDTC_RdDqsTimeByte3_SHIFT 24
-
-#define DRAM_READ_DQS_TIMING_CTRL_HIGH 0x06
-#define DRDTC_RdDqsTimeByte4_SHIFT 0
-#define DRDTC_RdDqsTimeByte5_SHIFT 8
-#define DRDTC_RdDqsTimeByte6_SHIFT 16
-#define DRDTC_RdDqsTimeByte7_SHIFT 24
-
-#define DRAM_READ_DQS_ECC_TIMING_CTRL 0x07
-#define DRDETC_RdDqsTimeCheck_SHIFT 0
-
-#define DRAM_PHY_CTRL 0x08
-#define DPC_WrtLvTrEn (1<<0)
-#define DPC_WrtLvTrMode (1<<1)
-#define DPC_TrNibbleSel (1<<2)
-#define DPC_TrDimmSel_SHIFT 4
-#define DPC_TrDimmSel_MASK 3 /* 0-->dimm0, 1-->dimm1, 2--->dimm2, 3--->dimm3 */
-#define DPC_WrLvOdt_SHIFT 8
-#define DPC_WrLvOdt_MASK 0xf /* bit 0-->odt 0, ...*/
-#define DPC_WrLvODtEn (1<<12)
-#define DPC_DqsRcvTrEn (1<<13)
-#define DPC_DisAutoComp (1<<30)
-#define DPC_AsyncCompUpdate (1<<31)
-
-#define DRAM_DQS_RECV_ENABLE_TIME_CTRL_0_0 0x10 //DIMM0 Channel A
-#define DDRETC_DqsRcvEnFineDelayByte0_SHIFT 0
-#define DDRETC_DqsRcvEnFineDelayByte0_MASK 0x1f
-#define DDRETC_DqsRcvEnGrossDelayByte0_SHIFT 5
-#define DDRETC_DqsRcvEnGrossDelayByte0_MASK 0x3
-#define DDRETC_DqsRcvEnFineDelayByte1_SHIFT 8
-#define DDRETC_DqsRcvEnGrossDelayByte1_SHIFT 13
-#define DDRETC_DqsRcvEnFineDelayByte2_SHIFT 16
-#define DDRETC_DqsRcvEnGrossDelayByte2_SHIFT 21
-#define DDRETC_DqsRcvEnFineDelayByte3_SHIFT 24
-#define DDRETC_DqsRcvEnGrossDelayByte3_SHIFT 29
-
-#define DRAM_DQS_RECV_ENABLE_TIME_CTRL_0_1 0x11 //DIMM0 Channel A
-#define DDRETC_DqsRcvEnFineDelayByte4_SHIFT 0
-#define DDRETC_DqsRcvEnGrossDelayByte4_SHIFT 5
-#define DDRETC_DqsRcvEnFineDelayByte5_SHIFT 8
-#define DDRETC_DqsRcvEnGrossDelayByte5_SHIFT 13
-#define DDRETC_DqsRcvEnFineDelayByte6_SHIFT 16
-#define DDRETC_DqsRcvEnGrossDelayByte6_SHIFT 21
-#define DDRETC_DqsRcvEnFineDelayByte7_SHIFT 24
-#define DDRETC_DqsRcvEnGrossDelayByte7_SHIFT 29
-
-#define DRAM_DQS_RECV_ENABLE_TIMING_CTRL_ECC_0_0 0x12
-#define DDRETCE_WrChkFineDlyByte0_SHIFT 0
-#define DDRETCE_WrChkGrossDlyByte0_SHIFT 5
-
-#define DRAM_DQS_RECV_ENABLE_TIME_CTRL_0_2 0x20 //DIMM0 channel B
-#define DDRETC_DqsRcvEnFineDelayByte8_SHIFT 0
-#define DDRETC_DqsRcvEnGrossDelayByte8_SHIFT 5
-#define DDRETC_DqsRcvEnFineDelayByte9_SHIFT 8
-#define DDRETC_DqsRcvEnGrossDelayByte9_SHIFT 13
-#define DDRETC_DqsRcvEnFineDelayByte10_SHIFT 16
-#define DDRETC_DqsRcvEnGrossDelayByte10_SHIFT 21
-#define DDRETC_DqsRcvEnFineDelayByte11_SHIFT 24
-#define DDRETC_DqsRcvEnGrossDelayByte11_SHIFT 29
-
-#define DRAM_DQS_RECV_ENABLE_TIME_CTRL_0_3 0x21 // DIMM0 Channel B
-#define DDRETC_DqsRcvEnFineDelayByte12_SHIFT 0
-#define DDRETC_DqsRcvEnGrossDelayByte12_SHIFT 5
-#define DDRETC_DqsRcvEnFineDelayByte13_SHIFT 8
-#define DDRETC_DqsRcvEnGrossDelayByte13_SHIFT 13
-#define DDRETC_DqsRcvEnFineDelayByte14_SHIFT 16
-#define DDRETC_DqsRcvEnGrossDelayByte14_SHIFT 21
-#define DDRETC_DqsRcvEnFineDelayByte15_SHIFT 24
-#define DDRETC_DqsRcvEnGrossDelayByte15_SHIFT 29
-
-#define DRAM_DQS_RECV_ENABLE_TIMING_CTRL_ECC_0_1 0x22
-#define DDRETCE_WrChkFineDlyByte1_SHIFT 0
-#define DDRETCE_WrChkGrossDlyByte1_SHIFT 5
-
-#define DRAM_DQS_RECV_ENABLE_TIME_CTRL_1_0 0x13 //DIMM1
-#define DRAM_DQS_RECV_ENABLE_TIME_CTRL_1_1 0x14
-#define DRAM_DQS_RECV_ENABLE_TIMING_CTRL_ECC_1_0 0x15
-#define DRAM_DQS_RECV_ENABLE_TIME_CTRL_1_2 0x23
-#define DRAM_DQS_RECV_ENABLE_TIME_CTRL_1_3 0x24
-#define DRAM_DQS_RECV_ENABLE_TIMING_CTRL_ECC_1_1 0x25
-
-#define DRAM_DQS_RECV_ENABLE_TIME_CTRL_2_0 0x16 // DIMM2
-#define DRAM_DQS_RECV_ENABLE_TIME_CTRL_2_1 0x17
-#define DRAM_DQS_RECV_ENABLE_TIMING_CTRL_ECC_2_0 0x18
-#define DRAM_DQS_RECV_ENABLE_TIME_CTRL_2_2 0x26
-#define DRAM_DQS_RECV_ENABLE_TIME_CTRL_2_3 0x27
-#define DRAM_DQS_RECV_ENABLE_TIMING_CTRL_ECC_2_1 0x28
-
-#define DRAM_DQS_RECV_ENABLE_TIME_CTRL_3_0 0x19 // DIMM3
-#define DRAM_DQS_RECV_ENABLE_TIME_CTRL_3_1 0x1a
-#define DRAM_DQS_RECV_ENABLE_TIMING_CTRL_ECC_3_0 0x1b
-#define DRAM_DQS_RECV_ENABLE_TIME_CTRL_3_2 0x29
-#define DRAM_DQS_RECV_ENABLE_TIME_CTRL_3_3 0x2a
-#define DRAM_DQS_RECV_ENABLE_TIMING_CTRL_ECC_3_1 0x2b
-
-/* 04.06.2006 19:12 */
-
-#if 0
-//DDR3
-#define DRAM_DQS_WRITE_TIME_CTRL_0_0 0x30 //DIMM0 Channel A
-#define DDWTC_WrDqsFineDlyByte0_SHIFT 0
-#define DDWTC_WrDqsFineDlyByte0_MASK 0x1f
-#define DDWTC_WrDqsGrossDlyByte0_SHIFT 5
-#define DDWTC_WrDqsGrossDlyByte0_MASK 0x3
-#define DDWTC_WrDqsFineDlyByte1_SHIFT 8
-#define DDWTC_WrDqsGrossDlyByte1_SHIFT 13
-#define DDWTC_WrDqsFineDlyByte2_SHIFT 16
-#define DDWTC_WrDqsGrossDlyByte2_SHIFT 21
-#define DDWTC_WrDqsFineDlyByte3_SHIFT 24
-#define DDWTC_WrDqsGrossDlyByte3_SHIFT 29
-
-#define DRAM_DQS_WRTIE_TIME_CTRL_0_1 0x31 //DIMM0 Channel A
-#define DDWTC_WrDqsFineDlyByte4_SHIFT 0
-#define DDWTC_WrDqsGrossDlyByte4_SHIFT 5
-#define DDWTC_WrDqsFineDlyByte5_SHIFT 8
-#define DDWTC_WrDqsGrossDlyByte5_SHIFT 13
-#define DDWTC_WrDqsFineDlyByte6_SHIFT 16
-#define DDWTC_WrDqsGrossDlyByte6_SHIFT 21
-#define DDWTC_WrDqsFineDlyByte7_SHIFT 24
-#define DDWTC_WrDqsGrossDlyByte7_SHIFT 29
-
-#define DRAM_DQS_WRITE_TIMING_CTRL_ECC_0_0 0x32
-#define DDWTCE_WrDqsChkFineDlyByte0_SHIFT 0
-#define DDWTCE_WrDqsChkGrossDlyByte0_SHIFT 5
-
-#define DRAM_DQS_WRITE_TIME_CTRL_0_2 0x40 //DIMM0 Channel B
-#define DDWTC_WrDqsFineDlyByte8_SHIFT 0
-#define DDWTC_WrDqsGrossDlyByte8_SHIFT 5
-#define DDWTC_WrDqsFineDlyByte9_SHIFT 8
-#define DDWTC_WrDqsGrossDlyByte9_SHIFT 13
-#define DDWTC_WrDqsFineDlyByte10_SHIFT 16
-#define DDWTC_WrDqsGrossDlyByte10_SHIFT 21
-#define DDWTC_WrDqsFineDlyByte11_SHIFT 24
-#define DDWTC_WrDqsGrossDlyByte11_SHIFT 29
-
-#define DRAM_DQS_WRTIE_TIME_CTRL_0_3 0x41 //DIMM0 Channel B
-#define DDWTC_WrDqsFineDlyByte12_SHIFT 0
-#define DDWTC_WrDqsGrossDlyByte12_SHIFT 5
-#define DDWTC_WrDqsFineDlyByte13_SHIFT 8
-#define DDWTC_WrDqsGrossDlyByte13_SHIFT 13
-#define DDWTC_WrDqsFineDlyByte14_SHIFT 16
-#define DDWTC_WrDqsGrossDlyByte14_SHIFT 21
-#define DDWTC_WrDqsFineDlyByte15_SHIFT 24
-#define DDWTC_WrDqsGrossDlyByte15_SHIFT 29
-
-#define DRAM_DQS_WRITE_TIMING_CTRL_ECC_0_1 0x42
-#define DDWTCE_WrDqsChkFineDlyByte1_SHIFT 0
-#define DDWTCE_WrDqsChkGrossDlyByte1_SHIFT 5
-
-#define DRAM_DQS_WRITE_TIME_CTRL_1_0 0x33 //DIMM1 Channel A
-#define DRAM_DQS_WRTIE_TIME_CTRL_1_1 0x34 //DIMM1 Channel A
-#define DRAM_DQS_WRITE_TIMING_CTRL_ECC_1_0 0x35
-#define DRAM_DQS_WRITE_TIME_CTRL_1_2 0x43 //DIMM1 Channel B
-#define DRAM_DQS_WRTIE_TIME_CTRL_1_3 0x44 //DIMM1 Channel B
-#define DRAM_DQS_WRITE_TIMING_CTRL_ECC_1_1 0x45
-#endif
-
-#define DRAM_PHASE_RECOVERY_CTRL_0 0x50
-#define DPRC_PhRecFineDlyByte0_SHIFT 0
-#define DDWTC_PhRecFineDlyByte0_MASK 0x1f
-#define DDWTC_PhRecGrossDlyByte0_SHIFT 5
-#define DDWTC_PhRecGrossDlyByte0_MASK 0x3
-#define DDWTC_PhRecFineDlyByte1_SHIFT 8
-#define DDWTC_PhRecGrossDlyByte1_SHIFT 13
-#define DDWTC_PhRecFineDlyByte2_SHIFT 16
-#define DDWTC_PhRecGrossDlyByte2_SHIFT 21
-#define DDWTC_PhRecFineDlyByte3_SHIFT 24
-#define DDWTC_PhRecGrossDlyByte3_SHIFT 29
-
-#define DRAM_PHASE_RECOVERY_CTRL_1 0x51
-#define DPRC_PhRecFineDlyByte4_SHIFT 0
-#define DDWTC_PhRecGrossDlyByte4_SHIFT 5
-#define DDWTC_PhRecFineDlyByte5_SHIFT 8
-#define DDWTC_PhRecGrossDlyByte5_SHIFT 13
-#define DDWTC_PhRecFineDlyByte6_SHIFT 16
-#define DDWTC_PhRecGrossDlyByte6_SHIFT 21
-#define DDWTC_PhRecFineDlyByte7_SHIFT 24
-#define DDWTC_PhRecGrossDlyByte7_SHIFT 29
-
-#define DRAM_ECC_PHASE_RECOVERY_CTRL 0x52
-#define DEPRC_PhRecEccDlyByte0_SHIFT 0
-#define DEPRC_PhRecEccGrossDlyByte0_SHIFT 5
-
-#define DRAM_WRITE_LEVEL_ERROR 0x53 /* read only */
-#define DWLE_WrLvErr_SHIFT 0
-#define DWLE_WrLvErr_MASK 0xff
-
-#define DRAM_CTRL_MISC 0xa0
-#define DCM_MemCleared (1<<0) /* RD == F2x110 [MemCleared] */
-#define DCM_DramEnabled (1<<9) /* RD == F2x110 [DramEnabled] */
-
-#define NB_TIME_STAMP_COUNT_LOW 0xb0
-#define TscLow_SHIFT 0
-#define TscLow_MASK 0xffffffff
-
-#define NB_TIME_STAMP_COUNT_HIGH 0xb4
-#define TscHigh_SHIFT 0
-#define TscHigh_Mask 0xff
-
-#define DCT_DEBUG_CTRL 0xf0 /* 0xf0 for DCT0, 0x1f0 is for DCT1*/
-#define DDC_DllAdjust_SHIFT 0
-#define DDC_DllAdjust_MASK 0xff
-#define DDC_DllSlower (1<<8)
-#define DDC_DllFaster (1<<9)
-#define DDC_WrtDqsAdjust_SHIFT 16
-#define DDC_WrtDqsAdjust_MASK 0x7
-#define DDC_WrtDqsAdjustEn (1<<19)
-
-#define DRAM_CTRL_SEL_LOW 0x110
-#define DCSL_DctSelHiRngEn (1<<0)
-#define DCSL_DctSelHi (1<<1)
-#define DCSL_DctSelIntLvEn (1<<2)
-#define DCSL_MemClrInit (1<<3) /* WR only */
-#define DCSL_DctGangEn (1<<4)
-#define DCSL_DctDataIntLv (1<<5)
-#define DCSL_DctSelIntLvAddr_SHIFT 6
-#define DCSL_DctSelIntLvAddr_MASK 3
-#define DCSL_DramEnable (1<<8) /* RD only */
-#define DCSL_MemClrBusy (1<<9) /* RD only */
-#define DCSL_MemCleared (1<<10) /* RD only */
-#define DCSL_DctSelBaseAddr_47_27_SHIFT 11
-#define DCSL_DctSelBaseAddr_47_27_MASK 0x1fffff
-
-#define DRAM_CTRL_SEL_HIGH 0x114
-#define DCSH_DctSelBaseOffset_47_26_SHIFT 10
-#define DCSH_DctSelBaseOffset_47_26_MASK 0x3fffff
-
-#define MEM_CTRL_CONF_LOW 0x118
-#define MCCL_MctPriCpuRd (1<<0)
-#define MCCL_MctPriCpuWr (1<<1)
-#define MCCL_MctPriIsocRd_SHIFT 4
-#define MCCL_MctPriIsoc_MASK 0x3
-#define MCCL_MctPriIsocWr_SHIFT 6
-#define MCCL_MctPriIsocWe_MASK 0x3
-#define MCCL_MctPriDefault_SHIFT 8
-#define MCCL_MctPriDefault_MASK 0x3
-#define MCCL_MctPriWr_SHIFT 10
-#define MCCL_MctPriWr_MASK 0x3
-#define MCCL_MctPriIsoc_SHIFT 12
-#define MCCL_MctPriIsoc_MASK 0x3
-#define MCCL_MctPriTrace_SHIFT 14
-#define MCCL_MctPriTrace_MASK 0x3
-#define MCCL_MctPriScrub_SHIFT 16
-#define MCCL_MctPriScrub_MASK 0x3
-#define MCCL_McqMedPriByPassMax_SHIFT 20
-#define MCCL_McqMedPriByPassMax_MASK 0x7
-#define MCCL_McqHiPriByPassMax_SHIFT 24
-#define MCCL_McqHiPriByPassMax_MASK 0x7
-#define MCCL_MctVarPriCntLmt_SHIFT 28
-#define MCCL_MctVarPriCntLmt_MASK 0x7
-
-#define MEM_CTRL_CONF_HIGH 0x11c
-#define MCCH_DctWrLimit_SHIFT 0
-#define MCCH_DctWrLimit_MASK 0x3
-#define MCCH_MctWrLimit_SHIFT 2
-#define MCCH_MctWrLimit_MASK 0x1f
-#define MCCH_MctPrefReqLimit_SHIFT 7
-#define MCCH_MctPrefReqLimit_MASK 0x1f
-#define MCCH_PrefCpuDis (1<<12)
-#define MCCH_PrefIoDis (1<<13)
-#define MCCH_PrefIoFixStrideEn (1<<14)
-#define MCCH_PrefFixStrideEn (1<<15)
-#define MCCH_PrefFixDist_SHIFT 16
-#define MCCH_PrefFixDist_MASK 0x3
-#define MCCH_PrefConfSat_SHIFT 18
-#define MCCH_PrefConfSat_MASK 0x3
-#define MCCH_PrefOneConf_SHIFT 20
-#define MCCH_PrefOneConf_MASK 0x3
-#define MCCH_PrefTwoConf_SHIFT 22
-#define MCCH_PrefTwoConf_MASK 0x7
-#define MCCH_PrefThreeConf_SHIFT 25
-#define MCCH_prefThreeConf_MASK 0x7
-#define MCCH_PrefDramTrainMode (1<<28)
-#define MCCH_FlushWrOnStpGnt (1<<29)
-#define MCCH_FlushWr (1<<30)
-#define MCCH_MctScrubEn (1<<31)
-
-
-/* Function 3 */
-#define MCA_NB_CONTROL 0x40
-#define MNCT_CorrEccEn (1<<0)
-#define MNCT_UnCorrEccEn (1<<1)
-#define MNCT_CrcErr0En (1<<2) /* Link 0 */
-#define MNCT_CrcErr1En (1<<3)
-#define MNCT_CrcErr2En (1<<4)
-#define MBCT_SyncPkt0En (1<<5) /* Link 0 */
-#define MBCT_SyncPkt1En (1<<6)
-#define MBCT_SyncPkt2En (1<<7)
-#define MBCT_MstrAbrtEn (1<<8)
-#define MBCT_TgtAbrtEn (1<<9)
-#define MBCT_GartTblEkEn (1<<10)
-#define MBCT_AtomicRMWEn (1<<11)
-#define MBCT_WdogTmrRptEn (1<<12)
-#define MBCT_DevErrEn (1<<13)
-#define MBCT_L3ArrayCorEn (1<<14)
-#define MBCT_L3ArrayUncEn (1<<15)
-#define MBCT_HtProtEn (1<<16)
-#define MBCT_HtDataEn (1<<17)
-#define MBCT_DramParEn (1<<18)
-#define MBCT_RtryHt0En (1<<19) /* Link 0 */
-#define MBCT_RtryHt1En (1<<20)
-#define MBCT_RtryHt2En (1<<21)
-#define MBCT_RtryHt3En (1<<22)
-#define MBCT_CrcErr3En (1<<23) /* Link 3*/
-#define MBCT_SyncPkt3En (1<<24) /* Link 4 */
-#define MBCT_McaUsPwDatErrEn (1<<25)
-#define MBCT_NbArrayParEn (1<<26)
-#define MBCT_TblWlkDatErrEn (1<<27)
-#define MBCT_FbDimmCorErrEn (1<<28)
-#define MBCT_FbDimmUnCorErrEn (1<<29)
-
-
-
-#define MCA_NB_CONFIG 0x44
-#define MNC_CpuRdDatErrEn (1<<1)
-#define MNC_SyncOnUcEccEn (1<<2)
-#define MNC_SynvPktGenDis (1<<3)
-#define MNC_SyncPktPropDis (1<<4)
-#define MNC_IoMstAbortDis (1<<5)
-#define MNC_CpuErrDis (1<<6)
-#define MNC_IoErrDis (1<<7)
-#define MNC_WdogTmrDis (1<<8)
-#define MNC_WdogTmrCntSel_2_0_SHIFT 9 /* bit 3 is at F3x180 */
-#define MNC_WdogTmrCntSel_2_0_MASK 0x3
-#define MNC_WdogTmrBaseSel_SHIFT 12
-#define MNC_WdogTmrBaseSel_MASK 0x3
-#define MNC_LdtLinkSel_SHIFT 14
-#define MNC_LdtLinkSel_MASK 0x3
-#define MNC_GenCrcErrByte0 (1<<16)
-#define MNC_GenCrcErrByte1 (1<<17)
-#define MNC_SubLinkSel_SHIFT 18
-#define MNC_SubLinkSel_MASK 0x3
-#define MNC_SyncOnWdogEn (1<<20)
-#define MNC_SyncOnAnyErrEn (1<<21)
-#define MNC_DramEccEn (1<<22)
-#define MNC_ChipKillEccEn (1<<23)
-#define MNC_IoRdDatErrEn (1<<24)
-#define MNC_DisPciCfgCpuErrRsp (1<<25)
-#define MNC_CorrMcaExcEn (1<<26)
-#define MNC_NbMcaToMstCpuEn (1<<27)
-#define MNC_DisTgtAbtCpuErrRsp (1<<28)
-#define MNC_DisMstAbtCpuErrRsp (1<<29)
-#define MNC_SyncOnDramAdrParErrEn (1<<30)
-#define MNC_NbMcaLogEn (1<<31)
-
-#define MCA_NB_STATUS_LOW 0x48
-#define MNSL_ErrorCode_SHIFT 0
-#define MNSL_ErrorCode_MASK 0xffff
-#define MNSL_ErrorCodeExt_SHIFT 16
-#define MNSL_ErrorCodeExt_MASK 0x1f
-#define MNSL_Syndrome_15_8_SHIFT 24
-#define MNSL_Syndrome_15_8_MASK 0xff
-
-#define MCA_NB_STATUS_HIGH 0x4c
-#define MNSH_ErrCPU_SHIFT 0
-#define MNSH_ErrCPU_MASK 0xf
-#define MNSH_LDTLink_SHIFT 4
-#define MNSH_LDTLink_MASK 0xf
-#define MNSH_ErrScrub (1<<8)
-#define MNSH_SubLink (1<<9)
-#define MNSH_McaStatusSubCache_SHIFT 10
-#define MNSH_McaStatusSubCache_MASK 0x3
-#define MNSH_Deffered (1<<12)
-#define MNSH_UnCorrECC (1<<13)
-#define MNSH_CorrECC (1<<14)
-#define MNSH_Syndrome_7_0_SHIFT 15
-#define MNSH_Syndrome_7_0_MASK 0xff
-#define MNSH_PCC (1<<25)
-#define MNSH_ErrAddrVal (1<<26)
-#define MNSH_ErrMiscVal (1<<27)
-#define MNSH_ErrEn (1<<28)
-#define MNSH_ErrUnCorr (1<<29)
-#define MNSH_ErrOver (1<<30)
-#define MNSH_ErrValid (1<<31)
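The MNSH_* definitions above are plain bit tests for single flags and _SHIFT/_MASK pairs for multi-bit fields. Below is a minimal sketch of decoding a raw F3x4C value; the helper name and the particular fields printed are illustrative assumptions, not part of this header.

static void report_mca_nb_status_high(u32 status)
{
	if (!(status & MNSH_ErrValid))	/* no error logged */
		return;
	/* single-bit flags are tested directly */
	printk(BIOS_DEBUG, "MCA NB error: uncorrected=%d overflow=%d\n",
	       !!(status & MNSH_ErrUnCorr), !!(status & MNSH_ErrOver));
	/* multi-bit fields use the _SHIFT/_MASK pairs */
	printk(BIOS_DEBUG, " core=%x link=%x syndrome[7:0]=%02x\n",
	       (status >> MNSH_ErrCPU_SHIFT) & MNSH_ErrCPU_MASK,
	       (status >> MNSH_LDTLink_SHIFT) & MNSH_LDTLink_MASK,
	       (status >> MNSH_Syndrome_7_0_SHIFT) & MNSH_Syndrome_7_0_MASK);
}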
-
-#define MCA_NB_ADDR_LOW 0x50
-#define MNAL_ErrAddr_31_1_SHIFT 1
-#define MNAL_ErrAddr_31_1_MASK 0x7fffffff
-
-#define MCA_NB_ADDR_HIGH 0x54
-#define MNAL_ErrAddr_47_32_SHIFT 0
-#define MNAL_ErrAddr_47_32_MASK 0xffff
-
-#define DRAM_SCRUB_RATE_CTRL 0x58
-#define SCRUB_NONE 0
-#define SCRUB_40ns 1
-#define SCRUB_80ns 2
-#define SCRUB_160ns 3
-#define SCRUB_320ns 4
-#define SCRUB_640ns 5
-#define SCRUB_1_28us 6
-#define SCRUB_2_56us 7
-#define SCRUB_5_12us 8
-#define SCRUB_10_2us 9
-#define SCRUB_20_5us 0xa
-#define SCRUB_41_0us 0xb
-#define SCRUB_81_9us 0xc
-#define SCRUB_163_8us 0xd
-#define SCRUB_327_7us 0xe
-#define SCRUB_655_4us 0xf
-#define SCRUB_1_31ms 0x10
-#define SCRUB_2_62ms 0x11
-#define SCRUB_5_24ms 0x12
-#define SCRUB_10_49ms 0x13
-#define SCRUB_20_97ms 0x14
-#define SCRUB_42ms 0x15
-#define SCRUB_84ms 0x16
-#define DSRC_DramScrub_SHFIT 0
-#define DSRC_DramScrub_MASK 0x1f
-#define DSRC_L2Scrub_SHIFT 8
-#define DSRC_L2Scrub_MASK 0x1f
-#define DSRC_DcacheScrub_SHIFT 16
-#define DSRC_DcacheScrub_MASK 0x1f
-#define DSRC_L3Scrub_SHIFT 24
-#define DSRC_L3Scrub_MASK 0x1f
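As a hedged illustration of how the SCRUB_* encodings and DSRC_* fields combine, this sketch programs the DRAM and L2 scrub rates in F3x58 of one node. The function name and the chosen rates are assumptions, and the misspelled DSRC_DramScrub_SHFIT identifier is used exactly as defined above.

static void set_scrub_rates(device_t f3_dev)
{
	u32 val = pci_read_config32(f3_dev, DRAM_SCRUB_RATE_CTRL);
	/* clear the DRAM and L2 scrub rate fields */
	val &= ~(DSRC_DramScrub_MASK << DSRC_DramScrub_SHFIT);
	val &= ~(DSRC_L2Scrub_MASK << DSRC_L2Scrub_SHIFT);
	/* example values: 84 ms per DRAM scrub, 10.2 us per L2 scrub */
	val |= SCRUB_84ms << DSRC_DramScrub_SHFIT;
	val |= SCRUB_10_2us << DSRC_L2Scrub_SHIFT;
	pci_write_config32(f3_dev, DRAM_SCRUB_RATE_CTRL, val);
}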
-
-#define DRAM_SCRUB_ADDR_LOW 0x5C
-#define DSAL_ScrubReDirEn (1<<0)
-#define DSAL_ScrubAddrLo_SHIFT 6
-#define DSAL_ScrubAddrLo_MASK 0x3ffffff
-
-#define DRAM_SCRUB_ADDR_HIGH 0x60
-#define DSAH_ScrubAddrHi_SHIFT 0
-#define DSAH_ScrubAddrHi_MASK 0xffff
-
-#define HW_THERMAL_CTRL 0x64
-
-#define SW_THERMAL_CTRL 0x68
-
-#define DATA_BUF_CNT 0x6c
-
-#define SRI_XBAR_CMD_BUF_CNT 0x70
-
-#define XBAR_SRI_CMD_BUF_CNT 0x74
-
-#define MCT_XBAR_CMD_BUF_CNT 0x78
-
-#define ACPI_PWR_STATE_CTRL 0x80 /* till 0x84 */
-
-#define NB_CONFIG_LOW 0x88
-#define NB_CONFIG_HIGH 0x8c
-
-#define GART_APERTURE_CTRL 0x90
-
-#define GART_APERTURE_BASE 0x94
-
-#define GART_TBL_BASE 0x98
-
-#define GART_CACHE_CTRL 0x9c
-
-#define PWR_CTRL_MISC 0xa0
-
-#define RPT_TEMP_CTRL 0xa4
-
-#define ON_LINE_SPARE_CTRL 0xb0
-
-#define SBI_P_STATE_LIMIT 0xc4
-
-#define CLK_PWR_TIMING_CTRL0 0xd4
-#define CLK_PWR_TIMING_CTRL1 0xd8
-#define CLK_PWR_TIMING_CTRL2 0xdc
-
-#define THERMTRIP_STATUS 0xE4
-
-
-#define NORTHBRIDGE_CAP 0xE8
-#define NBCAP_TwoChanDRAMcap (1 << 0)
-#define NBCAP_DualNodeMPcap (1 << 1)
-#define NBCAP_EightNodeMPcap (1 << 2)
-#define NBCAP_ECCcap (1 << 3)
-#define NBCAP_ChipkillECCcap (1 << 4)
-#define NBCAP_DdrMaxRate_SHIFT 5
-#define NBCAP_DdrMaxRate_MASK 7
-#define NBCAP_DdrMaxRate_400 7
-#define NBCAP_DdrMaxRate_533 6
-#define NBCAP_DdrMaxRate_667 5
-#define NBCAP_DdrMaxRate_800 4
-#define NBCAP_DdrMaxRate_1067 3
-#define NBCAP_DdrMaxRate_1333 2
-#define NBCAP_DdrMaxRate_1600 1
-#define NBCAP_DdrMaxRate_3_2G 6
-#define NBCAP_DdrMaxRate_4_0G 5
-#define NBCAP_DdrMaxRate_4_8G 4
-#define NBCAP_DdrMaxRate_6_4G 3
-#define NBCAP_DdrMaxRate_8_0G 2
-#define NBCAP_DdrMaxRate_9_6G 1
-#define NBCAP_Mem_ctrl_cap (1 << 8)
-#define MBCAP_SVMCap (1<<9)
-#define NBCAP_HtcCap (1<<10)
-#define NBCAP_CmpCap_SHIFT 12
-#define NBCAP_CmpCap_MASK 3
-#define NBCAP_MpCap_SHIFT 16
-#define NBCAP_MpCap_MASK 7
-#define NBCAP_MpCap_1N 7
-#define NBCAP_MpCap_2N 6
-#define NBCAP_MpCap_4N 5
-#define NBCAP_MpCap_8N 4
-#define NBCAP_MpCap_32N 0
-#define NBCAP_UnGangEn_SHIFT 20
-#define NBCAP_UnGangEn_MASK 0xf
-#define NBCAP_L3Cap (1<<25)
-#define NBCAP_HtAcCap (1<<26)
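A short sketch of reading the capability register F3xE8 with the NBCAP_* definitions; the function name is hypothetical and only two of the fields are shown.

static u32 report_nb_capabilities(device_t f3_dev)
{
	u32 nbcap = pci_read_config32(f3_dev, NORTHBRIDGE_CAP);
	if (nbcap & NBCAP_ECCcap)
		printk(BIOS_DEBUG, "ECC capable northbridge\n");
	/* returns one of the NBCAP_DdrMaxRate_* encodings */
	return (nbcap >> NBCAP_DdrMaxRate_SHIFT) & NBCAP_DdrMaxRate_MASK;
}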
-
-/* 04/04/2006 18:00 */
-
-#define EXT_NB_MCA_CTRL 0x180
-
-#define NB_EXT_CONF 0x188
-#define DOWNCORE_CTRL 0x190
-#define DWNCC_DisCore_SHIFT 0
-#define DWNCC_DisCore_MASK 0xf
-
-/* Function 5 for FBDIMM */
-#define FBD_DRAM_TIMING_LOW
-
-#define LinkConnected (1 << 0)
-#define InitComplete (1 << 1)
-#define NonCoherent (1 << 2)
-#define ConnectionPending (1 << 4)
-
-// Use the LAPIC timer count register to hold each core's init status
-// Format: byte 0 - state
-// byte 1 - fid_max
-// byte 2 - nb_cof_vid_update
-// byte 3 - apic id
-
-#define LAPIC_MSG_REG 0x380
-#define F10_APSTATE_STARTED 0x13 // start of AP execution
-#define F10_APSTATE_STOPPED 0x14 // allow AP to stop
-#define F10_APSTATE_RESET 0x01 // waiting for warm reset
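A minimal sketch of packing and unpacking the per-core status word whose byte layout is described in the comment above; the helper names are assumptions added for illustration.

static inline u32 pack_ap_status(u8 state, u8 fid_max, u8 nb_cof_vid_update, u8 apic_id)
{
	/* byte 0 = state, byte 1 = fid_max, byte 2 = nb_cof_vid_update, byte 3 = apic id */
	return (u32)state | ((u32)fid_max << 8) |
	       ((u32)nb_cof_vid_update << 16) | ((u32)apic_id << 24);
}

static inline u8 ap_status_state(u32 msg)
{
	return msg & 0xff;	/* byte 0: one of the F10_APSTATE_* values */
}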
-
-#include "amdfam10_nums.h"
-
-#ifdef __PRE_RAM__
-#if NODE_NUMS==64
- #define NODE_PCI(x, fn) ((x<32)?(PCI_DEV(CONFIG_CBB,(CONFIG_CDB+x),fn)):(PCI_DEV((CONFIG_CBB-1),(CONFIG_CDB+x-32),fn)))
-#else
- #define NODE_PCI(x, fn) PCI_DEV(CONFIG_CBB,(CONFIG_CDB+x),fn)
-#endif
-#endif
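For reference, a sketch of how NODE_PCI() is typically combined with the romstage config accessors, in the spirit of the fidvid code; the function name is an assumption and no error handling is shown.

static void dump_clk_pwr_ctrl0(void)
{
	u32 nodes = get_nodes();
	u32 i;
	for (i = 0; i < nodes; i++) {
		/* function 3 of each node's northbridge */
		device_t dev = NODE_PCI(i, 3);
		u32 dword = pci_read_config32(dev, CLK_PWR_TIMING_CTRL0);
		printk(BIOS_DEBUG, "Node %02x F3xD4: %08x\n", i, dword);
	}
}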
-
-#include "raminit.h"
-
-#if CONFIG_AMDMCT == 0
-
-//struct definitions
-
-struct dimm_size {
- u8 per_rank; // rows + col + bank_lines + data lines
- u8 rows;
- u8 col;
- u8 bank; // 1, 2, 3 mean 2, 4, 8 banks
- u8 rank;
-} __attribute__((packed));
-
-struct mem_info { // pernode
- u32 dimm_mask;
- struct dimm_size sz[DIMM_SOCKETS*2]; // for ungang support
- u32 x4_mask;
- u32 x16_mask;
- u32 single_rank_mask;
- u32 page_1k_mask;
-// u32 ecc_mask;
-// u32 registered_mask;
- u8 is_opteron;
- u8 is_registered; // mixing is not supported on the same channel or between channels
- u8 is_ecc; // mixing is not supported on the same channel or between channels
- u8 is_Width128;
- u8 memclk_set; // used to retrieve the memory parameters; all DIMMs on one node must run at the same frequency
- u8 is_cs_interleaved[2]; //cs
- u8 rsv[1];
-} __attribute__((packed));
-#else
- #if (CONFIG_DIMM_SUPPORT & 0x000F)==0x0005 /* AMD_FAM10_DDR3 */
- #include "../amdmct/mct_ddr3/mct_d.h"
- #else
- #include "../amdmct/mct/mct_d.h"
- #endif
-#endif
-
-struct link_pair_t {
- device_t udev;
- u32 upos;
- u32 uoffs;
- device_t dev;
- u32 pos;
- u32 offs;
- u8 host;
- u8 nodeid;
- u8 linkn;
- u8 rsv;
-} __attribute__((packed));
-
-struct nodes_info_t {
- u32 nodes_in_group; // could be 2, 3, 4, 5, 6, 7, 8
- u32 groups_in_plane; // could be 1, 2, 3, 4, 5
- u32 planes; // could be 1, 2
- u32 up_planes; // down planes will be [up_planes, planes)
-} __attribute__((packed));
-
-/* Be careful with the alignment of sysinfo: it may be shared by the coreboot_car and coreboot_ram stages, and coreboot_ram may later run in 64-bit mode. */
-#if CONFIG_AMDMCT == 0
-
-//#define MEM_CS_COPY 1
-#define MEM_CS_COPY NODE_NUMS
-
-#if CONFIG_MEM_TRAIN_SEQ == 0
- #define DQS_DELAY_COPY NODE_NUMS
-#else
-// #define DQS_DELAY_COPY 1
- #define DQS_DELAY_COPY NODE_NUMS
-#endif
-#endif
-
-
-struct sys_info {
- int32_t needs_reset;
-
- u8 ln[NODE_NUMS*NODE_NUMS]; // bits [0,3] link number, bits [4,7] hop count
- u16 ln_tn[NODE_NUMS*8]; // for 0x0zzz: bits [0,7] target node number, bits [8,11] response link from the target node; 0x80ff means not initialized, 0x4yyy means non-coherent, where yyy is the link pair index
- struct nodes_info_t nodes_info;
- u32 nodes;
-
- u8 host_link_freq[NODE_NUMS*8]; // records the frequency of every link from the CPU; 0x0f means the link does not need to be touched
- u16 host_link_freq_cap[NODE_NUMS*8]; //cap
-
- u32 segbit;
- u32 sbdn;
- u32 sblk;
- u32 sbbusn;
-
- u32 ht_c_num;
- u32 ht_c_conf_bus[HC_NUMS]; // 4-->32
-
- struct link_pair_t link_pair[HC_NUMS*4]; // enough? only non-coherent links; 32 chains, and every chain can have 4 HT devices
- u32 link_pair_num;
-
- struct mem_controller ctrl[NODE_NUMS];
-
-#if CONFIG_AMDMCT
-// sMCTStruct MCTData;
-// sDCTStruct *DCTNodeData[NODE_NUMS];
-// sDCTStruct DCTNodeData_a[NODE_NUMS];
- struct MCTStatStruc MCTstat;
- struct DCTStatStruc DCTstatA[NODE_NUMS];
-#else
-
- u8 ctrl_present[NODE_NUMS];
- struct mem_info meminfo[NODE_NUMS];
- u8 mem_trained[NODE_NUMS]; //0: no dimm, 1: trained, 0x80: not started, 0x81: recv1 fail, 0x82: Pos Fail, 0x83:recv2 fail
- u32 tom_m;
- u32 tom2_m;
-
- //if global space gets tight, the following may need to be squeezed into one copy
- u32 mem_base[MEM_CS_COPY][2]; // two dct
- u32 cs_base[MEM_CS_COPY][2][8]; //8 cs_idx
- u32 hole_startk; // 0 mean hole
-
- u8 dqs_delay_a[DQS_DELAY_COPY*2*4*2*9]; // 8 nodes, 2 channels, 4 DIMMs, 2 directions, 9 bytelanes
- u8 dqs_rcvr_dly_a[DQS_DELAY_COPY*2*4*9]; // 8 nodes, 2 channels, 4 DIMMs, 9 bytelanes
- u8 dqs_rcvr_dly_a_1[9]; //8 node, channel 2, dimm 4, bytelane *9
-#endif
-
-} __attribute__((packed));
-
-#ifndef __PRE_RAM__
-device_t get_node_pci(u32 nodeid, u32 fn);
-#endif
-
-#if CONFIG_AMDMCT == 0
-
-#ifdef __PRE_RAM__
-static void soft_reset(void);
-#endif
-static void wait_all_core0_mem_trained(struct sys_info *sysinfo)
-{
- int i;
- u32 mask_lo = 0;
- u32 mask_hi = 0;
- unsigned needs_reset = 0;
-
- if(sysinfo->nodes == 1) return; // in case only one cpu installed
- for(i=1; i<sysinfo->nodes; i++) {
- /* Skip everything if I don't have any memory on this controller */
- if(sysinfo->mem_trained[i]==0x00) continue;
-
- if(i<32) {
- mask_lo |= (1<<i);
- } else {
- mask_hi |= (1<<(i-32));
- }
- }
-
- i = 1;
- while(1) {
- if(i<32) {
- if(mask_lo & (1<<i)) {
- if(sysinfo->mem_trained[i] != 0x80) {
- mask_lo &= ~(1<<i);
- }
- }
- } else {
- if(mask_hi & (1<<(i-32))) {
- if(sysinfo->mem_trained[i] != 0x80) {
- mask_hi &= ~(1<<(i-32));
- }
- }
- }
-
- if((!mask_lo) && (!mask_hi)) break;
-
- i++;
- i%=sysinfo->nodes;
- }
-
- for(i=0; i<sysinfo->nodes; i++) {
-#ifdef __PRE_RAM__
- print_debug("mem_trained["); print_debug_hex8(i); print_debug("]="); print_debug_hex8(sysinfo->mem_trained[i]); print_debug("\n");
-#else
- printk(BIOS_DEBUG, "mem_trained[%02x]=%02x\n", i, sysinfo->mem_trained[i]);
-#endif
- switch(sysinfo->mem_trained[i]) {
- case 0: //don't need train
- case 1: //trained
- break;
- case 0x81: //recv1: fail
- case 0x82: //Pos :fail
- case 0x83: //recv2: fail
- needs_reset = 1;
- break;
- }
- }
- if(needs_reset) {
-#ifdef __PRE_RAM__
- print_debug("mem trained failed\n");
- soft_reset();
-#else
- printk(BIOS_DEBUG, "mem trained failed\n");
- hard_reset();
-#endif
- }
-
-}
-
-#endif
-
-#ifdef __PRE_RAM__
-void showallroutes(int level, device_t dev);
-
-void setup_resource_map_offset(const u32 *register_values, u32 max, u32
- offset_pci_dev, u32 offset_io_base);
-
-void setup_resource_map_x_offset(const u32 *register_values, u32 max, u32
- offset_pci_dev, u32 offset_io_base);
-
-void setup_resource_map_x(const u32 *register_values, u32 max);
-
-/* reset_test.c */
-u32 cpu_init_detected(u8 nodeid);
-u32 bios_reset_detected(void);
-u32 cold_reset_detected(void);
-u32 other_reset_detected(void);
-u32 get_sblk(void);
-u8 get_sbbusn(u8 sblk);
-#endif
-
-#endif /* AMDFAM10_H */
[The re-added (+) lines of amdfam10.h were collapsed during extraction; their content is identical to the removed (-) lines above, differing only in end-of-line style.]