author      Angel Pons <th3fanbus@gmail.com>            2020-01-05 20:21:20 +0100
committer   Felix Held <felix-coreboot@felixheld.de>    2020-01-10 14:59:46 +0000
commit      88521881133e62d8f3298388faa718efabc9107a (patch)
tree        b02e605a39741d1416d3ecdd80cdcaa13a774cbc /src/northbridge/intel
parent      d589be3648f3e3c9ef5a0aaac9bfe713c8fef333 (diff)
download    coreboot-88521881133e62d8f3298388faa718efabc9107a.tar.xz
nb/intel/sandybridge: Add a bunch of MCHBAR defines
While we are at it, also:
- Rename related variables to match the register names.
- Update some comments to better reflect what some registers are about.
- Add various FIXME comments on registers that seem to be used wrongly.

With BUILD_TIMELESS=1, this commit does not change the coreboot build of:
- Asus P8H61-M PRO with native raminit.
- Gigabyte GA-H61MA-D3V with native raminit.
- Lenovo Thinkpad X230 with native raminit.
- Lenovo Thinkpad X220 with MRC raminit.

Change-Id: I5e5fe56eaa90842dbbdd1bfbbcb7709237b4c486
Signed-off-by: Angel Pons <th3fanbus@gmail.com>
Reviewed-on: https://review.coreboot.org/c/coreboot/+/38036
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Reviewed-by: Felix Held <felix-coreboot@felixheld.de>
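For orientation: the hunks below replace raw MCHBAR offsets with named macros. The sandybridge.h hunk that actually adds the defines is not part of this excerpt, so the following is only an editorial sketch of what a few of them plausibly look like, with offsets taken from the raw values they replace in the hunks below (grouping and comments are assumptions, not the header's verbatim contents):

/* Plain MCHBAR registers, offsets inferred from the renames below */
#define MC_INIT_STATE_G     0x5030  /* replaces MCHBAR32(0x5030) */
#define MC_LOCK             0x50fc  /* memory controller lockdown */
#define MMIO_PAVP_CTL       0x5500  /* PAVP */
#define PAIR_CTL            0x5418  /* power-aware interrupt routing */
#define SAPMCTL             0x5f00  /* SA power management control */
#define GDCRTRAININGMOD     0x3400  /* read/write training mode */

/* Per-channel registers, parameterized by channel number */
#define IOSAV_SEQ_CTL_ch(ch)   (0x4284 + 0x400 * (ch))
#define IOSAV_STATUS_ch(ch)    (0x428c + 0x400 * (ch))
#define IOSAV_DATA_CTL_ch(ch)  (0x4288 + 0x400 * (ch))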
Diffstat (limited to 'src/northbridge/intel')
-rw-r--r--   src/northbridge/intel/sandybridge/early_init.c       |   20
-rw-r--r--   src/northbridge/intel/sandybridge/finalize.c         |   10
-rw-r--r--   src/northbridge/intel/sandybridge/northbridge.c      |   14
-rw-r--r--   src/northbridge/intel/sandybridge/raminit.c          |    8
-rw-r--r--   src/northbridge/intel/sandybridge/raminit_common.c   | 1369
-rw-r--r--   src/northbridge/intel/sandybridge/raminit_common.h   |   55
-rw-r--r--   src/northbridge/intel/sandybridge/raminit_ivy.c      |   54
-rw-r--r--   src/northbridge/intel/sandybridge/raminit_mrc.c      |    2
-rw-r--r--   src/northbridge/intel/sandybridge/raminit_sandy.c    |   44
-rw-r--r--   src/northbridge/intel/sandybridge/sandybridge.h      |  221
10 files changed, 1065 insertions(+), 732 deletions(-)
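The largest hunk below, in raminit_common.c, documents the IOSAV command queue registers in detail. As a rough usage illustration (an editorial sketch, not part of the commit): a single-command sequence programs one slot of the queue and then runs it once, as write_reset() further down does for a ZQCS command. The macro names come from the new defines and the values are copied from that function:

	/* Sketch: program IOSAV slot 0 with a ZQCS command and run the queue once */
	MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 0))  = 0x0f003;                    /* command signals (ZQCS) */
	MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 0))  = 0x80c01;                    /* repetitions, gap, idle */
	MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 0)) = (slotrank << 24) | 0x60000; /* rank select + address */
	MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 0))    = 0;                          /* no address updates */

	MCHBAR32(IOSAV_SEQ_CTL_ch(channel)) = IOSAV_RUN_ONCE(1);                   /* depth 1, start now */
	wait_for_iosav(channel);                                                    /* poll IOSAV_STATUS_ch */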
diff --git a/src/northbridge/intel/sandybridge/early_init.c b/src/northbridge/intel/sandybridge/early_init.c
index 74ae4f5e08..e966095c84 100644
--- a/src/northbridge/intel/sandybridge/early_init.c
+++ b/src/northbridge/intel/sandybridge/early_init.c
@@ -34,10 +34,10 @@ static void systemagent_vtd_init(void)
return;
/* setup BARs */
- MCHBAR32(0x5404) = IOMMU_BASE1 >> 32;
- MCHBAR32(0x5400) = IOMMU_BASE1 | 1;
- MCHBAR32(0x5414) = IOMMU_BASE2 >> 32;
- MCHBAR32(0x5410) = IOMMU_BASE2 | 1;
+ MCHBAR32(VTD1_BASE + 4) = IOMMU_BASE1 >> 32;
+ MCHBAR32(VTD1_BASE) = IOMMU_BASE1 | 1;
+ MCHBAR32(VTD2_BASE + 4) = IOMMU_BASE2 >> 32;
+ MCHBAR32(VTD2_BASE) = IOMMU_BASE2 | 1;
/* lock policies */
write32((void *)(IOMMU_BASE1 + 0xff0), 0x80000000);
@@ -126,13 +126,13 @@ static void sandybridge_setup_graphics(void)
pci_write_config8(PCI_DEV(0, 2, 0), MSAC, reg8);
/* Erratum workarounds */
- reg32 = MCHBAR32(0x5f00);
+ reg32 = MCHBAR32(SAPMCTL);
reg32 |= (1 << 9)|(1 << 10);
- MCHBAR32(0x5f00) = reg32;
+ MCHBAR32(SAPMCTL) = reg32;
/* Enable SA Clock Gating */
- reg32 = MCHBAR32(0x5f00);
- MCHBAR32(0x5f00) = reg32 | 1;
+ reg32 = MCHBAR32(SAPMCTL);
+ MCHBAR32(SAPMCTL) = reg32 | 1;
/* GPU RC6 workaround for sighting 366252 */
reg32 = MCHBAR32(0x5d14);
@@ -144,9 +144,9 @@ static void sandybridge_setup_graphics(void)
reg32 &= ~(1 << 0);
MCHBAR32(0x6120) = reg32;
- reg32 = MCHBAR32(0x5418);
+ reg32 = MCHBAR32(PAIR_CTL);
reg32 |= (1 << 4) | (1 << 5);
- MCHBAR32(0x5418) = reg32;
+ MCHBAR32(PAIR_CTL) = reg32;
}
static void start_peg_link_training(void)
diff --git a/src/northbridge/intel/sandybridge/finalize.c b/src/northbridge/intel/sandybridge/finalize.c
index e07c6c2d5a..e3383724cd 100644
--- a/src/northbridge/intel/sandybridge/finalize.c
+++ b/src/northbridge/intel/sandybridge/finalize.c
@@ -34,16 +34,16 @@ void intel_sandybridge_finalize_smm(void)
pci_or_config32(PCI_DEV_SNB, TSEGMB, 1 << 0);
pci_or_config32(PCI_DEV_SNB, TOLUD, 1 << 0);
- MCHBAR32_OR(0x5500, 1 << 0); /* PAVP */
- MCHBAR32_OR(0x5f00, 1 << 31); /* SA PM */
- MCHBAR32_OR(0x6020, 1 << 0); /* UMA GFX */
- MCHBAR32_OR(0x63fc, 1 << 0); /* VTDTRK */
+ MCHBAR32_OR(MMIO_PAVP_CTL, 1 << 0); /* PAVP */
+ MCHBAR32_OR(SAPMCTL, 1 << 31); /* SA PM */
+ MCHBAR32_OR(0x6020, 1 << 0); /* UMA GFX */
+ MCHBAR32_OR(0x63fc, 1 << 0); /* VTDTRK */
MCHBAR32_OR(0x6800, 1 << 31);
MCHBAR32_OR(0x7000, 1 << 31);
MCHBAR32_OR(0x77fc, 1 << 0);
/* Memory Controller Lockdown */
- MCHBAR8(0x50fc) = 0x8f;
+ MCHBAR8(MC_LOCK) = 0x8f;
/* Read+write the following */
MCHBAR32(0x6030) = MCHBAR32(0x6030);
diff --git a/src/northbridge/intel/sandybridge/northbridge.c b/src/northbridge/intel/sandybridge/northbridge.c
index 5ff123e63b..a9b1c251d0 100644
--- a/src/northbridge/intel/sandybridge/northbridge.c
+++ b/src/northbridge/intel/sandybridge/northbridge.c
@@ -391,15 +391,15 @@ static void northbridge_init(struct device *dev)
northbridge_dmi_init(dev);
- bridge_type = MCHBAR32(0x5f10);
+ bridge_type = MCHBAR32(SAPMTIMERS);
bridge_type &= ~0xff;
if ((bridge_silicon_revision() & BASE_REV_MASK) == BASE_REV_IVB) {
/* Enable Power Aware Interrupt Routing */
- u8 pair = MCHBAR8(0x5418);
+ u8 pair = MCHBAR8(PAIR_CTL);
pair &= ~0xf; /* Clear 3:0 */
pair |= 0x4; /* Fixed Priority */
- MCHBAR8(0x5418) = pair;
+ MCHBAR8(PAIR_CTL) = pair;
/* 30h for IvyBridge */
bridge_type |= 0x30;
@@ -407,7 +407,7 @@ static void northbridge_init(struct device *dev)
/* 20h for Sandybridge */
bridge_type |= 0x20;
}
- MCHBAR32(0x5f10) = bridge_type;
+ MCHBAR32(SAPMTIMERS) = bridge_type;
/* Turn off unused devices. Has to be done before
* setting BIOS_RESET_CPL.
@@ -433,12 +433,12 @@ static void northbridge_init(struct device *dev)
*/
if (cpu_config_tdp_levels()) {
msr_t msr = rdmsr(MSR_PKG_POWER_LIMIT);
- MCHBAR32(0x59A0) = msr.lo;
- MCHBAR32(0x59A4) = msr.hi;
+ MCHBAR32(MC_TURBO_PL1) = msr.lo;
+ MCHBAR32(MC_TURBO_PL2) = msr.hi;
}
/* Set here before graphics PM init */
- MCHBAR32(0x5500) = 0x00100001;
+ MCHBAR32(MMIO_PAVP_CTL) = 0x00100001;
}
void northbridge_write_smram(u8 smram)
diff --git a/src/northbridge/intel/sandybridge/raminit.c b/src/northbridge/intel/sandybridge/raminit.c
index de6a542745..4ec8492b18 100644
--- a/src/northbridge/intel/sandybridge/raminit.c
+++ b/src/northbridge/intel/sandybridge/raminit.c
@@ -293,7 +293,7 @@ static void init_dram_ddr3(int min_tck, int s3resume)
int err;
u32 cpu;
- MCHBAR32(0x5f00) |= 1;
+ MCHBAR32(SAPMCTL) |= 1;
/* Wait for ME to be ready */
intel_early_me_init();
@@ -404,12 +404,12 @@ static void init_dram_ddr3(int min_tck, int s3resume)
if (err)
die("raminit failed");
- /* FIXME: should be hardware revision-dependent. */
- MCHBAR32(0x5024) = 0x00a030ce;
+ /* FIXME: should be hardware revision-dependent. The register only exists on IVB. */
+ MCHBAR32(CHANNEL_HASH) = 0x00a030ce;
set_scrambling_seed(&ctrl);
- set_42a0(&ctrl);
+ set_normal_operation(&ctrl);
final_registers(&ctrl);
diff --git a/src/northbridge/intel/sandybridge/raminit_common.c b/src/northbridge/intel/sandybridge/raminit_common.c
index da12718b10..43e9e91f12 100644
--- a/src/northbridge/intel/sandybridge/raminit_common.c
+++ b/src/northbridge/intel/sandybridge/raminit_common.c
@@ -33,38 +33,162 @@
/* FIXME: no support for 3-channel chipsets. */
/*
- * Register description:
+ * ### IOSAV command queue notes ###
+ *
* Intel provides a command queue of depth four.
- * Every command is configured by using multiple registers.
- * On executing the command queue you have to provide the depth used.
+ * Every command is configured by using multiple MCHBAR registers.
+ * On executing the command queue, you have to specify its depth (number of commands).
+ *
+ * The macros for these registers can take some integer parameters, within these bounds:
+ * channel: [0..1]
+ * index: [0..3]
+ * lane: [0..8]
+ *
+ * Note that these ranges are 'closed': both endpoints are included.
+ *
+ *
+ *
+ * ### Register description ###
+ *
+ * IOSAV_n_SP_CMD_ADDR_ch(channel, index)
+ * Sub-sequence command addresses. Controls the address, bank address and slotrank signals.
+ *
+ * Bitfields:
+ * [0..15] Row / Column Address.
+ * [16..18] The result of (10 + [16..18]) is the number of valid row bits.
+ * Note: Value 1 is not implemented. Not that it really matters, though.
+ * Value 7 is reserved, as the hardware does not support it.
+ * [20..22] Bank Address.
+ * [24..25] Rank select. Let's call it "ranksel", as it is mentioned later.
+ *
+ * IOSAV_n_ADDR_UPD_ch(channel, index)
+ * How the address shall be updated after executing the sub-sequence command.
+ *
+ * Bitfields:
+ * [0] Increment CAS/RAS by 1.
+ * [1] Increment CAS/RAS by 8.
+ * [2] Increment bank select by 1.
+ * [3..4] Increment rank select by 1, 2 or 3.
+ * [5..9] Known as "addr_wrap". Address bits will wrap around the [addr_wrap..0] range.
+ * [10..11] LFSR update:
+ * 00: Do not use the LFSR function.
+ * 01: Undefined, treat as Reserved.
+ * 10: Apply LFSR on the [addr_wrap..0] bit range.
+ * 11: Apply LFSR on the [addr_wrap..3] bit range.
+ *
+ * [12..15] Update rate. The number of command runs between address updates. For example:
+ * 0: Update every command run.
+ * 1: Update every second command run. That is, half of the command rate.
+ * N: Update after N command runs without updates.
+ *
+ * [16..17] LFSR behavior on the deselect cycles (when no sub-seq command is issued):
+ * 0: No change w.r.t. the last issued command.
+ * 1: LFSR XORs with address & command (excluding CS), but does not update.
+ * 2: LFSR XORs with address & command (excluding CS), and updates.
+ *
+ * IOSAV_n_SP_CMD_CTL_ch(channel, index)
+ * Special command control register. Controls the DRAM command signals.
+ *
+ * Bitfields:
+ * [0] !RAS signal.
+ * [1] !CAS signal.
+ * [2] !WE signal.
+ * [4..7] CKE, per rank and channel.
+ * [8..11] ODT, per rank and channel.
+ * [12] Chip Select mode control.
+ * [13..16] Chip select, per rank and channel. It works as follows:
+ *
+ * entity CS_BLOCK is
+ * port (
+ * MODE : in std_logic; -- Mode select at [12]
+ * RANKSEL : in std_logic_vector(0 to 3); -- Decoded "ranksel" value
+ * CS_CTL : in std_logic_vector(0 to 3); -- Chip select control at [13..16]
+ * CS_Q : out std_logic_vector(0 to 3) -- CS signals
+ * );
+ * end entity CS_BLOCK;
+ *
+ * architecture RTL of CS_BLOCK is
+ * begin
+ * if MODE = '1' then
+ * CS_Q <= not RANKSEL and CS_CTL;
+ * else
+ * CS_Q <= CS_CTL;
+ * end if;
+ * end architecture RTL;
+ *
+ * [17] Auto Precharge. Only valid when using 10 row bits!
+ *
+ * IOSAV_n_SUBSEQ_CTL_ch(channel, index)
+ * Sub-sequence parameters. Controls repetitions, delays and data orientation.
+ *
+ * Bitfields:
+ * [0..8] Number of repetitions of the sub-sequence command.
+ * [10..14] Gap, number of clock-cycles to wait before sending the next command.
+ * [16..24] Number of clock-cycles to idle between sub-sequence commands.
+ * [26..27] The direction of the data.
+ * 00: None, does not handle data
+ * 01: Read
+ * 10: Write
+ * 11: Read & Write
+ *
+ * IOSAV_n_ADDRESS_LFSR_ch(channel, index)
+ * 23-bit LFSR state register. It is written into the LFSR when the sub-sequence is loaded,
+ * and then read back from the LFSR when the sub-sequence is done.
+ *
+ * Bitfields:
+ * [0..22] LFSR state.
+ *
+ * IOSAV_SEQ_CTL_ch(channel)
+ * Control the sequence level in IOSAV: number of sub-sequences, iterations, maintenance...
+ *
+ * Bitfields:
+ * [0..7] Number of full sequence executions. When this field becomes non-zero, then the
+ * sequence starts running immediately. This value is decremented after completing
+ * a full sequence iteration. When it is zero, the sequence is done. No decrement
+ * is done if this field is set to 0xff. This is the "infinite repeat" mode, and
+ * it is manually aborted by clearing this field.
+ *
+ * [8..16] Number of wait cycles after each sequence iteration. This wait's purpose is to
+ * allow performing maintenance in infinite loops. When non-zero, RCOMP, refresh
+ * and ZQXS operations can take place.
+ *
+ * [17] Stop-on-error mode: Whether to stop sequence execution when an error occurs.
+ * [18..19] Number of sub-sequences. The programmed value is the index of the last sub-seq.
+ * [20] If set, keep refresh disabled until the next sequence execution.
+ * DANGER: Refresh must be re-enabled within the (9 * tREFI) period!
+ *
+ * [22] If set, sequence execution will not prevent refresh. This cannot be set when
+ * bit [20] is also set, or was set on the previous sequence. This bit exists so
+ * that the sequence machine can be used as a timer without affecting the memory.
+ *
+ * [23] If set, an output pin is asserted on the first detected error. This output can
+ * be used as a trigger for an oscilloscope or a logic analyzer, which is handy.
*
- * Known registers:
- * Channel X = [0, 1]
- * Command queue index Y = [0, 1, 2, 3]
+ * IOSAV_DATA_CTL_ch(channel)
+ * Data-related controls in IOSAV mode.
*
- * DEFAULT_MCHBAR + 0x4220 + 0x400 * X + 4 * Y: command io register
- * Controls the DRAM command signals
- * Bit 0: !RAS
- * Bit 1: !CAS
- * Bit 2: !WE
+ * Bitfields:
+ * [0..7] WDB (Write Data Buffer) pattern length: [0..7] = (length / 8) - 1;
+ * [8..15] WDB read pointer. Points at the data used for IOSAV write transactions.
+ * [16..23] Comparison pointer. Used to compare data from IOSAV read transactions.
+ * [24] If set, increment pointers only when micro-breakpoint is active.
*
- * DEFAULT_MCHBAR + 0x4200 + 0x400 * X + 4 * Y: addr bankslot io register
- * Controls the address, bank address and slotrank signals
- * Bit 0-15 : Address
- * Bit 20-22: Bank Address
- * Bit 24-25: slotrank
+ * IOSAV_STATUS_ch(channel)
+ * State of the IOSAV sequence machine. Should be polled after sending an IOSAV sequence.
*
- * DEFAULT_MCHBAR + 0x4230 + 0x400 * X + 4 * Y: idle register
- * Controls the idle time after issuing this DRAM command
- * Bit 16-32: number of clock-cycles to idle
+ * Bitfields:
+ * [0] IDLE: IOSAV is sleeping.
+ * [1] BUSY: IOSAV is running a sequence.
+ * [2] DONE: IOSAV has completed a sequence.
+ * [3] ERROR: IOSAV detected an error and stopped on it, when using Stop-on-error.
+ * [4] PANIC: The refresh machine issued a Panic Refresh, and IOSAV was aborted.
+ * [5] RCOMP: RComp failure. Unused, consider Reserved.
+ * [6] Cleared with a new sequence, and set when done and refresh counter is drained.
*
- * DEFAULT_MCHBAR + 0x4284 + channel * 0x400: execute command queue
- * Starts to execute all queued commands
- * Bit 0 : start DRAM command execution
- * Bit 18-19 : number of queued commands - 1
*/
-#define RUN_QUEUE_4284(x) ((((x) - 1) << 18) | 1) // 0 <= x < 4
+/* length: [1..4] */
+#define IOSAV_RUN_ONCE(length) ((((length) - 1) << 18) | 1)
static void sfence(void)
{
@@ -73,10 +197,10 @@ static void sfence(void)
static void toggle_io_reset(void) {
/* toggle IO reset bit */
- u32 r32 = MCHBAR32(0x5030);
- MCHBAR32(0x5030) = r32 | 0x20;
+ u32 r32 = MCHBAR32(MC_INIT_STATE_G);
+ MCHBAR32(MC_INIT_STATE_G) = r32 | 0x20;
udelay(1);
- MCHBAR32(0x5030) = r32 & ~0x20;
+ MCHBAR32(MC_INIT_STATE_G) = r32 & ~0x20;
udelay(1);
}
@@ -171,7 +295,7 @@ void dram_find_common_params(ramctr_timing *ctrl)
die("No valid DIMMs found");
}
-void dram_xover(ramctr_timing * ctrl)
+void dram_xover(ramctr_timing *ctrl)
{
u32 reg;
int channel;
@@ -179,15 +303,13 @@ void dram_xover(ramctr_timing * ctrl)
FOR_ALL_CHANNELS {
// enable xover clk
reg = get_XOVER_CLK(ctrl->rankmap[channel]);
- printram("XOVER CLK [%x] = %x\n", 0xc14 + channel * 0x100,
- reg);
- MCHBAR32(0xc14 + channel * 0x100) = reg;
+ printram("XOVER CLK [%x] = %x\n", GDCRCKPICODE_ch(channel), reg);
+ MCHBAR32(GDCRCKPICODE_ch(channel)) = reg;
// enable xover ctl & xover cmd
reg = get_XOVER_CMD(ctrl->rankmap[channel]);
- printram("XOVER CMD [%x] = %x\n", 0x320c + channel * 0x100,
- reg);
- MCHBAR32(0x320c + channel * 0x100) = reg;
+ printram("XOVER CMD [%x] = %x\n", GDCRCMDPICODING_ch(channel), reg);
+ MCHBAR32(GDCRCMDPICODING_ch(channel)) = reg;
}
}
@@ -202,14 +324,14 @@ static void dram_odt_stretch(ramctr_timing *ctrl, int channel)
if (IS_SANDY_CPU(cpu) && IS_SANDY_CPU_C(cpu)) {
if (stretch == 2)
stretch = 3;
- addr = 0x401c + channel * 0x400;
+ addr = SCHED_SECOND_CBIT_ch(channel);
MCHBAR32_AND_OR(addr, 0xffffc3ff,
(stretch << 12) | (stretch << 10));
printk(RAM_DEBUG, "OTHP Workaround [%x] = %x\n", addr,
MCHBAR32(addr));
} else {
// OTHP
- addr = 0x400c + channel * 0x400;
+ addr = TC_OTHP_ch(channel);
MCHBAR32_AND_OR(addr, 0xfff0ffff,
(stretch << 16) | (stretch << 18));
printk(RAM_DEBUG, "OTHP [%x] = %x\n", addr, MCHBAR32(addr));
@@ -229,8 +351,8 @@ void dram_timing_regs(ramctr_timing *ctrl)
reg |= (ctrl->CAS << 8);
reg |= (ctrl->CWL << 12);
reg |= (ctrl->tRAS << 16);
- printram("DBP [%x] = %x\n", TC_DBP_C0 + channel * 0x400, reg);
- MCHBAR32(TC_DBP_C0 + channel * 0x400) = reg;
+ printram("DBP [%x] = %x\n", TC_DBP_ch(channel), reg);
+ MCHBAR32(TC_DBP_ch(channel)) = reg;
// RAP
reg = 0;
@@ -241,11 +363,11 @@ void dram_timing_regs(ramctr_timing *ctrl)
reg |= (ctrl->tFAW << 16);
reg |= (ctrl->tWR << 24);
reg |= (3 << 30);
- printram("RAP [%x] = %x\n", TC_RAP_C0 + channel * 0x400, reg);
- MCHBAR32(TC_RAP_C0 + channel * 0x400) = reg;
+ printram("RAP [%x] = %x\n", TC_RAP_ch(channel), reg);
+ MCHBAR32(TC_RAP_ch(channel)) = reg;
// OTHP
- addr = 0x400c + channel * 0x400;
+ addr = TC_OTHP_ch(channel);
reg = 0;
reg |= ctrl->tXPDLL;
reg |= (ctrl->tXP << 5);
@@ -271,10 +393,10 @@ void dram_timing_regs(ramctr_timing *ctrl)
reg = ((ctrl->tREFI & 0xffff) << 0) |
((ctrl->tRFC & 0x1ff) << 16) |
(((val32 / 1024) & 0x7f) << 25);
- printram("REFI [%x] = %x\n", TC_RFTP_C0 + channel * 0x400, reg);
- MCHBAR32(TC_RFTP_C0 + channel * 0x400) = reg;
+ printram("REFI [%x] = %x\n", TC_RFTP_ch(channel), reg);
+ MCHBAR32(TC_RFTP_ch(channel)) = reg;
- MCHBAR32_OR(TC_RFP_C0 + channel * 0x400, 0xff);
+ MCHBAR32_OR(TC_RFP_ch(channel), 0xff);
// SRFTP
reg = 0;
@@ -286,9 +408,9 @@ void dram_timing_regs(ramctr_timing *ctrl)
reg = (reg & ~0x3ff0000) | (val32 << 16);
val32 = ctrl->tMOD - 8;
reg = (reg & ~0xf0000000) | (val32 << 28);
- printram("SRFTP [%x] = %x\n", 0x42a4 + channel * 0x400,
+ printram("SRFTP [%x] = %x\n", TC_SRFTP_ch(channel),
reg);
- MCHBAR32(0x42a4 + channel * 0x400) = reg;
+ MCHBAR32(TC_SRFTP_ch(channel)) = reg;
}
}
@@ -336,7 +458,7 @@ void dram_dimm_mapping(ramctr_timing *ctrl)
}
}
-void dram_dimm_set_mapping(ramctr_timing * ctrl)
+void dram_dimm_set_mapping(ramctr_timing *ctrl)
{
int channel;
FOR_ALL_CHANNELS {
@@ -344,7 +466,7 @@ void dram_dimm_set_mapping(ramctr_timing * ctrl)
}
}
-void dram_zones(ramctr_timing * ctrl, int training)
+void dram_zones(ramctr_timing *ctrl, int training)
{
u32 reg, ch0size, ch1size;
u8 val;
@@ -359,18 +481,18 @@ void dram_zones(ramctr_timing * ctrl, int training)
}
if (ch0size >= ch1size) {
- reg = MCHBAR32(0x5014);
+ reg = MCHBAR32(MAD_ZR);
val = ch1size / 256;
reg = (reg & ~0xff000000) | val << 24;
reg = (reg & ~0xff0000) | (2 * val) << 16;
- MCHBAR32(0x5014) = reg;
+ MCHBAR32(MAD_ZR) = reg;
MCHBAR32(MAD_CHNL) = 0x24;
} else {
- reg = MCHBAR32(0x5014);
+ reg = MCHBAR32(MAD_ZR);
val = ch0size / 256;
reg = (reg & ~0xff000000) | val << 24;
reg = (reg & ~0xff0000) | (2 * val) << 16;
- MCHBAR32(0x5014) = reg;
+ MCHBAR32(MAD_ZR) = reg;
MCHBAR32(MAD_CHNL) = 0x21;
}
}
@@ -462,7 +584,7 @@ static unsigned int get_mmio_size(void)
return cfg->pci_mmio_size;
}
-void dram_memorymap(ramctr_timing * ctrl, int me_uma_size)
+void dram_memorymap(ramctr_timing *ctrl, int me_uma_size)
{
u32 reg, val, reclaim;
u32 tom, gfxstolen, gttsize;
@@ -615,69 +737,69 @@ void dram_memorymap(ramctr_timing * ctrl, int me_uma_size)
}
}
-static void wait_428c(int channel)
+static void wait_for_iosav(int channel)
{
while (1) {
- if (MCHBAR32(0x428c + channel * 0x400) & 0x50)
+ if (MCHBAR32(IOSAV_STATUS_ch(channel)) & 0x50)
return;
}
}
-static void write_reset(ramctr_timing * ctrl)
+static void write_reset(ramctr_timing *ctrl)
{
int channel, slotrank;
/* choose a populated channel. */
channel = (ctrl->rankmap[0]) ? 0 : 1;
- wait_428c(channel);
+ wait_for_iosav(channel);
/* choose a populated rank. */
slotrank = (ctrl->rankmap[channel] & 1) ? 0 : 2;
/* DRAM command ZQCS */
- MCHBAR32(0x4220 + channel * 0x400) = 0x0f003;
- MCHBAR32(0x4230 + channel * 0x400) = 0x80c01;
- MCHBAR32(0x4200 + channel * 0x400) = (slotrank << 24) | 0x60000;
- MCHBAR32(0x4210 + channel * 0x400) = 0;
+ MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 0)) = 0x0f003;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 0)) = 0x80c01;
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 0)) = (slotrank << 24) | 0x60000;
+ MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 0)) = 0;
// execute command queue - why is bit 22 set here?!
- MCHBAR32(0x4284 + channel * 0x400) = (1 << 22) | RUN_QUEUE_4284(1);
+ MCHBAR32(IOSAV_SEQ_CTL_ch(channel)) = (1 << 22) | IOSAV_RUN_ONCE(1);
- wait_428c(channel);
+ wait_for_iosav(channel);
}
-void dram_jedecreset(ramctr_timing * ctrl)
+void dram_jedecreset(ramctr_timing *ctrl)
{
u32 reg;
int channel;
- while (!(MCHBAR32(0x5084) & 0x10000));
+ while (!(MCHBAR32(RCOMP_TIMER) & 0x10000));
do {
- reg = MCHBAR32(0x428c);
+ reg = MCHBAR32(IOSAV_STATUS_ch(0));
} while ((reg & 0x14) == 0);
// Set state of memory controller
reg = 0x112;
- MCHBAR32(0x5030) = reg;
- MCHBAR32(0x4ea0) = 0;
+ MCHBAR32(MC_INIT_STATE_G) = reg;
+ MCHBAR32(MC_INIT_STATE) = 0;
reg |= 2; //ddr reset
- MCHBAR32(0x5030) = reg;
+ MCHBAR32(MC_INIT_STATE_G) = reg;
// Assert dimm reset signal
- MCHBAR32_AND(0x5030, ~0x2);
+ MCHBAR32_AND(MC_INIT_STATE_G, ~0x2);
// Wait 200us
udelay(200);
// Deassert dimm reset signal
- MCHBAR32_OR(0x5030, 2);
+ MCHBAR32_OR(MC_INIT_STATE_G, 2);
// Wait 500us
udelay(500);
// Enable DCLK
- MCHBAR32_OR(0x5030, 4);
+ MCHBAR32_OR(MC_INIT_STATE_G, 4);
// XXX Wait 20ns
udelay(1);
@@ -685,13 +807,13 @@ void dram_jedecreset(ramctr_timing * ctrl)
FOR_ALL_CHANNELS {
// Set valid rank CKE
reg = ctrl->rankmap[channel];
- MCHBAR32(0x42a0 + channel * 0x400) = reg;
+ MCHBAR32(MC_INIT_STATE_ch(channel)) = reg;
// Wait 10ns for ranks to settle
//udelay(0.01);
reg = (reg & ~0xf0) | (ctrl->rankmap[channel] << 4);
- MCHBAR32(0x42a0 + channel * 0x400) = reg;
+ MCHBAR32(MC_INIT_STATE_ch(channel)) = reg;
// Write reset using a NOP
write_reset(ctrl);
@@ -714,7 +836,7 @@ static odtmap get_ODT(ramctr_timing *ctrl, u8 rank, int channel)
static void write_mrreg(ramctr_timing *ctrl, int channel, int slotrank,
int reg, u32 val)
{
- wait_428c(channel);
+ wait_for_iosav(channel);
if (ctrl->rank_mirror[channel][slotrank]) {
/* DDR3 Rank1 Address mirror
@@ -726,31 +848,31 @@ static void write_mrreg(ramctr_timing *ctrl, int channel, int slotrank,
}
/* DRAM command MRS */
- MCHBAR32(0x4220 + channel * 0x400) = 0x0f000;
- MCHBAR32(0x4230 + channel * 0x400) = 0x41001;
- MCHBAR32(0x4200 + channel * 0x400) =
+ MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 0)) = 0x0f000;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 0)) = 0x41001;
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 0)) =
(slotrank << 24) | (reg << 20) | val | 0x60000;
- MCHBAR32(0x4210 + channel * 0x400) = 0;
+ MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 0)) = 0;
/* DRAM command MRS */
- MCHBAR32(0x4224 + channel * 0x400) = 0x1f000;
- MCHBAR32(0x4234 + channel * 0x400) = 0x41001;
- MCHBAR32(0x4204 + channel * 0x400) =
+ MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 1)) = 0x1f000;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 1)) = 0x41001;
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 1)) =
(slotrank << 24) | (reg << 20) | val | 0x60000;
- MCHBAR32(0x4214 + channel * 0x400) = 0;
+ MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 1)) = 0;
/* DRAM command MRS */
- MCHBAR32(0x4228 + channel * 0x400) = 0x0f000;
- MCHBAR32(0x4238 + channel * 0x400) = 0x1001 | (ctrl->tMOD << 16);
- MCHBAR32(0x4208 + channel * 0x400) =
+ MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 2)) = 0x0f000;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 2)) = 0x1001 | (ctrl->tMOD << 16);
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 2)) =
(slotrank << 24) | (reg << 20) | val | 0x60000;
- MCHBAR32(0x4218 + channel * 0x400) = 0;
+ MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 2)) = 0;
// execute command queue
- MCHBAR32(0x4284 + channel * 0x400) = RUN_QUEUE_4284(3);
+ MCHBAR32(IOSAV_SEQ_CTL_ch(channel)) = IOSAV_RUN_ONCE(3);
}
-static u32 make_mr0(ramctr_timing * ctrl, u8 rank)
+static u32 make_mr0(ramctr_timing *ctrl, u8 rank)
{
u16 mr0reg, mch_cas, mch_wr;
static const u8 mch_wr_t[12] = { 1, 2, 3, 4, 0, 5, 0, 6, 0, 7, 0, 0 };
@@ -848,7 +970,7 @@ static void dram_mr3(ramctr_timing *ctrl, u8 rank, int channel)
write_mrreg(ctrl, channel, rank, 3, 0);
}
-void dram_mrscommands(ramctr_timing * ctrl)
+void dram_mrscommands(ramctr_timing *ctrl)
{
u8 slotrank;
int channel;
@@ -870,51 +992,51 @@ void dram_mrscommands(ramctr_timing * ctrl)
}
/* DRAM command NOP */
- MCHBAR32(0x4e20) = 0x7;
- MCHBAR32(0x4e30) = 0xf1001;
- MCHBAR32(0x4e00) = 0x60002;
- MCHBAR32(0x4e10) = 0;
+ MCHBAR32(IOSAV_n_SP_CMD_CTL(0)) = 0x7;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL(0)) = 0xf1001;
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR(0)) = 0x60002;
+ MCHBAR32(IOSAV_n_ADDR_UPD(0)) = 0;
/* DRAM command ZQCL */
- MCHBAR32(0x4e24) = 0x1f003;
- MCHBAR32(0x4e34) = 0x1901001;
- MCHBAR32(0x4e04) = 0x60400;
- MCHBAR32(0x4e14) = 0x288;
+ MCHBAR32(IOSAV_n_SP_CMD_CTL(1)) = 0x1f003;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL(1)) = 0x1901001;
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR(1)) = 0x60400;
+ MCHBAR32(IOSAV_n_ADDR_UPD(1)) = 0x288;
// execute command queue on all channels? Why isn't bit 0 set here?
- MCHBAR32(0x4e84) = 0x40004;
+ MCHBAR32(IOSAV_SEQ_CTL) = 0x40004;
// Drain
FOR_ALL_CHANNELS {
// Wait for ref drained
- wait_428c(channel);
+ wait_for_iosav(channel);
}
// Refresh enable
- MCHBAR32_OR(0x5030, 8);
+ MCHBAR32_OR(MC_INIT_STATE_G, 8);
FOR_ALL_POPULATED_CHANNELS {
- MCHBAR32_AND(0x4020 + channel * 0x400, ~0x200000);
+ MCHBAR32_AND(SCHED_CBIT_ch(channel), ~0x200000);
- wait_428c(channel);
+ wait_for_iosav(channel);
slotrank = (ctrl->rankmap[channel] & 1) ? 0 : 2;
// Drain
- wait_428c(channel);
+ wait_for_iosav(channel);
/* DRAM command ZQCS */
- MCHBAR32(0x4220 + channel * 0x400) = 0x0f003;
- MCHBAR32(0x4230 + channel * 0x400) = 0x659001;
- MCHBAR32(0x4200 + channel * 0x400) =
+ MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 0)) = 0x0f003;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 0)) = 0x659001;
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 0)) =
(slotrank << 24) | 0x60000;
- MCHBAR32(0x4210 + channel * 0x400) = 0x3e0;
+ MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 0)) = 0x3e0;
// execute command queue
- MCHBAR32(0x4284 + channel * 0x400) = RUN_QUEUE_4284(1);
+ MCHBAR32(IOSAV_SEQ_CTL_ch(channel)) = IOSAV_RUN_ONCE(1);
// Drain
- wait_428c(channel);
+ wait_for_iosav(channel);
}
}
@@ -924,81 +1046,81 @@ static const u32 lane_registers[] = {
0x0800
};
-void program_timings(ramctr_timing * ctrl, int channel)
+void program_timings(ramctr_timing *ctrl, int channel)
{
- u32 reg32, reg_4024, reg_c14, reg_c18, reg_io_latency;
+ u32 reg32, reg_roundtrip_latency, reg_pi_code, reg_logic_delay, reg_io_latency;
int lane;
int slotrank, slot;
int full_shift = 0;
- u16 slot320c[NUM_SLOTS];
+ u16 pi_coding_ctrl[NUM_SLOTS];
FOR_ALL_POPULATED_RANKS {
- if (full_shift < -ctrl->timings[channel][slotrank].val_320c)
- full_shift = -ctrl->timings[channel][slotrank].val_320c;
+ if (full_shift < -ctrl->timings[channel][slotrank].pi_coding)
+ full_shift = -ctrl->timings[channel][slotrank].pi_coding;
}
for (slot = 0; slot < NUM_SLOTS; slot++)
switch ((ctrl->rankmap[channel] >> (2 * slot)) & 3) {
case 0:
default:
- slot320c[slot] = 0x7f;
+ pi_coding_ctrl[slot] = 0x7f;
break;
case 1:
- slot320c[slot] =
- ctrl->timings[channel][2 * slot + 0].val_320c +
+ pi_coding_ctrl[slot] =
+ ctrl->timings[channel][2 * slot + 0].pi_coding +
full_shift;
break;
case 2:
- slot320c[slot] =
- ctrl->timings[channel][2 * slot + 1].val_320c +
+ pi_coding_ctrl[slot] =
+ ctrl->timings[channel][2 * slot + 1].pi_coding +
full_shift;
break;
case 3:
- slot320c[slot] =
- (ctrl->timings[channel][2 * slot].val_320c +
- ctrl->timings[channel][2 * slot + 1].val_320c) / 2 +
+ pi_coding_ctrl[slot] =
+ (ctrl->timings[channel][2 * slot].pi_coding +
+ ctrl->timings[channel][2 * slot + 1].pi_coding) / 2 +
full_shift;
break;
}
/* enable CMD XOVER */
reg32 = get_XOVER_CMD(ctrl->rankmap[channel]);
- reg32 |= ((slot320c[0] & 0x3f) << 6) | ((slot320c[0] & 0x40) << 9);
- reg32 |= (slot320c[1] & 0x7f) << 18;
+ reg32 |= ((pi_coding_ctrl[0] & 0x3f) << 6) | ((pi_coding_ctrl[0] & 0x40) << 9);
+ reg32 |= (pi_coding_ctrl[1] & 0x7f) << 18;
reg32 |= (full_shift & 0x3f) | ((full_shift & 0x40) << 6);
- MCHBAR32(0x320c + channel * 0x100) = reg32;
+ MCHBAR32(GDCRCMDPICODING_ch(channel)) = reg32;
/* enable CLK XOVER */
- reg_c14 = get_XOVER_CLK(ctrl->rankmap[channel]);
- reg_c18 = 0;
+ reg_pi_code = get_XOVER_CLK(ctrl->rankmap[channel]);
+ reg_logic_delay = 0;
FOR_ALL_POPULATED_RANKS {
int shift =
- ctrl->timings[channel][slotrank].val_320c + full_shift;
- int offset_val_c14;
+ ctrl->timings[channel][slotrank].pi_coding + full_shift;
+ int offset_pi_code;
if (shift < 0)
shift = 0;
- offset_val_c14 = ctrl->reg_c14_offset + shift;
+ offset_pi_code = ctrl->pi_code_offset + shift;
/* set CLK phase shift */
- reg_c14 |= (offset_val_c14 & 0x3f) << (6 * slotrank);
- reg_c18 |= ((offset_val_c14 >> 6) & 1) << slotrank;
+ reg_pi_code |= (offset_pi_code & 0x3f) << (6 * slotrank);
+ reg_logic_delay |= ((offset_pi_code >> 6) & 1) << slotrank;
}
- MCHBAR32(0xc14 + channel * 0x100) = reg_c14;
- MCHBAR32(0xc18 + channel * 0x100) = reg_c18;
+ MCHBAR32(GDCRCKPICODE_ch(channel)) = reg_pi_code;
+ MCHBAR32(GDCRCKLOGICDELAY_ch(channel)) = reg_logic_delay;
- reg_io_latency = MCHBAR32(SC_IO_LATENCY_C0 + channel * 0x400);
+ reg_io_latency = MCHBAR32(SC_IO_LATENCY_ch(channel));
reg_io_latency &= 0xffff0000;
- reg_4024 = 0;
+ reg_roundtrip_latency = 0;
FOR_ALL_POPULATED_RANKS {
int post_timA_min_high = 7, post_timA_max_high = 0;
int pre_timA_min_high = 7, pre_timA_max_high = 0;
int shift_402x = 0;
int shift =
- ctrl->timings[channel][slotrank].val_320c + full_shift;
+ ctrl->timings[channel][slotrank].pi_coding + full_shift;
if (shift < 0)
shift = 0;
@@ -1028,8 +1150,8 @@ void program_timings(ramctr_timing * ctrl, int channel)
reg_io_latency |=
(ctrl->timings[channel][slotrank].io_latency + shift_402x -
post_timA_min_high) << (4 * slotrank);
- reg_4024 |=
- (ctrl->timings[channel][slotrank].val_4024 +
+ reg_roundtrip_latency |=
+ (ctrl->timings[channel][slotrank].roundtrip_latency +
shift_402x) << (8 * slotrank);
FOR_ALL_LANES {
@@ -1064,49 +1186,49 @@ void program_timings(ramctr_timing * ctrl, int channel)
timC + shift) & 0x40) << 13));
}
}
- MCHBAR32(0x4024 + channel * 0x400) = reg_4024;
- MCHBAR32(SC_IO_LATENCY_C0 + channel * 0x400) = reg_io_latency;
+ MCHBAR32(SC_ROUNDT_LAT_ch(channel)) = reg_roundtrip_latency;
+ MCHBAR32(SC_IO_LATENCY_ch(channel)) = reg_io_latency;
}
-static void test_timA(ramctr_timing * ctrl, int channel, int slotrank)
+static void test_timA(ramctr_timing *ctrl, int channel, int slotrank)
{
- wait_428c(channel);
+ wait_for_iosav(channel);
/* DRAM command MRS
* write MR3 MPR enable
* in this mode only RD and RDA are allowed
* all reads return a predefined pattern */
- MCHBAR32(0x4220 + channel * 0x400) = 0x1f000;
- MCHBAR32(0x4230 + channel * 0x400) = (0xc01 | (ctrl->tMOD << 16));
- MCHBAR32(0x4200 + channel * 0x400) = (slotrank << 24) | 0x360004;
- MCHBAR32(0x4210 + channel * 0x400) = 0;
+ MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 0)) = 0x1f000;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 0)) = (0xc01 | (ctrl->tMOD << 16));
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 0)) = (slotrank << 24) | 0x360004;
+ MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 0)) = 0;
/* DRAM command RD */
- MCHBAR32(0x4224 + channel * 0x400) = 0x1f105;
- MCHBAR32(0x4234 + channel * 0x400) = 0x4040c01;
- MCHBAR32(0x4204 + channel * 0x400) = (slotrank << 24);
- MCHBAR32(0x4214 + channel * 0x400) = 0;
+ MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 1)) = 0x1f105;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 1)) = 0x4040c01;
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 1)) = (slotrank << 24);
+ MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 1)) = 0;
/* DRAM command RD */
- MCHBAR32(0x4228 + channel * 0x400) = 0x1f105;
- MCHBAR32(0x4238 + channel * 0x400) = 0x100f | ((ctrl->CAS + 36) << 16);
- MCHBAR32(0x4208 + channel * 0x400) = (slotrank << 24) | 0x60000;
- MCHBAR32(0x4218 + channel * 0x400) = 0;
+ MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 2)) = 0x1f105;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 2)) = 0x100f | ((ctrl->CAS + 36) << 16);
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 2)) = (slotrank << 24) | 0x60000;
+ MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 2)) = 0;
/* DRAM command MRS
* write MR3 MPR disable */
- MCHBAR32(0x422c + channel * 0x400) = 0x1f000;
- MCHBAR32(0x423c + channel * 0x400) = 0xc01 | (ctrl->tMOD << 16);
- MCHBAR32(0x420c + channel * 0x400) = (slotrank << 24) | 0x360000;
- MCHBAR32(0x421c + channel * 0x400) = 0;
+ MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 3)) = 0x1f000;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 3)) = 0xc01 | (ctrl->tMOD << 16);
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 3)) = (slotrank << 24) | 0x360000;
+ MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 3)) = 0;
// execute command queue
- MCHBAR32(0x4284 + channel * 0x400) = RUN_QUEUE_4284(4);
+ MCHBAR32(IOSAV_SEQ_CTL_ch(channel)) = IOSAV_RUN_ONCE(4);
- wait_428c(channel);
+ wait_for_iosav(channel);
}
-static int does_lane_work(ramctr_timing * ctrl, int channel, int slotrank,
+static int does_lane_work(ramctr_timing *ctrl, int channel, int slotrank,
int lane)
{
u32 timA = ctrl->timings[channel][slotrank].lanes[lane].timA;
@@ -1156,7 +1278,7 @@ static struct run get_longest_zero_run(int *seq, int sz)
return ret;
}
-static void discover_timA_coarse(ramctr_timing * ctrl, int channel,
+static void discover_timA_coarse(ramctr_timing *ctrl, int channel,
int slotrank, int *upperA)
{
int timA;
@@ -1187,7 +1309,7 @@ static void discover_timA_coarse(ramctr_timing * ctrl, int channel,
}
}
-static void discover_timA_fine(ramctr_timing * ctrl, int channel, int slotrank,
+static void discover_timA_fine(ramctr_timing *ctrl, int channel, int slotrank,
int *upperA)
{
int timA_delta;
@@ -1251,12 +1373,12 @@ static int discover_402x(ramctr_timing *ctrl, int channel, int slotrank,
if (all_works)
return 0;
if (!some_works) {
- if (ctrl->timings[channel][slotrank].val_4024 < 2) {
+ if (ctrl->timings[channel][slotrank].roundtrip_latency < 2) {
printk(BIOS_EMERG, "402x discovery failed (1): %d, %d\n",
channel, slotrank);
return MAKE_ERR;
}
- ctrl->timings[channel][slotrank].val_4024 -= 2;
+ ctrl->timings[channel][slotrank].roundtrip_latency -= 2;
printram("4024 -= 2;\n");
continue;
}
@@ -1282,7 +1404,7 @@ struct timA_minmax {
int timA_min_high, timA_max_high;
};
-static void pre_timA_change(ramctr_timing * ctrl, int channel, int slotrank,
+static void pre_timA_change(ramctr_timing *ctrl, int channel, int slotrank,
struct timA_minmax *mnmx)
{
int lane;
@@ -1303,7 +1425,7 @@ static void pre_timA_change(ramctr_timing * ctrl, int channel, int slotrank,
}
}
-static void post_timA_change(ramctr_timing * ctrl, int channel, int slotrank,
+static void post_timA_change(ramctr_timing *ctrl, int channel, int slotrank,
struct timA_minmax *mnmx)
{
struct timA_minmax post;
@@ -1322,7 +1444,7 @@ static void post_timA_change(ramctr_timing * ctrl, int channel, int slotrank,
shift_402x = 0;
ctrl->timings[channel][slotrank].io_latency += shift_402x;
- ctrl->timings[channel][slotrank].val_4024 += shift_402x;
+ ctrl->timings[channel][slotrank].roundtrip_latency += shift_402x;
printram("4024 += %d;\n", shift_402x);
printram("4028 += %d;\n", shift_402x);
}
@@ -1344,7 +1466,7 @@ static void post_timA_change(ramctr_timing * ctrl, int channel, int slotrank,
* Once the controller has detected this pattern a bit in the result register is
* set for the current phase shift.
*/
-int read_training(ramctr_timing * ctrl)
+int read_training(ramctr_timing *ctrl)
{
int channel, slotrank, lane;
int err;
@@ -1354,21 +1476,21 @@ int read_training(ramctr_timing * ctrl)
int upperA[NUM_LANES];
struct timA_minmax mnmx;
- wait_428c(channel);
+ wait_for_iosav(channel);
/* DRAM command PREA */
- MCHBAR32(0x4220 + channel * 0x400) = 0x1f002;
- MCHBAR32(0x4230 + channel * 0x400) = 0xc01 | (ctrl->tRP << 16);
- MCHBAR32(0x4200 + channel * 0x400) = (slotrank << 24) | 0x60400;
- MCHBAR32(0x4210 + channel * 0x400) = 0;
+ MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 0)) = 0x1f002;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 0)) = 0xc01 | (ctrl->tRP << 16);
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 0)) = (slotrank << 24) | 0x60400;
+ MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 0)) = 0;
// execute command queue
- MCHBAR32(0x4284 + channel * 0x400) = RUN_QUEUE_4284(1);
+ MCHBAR32(IOSAV_SEQ_CTL_ch(channel)) = IOSAV_RUN_ONCE(1);
- MCHBAR32(0x3400) = (slotrank << 2) | 0x8001;
+ MCHBAR32(GDCRTRAININGMOD) = (slotrank << 2) | 0x8001;
ctrl->timings[channel][slotrank].io_latency = 4;
- ctrl->timings[channel][slotrank].val_4024 = 55;
+ ctrl->timings[channel][slotrank].roundtrip_latency = 55;
program_timings(ctrl, channel);
discover_timA_coarse(ctrl, channel, slotrank, upperA);
@@ -1393,7 +1515,7 @@ int read_training(ramctr_timing * ctrl)
}
} else if (some_high) {
- ctrl->timings[channel][slotrank].val_4024++;
+ ctrl->timings[channel][slotrank].roundtrip_latency++;
ctrl->timings[channel][slotrank].io_latency++;
printram("4024++;\n");
printram("4028++;\n");
@@ -1424,7 +1546,7 @@ int read_training(ramctr_timing * ctrl)
post_timA_change(ctrl, channel, slotrank, &mnmx);
printram("4/8: %d, %d, %x, %x\n", channel, slotrank,
- ctrl->timings[channel][slotrank].val_4024,
+ ctrl->timings[channel][slotrank].roundtrip_latency,
ctrl->timings[channel][slotrank].io_latency);
printram("final results:\n");
@@ -1433,7 +1555,7 @@ int read_training(ramctr_timing * ctrl)
lane,
ctrl->timings[channel][slotrank].lanes[lane].timA);
- MCHBAR32(0x3400) = 0;
+ MCHBAR32(GDCRTRAININGMOD) = 0;
toggle_io_reset();
}
@@ -1442,85 +1564,85 @@ int read_training(ramctr_timing * ctrl)
program_timings(ctrl, channel);
}
FOR_ALL_CHANNELS FOR_ALL_POPULATED_RANKS FOR_ALL_LANES {
- MCHBAR32(0x4080 + channel * 0x400 + lane * 4) = 0;
+ MCHBAR32(IOSAV_By_BW_MASK_ch(channel, lane)) = 0;
}
return 0;
}
-static void test_timC(ramctr_timing * ctrl, int channel, int slotrank)
+static void test_timC(ramctr_timing *ctrl, int channel, int slotrank)
{
int lane;
FOR_ALL_LANES {
- MCHBAR32(0x4340 + channel * 0x400 + lane * 4) = 0;
- MCHBAR32(0x4140 + channel * 0x400 + lane * 4);
+ MCHBAR32(IOSAV_By_ERROR_COUNT_ch(channel, lane)) = 0;
+ MCHBAR32(IOSAV_By_BW_SERROR_C_ch(channel, lane));
}
- wait_428c(channel);
+ wait_for_iosav(channel);
/* DRAM command ACT */
- MCHBAR32(0x4220 + channel * 0x400) = 0x1f006;
- MCHBAR32(0x4230 + channel * 0x400) =
+ MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 0)) = 0x1f006;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 0)) =
(MAX((ctrl->tFAW >> 2) + 1, ctrl->tRRD) << 10)
| 4 | (ctrl->tRCD << 16);
- MCHBAR32(0x4200 + channel * 0x400) = (slotrank << 24) | (6 << 16);
- MCHBAR32(0x4210 + channel * 0x400) = 0x244;
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 0)) = (slotrank << 24) | (6 << 16);
+ MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 0)) = 0x244;
/* DRAM command NOP */
- MCHBAR32(0x4224 + channel * 0x400) = 0x1f207;
- MCHBAR32(0x4234 + channel * 0x400) = 0x8041001;
- MCHBAR32(0x4204 + channel * 0x400) = (slotrank << 24) | 8;
- MCHBAR32(0x4214 + channel * 0x400) = 0x3e0;
+ MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 1)) = 0x1f207;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 1)) = 0x8041001;
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 1)) = (slotrank << 24) | 8;
+ MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 1)) = 0x3e0;
/* DRAM command WR */
- MCHBAR32(0x4228 + channel * 0x400) = 0x1f201;
- MCHBAR32(0x4238 + channel * 0x400) = 0x80411f4;
- MCHBAR32(0x4208 + channel * 0x400) = slotrank << 24;
- MCHBAR32(0x4218 + channel * 0x400) = 0x242;
+ MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 2)) = 0x1f201;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 2)) = 0x80411f4;
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 2)) = slotrank << 24;
+ MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 2)) = 0x242;
/* DRAM command NOP */
- MCHBAR32(0x422c + channel * 0x400) = 0x1f207;
- MCHBAR32(0x423c + channel * 0x400) =
+ MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 3)) = 0x1f207;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 3)) =
0x8000c01 | ((ctrl->CWL + ctrl->tWTR + 5) << 16);
- MCHBAR32(0x420c + channel * 0x400) = (slotrank << 24) | 8;
- MCHBAR32(0x421c + channel * 0x400) = 0x3e0;
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 3)) = (slotrank << 24) | 8;
+ MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 3)) = 0x3e0;
// execute command queue
- MCHBAR32(0x4284 + channel * 0x400) = RUN_QUEUE_4284(4);
+ MCHBAR32(IOSAV_SEQ_CTL_ch(channel)) = IOSAV_RUN_ONCE(4);
- wait_428c(channel);
+ wait_for_iosav(channel);
/* DRAM command PREA */
- MCHBAR32(0x4220 + channel * 0x400) = 0x1f002;
- MCHBAR32(0x4230 + channel * 0x400) = 0xc01 | (ctrl->tRP << 16);
- MCHBAR32(0x4200 + channel * 0x400) = (slotrank << 24) | 0x60400;
- MCHBAR32(0x4210 + channel * 0x400) = 0x240;
+ MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 0)) = 0x1f002;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 0)) = 0xc01 | (ctrl->tRP << 16);
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 0)) = (slotrank << 24) | 0x60400;
+ MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 0)) = 0x240;
/* DRAM command ACT */
- MCHBAR32(0x4224 + channel * 0x400) = 0x1f006;
- MCHBAR32(0x4234 + channel * 0x400) =
+ MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 1)) = 0x1f006;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 1)) =
(MAX(ctrl->tRRD, (ctrl->tFAW >> 2) + 1) << 10)
| 8 | (ctrl->CAS << 16);
- MCHBAR32(0x4204 + channel * 0x400) = (slotrank << 24) | 0x60000;
- MCHBAR32(0x4214 + channel * 0x400) = 0x244;
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 1)) = (slotrank << 24) | 0x60000;
+ MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 1)) = 0x244;
/* DRAM command RD */
- MCHBAR32(0x4228 + channel * 0x400) = 0x1f105;
- MCHBAR32(0x4238 + channel * 0x400) =
+ MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 2)) = 0x1f105;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 2)) =
0x40011f4 | (MAX(ctrl->tRTP, 8) << 16);
- MCHBAR32(0x4208 + channel * 0x400) = (slotrank << 24);
- MCHBAR32(0x4218 + channel * 0x400) = 0x242;
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 2)) = (slotrank << 24);
+ MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 2)) = 0x242;
/* DRAM command PREA */
- MCHBAR32(0x422c + channel * 0x400) = 0x1f002;
- MCHBAR32(0x423c + channel * 0x400) = 0xc01 | (ctrl->tRP << 16);
- MCHBAR32(0x420c + channel * 0x400) = (slotrank << 24) | 0x60400;
- MCHBAR32(0x421c + channel * 0x400) = 0x240;
+ MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 3)) = 0x1f002;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 3)) = 0xc01 | (ctrl->tRP << 16);
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 3)) = (slotrank << 24) | 0x60400;
+ MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 3)) = 0x240;
// execute command queue
- MCHBAR32(0x4284 + channel * 0x400) = RUN_QUEUE_4284(4);
+ MCHBAR32(IOSAV_SEQ_CTL_ch(channel)) = IOSAV_RUN_ONCE(4);
- wait_428c(channel);
+ wait_for_iosav(channel);
}
static void timC_threshold_process(int *data, const int count)
@@ -1547,16 +1669,16 @@ static int discover_timC(ramctr_timing *ctrl, int channel, int slotrank)
int statistics[NUM_LANES][MAX_TIMC + 1];
int lane;
- wait_428c(channel);
+ wait_for_iosav(channel);
/* DRAM command PREA */
- MCHBAR32(0x4220 + channel * 0x400) = 0x1f002;
- MCHBAR32(0x4230 + channel * 0x400) = 0xc01 | (ctrl->tRP << 16);
- MCHBAR32(0x4200 + channel * 0x400) = (slotrank << 24) | 0x60400;
- MCHBAR32(0x4210 + channel * 0x400) = 0x240;
+ MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 0)) = 0x1f002;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 0)) = 0xc01 | (ctrl->tRP << 16);
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 0)) = (slotrank << 24) | 0x60400;
+ MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 0)) = 0x240;
// execute command queue
- MCHBAR32(0x4284 + channel * 0x400) = RUN_QUEUE_4284(1);
+ MCHBAR32(IOSAV_SEQ_CTL_ch(channel)) = IOSAV_RUN_ONCE(1);
for (timC = 0; timC <= MAX_TIMC; timC++) {
FOR_ALL_LANES ctrl->timings[channel][slotrank].lanes[lane].
@@ -1567,7 +1689,7 @@ static int discover_timC(ramctr_timing *ctrl, int channel, int slotrank)
FOR_ALL_LANES {
statistics[lane][timC] =
- MCHBAR32(0x4340 + channel * 0x400 + lane * 4);
+ MCHBAR32(IOSAV_By_ERROR_COUNT_ch(channel, lane));
}
}
FOR_ALL_LANES {
@@ -1596,7 +1718,7 @@ static int discover_timC(ramctr_timing *ctrl, int channel, int slotrank)
return 0;
}
-static int get_precedening_channels(ramctr_timing * ctrl, int target_channel)
+static int get_precedening_channels(ramctr_timing *ctrl, int target_channel)
{
int channel, ret = 0;
FOR_ALL_POPULATED_CHANNELS if (channel < target_channel)
@@ -1604,7 +1726,7 @@ static int get_precedening_channels(ramctr_timing * ctrl, int target_channel)
return ret;
}
-static void fill_pattern0(ramctr_timing * ctrl, int channel, u32 a, u32 b)
+static void fill_pattern0(ramctr_timing *ctrl, int channel, u32 a, u32 b)
{
unsigned int j;
unsigned int channel_offset =
@@ -1614,7 +1736,7 @@ static void fill_pattern0(ramctr_timing * ctrl, int channel, u32 a, u32 b)
sfence();
}
-static int num_of_channels(const ramctr_timing * ctrl)
+static int num_of_channels(const ramctr_timing *ctrl)
{
int ret = 0;
int channel;
@@ -1622,7 +1744,7 @@ static int num_of_channels(const ramctr_timing * ctrl)
return ret;
}
-static void fill_pattern1(ramctr_timing * ctrl, int channel)
+static void fill_pattern1(ramctr_timing *ctrl, int channel)
{
unsigned int j;
unsigned int channel_offset =
@@ -1635,7 +1757,7 @@ static void fill_pattern1(ramctr_timing * ctrl, int channel)
sfence();
}
-static void precharge(ramctr_timing * ctrl)
+static void precharge(ramctr_timing *ctrl)
{
int channel, slotrank, lane;
@@ -1650,47 +1772,47 @@ static void precharge(ramctr_timing * ctrl)
program_timings(ctrl, channel);
FOR_ALL_POPULATED_RANKS {
- wait_428c(channel);
+ wait_for_iosav(channel);
/* DRAM command MRS
* write MR3 MPR enable
* in this mode only RD and RDA are allowed
* all reads return a predefined pattern */
- MCHBAR32(0x4220 + channel * 0x400) = 0x1f000;
- MCHBAR32(0x4230 + channel * 0x400) =
+ MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 0)) = 0x1f000;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 0)) =
0xc01 | (ctrl->tMOD << 16);
- MCHBAR32(0x4200 + channel * 0x400) =
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 0)) =
(slotrank << 24) | 0x360004;
- MCHBAR32(0x4210 + channel * 0x400) = 0;
+ MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 0)) = 0;
/* DRAM command RD */
- MCHBAR32(0x4224 + channel * 0x400) = 0x1f105;
- MCHBAR32(0x4234 + channel * 0x400) = 0x4041003;
- MCHBAR32(0x4204 + channel * 0x400) =
+ MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 1)) = 0x1f105;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 1)) = 0x4041003;
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 1)) =
(slotrank << 24) | 0;
- MCHBAR32(0x4214 + channel * 0x400) = 0;
+ MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 1)) = 0;
/* DRAM command RD */
- MCHBAR32(0x4228 + channel * 0x400) = 0x1f105;
- MCHBAR32(0x4238 + channel * 0x400) =
+ MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 2)) = 0x1f105;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 2)) =
0x1001 | ((ctrl->CAS + 8) << 16);
- MCHBAR32(0x4208 + channel * 0x400) =
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 2)) =
(slotrank << 24) | 0x60000;
- MCHBAR32(0x4218 + channel * 0x400) = 0;
+ MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 2)) = 0;
/* DRAM command MRS
* write MR3 MPR disable */
- MCHBAR32(0x422c + channel * 0x400) = 0x1f000;
- MCHBAR32(0x423c + channel * 0x400) =
+ MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 3)) = 0x1f000;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 3)) =
0xc01 | (ctrl->tMOD << 16);
- MCHBAR32(0x420c + channel * 0x400) =
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 3)) =
(slotrank << 24) | 0x360000;
- MCHBAR32(0x421c + channel * 0x400) = 0;
+ MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 3)) = 0;
// execute command queue
- MCHBAR32(0x4284 + channel * 0x400) = RUN_QUEUE_4284(4);
+ MCHBAR32(IOSAV_SEQ_CTL_ch(channel)) = IOSAV_RUN_ONCE(4);
- wait_428c(channel);
+ wait_for_iosav(channel);
}
FOR_ALL_POPULATED_RANKS FOR_ALL_LANES {
@@ -1703,75 +1825,75 @@ static void precharge(ramctr_timing * ctrl)
program_timings(ctrl, channel);
FOR_ALL_POPULATED_RANKS {
- wait_428c(channel);
+ wait_for_iosav(channel);
/* DRAM command MRS
* write MR3 MPR enable
* in this mode only RD and RDA are allowed
* all reads return a predefined pattern */
- MCHBAR32(0x4220 + channel * 0x400) = 0x1f000;
- MCHBAR32(0x4230 + channel * 0x400) =
+ MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 0)) = 0x1f000;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 0)) =
0xc01 | (ctrl->tMOD << 16);
- MCHBAR32(0x4200 + channel * 0x400) =
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 0)) =
(slotrank << 24) | 0x360004;
- MCHBAR32(0x4210 + channel * 0x400) = 0;
+ MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 0)) = 0;
/* DRAM command RD */
- MCHBAR32(0x4224 + channel * 0x400) = 0x1f105;
- MCHBAR32(0x4234 + channel * 0x400) = 0x4041003;
- MCHBAR32(0x4204 + channel * 0x400) =
+ MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 1)) = 0x1f105;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 1)) = 0x4041003;
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 1)) =
(slotrank << 24) | 0;
- MCHBAR32(0x4214 + channel * 0x400) = 0;
+ MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 1)) = 0;
/* DRAM command RD */
- MCHBAR32(0x4228 + channel * 0x400) = 0x1f105;
- MCHBAR32(0x4238 + channel * 0x400) =
+ MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 2)) = 0x1f105;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 2)) =
0x1001 | ((ctrl->CAS + 8) << 16);
- MCHBAR32(0x4208 + channel * 0x400) =
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 2)) =
(slotrank << 24) | 0x60000;
- MCHBAR32(0x4218 + channel * 0x400) = 0;
+ MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 2)) = 0;
/* DRAM command MRS
* write MR3 MPR disable */
- MCHBAR32(0x422c + channel * 0x400) = 0x1f000;
- MCHBAR32(0x423c + channel * 0x400) =
+ MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 3)) = 0x1f000;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 3)) =
0xc01 | (ctrl->tMOD << 16);
- MCHBAR32(0x420c + channel * 0x400) =
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 3)) =
(slotrank << 24) | 0x360000;
- MCHBAR32(0x421c + channel * 0x400) = 0;
+ MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 3)) = 0;
// execute command queue
- MCHBAR32(0x4284 + channel * 0x400) = RUN_QUEUE_4284(4);
+ MCHBAR32(IOSAV_SEQ_CTL_ch(channel)) = IOSAV_RUN_ONCE(4);
- wait_428c(channel);
+ wait_for_iosav(channel);
}
}
}
-static void test_timB(ramctr_timing * ctrl, int channel, int slotrank)
+static void test_timB(ramctr_timing *ctrl, int channel, int slotrank)
{
/* enable DQs on this slotrank */
write_mrreg(ctrl, channel, slotrank, 1,
0x80 | make_mr1(ctrl, slotrank, channel));
- wait_428c(channel);
+ wait_for_iosav(channel);
/* DRAM command NOP */
- MCHBAR32(0x4220 + channel * 0x400) = 0x1f207;
- MCHBAR32(0x4230 + channel * 0x400) =
+ MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 0)) = 0x1f207;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 0)) =
0x8000c01 | ((ctrl->CWL + ctrl->tWLO) << 16);
- MCHBAR32(0x4200 + channel * 0x400) = 8 | (slotrank << 24);
- MCHBAR32(0x4210 + channel * 0x400) = 0;
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 0)) = 8 | (slotrank << 24);
+ MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 0)) = 0;
/* DRAM command NOP */
- MCHBAR32(0x4224 + channel * 0x400) = 0x1f107;
- MCHBAR32(0x4234 + channel * 0x400) =
+ MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 1)) = 0x1f107;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 1)) =
0x4000c01 | ((ctrl->CAS + 38) << 16);
- MCHBAR32(0x4204 + channel * 0x400) = (slotrank << 24) | 4;
- MCHBAR32(0x4214 + channel * 0x400) = 0;
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 1)) = (slotrank << 24) | 4;
+ MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 1)) = 0;
// execute command queue
- MCHBAR32(0x4284 + channel * 0x400) = RUN_QUEUE_4284(2);
+ MCHBAR32(IOSAV_SEQ_CTL_ch(channel)) = IOSAV_RUN_ONCE(2);
- wait_428c(channel);
+ wait_for_iosav(channel);
/* disable DQs on this slotrank */
write_mrreg(ctrl, channel, slotrank, 1,
@@ -1784,7 +1906,7 @@ static int discover_timB(ramctr_timing *ctrl, int channel, int slotrank)
int statistics[NUM_LANES][128];
int lane;
- MCHBAR32(0x3400) = 0x108052 | (slotrank << 2);
+ MCHBAR32(GDCRTRAININGMOD) = 0x108052 | (slotrank << 2);
for (timB = 0; timB < 128; timB++) {
FOR_ALL_LANES {
@@ -1850,77 +1972,77 @@ static int get_timB_high_adjust(u64 val)
return 8;
}
-static void adjust_high_timB(ramctr_timing * ctrl)
+static void adjust_high_timB(ramctr_timing *ctrl)
{
int channel, slotrank, lane, old;
- MCHBAR32(0x3400) = 0x200;
+ MCHBAR32(GDCRTRAININGMOD) = 0x200;
FOR_ALL_POPULATED_CHANNELS {
fill_pattern1(ctrl, channel);
- MCHBAR32(0x4288 + channel * 0x400) = 1;
+ MCHBAR32(IOSAV_DATA_CTL_ch(channel)) = 1;
}
FOR_ALL_POPULATED_CHANNELS FOR_ALL_POPULATED_RANKS {
- MCHBAR32(0x4288 + channel * 0x400) = 0x10001;
+ MCHBAR32(IOSAV_DATA_CTL_ch(channel)) = 0x10001;
- wait_428c(channel);
+ wait_for_iosav(channel);
/* DRAM command ACT */
- MCHBAR32(0x4220 + channel * 0x400) = 0x1f006;
- MCHBAR32(0x4230 + channel * 0x400) = 0xc01 | (ctrl->tRCD << 16);
- MCHBAR32(0x4200 + channel * 0x400) = (slotrank << 24) | 0x60000;
- MCHBAR32(0x4210 + channel * 0x400) = 0;
+ MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 0)) = 0x1f006;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 0)) = 0xc01 | (ctrl->tRCD << 16);
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 0)) = (slotrank << 24) | 0x60000;
+ MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 0)) = 0;
/* DRAM command NOP */
- MCHBAR32(0x4224 + channel * 0x400) = 0x1f207;
- MCHBAR32(0x4234 + channel * 0x400) = 0x8040c01;
- MCHBAR32(0x4204 + channel * 0x400) = (slotrank << 24) | 0x8;
- MCHBAR32(0x4214 + channel * 0x400) = 0x3e0;
+ MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 1)) = 0x1f207;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 1)) = 0x8040c01;
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 1)) = (slotrank << 24) | 0x8;
+ MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 1)) = 0x3e0;
/* DRAM command WR */
- MCHBAR32(0x4228 + channel * 0x400) = 0x1f201;
- MCHBAR32(0x4238 + channel * 0x400) = 0x8041003;
- MCHBAR32(0x4208 + channel * 0x400) = (slotrank << 24);
- MCHBAR32(0x4218 + channel * 0x400) = 0x3e2;
+ MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 2)) = 0x1f201;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 2)) = 0x8041003;
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 2)) = (slotrank << 24);
+ MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 2)) = 0x3e2;
/* DRAM command NOP */
- MCHBAR32(0x422c + channel * 0x400) = 0x1f207;
- MCHBAR32(0x423c + channel * 0x400) =
+ MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 3)) = 0x1f207;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 3)) =
0x8000c01 | ((ctrl->CWL + ctrl->tWTR + 5) << 16);
- MCHBAR32(0x420c + channel * 0x400) = (slotrank << 24) | 0x8;
- MCHBAR32(0x421c + channel * 0x400) = 0x3e0;
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 3)) = (slotrank << 24) | 0x8;
+ MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 3)) = 0x3e0;
// execute command queue
- MCHBAR32(0x4284 + channel * 0x400) = RUN_QUEUE_4284(4);
+ MCHBAR32(IOSAV_SEQ_CTL_ch(channel)) = IOSAV_RUN_ONCE(4);
- wait_428c(channel);
+ wait_for_iosav(channel);
/* DRAM command PREA */
- MCHBAR32(0x4220 + channel * 0x400) = 0x1f002;
- MCHBAR32(0x4230 + channel * 0x400) =
+ MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 0)) = 0x1f002;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 0)) =
0xc01 | ((ctrl->tRP) << 16);
- MCHBAR32(0x4200 + channel * 0x400) =
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 0)) =
(slotrank << 24) | 0x60400;
- MCHBAR32(0x4210 + channel * 0x400) = 0x240;
+ MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 0)) = 0x240;
/* DRAM command ACT */
- MCHBAR32(0x4224 + channel * 0x400) = 0x1f006;
- MCHBAR32(0x4234 + channel * 0x400) =
+ MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 1)) = 0x1f006;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 1)) =
0xc01 | ((ctrl->tRCD) << 16);
- MCHBAR32(0x4204 + channel * 0x400) = (slotrank << 24) | 0x60000;
- MCHBAR32(0x4214 + channel * 0x400) = 0;
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 1)) = (slotrank << 24) | 0x60000;
+ MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 1)) = 0;
/* DRAM command RD */
- MCHBAR32(0x4228 + channel * 0x400) = 0x3f105;
- MCHBAR32(0x4238 + channel * 0x400) = 0x4000c01 | ((ctrl->tRP +
- ctrl->timings[channel][slotrank].val_4024 +
+ MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 2)) = 0x3f105;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 2)) = 0x4000c01 | ((ctrl->tRP +
+ ctrl->timings[channel][slotrank].roundtrip_latency +
ctrl->timings[channel][slotrank].io_latency) << 16);
- MCHBAR32(0x4208 + channel * 0x400) = (slotrank << 24) | 0x60008;
- MCHBAR32(0x4218 + channel * 0x400) = 0;
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 2)) = (slotrank << 24) | 0x60008;
+ MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 2)) = 0;
// execute command queue
- MCHBAR32(0x4284 + channel * 0x400) = RUN_QUEUE_4284(3);
+ MCHBAR32(IOSAV_SEQ_CTL_ch(channel)) = IOSAV_RUN_ONCE(3);
- wait_428c(channel);
+ wait_for_iosav(channel);
FOR_ALL_LANES {
u64 res = MCHBAR32(lane_registers[lane] +
channel * 0x100 + 4);
@@ -1937,28 +2059,28 @@ static void adjust_high_timB(ramctr_timing * ctrl)
timB);
}
}
- MCHBAR32(0x3400) = 0;
+ MCHBAR32(GDCRTRAININGMOD) = 0;
}
-static void write_op(ramctr_timing * ctrl, int channel)
+static void write_op(ramctr_timing *ctrl, int channel)
{
int slotrank;
- wait_428c(channel);
+ wait_for_iosav(channel);
/* choose an existing rank. */
slotrank = !(ctrl->rankmap[channel] & 1) ? 2 : 0;
/* DRAM command ACT */
- MCHBAR32(0x4220 + channel * 0x400) = 0x0f003;
- MCHBAR32(0x4230 + channel * 0x400) = 0x41001;
- MCHBAR32(0x4200 + channel * 0x400) = (slotrank << 24) | 0x60000;
- MCHBAR32(0x4210 + channel * 0x400) = 0x3e0;
+ MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 0)) = 0x0f003;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 0)) = 0x41001;
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 0)) = (slotrank << 24) | 0x60000;
+ MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 0)) = 0x3e0;
// execute command queue
- MCHBAR32(0x4284 + channel * 0x400) = RUN_QUEUE_4284(1);
+ MCHBAR32(IOSAV_SEQ_CTL_ch(channel)) = IOSAV_RUN_ONCE(1);
- wait_428c(channel);
+ wait_for_iosav(channel);
}
/* Compensate the skew between CMD/ADDR/CLK and DQ/DQS lanes.
@@ -1973,21 +2095,21 @@ static void write_op(ramctr_timing * ctrl, int channel)
* In this mode the DRAM-chip samples the CLK on every DQS edge and feeds back the
* sampled value on the data lanes (DQs).
*/
-int write_training(ramctr_timing * ctrl)
+int write_training(ramctr_timing *ctrl)
{
int channel, slotrank, lane;
int err;
FOR_ALL_POPULATED_CHANNELS
- MCHBAR32_OR(0x4008 + channel * 0x400, 0x8000000);
+ MCHBAR32_OR(TC_RWP_ch(channel), 0x8000000);
FOR_ALL_POPULATED_CHANNELS {
write_op(ctrl, channel);
- MCHBAR32_OR(0x4020 + channel * 0x400, 0x200000);
+ MCHBAR32_OR(SCHED_CBIT_ch(channel), 0x200000);
}
/* refresh disable */
- MCHBAR32_AND(0x5030, ~8);
+ MCHBAR32_AND(MC_INIT_STATE_G, ~8);
FOR_ALL_POPULATED_CHANNELS {
write_op(ctrl, channel);
}
@@ -2000,7 +2122,7 @@ int write_training(ramctr_timing * ctrl)
write_mrreg(ctrl, channel, slotrank, 1,
make_mr1(ctrl, slotrank, channel) | 0x1080);
- MCHBAR32(0x3400) = 0x108052;
+ MCHBAR32(GDCRTRAININGMOD) = 0x108052;
toggle_io_reset();
@@ -2016,29 +2138,29 @@ int write_training(ramctr_timing * ctrl)
write_mrreg(ctrl, channel,
slotrank, 1, make_mr1(ctrl, slotrank, channel));
- MCHBAR32(0x3400) = 0;
+ MCHBAR32(GDCRTRAININGMOD) = 0;
FOR_ALL_POPULATED_CHANNELS
- wait_428c(channel);
+ wait_for_iosav(channel);
/* refresh enable */
- MCHBAR32_OR(0x5030, 8);
+ MCHBAR32_OR(MC_INIT_STATE_G, 8);
FOR_ALL_POPULATED_CHANNELS {
- MCHBAR32_AND(0x4020 + channel * 0x400, ~0x00200000);
- MCHBAR32(0x428c + channel * 0x400);
- wait_428c(channel);
+ MCHBAR32_AND(SCHED_CBIT_ch(channel), ~0x00200000);
+ MCHBAR32(IOSAV_STATUS_ch(channel));
+ wait_for_iosav(channel);
/* DRAM command ZQCS */
- MCHBAR32(0x4220 + channel * 0x400) = 0x0f003;
- MCHBAR32(0x4230 + channel * 0x400) = 0x659001;
- MCHBAR32(0x4200 + channel * 0x400) = 0x60000;
- MCHBAR32(0x4210 + channel * 0x400) = 0x3e0;
+ MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 0)) = 0x0f003;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 0)) = 0x659001;
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 0)) = 0x60000;
+ MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 0)) = 0x3e0;
// execute command queue
- MCHBAR32(0x4284 + channel * 0x400) = RUN_QUEUE_4284(1);
+ MCHBAR32(IOSAV_SEQ_CTL_ch(channel)) = IOSAV_RUN_ONCE(1);
- wait_428c(channel);
+ wait_for_iosav(channel);
}
toggle_io_reset();
@@ -2048,12 +2170,12 @@ int write_training(ramctr_timing * ctrl)
printram("CPF\n");
FOR_ALL_CHANNELS FOR_ALL_POPULATED_RANKS FOR_ALL_LANES {
- MCHBAR32_AND(0x4080 + channel * 0x400 + lane * 4, 0);
+ MCHBAR32_AND(IOSAV_By_BW_MASK_ch(channel, lane), 0);
}
FOR_ALL_POPULATED_CHANNELS {
fill_pattern0(ctrl, channel, 0xaaaaaaaa, 0x55555555);
- MCHBAR32(0x4288 + channel * 0x400) = 0;
+ MCHBAR32(IOSAV_DATA_CTL_ch(channel)) = 0;
}
FOR_ALL_CHANNELS FOR_ALL_POPULATED_RANKS {
@@ -2072,12 +2194,12 @@ int write_training(ramctr_timing * ctrl)
program_timings(ctrl, channel);
FOR_ALL_CHANNELS FOR_ALL_POPULATED_RANKS FOR_ALL_LANES {
- MCHBAR32_AND(0x4080 + channel * 0x400 + lane * 4, 0);
+ MCHBAR32_AND(IOSAV_By_BW_MASK_ch(channel, lane), 0);
}
return 0;
}
-static int test_320c(ramctr_timing * ctrl, int channel, int slotrank)
+static int test_320c(ramctr_timing *ctrl, int channel, int slotrank)
{
struct ram_rank_timings saved_rt = ctrl->timings[channel][slotrank];
int timC_delta;
@@ -2092,49 +2214,49 @@ static int test_320c(ramctr_timing * ctrl, int channel, int slotrank)
}
program_timings(ctrl, channel);
FOR_ALL_LANES {
- MCHBAR32(4 * lane + 0x4f40) = 0;
+ MCHBAR32(IOSAV_By_ERROR_COUNT(lane)) = 0;
}
- MCHBAR32(0x4288 + channel * 0x400) = 0x1f;
+ MCHBAR32(IOSAV_DATA_CTL_ch(channel)) = 0x1f;
- wait_428c(channel);
+ wait_for_iosav(channel);
/* DRAM command ACT */
- MCHBAR32(0x4220 + channel * 0x400) = 0x1f006;
- MCHBAR32(0x4230 + channel * 0x400) =
+ MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 0)) = 0x1f006;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 0)) =
((MAX(ctrl->tRRD, (ctrl->tFAW >> 2) + 1)) << 10)
| 8 | (ctrl->tRCD << 16);
- MCHBAR32(0x4200 + channel * 0x400) =
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 0)) =
(slotrank << 24) | ctr | 0x60000;
- MCHBAR32(0x4210 + channel * 0x400) = 0x244;
+ MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 0)) = 0x244;
/* DRAM command WR */
- MCHBAR32(0x4224 + channel * 0x400) = 0x1f201;
- MCHBAR32(0x4234 + channel * 0x400) =
+ MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 1)) = 0x1f201;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 1)) =
0x8001020 | ((ctrl->CWL + ctrl->tWTR + 8) << 16);
- MCHBAR32(0x4204 + channel * 0x400) = (slotrank << 24);
- MCHBAR32(0x4244 + channel * 0x400) = 0x389abcd;
- MCHBAR32(0x4214 + channel * 0x400) = 0x20e42;
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 1)) = (slotrank << 24);
+ MCHBAR32(IOSAV_n_ADDRESS_LFSR_ch(channel, 1)) = 0x389abcd;
+ MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 1)) = 0x20e42;
/* DRAM command RD */
- MCHBAR32(0x4228 + channel * 0x400) = 0x1f105;
- MCHBAR32(0x4238 + channel * 0x400) =
+ MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 2)) = 0x1f105;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 2)) =
0x4001020 | (MAX(ctrl->tRTP, 8) << 16);
- MCHBAR32(0x4208 + channel * 0x400) = (slotrank << 24);
- MCHBAR32(0x4248 + channel * 0x400) = 0x389abcd;
- MCHBAR32(0x4218 + channel * 0x400) = 0x20e42;
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 2)) = (slotrank << 24);
+ MCHBAR32(IOSAV_n_ADDRESS_LFSR_ch(channel, 2)) = 0x389abcd;
+ MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 2)) = 0x20e42;
/* DRAM command PRE */
- MCHBAR32(0x422c + channel * 0x400) = 0x1f002;
- MCHBAR32(0x423c + channel * 0x400) = 0xf1001;
- MCHBAR32(0x420c + channel * 0x400) = (slotrank << 24) | 0x60400;
- MCHBAR32(0x421c + channel * 0x400) = 0x240;
+ MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 3)) = 0x1f002;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 3)) = 0xf1001;
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 3)) = (slotrank << 24) | 0x60400;
+ MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 3)) = 0x240;
// execute command queue
- MCHBAR32(0x4284 + channel * 0x400) = RUN_QUEUE_4284(4);
+ MCHBAR32(IOSAV_SEQ_CTL_ch(channel)) = IOSAV_RUN_ONCE(4);
- wait_428c(channel);
+ wait_for_iosav(channel);
FOR_ALL_LANES {
- u32 r32 = MCHBAR32(0x4340 + channel * 0x400 + lane * 4);
+ u32 r32 = MCHBAR32(IOSAV_By_ERROR_COUNT_ch(channel, lane));
if (r32 == 0)
lanes_ok |= 1 << lane;
@@ -2151,7 +2273,7 @@ static int test_320c(ramctr_timing * ctrl, int channel, int slotrank)
#include "raminit_patterns.h"
-static void fill_pattern5(ramctr_timing * ctrl, int channel, int patno)
+static void fill_pattern5(ramctr_timing *ctrl, int channel, int patno)
{
unsigned int i, j;
unsigned int channel_offset =
@@ -2181,47 +2303,47 @@ static void fill_pattern5(ramctr_timing * ctrl, int channel, int patno)
}
}
-static void reprogram_320c(ramctr_timing * ctrl)
+static void reprogram_320c(ramctr_timing *ctrl)
{
int channel, slotrank;
FOR_ALL_POPULATED_CHANNELS {
- wait_428c(channel);
+ wait_for_iosav(channel);
/* choose an existing rank. */
slotrank = !(ctrl->rankmap[channel] & 1) ? 2 : 0;
/* DRAM command ZQCS */
- MCHBAR32(0x4220 + channel * 0x400) = 0x0f003;
- MCHBAR32(0x4230 + channel * 0x400) = 0x41001;
- MCHBAR32(0x4200 + channel * 0x400) = (slotrank << 24) | 0x60000;
- MCHBAR32(0x4210 + channel * 0x400) = 0x3e0;
+ MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 0)) = 0x0f003;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 0)) = 0x41001;
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 0)) = (slotrank << 24) | 0x60000;
+ MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 0)) = 0x3e0;
// execute command queue
- MCHBAR32(0x4284 + channel * 0x400) = RUN_QUEUE_4284(1);
+ MCHBAR32(IOSAV_SEQ_CTL_ch(channel)) = IOSAV_RUN_ONCE(1);
- wait_428c(channel);
- MCHBAR32_OR(0x4020 + channel * 0x400, 0x200000);
+ wait_for_iosav(channel);
+ MCHBAR32_OR(SCHED_CBIT_ch(channel), 0x200000);
}
/* refresh disable */
- MCHBAR32_AND(0x5030, ~8);
+ MCHBAR32_AND(MC_INIT_STATE_G, ~8);
FOR_ALL_POPULATED_CHANNELS {
- wait_428c(channel);
+ wait_for_iosav(channel);
/* choose an existing rank. */
slotrank = !(ctrl->rankmap[channel] & 1) ? 2 : 0;
/* DRAM command ZQCS */
- MCHBAR32(0x4220 + channel * 0x400) = 0x0f003;
- MCHBAR32(0x4230 + channel * 0x400) = 0x41001;
- MCHBAR32(0x4200 + channel * 0x400) = (slotrank << 24) | 0x60000;
- MCHBAR32(0x4210 + channel * 0x400) = 0x3e0;
+ MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 0)) = 0x0f003;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 0)) = 0x41001;
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 0)) = (slotrank << 24) | 0x60000;
+ MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 0)) = 0x3e0;
// execute command queue
- MCHBAR32(0x4284 + channel * 0x400) = RUN_QUEUE_4284(1);
+ MCHBAR32(IOSAV_SEQ_CTL_ch(channel)) = IOSAV_RUN_ONCE(1);
- wait_428c(channel);
+ wait_for_iosav(channel);
}
/* jedec reset */
@@ -2251,7 +2373,7 @@ static int try_cmd_stretch(ramctr_timing *ctrl, int channel, int cmd_stretch)
ctrl->cmd_stretch[channel] = cmd_stretch;
- MCHBAR32(0x4004 + channel * 0x400) =
+ MCHBAR32(TC_RAP_ch(channel)) =
ctrl->tRRD
| (ctrl->tRTP << 4)
| (ctrl->tCKE << 8)
@@ -2266,12 +2388,12 @@ static int try_cmd_stretch(ramctr_timing *ctrl, int channel, int cmd_stretch)
delta = 4;
FOR_ALL_POPULATED_RANKS {
- ctrl->timings[channel][slotrank].val_4024 -= delta;
+ ctrl->timings[channel][slotrank].roundtrip_latency -= delta;
}
for (c320c = -127; c320c <= 127; c320c++) {
FOR_ALL_POPULATED_RANKS {
- ctrl->timings[channel][slotrank].val_320c = c320c;
+ ctrl->timings[channel][slotrank].pi_coding = c320c;
}
program_timings(ctrl, channel);
reprogram_320c(ctrl);
@@ -2283,7 +2405,7 @@ static int try_cmd_stretch(ramctr_timing *ctrl, int channel, int cmd_stretch)
FOR_ALL_POPULATED_RANKS {
struct run rn =
get_longest_zero_run(stat[slotrank], 255);
- ctrl->timings[channel][slotrank].val_320c = rn.middle - 127;
+ ctrl->timings[channel][slotrank].pi_coding = rn.middle - 127;
printram("cmd_stretch: %d, %d: 0x%02x-0x%02x-0x%02x\n",
channel, slotrank, rn.start, rn.middle, rn.end);
if (rn.all || rn.length < MIN_C320C_LEN) {
@@ -2307,7 +2429,7 @@ int command_training(ramctr_timing *ctrl)
FOR_ALL_POPULATED_CHANNELS {
fill_pattern5(ctrl, channel, 0);
- MCHBAR32(0x4288 + channel * 0x400) = 0x1f;
+ MCHBAR32(IOSAV_DATA_CTL_ch(channel)) = 0x1f;
}
FOR_ALL_POPULATED_CHANNELS {
@@ -2371,50 +2493,50 @@ static int discover_edges_real(ramctr_timing *ctrl, int channel, int slotrank,
program_timings(ctrl, channel);
FOR_ALL_LANES {
- MCHBAR32(0x4340 + channel * 0x400 + lane * 4) = 0;
- MCHBAR32(0x4140 + channel * 0x400 + lane * 4);
+ MCHBAR32(IOSAV_By_ERROR_COUNT_ch(channel, lane)) = 0;
+ MCHBAR32(IOSAV_By_BW_SERROR_C_ch(channel, lane));
}
- wait_428c(channel);
+ wait_for_iosav(channel);
/* DRAM command MRS
* write MR3 MPR enable
* in this mode only RD and RDA are allowed
* all reads return a predefined pattern */
- MCHBAR32(0x4220 + channel * 0x400) = 0x1f000;
- MCHBAR32(0x4230 + channel * 0x400) = 0xc01 | (ctrl->tMOD << 16);
- MCHBAR32(0x4200 + channel * 0x400) =
+ MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 0)) = 0x1f000;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 0)) = 0xc01 | (ctrl->tMOD << 16);
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 0)) =
(slotrank << 24) | 0x360004;
- MCHBAR32(0x4210 + channel * 0x400) = 0;
+ MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 0)) = 0;
/* DRAM command RD */
- MCHBAR32(0x4224 + channel * 0x400) = 0x1f105;
- MCHBAR32(0x4234 + channel * 0x400) = 0x40411f4;
- MCHBAR32(0x4204 + channel * 0x400) = slotrank << 24;
- MCHBAR32(0x4214 + channel * 0x400) = 0;
+ MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 1)) = 0x1f105;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 1)) = 0x40411f4;
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 1)) = slotrank << 24;
+ MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 1)) = 0;
/* DRAM command RD */
- MCHBAR32(0x4228 + channel * 0x400) = 0x1f105;
- MCHBAR32(0x4238 + channel * 0x400) =
+ MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 2)) = 0x1f105;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 2)) =
0x1001 | ((ctrl->CAS + 8) << 16);
- MCHBAR32(0x4208 + channel * 0x400) = (slotrank << 24) | 0x60000;
- MCHBAR32(0x4218 + channel * 0x400) = 0;
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 2)) = (slotrank << 24) | 0x60000;
+ MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 2)) = 0;
/* DRAM command MRS
* MR3 disable MPR */
- MCHBAR32(0x422c + channel * 0x400) = 0x1f000;
- MCHBAR32(0x423c + channel * 0x400) = 0xc01 | (ctrl->tMOD << 16);
- MCHBAR32(0x420c + channel * 0x400) =
+ MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 3)) = 0x1f000;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 3)) = 0xc01 | (ctrl->tMOD << 16);
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 3)) =
(slotrank << 24) | 0x360000;
- MCHBAR32(0x421c + channel * 0x400) = 0;
+ MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 3)) = 0;
// execute command queue
- MCHBAR32(0x4284 + channel * 0x400) = RUN_QUEUE_4284(4);
+ MCHBAR32(IOSAV_SEQ_CTL_ch(channel)) = IOSAV_RUN_ONCE(4);
- wait_428c(channel);
+ wait_for_iosav(channel);
FOR_ALL_LANES {
statistics[lane][edge] =
- MCHBAR32(0x4340 + channel * 0x400 + lane * 4);
+ MCHBAR32(IOSAV_By_ERROR_COUNT_ch(channel, lane));
}
}
FOR_ALL_LANES {
@@ -2439,19 +2561,19 @@ int discover_edges(ramctr_timing *ctrl)
int channel, slotrank, lane;
int err;
- MCHBAR32(0x3400) = 0;
+ MCHBAR32(GDCRTRAININGMOD) = 0;
toggle_io_reset();
FOR_ALL_POPULATED_CHANNELS FOR_ALL_LANES {
- MCHBAR32(0x4080 + channel * 0x400 + lane * 4) = 0;
+ MCHBAR32(IOSAV_By_BW_MASK_ch(channel, lane)) = 0;
}
FOR_ALL_POPULATED_CHANNELS {
fill_pattern0(ctrl, channel, 0, 0);
- MCHBAR32(0x4288 + channel * 0x400) = 0;
+ MCHBAR32(IOSAV_DATA_CTL_ch(channel)) = 0;
FOR_ALL_LANES {
- MCHBAR32(0x4140 + channel * 0x400 + lane * 4);
+ MCHBAR32(IOSAV_By_BW_SERROR_C_ch(channel, lane));
}
FOR_ALL_POPULATED_RANKS FOR_ALL_LANES {
@@ -2464,48 +2586,48 @@ int discover_edges(ramctr_timing *ctrl)
program_timings(ctrl, channel);
FOR_ALL_POPULATED_RANKS {
- wait_428c(channel);
+ wait_for_iosav(channel);
/* DRAM command MRS
* MR3 enable MPR
* write MR3 MPR enable
* in this mode only RD and RDA are allowed
* all reads return a predefined pattern */
- MCHBAR32(0x4220 + channel * 0x400) = 0x1f000;
- MCHBAR32(0x4230 + channel * 0x400) =
+ MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 0)) = 0x1f000;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 0)) =
0xc01 | (ctrl->tMOD << 16);
- MCHBAR32(0x4200 + channel * 0x400) =
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 0)) =
(slotrank << 24) | 0x360004;
- MCHBAR32(0x4210 + channel * 0x400) = 0;
+ MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 0)) = 0;
/* DRAM command RD */
- MCHBAR32(0x4224 + channel * 0x400) = 0x1f105;
- MCHBAR32(0x4234 + channel * 0x400) = 0x4041003;
- MCHBAR32(0x4204 + channel * 0x400) =
+ MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 1)) = 0x1f105;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 1)) = 0x4041003;
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 1)) =
(slotrank << 24) | 0;
- MCHBAR32(0x4214 + channel * 0x400) = 0;
+ MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 1)) = 0;
/* DRAM command RD */
- MCHBAR32(0x4228 + channel * 0x400) = 0x1f105;
- MCHBAR32(0x4238 + channel * 0x400) =
+ MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 2)) = 0x1f105;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 2)) =
0x1001 | ((ctrl->CAS + 8) << 16);
- MCHBAR32(0x4208 + channel * 0x400) =
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 2)) =
(slotrank << 24) | 0x60000;
- MCHBAR32(0x4218 + channel * 0x400) = 0;
+ MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 2)) = 0;
/* DRAM command MRS
* MR3 disable MPR */
- MCHBAR32(0x422c + channel * 0x400) = 0x1f000;
- MCHBAR32(0x423c + channel * 0x400) =
+ MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 3)) = 0x1f000;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 3)) =
0xc01 | (ctrl->tMOD << 16);
- MCHBAR32(0x420c + channel * 0x400) =
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 3)) =
(slotrank << 24) | 0x360000;
- MCHBAR32(0x421c + channel * 0x400) = 0;
+ MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 3)) = 0;
// execute command queue
- MCHBAR32(0x4284 + channel * 0x400) = RUN_QUEUE_4284(4);
+ MCHBAR32(IOSAV_SEQ_CTL_ch(channel)) = IOSAV_RUN_ONCE(4);
- wait_428c(channel);
+ wait_for_iosav(channel);
}
/* XXX: check any measured value ? */
@@ -2520,65 +2642,65 @@ int discover_edges(ramctr_timing *ctrl)
program_timings(ctrl, channel);
FOR_ALL_POPULATED_RANKS {
- wait_428c(channel);
+ wait_for_iosav(channel);
/* DRAM command MRS
* MR3 enable MPR
* write MR3 MPR enable
* in this mode only RD and RDA are allowed
* all reads return a predefined pattern */
- MCHBAR32(0x4220 + channel * 0x400) = 0x1f000;
- MCHBAR32(0x4230 + channel * 0x400) =
+ MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 0)) = 0x1f000;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 0)) =
0xc01 | (ctrl->tMOD << 16);
- MCHBAR32(0x4200 + channel * 0x400) =
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 0)) =
(slotrank << 24) | 0x360004;
- MCHBAR32(0x4210 + channel * 0x400) = 0;
+ MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 0)) = 0;
/* DRAM command RD */
- MCHBAR32(0x4224 + channel * 0x400) = 0x1f105;
- MCHBAR32(0x4234 + channel * 0x400) = 0x4041003;
- MCHBAR32(0x4204 + channel * 0x400) =
+ MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 1)) = 0x1f105;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 1)) = 0x4041003;
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 1)) =
(slotrank << 24) | 0;
- MCHBAR32(0x4214 + channel * 0x400) = 0;
+ MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 1)) = 0;
/* DRAM command RD */
- MCHBAR32(0x4228 + channel * 0x400) = 0x1f105;
- MCHBAR32(0x4238 + channel * 0x400) =
+ MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 2)) = 0x1f105;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 2)) =
0x1001 | ((ctrl->CAS + 8) << 16);
- MCHBAR32(0x4208 + channel * 0x400) =
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 2)) =
(slotrank << 24) | 0x60000;
- MCHBAR32(0x4218 + channel * 0x400) = 0;
+ MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 2)) = 0;
/* DRAM command MRS
* MR3 disable MPR */
- MCHBAR32(0x422c + channel * 0x400) = 0x1f000;
- MCHBAR32(0x423c + channel * 0x400) =
+ MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 3)) = 0x1f000;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 3)) =
0xc01 | (ctrl->tMOD << 16);
- MCHBAR32(0x420c + channel * 0x400) =
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 3)) =
(slotrank << 24) | 0x360000;
- MCHBAR32(0x421c + channel * 0x400) = 0;
+ MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 3)) = 0;
// execute command queue
- MCHBAR32(0x4284 + channel * 0x400) = RUN_QUEUE_4284(4);
+ MCHBAR32(IOSAV_SEQ_CTL_ch(channel)) = IOSAV_RUN_ONCE(4);
- wait_428c(channel);
+ wait_for_iosav(channel);
}
/* XXX: check any measured value ? */
FOR_ALL_LANES {
- MCHBAR32(0x4080 + channel * 0x400 + lane * 4) =
- ~MCHBAR32(0x4040 + channel * 0x400 + lane * 4)
+ MCHBAR32(IOSAV_By_BW_MASK_ch(channel, lane)) =
+ ~MCHBAR32(IOSAV_By_BW_SERROR_ch(channel, lane))
& 0xff;
}
fill_pattern0(ctrl, channel, 0, 0xffffffff);
- MCHBAR32(0x4288 + channel * 0x400) = 0;
+ MCHBAR32(IOSAV_DATA_CTL_ch(channel)) = 0;
}
/* FIXME: under some conditions (older chipsets?) vendor BIOS sets both edges to the same value. */
- MCHBAR32(0x4eb0) = 0x300;
- printram("discover falling edges:\n[%x] = %x\n", 0x4eb0, 0x300);
+ MCHBAR32(IOSAV_DC_MASK) = 0x300;
+ printram("discover falling edges:\n[%x] = %x\n", IOSAV_DC_MASK, 0x300);
FOR_ALL_CHANNELS FOR_ALL_POPULATED_RANKS {
err = discover_edges_real(ctrl, channel, slotrank,
@@ -2587,8 +2709,8 @@ int discover_edges(ramctr_timing *ctrl)
return err;
}
- MCHBAR32(0x4eb0) = 0x200;
- printram("discover rising edges:\n[%x] = %x\n", 0x4eb0, 0x200);
+ MCHBAR32(IOSAV_DC_MASK) = 0x200;
+ printram("discover rising edges:\n[%x] = %x\n", IOSAV_DC_MASK, 0x200);
FOR_ALL_CHANNELS FOR_ALL_POPULATED_RANKS {
err = discover_edges_real(ctrl, channel, slotrank,
@@ -2597,7 +2719,7 @@ int discover_edges(ramctr_timing *ctrl)
return err;
}
- MCHBAR32(0x4eb0) = 0;
+ MCHBAR32(IOSAV_DC_MASK) = 0;
FOR_ALL_CHANNELS FOR_ALL_POPULATED_RANKS FOR_ALL_LANES {
ctrl->timings[channel][slotrank].lanes[lane].falling =
@@ -2611,7 +2733,7 @@ int discover_edges(ramctr_timing *ctrl)
}
FOR_ALL_CHANNELS FOR_ALL_POPULATED_RANKS FOR_ALL_LANES {
- MCHBAR32(0x4080 + channel * 0x400 + lane * 4) = 0;
+ MCHBAR32(IOSAV_By_BW_MASK_ch(channel, lane)) = 0;
}
return 0;
}
@@ -2634,12 +2756,12 @@ static int discover_edges_write_real(ramctr_timing *ctrl, int channel,
}
for (i = 0; i < 3; i++) {
- MCHBAR32(0x3000 + channel * 0x100) = reg3000b24[i] << 24;
+ MCHBAR32(GDCRTRAININGMOD_ch(channel)) = reg3000b24[i] << 24;
printram("[%x] = 0x%08x\n",
- 0x3000 + channel * 0x100, reg3000b24[i] << 24);
+ GDCRTRAININGMOD_ch(channel), reg3000b24[i] << 24);
for (pat = 0; pat < NUM_PATTERNS; pat++) {
fill_pattern5(ctrl, channel, pat);
- MCHBAR32(0x4288 + channel * 0x400) = 0x1f;
+ MCHBAR32(IOSAV_DATA_CTL_ch(channel)) = 0x1f;
printram("using pattern %d\n", pat);
for (edge = 0; edge <= MAX_EDGE_TIMING; edge++) {
FOR_ALL_LANES {
@@ -2651,52 +2773,52 @@ static int discover_edges_write_real(ramctr_timing *ctrl, int channel,
program_timings(ctrl, channel);
FOR_ALL_LANES {
- MCHBAR32(0x4340 + channel * 0x400 + lane * 4) = 0;
- MCHBAR32(0x4140 + channel * 0x400 + lane * 4);
+ MCHBAR32(IOSAV_By_ERROR_COUNT_ch(channel, lane)) = 0;
+ MCHBAR32(IOSAV_By_BW_SERROR_C_ch(channel, lane));
}
- wait_428c(channel);
+ wait_for_iosav(channel);
/* DRAM command ACT */
- MCHBAR32(0x4220 + channel * 0x400) = 0x1f006;
- MCHBAR32(0x4230 + channel * 0x400) =
+ MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 0)) = 0x1f006;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 0)) =
0x4 | (ctrl->tRCD << 16) |
(MAX(ctrl->tRRD, (ctrl->tFAW >> 2) + 1)
<< 10);
- MCHBAR32(0x4200 + channel * 0x400) =
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 0)) =
(slotrank << 24) | 0x60000;
- MCHBAR32(0x4210 + channel * 0x400) = 0x240;
+ MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 0)) = 0x240;
/* DRAM command WR */
- MCHBAR32(0x4224 + channel * 0x400) = 0x1f201;
- MCHBAR32(0x4234 + channel * 0x400) = 0x8005020 |
+ MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 1)) = 0x1f201;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 1)) = 0x8005020 |
((ctrl->tWTR + ctrl->CWL + 8) << 16);
- MCHBAR32(0x4204 + channel * 0x400) =
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 1)) =
slotrank << 24;
- MCHBAR32(0x4214 + channel * 0x400) = 0x242;
+ MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 1)) = 0x242;
/* DRAM command RD */
- MCHBAR32(0x4228 + channel * 0x400) = 0x1f105;
- MCHBAR32(0x4238 + channel * 0x400) =
+ MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 2)) = 0x1f105;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 2)) =
0x4005020 | (MAX(ctrl->tRTP, 8) << 16);
- MCHBAR32(0x4208 + channel * 0x400) =
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 2)) =
slotrank << 24;
- MCHBAR32(0x4218 + channel * 0x400) = 0x242;
+ MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 2)) = 0x242;
/* DRAM command PRE */
- MCHBAR32(0x422c + channel * 0x400) = 0x1f002;
- MCHBAR32(0x423c + channel * 0x400) =
+ MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 3)) = 0x1f002;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 3)) =
0xc01 | (ctrl->tRP << 16);
- MCHBAR32(0x420c + channel * 0x400) =
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 3)) =
(slotrank << 24) | 0x60400;
- MCHBAR32(0x421c + channel * 0x400) = 0;
+ MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 3)) = 0;
// execute command queue
- MCHBAR32(0x4284 + channel * 0x400) =
- RUN_QUEUE_4284(4);
+ MCHBAR32(IOSAV_SEQ_CTL_ch(channel)) =
+ IOSAV_RUN_ONCE(4);
- wait_428c(channel);
+ wait_for_iosav(channel);
FOR_ALL_LANES {
- MCHBAR32(0x4340 + channel * 0x400 + lane * 4);
+ MCHBAR32(IOSAV_By_ERROR_COUNT_ch(channel, lane));
}
raw_statistics[edge] =
@@ -2727,7 +2849,7 @@ static int discover_edges_write_real(ramctr_timing *ctrl, int channel,
}
}
- MCHBAR32(0x3000) = 0;
+ MCHBAR32(GDCRTRAININGMOD_ch(0)) = 0;
printram("CPA\n");
return 0;
}
@@ -2740,8 +2862,8 @@ int discover_edges_write(ramctr_timing *ctrl)
int err;
/* FIXME: under some conditions (older chipsets?) vendor BIOS sets both edges to the same value. */
- MCHBAR32(0x4eb0) = 0x300;
- printram("discover falling edges write:\n[%x] = %x\n", 0x4eb0, 0x300);
+ MCHBAR32(IOSAV_DC_MASK) = 0x300;
+ printram("discover falling edges write:\n[%x] = %x\n", IOSAV_DC_MASK, 0x300);
FOR_ALL_CHANNELS FOR_ALL_POPULATED_RANKS {
err = discover_edges_write_real(ctrl, channel, slotrank,
@@ -2750,8 +2872,8 @@ int discover_edges_write(ramctr_timing *ctrl)
return err;
}
- MCHBAR32(0x4eb0) = 0x200;
- printram("discover rising edges write:\n[%x] = %x\n", 0x4eb0, 0x200);
+ MCHBAR32(IOSAV_DC_MASK) = 0x200;
+ printram("discover rising edges write:\n[%x] = %x\n", IOSAV_DC_MASK, 0x200);
FOR_ALL_CHANNELS FOR_ALL_POPULATED_RANKS {
err = discover_edges_write_real(ctrl, channel, slotrank,
@@ -2760,7 +2882,7 @@ int discover_edges_write(ramctr_timing *ctrl)
return err;
}
- MCHBAR32(0x4eb0) = 0;
+ MCHBAR32(IOSAV_DC_MASK) = 0;
FOR_ALL_CHANNELS FOR_ALL_POPULATED_RANKS FOR_ALL_LANES {
ctrl->timings[channel][slotrank].lanes[lane].falling =
@@ -2773,47 +2895,47 @@ int discover_edges_write(ramctr_timing *ctrl)
program_timings(ctrl, channel);
FOR_ALL_CHANNELS FOR_ALL_POPULATED_RANKS FOR_ALL_LANES {
- MCHBAR32(0x4080 + channel * 0x400 + lane * 4) = 0;
+ MCHBAR32(IOSAV_By_BW_MASK_ch(channel, lane)) = 0;
}
return 0;
}
static void test_timC_write(ramctr_timing *ctrl, int channel, int slotrank)
{
- wait_428c(channel);
+ wait_for_iosav(channel);
/* DRAM command ACT */
- MCHBAR32(0x4220 + channel * 0x400) = 0x1f006;
- MCHBAR32(0x4230 + channel * 0x400) =
+ MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 0)) = 0x1f006;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 0)) =
(MAX((ctrl->tFAW >> 2) + 1, ctrl->tRRD)
<< 10) | (ctrl->tRCD << 16) | 4;
- MCHBAR32(0x4200 + channel * 0x400) =
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 0)) =
(slotrank << 24) | 0x60000;
- MCHBAR32(0x4210 + channel * 0x400) = 0x244;
+ MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 0)) = 0x244;
/* DRAM command WR */
- MCHBAR32(0x4224 + channel * 0x400) = 0x1f201;
- MCHBAR32(0x4234 + channel * 0x400) =
+ MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 1)) = 0x1f201;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 1)) =
0x80011e0 | ((ctrl->tWTR + ctrl->CWL + 8) << 16);
- MCHBAR32(0x4204 + channel * 0x400) = slotrank << 24;
- MCHBAR32(0x4214 + channel * 0x400) = 0x242;
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 1)) = slotrank << 24;
+ MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 1)) = 0x242;
/* DRAM command RD */
- MCHBAR32(0x4228 + channel * 0x400) = 0x1f105;
- MCHBAR32(0x4238 + channel * 0x400) =
+ MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 2)) = 0x1f105;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 2)) =
0x40011e0 | (MAX(ctrl->tRTP, 8) << 16);
- MCHBAR32(0x4208 + channel * 0x400) = slotrank << 24;
- MCHBAR32(0x4218 + channel * 0x400) = 0x242;
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 2)) = slotrank << 24;
+ MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 2)) = 0x242;
/* DRAM command PRE */
- MCHBAR32(0x422c + channel * 0x400) = 0x1f002;
- MCHBAR32(0x423c + channel * 0x400) = 0x1001 | (ctrl->tRP << 16);
- MCHBAR32(0x420c + channel * 0x400) = (slotrank << 24) | 0x60400;
- MCHBAR32(0x421c + channel * 0x400) = 0;
+ MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 3)) = 0x1f002;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 3)) = 0x1001 | (ctrl->tRP << 16);
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 3)) = (slotrank << 24) | 0x60400;
+ MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 3)) = 0;
// execute command queue
- MCHBAR32(0x4284 + channel * 0x400) = RUN_QUEUE_4284(4);
+ MCHBAR32(IOSAV_SEQ_CTL_ch(channel)) = IOSAV_RUN_ONCE(4);
- wait_428c(channel);
+ wait_for_iosav(channel);
}
int discover_timC_write(ramctr_timing *ctrl)
@@ -2830,12 +2952,16 @@ int discover_timC_write(ramctr_timing *ctrl)
upper[channel][slotrank][lane] = MAX_TIMC;
}
- MCHBAR32(0x4ea8) = 1;
+ /*
+ * Enable IOSAV_n_SPECIAL_COMMAND_ADDR optimization.
+ * FIXME: This must only be done on Ivy Bridge.
+ */
+ MCHBAR32(MCMNTS_SPARE) = 1;
printram("discover timC write:\n");
for (i = 0; i < 3; i++)
FOR_ALL_POPULATED_CHANNELS {
- MCHBAR32_AND_OR(0xe3c + (channel * 0x100), ~0x3f000000,
+ MCHBAR32_AND_OR(GDCRCMDDEBUGMUXCFG_Cz_S(channel), ~0x3f000000,
rege3c_b24[i] << 24);
udelay(2);
for (pat = 0; pat < NUM_PATTERNS; pat++) {
@@ -2848,7 +2974,7 @@ int discover_timC_write(ramctr_timing *ctrl)
statistics[MAX_TIMC] = 1;
fill_pattern5(ctrl, channel, pat);
- MCHBAR32(0x4288 + channel * 0x400) =
+ MCHBAR32(IOSAV_DATA_CTL_ch(channel)) =
0x1f;
for (timC = 0; timC < MAX_TIMC; timC++) {
FOR_ALL_LANES
@@ -2892,11 +3018,15 @@ int discover_timC_write(ramctr_timing *ctrl)
}
FOR_ALL_CHANNELS {
- MCHBAR32_AND(0xe3c + (channel * 0x100), ~0x3f000000);
+ MCHBAR32_AND(GDCRCMDDEBUGMUXCFG_Cz_S(channel), ~0x3f000000);
udelay(2);
}
- MCHBAR32(0x4ea8) = 0;
+ /*
+ * Disable IOSAV_n_SPECIAL_COMMAND_ADDR optimization.
+ * FIXME: This must only be done on Ivy Bridge.
+ */
+ MCHBAR32(MCMNTS_SPARE) = 0;
printram("CPB\n");
@@ -2915,7 +3045,7 @@ int discover_timC_write(ramctr_timing *ctrl)
return 0;
}
-void normalize_training(ramctr_timing * ctrl)
+void normalize_training(ramctr_timing *ctrl)
{
int channel, slotrank, lane;
int mat;
@@ -2932,7 +3062,7 @@ void normalize_training(ramctr_timing * ctrl)
printram("normalize %d, %d, %d: delta %d\n",
channel, slotrank, lane, delta);
- ctrl->timings[channel][slotrank].val_4024 += delta;
+ ctrl->timings[channel][slotrank].roundtrip_latency += delta;
ctrl->timings[channel][slotrank].io_latency += delta;
}
@@ -2941,7 +3071,7 @@ void normalize_training(ramctr_timing * ctrl)
}
}
-void write_controller_mr(ramctr_timing * ctrl)
+void write_controller_mr(ramctr_timing *ctrl)
{
int channel, slotrank;
@@ -2959,7 +3089,7 @@ int channel_test(ramctr_timing *ctrl)
slotrank = 0;
FOR_ALL_POPULATED_CHANNELS
- if (MCHBAR32(0x42a0 + channel * 0x400) & 0xa000) {
+ if (MCHBAR32(MC_INIT_STATE_ch(channel)) & 0xa000) {
printk(BIOS_EMERG, "Mini channel test failed (1): %d\n",
channel);
return MAKE_ERR;
@@ -2967,52 +3097,52 @@ int channel_test(ramctr_timing *ctrl)
FOR_ALL_POPULATED_CHANNELS {
fill_pattern0(ctrl, channel, 0x12345678, 0x98765432);
- MCHBAR32(0x4288 + channel * 0x400) = 0;
+ MCHBAR32(IOSAV_DATA_CTL_ch(channel)) = 0;
}
for (slotrank = 0; slotrank < 4; slotrank++)
FOR_ALL_CHANNELS
if (ctrl->rankmap[channel] & (1 << slotrank)) {
FOR_ALL_LANES {
- MCHBAR32(0x4f40 + 4 * lane) = 0;
- MCHBAR32(0x4d40 + 4 * lane) = 0;
+ MCHBAR32(IOSAV_By_ERROR_COUNT(lane)) = 0;
+ MCHBAR32(IOSAV_By_BW_SERROR_C(lane)) = 0;
}
- wait_428c(channel);
+ wait_for_iosav(channel);
/* DRAM command ACT */
- MCHBAR32(0x4220 + channel * 0x400) = 0x0001f006;
- MCHBAR32(0x4230 + channel * 0x400) = 0x0028a004;
- MCHBAR32(0x4200 + channel * 0x400) =
+ MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 0)) = 0x0001f006;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 0)) = 0x0028a004;
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 0)) =
0x00060000 | (slotrank << 24);
- MCHBAR32(0x4210 + channel * 0x400) = 0x00000244;
+ MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 0)) = 0x00000244;
/* DRAM command WR */
- MCHBAR32(0x4224 + channel * 0x400) = 0x0001f201;
- MCHBAR32(0x4234 + channel * 0x400) = 0x08281064;
- MCHBAR32(0x4204 + channel * 0x400) =
+ MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 1)) = 0x0001f201;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 1)) = 0x08281064;
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 1)) =
0x00000000 | (slotrank << 24);
- MCHBAR32(0x4214 + channel * 0x400) = 0x00000242;
+ MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 1)) = 0x00000242;
/* DRAM command RD */
- MCHBAR32(0x4228 + channel * 0x400) = 0x0001f105;
- MCHBAR32(0x4238 + channel * 0x400) = 0x04281064;
- MCHBAR32(0x4208 + channel * 0x400) =
+ MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 2)) = 0x0001f105;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 2)) = 0x04281064;
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 2)) =
0x00000000 | (slotrank << 24);
- MCHBAR32(0x4218 + channel * 0x400) = 0x00000242;
+ MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 2)) = 0x00000242;
/* DRAM command PRE */
- MCHBAR32(0x422c + channel * 0x400) = 0x0001f002;
- MCHBAR32(0x423c + channel * 0x400) = 0x00280c01;
- MCHBAR32(0x420c + channel * 0x400) =
+ MCHBAR32(IOSAV_n_SP_CMD_CTL_ch(channel, 3)) = 0x0001f002;
+ MCHBAR32(IOSAV_n_SUBSEQ_CTL_ch(channel, 3)) = 0x00280c01;
+ MCHBAR32(IOSAV_n_SP_CMD_ADDR_ch(channel, 3)) =
0x00060400 | (slotrank << 24);
- MCHBAR32(0x421c + channel * 0x400) = 0x00000240;
+ MCHBAR32(IOSAV_n_ADDR_UPD_ch(channel, 3)) = 0x00000240;
// execute command queue
- MCHBAR32(0x4284 + channel * 0x400) = RUN_QUEUE_4284(4);
+ MCHBAR32(IOSAV_SEQ_CTL_ch(channel)) = IOSAV_RUN_ONCE(4);
- wait_428c(channel);
+ wait_for_iosav(channel);
FOR_ALL_LANES
- if (MCHBAR32(0x4340 + channel * 0x400 + lane * 4)) {
+ if (MCHBAR32(IOSAV_By_ERROR_COUNT_ch(channel, lane))) {
printk(BIOS_EMERG, "Mini channel test failed (2): %d, %d, %d\n",
channel, slotrank, lane);
return MAKE_ERR;
@@ -3021,7 +3151,7 @@ int channel_test(ramctr_timing *ctrl)
return 0;
}
-void set_scrambling_seed(ramctr_timing * ctrl)
+void set_scrambling_seed(ramctr_timing *ctrl)
{
int channel;
@@ -3032,10 +3162,10 @@ void set_scrambling_seed(ramctr_timing * ctrl)
{0x00028bfa, 0x53fe4b49, 0x19ed5483}
};
FOR_ALL_POPULATED_CHANNELS {
- MCHBAR32(0x4020 + channel * 0x400) &= ~0x10000000;
- MCHBAR32(0x4034 + channel * 0x400) = seeds[channel][0];
- MCHBAR32(0x403c + channel * 0x400) = seeds[channel][1];
- MCHBAR32(0x4038 + channel * 0x400) = seeds[channel][2];
+ MCHBAR32(SCHED_CBIT_ch(channel)) &= ~0x10000000;
+ MCHBAR32(SCRAMBLING_SEED_1_ch(channel)) = seeds[channel][0];
+ MCHBAR32(SCRAMBLING_SEED_2_HIGH_ch(channel)) = seeds[channel][1];
+ MCHBAR32(SCRAMBLING_SEED_2_LOW_ch(channel)) = seeds[channel][2];
}
}
@@ -3045,77 +3175,76 @@ void set_4f8c(void)
cpu = cpu_get_cpuid();
if (IS_SANDY_CPU(cpu) && (IS_SANDY_CPU_D0(cpu) || IS_SANDY_CPU_D1(cpu))) {
- MCHBAR32(0x4f8c) = 0x141D1519;
+ MCHBAR32(SC_WDBWM) = 0x141D1519;
} else {
- MCHBAR32(0x4f8c) = 0x551D1519;
+ MCHBAR32(SC_WDBWM) = 0x551D1519;
}
}
-void prepare_training(ramctr_timing * ctrl)
+void prepare_training(ramctr_timing *ctrl)
{
int channel;
FOR_ALL_POPULATED_CHANNELS {
// Always drive command bus
- MCHBAR32_OR(0x4004 + channel * 0x400, 0x20000000);
+ MCHBAR32_OR(TC_RAP_ch(channel), 0x20000000);
}
udelay(1);
FOR_ALL_POPULATED_CHANNELS {
- wait_428c(channel);
+ wait_for_iosav(channel);
}
}
-void set_4008c(ramctr_timing * ctrl)
+void set_4008c(ramctr_timing *ctrl)
{
int channel, slotrank;
FOR_ALL_POPULATED_CHANNELS {
u32 b20, b4_8_12;
- int min_320c = 10000;
- int max_320c = -10000;
+ int min_pi = 10000;
+ int max_pi = -10000;
FOR_ALL_POPULATED_RANKS {
- max_320c = MAX(ctrl->timings[channel][slotrank].val_320c, max_320c);
- min_320c = MIN(ctrl->timings[channel][slotrank].val_320c, min_320c);
+ max_pi = MAX(ctrl->timings[channel][slotrank].pi_coding, max_pi);
+ min_pi = MIN(ctrl->timings[channel][slotrank].pi_coding, min_pi);
}
- if (max_320c - min_320c > 51)
+ if (max_pi - min_pi > 51)
b20 = 0;
else
b20 = ctrl->ref_card_offset[channel];
- if (ctrl->reg_320c_range_threshold < max_320c - min_320c)
+ if (ctrl->pi_coding_threshold < max_pi - min_pi)
b4_8_12 = 0x3330;
else
b4_8_12 = 0x2220;
dram_odt_stretch(ctrl, channel);
- MCHBAR32(0x4008 + channel * 0x400) =
+ MCHBAR32(TC_RWP_ch(channel)) =
0x0a000000 | (b20 << 20) |
((ctrl->ref_card_offset[channel] + 2) << 16) | b4_8_12;
}
}
-void set_42a0(ramctr_timing * ctrl)
+void set_normal_operation(ramctr_timing *ctrl)
{
int channel;
FOR_ALL_POPULATED_CHANNELS {
- MCHBAR32(0x42a0 + channel * 0x400) =
- 0x00001000 | ctrl->rankmap[channel];
- MCHBAR32_AND(0x4004 + channel * 0x400, ~0x20000000);
+ MCHBAR32(MC_INIT_STATE_ch(channel)) = 0x00001000 | ctrl->rankmap[channel];
+ MCHBAR32_AND(TC_RAP_ch(channel), ~0x20000000);
}
}
static int encode_5d10(int ns)
{
- return (ns + 499) / 500;
+ return (ns + 499) / 500;
}
/* FIXME: values in this function should be hardware revision-dependent. */
-void final_registers(ramctr_timing * ctrl)
+void final_registers(ramctr_timing *ctrl)
{
const size_t is_mobile = get_platform_type() == PLATFORM_MOBILE;
@@ -3124,10 +3253,11 @@ void final_registers(ramctr_timing * ctrl)
int t3_ns;
u32 r32;
- MCHBAR32(0x4cd4) = 0x00000046;
+ /* FIXME: This register only exists on Ivy Bridge. */
+ MCHBAR32(WMM_READ_CONFIG) = 0x00000046;
FOR_ALL_CHANNELS
- MCHBAR32_AND_OR(0x400c + channel * 0x400, 0xFFFFCFFF, 0x1000);
+ MCHBAR32_AND_OR(TC_OTHP_ch(channel), 0xFFFFCFFF, 0x1000);
if (is_mobile)
/* APD - DLL Off, 64 DCLKs until idle, decision per rank */
@@ -3137,26 +3267,26 @@ void final_registers(ramctr_timing * ctrl)
MCHBAR32(PM_PDWN_CONFIG) = 0x00000340;
FOR_ALL_CHANNELS
- MCHBAR32(0x4380 + 0x400 * channel) = 0x00000aaa;
+ MCHBAR32(PM_TRML_M_CONFIG_ch(channel)) = 0x00000aaa;
- MCHBAR32(0x4f88) = 0x5f7003ff; // OK
- MCHBAR32(0x5064) = 0x00073000 | ctrl->reg_5064b0; // OK
+ MCHBAR32(PM_BW_LIMIT_CONFIG) = 0x5f7003ff; // OK
+ MCHBAR32(PM_DLL_CONFIG) = 0x00073000 | ctrl->mdll_wake_delay; // OK
FOR_ALL_CHANNELS {
switch (ctrl->rankmap[channel]) {
/* Unpopulated channel. */
case 0:
- MCHBAR32(0x4384 + channel * 0x400) = 0;
+ MCHBAR32(PM_CMD_PWR_ch(channel)) = 0;
break;
/* Only single-ranked dimms. */
case 1:
case 4:
case 5:
- MCHBAR32(0x4384 + channel * 0x400) = 0x373131;
+ MCHBAR32(PM_CMD_PWR_ch(channel)) = 0x373131;
break;
/* Dual-ranked dimms present. */
default:
- MCHBAR32(0x4384 + channel * 0x400) = 0x9b6ea1;
+ MCHBAR32(PM_CMD_PWR_ch(channel)) = 0x9b6ea1;
break;
}
}
@@ -3166,30 +3296,30 @@ void final_registers(ramctr_timing * ctrl)
MCHBAR32_AND(MEM_TRML_INTERRUPT, ~0x1f);
FOR_ALL_CHANNELS
- MCHBAR32_AND_OR(TC_RFP_C0 + channel * 0x400, ~0x30000, 1 << 16);
+ MCHBAR32_AND_OR(TC_RFP_ch(channel), ~0x30000, 1 << 16);
- MCHBAR32_OR(0x5030, 1);
- MCHBAR32_OR(0x5030, 0x80);
- MCHBAR32(0x5f18) = 0xfa;
+ MCHBAR32_OR(MC_INIT_STATE_G, 1);
+ MCHBAR32_OR(MC_INIT_STATE_G, 0x80);
+ MCHBAR32(BANDTIMERS_SNB) = 0xfa;
/* Find a populated channel. */
FOR_ALL_POPULATED_CHANNELS
break;
- t1_cycles = (MCHBAR32(0x4290 + channel * 0x400) >> 8) & 0xff;
- r32 = MCHBAR32(0x5064);
+ t1_cycles = (MCHBAR32(TC_ZQCAL_ch(channel)) >> 8) & 0xff;
+ r32 = MCHBAR32(PM_DLL_CONFIG);
if (r32 & 0x20000)
t1_cycles += (r32 & 0xfff);
- t1_cycles += MCHBAR32(0x42a4 + channel * 0x400) & 0xfff;
+ t1_cycles += MCHBAR32(TC_SRFTP_ch(channel)) & 0xfff;
t1_ns = t1_cycles * ctrl->tCK / 256 + 544;
if (!(r32 & 0x20000))
t1_ns += 500;
- t2_ns = 10 * ((MCHBAR32(0x5f10) >> 8) & 0xfff);
- if (MCHBAR32(0x5f00) & 8)
+ t2_ns = 10 * ((MCHBAR32(SAPMTIMERS) >> 8) & 0xfff);
+ if (MCHBAR32(SAPMCTL) & 8)
{
- t3_ns = 10 * ((MCHBAR32(0x5f20) >> 8) & 0xfff);
- t3_ns += 10 * (MCHBAR32(0x5f18) & 0xff);
+ t3_ns = 10 * ((MCHBAR32(BANDTIMERS_IVB) >> 8) & 0xfff);
+ t3_ns += 10 * (MCHBAR32(SAPMTIMERS2_IVB) & 0xff);
}
else
{
@@ -3203,12 +3333,12 @@ void final_registers(ramctr_timing * ctrl)
encode_5d10(t2_ns) + encode_5d10(t1_ns)) << 24) | 0xc);
}
-void restore_timings(ramctr_timing * ctrl)
+void restore_timings(ramctr_timing *ctrl)
{
int channel, slotrank, lane;
FOR_ALL_POPULATED_CHANNELS
- MCHBAR32(0x4004 + channel * 0x400) =
+ MCHBAR32(TC_RAP_ch(channel)) =
ctrl->tRRD
| (ctrl->tRTP << 4)
| (ctrl->tCKE << 8)
@@ -3220,25 +3350,25 @@ void restore_timings(ramctr_timing * ctrl)
udelay(1);
FOR_ALL_POPULATED_CHANNELS {
- wait_428c(channel);
+ wait_for_iosav(channel);
}
FOR_ALL_CHANNELS FOR_ALL_POPULATED_RANKS FOR_ALL_LANES {
- MCHBAR32(0x4080 + channel * 0x400 + lane * 4) = 0;
+ MCHBAR32(IOSAV_By_BW_MASK_ch(channel, lane)) = 0;
}
FOR_ALL_POPULATED_CHANNELS
- MCHBAR32_OR(0x4008 + channel * 0x400, 0x8000000);
+ MCHBAR32_OR(TC_RWP_ch(channel), 0x8000000);
FOR_ALL_POPULATED_CHANNELS {
udelay (1);
- MCHBAR32_OR(0x4020 + channel * 0x400, 0x200000);
+ MCHBAR32_OR(SCHED_CBIT_ch(channel), 0x200000);
}
printram("CPE\n");
- MCHBAR32(0x3400) = 0;
- MCHBAR32(0x4eb0) = 0;
+ MCHBAR32(GDCRTRAININGMOD) = 0;
+ MCHBAR32(IOSAV_DC_MASK) = 0;
printram("CP5b\n");
@@ -3248,14 +3378,14 @@ void restore_timings(ramctr_timing * ctrl)
u32 reg, addr;
- while (!(MCHBAR32(0x5084) & 0x10000));
+ while (!(MCHBAR32(RCOMP_TIMER) & 0x10000));
do {
- reg = MCHBAR32(0x428c);
+ reg = MCHBAR32(IOSAV_STATUS_ch(0));
} while ((reg & 0x14) == 0);
// Set state of memory controller
- MCHBAR32(0x5030) = 0x116;
- MCHBAR32(0x4ea0) = 0;
+ MCHBAR32(MC_INIT_STATE_G) = 0x116;
+ MCHBAR32(MC_INIT_STATE) = 0;
// Wait 500us
udelay(500);
@@ -3264,7 +3394,7 @@ void restore_timings(ramctr_timing * ctrl)
// Set valid rank CKE
reg = 0;
reg = (reg & ~0xf) | ctrl->rankmap[channel];
- addr = 0x42a0 + channel * 0x400;
+ addr = MC_INIT_STATE_ch(channel);
MCHBAR32(addr) = reg;
// Wait 10ns for ranks to settle
@@ -3282,12 +3412,17 @@ void restore_timings(ramctr_timing * ctrl)
printram("CP5c\n");
- MCHBAR32(0x3000) = 0;
+ MCHBAR32(GDCRTRAININGMOD_ch(0)) = 0;
FOR_ALL_CHANNELS {
- MCHBAR32_AND(0xe3c + channel * 0x100, ~0x3f000000);
+ MCHBAR32_AND(GDCRCMDDEBUGMUXCFG_Cz_S(channel), ~0x3f000000);
udelay(2);
}
- MCHBAR32(0x4ea8) = 0;
+ /*
+ * Disable IOSAV_n_SPECIAL_COMMAND_ADDR optimization.
+ * FIXME: This must only be done on Ivy Bridge. Moreover, this instance seems to be
+ * spurious, because nothing else enabled this optimization before.
+ */
+ MCHBAR32(MCMNTS_SPARE) = 0;
}
diff --git a/src/northbridge/intel/sandybridge/raminit_common.h b/src/northbridge/intel/sandybridge/raminit_common.h
index d11c04417a..194e6db673 100644
--- a/src/northbridge/intel/sandybridge/raminit_common.h
+++ b/src/northbridge/intel/sandybridge/raminit_common.h
@@ -52,12 +52,14 @@ typedef struct dimm_info_st {
} dimm_info;
struct ram_rank_timings {
- /* Register 4024. One byte per slotrank. */
- u8 val_4024;
- /* IO_LATENCY register. One nibble per slotrank. */
+ /* ROUNDT_LAT register. One byte per slotrank. */
+ u8 roundtrip_latency;
+
+ /* IO_LATENCY register. One nibble per slotrank. */
u8 io_latency;
- int val_320c;
+ /* Phase interpolator coding for command and control. */
+ int pi_coding;
struct ram_lane_timings {
/* lane register offset 0x10. */
@@ -96,7 +98,7 @@ typedef struct ramctr_timing_st {
u32 tCWL;
u32 tCMD;
/* Latencies in terms of clock cycles
- * They are saved separately as they are needed for DRAM MRS commands*/
+ * They are saved separately as they are needed for DRAM MRS commands */
u8 CAS; /* CAS read latency */
u8 CWL; /* CAS write latency */
@@ -109,7 +111,8 @@ typedef struct ramctr_timing_st {
u32 tXP;
u32 tAONPD;
- u16 reg_5064b0; /* bits 0-11. */
+ /* Bits [0..11] of PM_DLL_CONFIG: Master DLL wakeup delay timer. */
+ u16 mdll_wake_delay;
u8 rankmap[NUM_CHANNELS];
int ref_card_offset[NUM_CHANNELS];
@@ -117,8 +120,8 @@ typedef struct ramctr_timing_st {
int channel_size_mb[NUM_CHANNELS];
u32 cmd_stretch[NUM_CHANNELS];
- int reg_c14_offset;
- int reg_320c_range_threshold;
+ int pi_code_offset;
+ int pi_coding_threshold;
int edge_offset[3];
int timC_offset[3];
@@ -150,33 +153,33 @@ typedef struct ramctr_timing_st {
#define GET_ERR_CHANNEL(x) (x>>16)
u8 get_CWL(u32 tCK);
-void dram_mrscommands(ramctr_timing * ctrl);
-void program_timings(ramctr_timing * ctrl, int channel);
+void dram_mrscommands(ramctr_timing *ctrl);
+void program_timings(ramctr_timing *ctrl, int channel);
void dram_find_common_params(ramctr_timing *ctrl);
-void dram_xover(ramctr_timing * ctrl);
-void dram_timing_regs(ramctr_timing * ctrl);
+void dram_xover(ramctr_timing *ctrl);
+void dram_timing_regs(ramctr_timing *ctrl);
void dram_dimm_mapping(ramctr_timing *ctrl);
-void dram_dimm_set_mapping(ramctr_timing * ctrl);
-void dram_zones(ramctr_timing * ctrl, int training);
+void dram_dimm_set_mapping(ramctr_timing *ctrl);
+void dram_zones(ramctr_timing *ctrl, int training);
unsigned int get_mem_min_tck(void);
-void dram_memorymap(ramctr_timing * ctrl, int me_uma_size);
-void dram_jedecreset(ramctr_timing * ctrl);
-int read_training(ramctr_timing * ctrl);
-int write_training(ramctr_timing * ctrl);
+void dram_memorymap(ramctr_timing *ctrl, int me_uma_size);
+void dram_jedecreset(ramctr_timing *ctrl);
+int read_training(ramctr_timing *ctrl);
+int write_training(ramctr_timing *ctrl);
int command_training(ramctr_timing *ctrl);
int discover_edges(ramctr_timing *ctrl);
int discover_edges_write(ramctr_timing *ctrl);
int discover_timC_write(ramctr_timing *ctrl);
-void normalize_training(ramctr_timing * ctrl);
-void write_controller_mr(ramctr_timing * ctrl);
+void normalize_training(ramctr_timing *ctrl);
+void write_controller_mr(ramctr_timing *ctrl);
int channel_test(ramctr_timing *ctrl);
-void set_scrambling_seed(ramctr_timing * ctrl);
+void set_scrambling_seed(ramctr_timing *ctrl);
void set_4f8c(void);
-void prepare_training(ramctr_timing * ctrl);
-void set_4008c(ramctr_timing * ctrl);
-void set_42a0(ramctr_timing * ctrl);
-void final_registers(ramctr_timing * ctrl);
-void restore_timings(ramctr_timing * ctrl);
+void prepare_training(ramctr_timing *ctrl);
+void set_4008c(ramctr_timing *ctrl);
+void set_normal_operation(ramctr_timing *ctrl);
+void final_registers(ramctr_timing *ctrl);
+void restore_timings(ramctr_timing *ctrl);
int try_init_dram_ddr3_sandy(ramctr_timing *ctrl, int fast_boot,
int s3_resume, int me_uma_size);
diff --git a/src/northbridge/intel/sandybridge/raminit_ivy.c b/src/northbridge/intel/sandybridge/raminit_ivy.c
index 7542a379e1..8013636f92 100644
--- a/src/northbridge/intel/sandybridge/raminit_ivy.c
+++ b/src/northbridge/intel/sandybridge/raminit_ivy.c
@@ -388,7 +388,7 @@ static void dram_timing(ramctr_timing *ctrl)
ctrl->timC_offset[0] = 20; //XXX: guessed
ctrl->timC_offset[1] = 8;
ctrl->timC_offset[2] = 8;
- ctrl->reg_320c_range_threshold = 10;
+ ctrl->pi_coding_threshold = 10;
} else if (ctrl->tCK == TCK_1100MHZ) {
ctrl->edge_offset[0] = 17; //XXX: guessed
ctrl->edge_offset[1] = 7;
@@ -396,7 +396,7 @@ static void dram_timing(ramctr_timing *ctrl)
ctrl->timC_offset[0] = 19; //XXX: guessed
ctrl->timC_offset[1] = 7;
ctrl->timC_offset[2] = 7;
- ctrl->reg_320c_range_threshold = 13;
+ ctrl->pi_coding_threshold = 13;
} else if (ctrl->tCK == TCK_1066MHZ) {
ctrl->edge_offset[0] = 16;
ctrl->edge_offset[1] = 7;
@@ -404,7 +404,7 @@ static void dram_timing(ramctr_timing *ctrl)
ctrl->timC_offset[0] = 18;
ctrl->timC_offset[1] = 7;
ctrl->timC_offset[2] = 7;
- ctrl->reg_320c_range_threshold = 13;
+ ctrl->pi_coding_threshold = 13;
} else if (ctrl->tCK == TCK_1000MHZ) {
ctrl->edge_offset[0] = 15; //XXX: guessed
ctrl->edge_offset[1] = 6;
@@ -412,7 +412,7 @@ static void dram_timing(ramctr_timing *ctrl)
ctrl->timC_offset[0] = 17; //XXX: guessed
ctrl->timC_offset[1] = 6;
ctrl->timC_offset[2] = 6;
- ctrl->reg_320c_range_threshold = 13;
+ ctrl->pi_coding_threshold = 13;
} else if (ctrl->tCK == TCK_933MHZ) {
ctrl->edge_offset[0] = 14;
ctrl->edge_offset[1] = 6;
@@ -420,7 +420,7 @@ static void dram_timing(ramctr_timing *ctrl)
ctrl->timC_offset[0] = 15;
ctrl->timC_offset[1] = 6;
ctrl->timC_offset[2] = 6;
- ctrl->reg_320c_range_threshold = 15;
+ ctrl->pi_coding_threshold = 15;
} else if (ctrl->tCK == TCK_900MHZ) {
ctrl->edge_offset[0] = 14; //XXX: guessed
ctrl->edge_offset[1] = 6;
@@ -428,7 +428,7 @@ static void dram_timing(ramctr_timing *ctrl)
ctrl->timC_offset[0] = 15; //XXX: guessed
ctrl->timC_offset[1] = 6;
ctrl->timC_offset[2] = 6;
- ctrl->reg_320c_range_threshold = 12;
+ ctrl->pi_coding_threshold = 12;
} else if (ctrl->tCK == TCK_800MHZ) {
ctrl->edge_offset[0] = 13;
ctrl->edge_offset[1] = 5;
@@ -436,7 +436,7 @@ static void dram_timing(ramctr_timing *ctrl)
ctrl->timC_offset[0] = 14;
ctrl->timC_offset[1] = 5;
ctrl->timC_offset[2] = 5;
- ctrl->reg_320c_range_threshold = 15;
+ ctrl->pi_coding_threshold = 15;
} else if (ctrl->tCK == TCK_700MHZ) {
ctrl->edge_offset[0] = 13; //XXX: guessed
ctrl->edge_offset[1] = 5;
@@ -444,7 +444,7 @@ static void dram_timing(ramctr_timing *ctrl)
ctrl->timC_offset[0] = 14; //XXX: guessed
ctrl->timC_offset[1] = 5;
ctrl->timC_offset[2] = 5;
- ctrl->reg_320c_range_threshold = 16;
+ ctrl->pi_coding_threshold = 16;
} else if (ctrl->tCK == TCK_666MHZ) {
ctrl->edge_offset[0] = 10;
ctrl->edge_offset[1] = 4;
@@ -452,7 +452,7 @@ static void dram_timing(ramctr_timing *ctrl)
ctrl->timC_offset[0] = 11;
ctrl->timC_offset[1] = 4;
ctrl->timC_offset[2] = 4;
- ctrl->reg_320c_range_threshold = 16;
+ ctrl->pi_coding_threshold = 16;
} else if (ctrl->tCK == TCK_533MHZ) {
ctrl->edge_offset[0] = 8;
ctrl->edge_offset[1] = 3;
@@ -460,7 +460,7 @@ static void dram_timing(ramctr_timing *ctrl)
ctrl->timC_offset[0] = 9;
ctrl->timC_offset[1] = 3;
ctrl->timC_offset[2] = 3;
- ctrl->reg_320c_range_threshold = 17;
+ ctrl->pi_coding_threshold = 17;
} else { /* TCK_400MHZ */
ctrl->edge_offset[0] = 6;
ctrl->edge_offset[1] = 2;
@@ -468,14 +468,14 @@ static void dram_timing(ramctr_timing *ctrl)
ctrl->timC_offset[0] = 6;
ctrl->timC_offset[1] = 2;
ctrl->timC_offset[2] = 2;
- ctrl->reg_320c_range_threshold = 17;
+ ctrl->pi_coding_threshold = 17;
}
/* Initial phase between CLK/CMD pins */
- ctrl->reg_c14_offset = (256000 / ctrl->tCK) / 66;
+ ctrl->pi_code_offset = (256000 / ctrl->tCK) / 66;
/* DLL_CONFIG_MDLL_W_TIMER */
- ctrl->reg_5064b0 = (128000 / ctrl->tCK) + 3;
+ ctrl->mdll_wake_delay = (128000 / ctrl->tCK) + 3;
if (ctrl->tCWL)
ctrl->CWL = DIV_ROUND_UP(ctrl->tCWL, ctrl->tCK);
@@ -528,7 +528,7 @@ static void dram_timing(ramctr_timing *ctrl)
ctrl->tAONPD = get_AONPD(ctrl->tCK, ctrl->base_freq);
}
-static void dram_freq(ramctr_timing * ctrl)
+static void dram_freq(ramctr_timing *ctrl)
{
if (ctrl->tCK > TCK_400MHZ) {
printk (BIOS_ERR, "DRAM frequency is under lowest supported "
@@ -582,7 +582,7 @@ static void dram_freq(ramctr_timing * ctrl)
}
}
-static void dram_ioregs(ramctr_timing * ctrl)
+static void dram_ioregs(ramctr_timing *ctrl)
{
u32 reg, comp2;
@@ -590,12 +590,12 @@ static void dram_ioregs(ramctr_timing * ctrl)
// IO clock
FOR_ALL_CHANNELS {
- MCHBAR32(0xc00 + channel * 0x100) = ctrl->rankmap[channel];
+ MCHBAR32(GDCRCLKRANKSUSED_ch(channel)) = ctrl->rankmap[channel];
}
// IO command
FOR_ALL_CHANNELS {
- MCHBAR32(0x3200 + channel * 0x100) = ctrl->rankmap[channel];
+ MCHBAR32(GDCRCTLRANKSUSED_ch(channel)) = ctrl->rankmap[channel];
}
// IO control
@@ -607,27 +607,27 @@ static void dram_ioregs(ramctr_timing * ctrl)
printram("RCOMP...");
reg = 0;
while (reg == 0) {
- reg = MCHBAR32(0x5084) & 0x10000;
+ reg = MCHBAR32(RCOMP_TIMER) & 0x10000;
}
printram("done\n");
// Set comp2
comp2 = get_COMP2(ctrl->tCK, ctrl->base_freq);
- MCHBAR32(0x3714) = comp2;
+ MCHBAR32(CRCOMPOFST2) = comp2;
printram("COMP2 done\n");
// Set comp1
FOR_ALL_POPULATED_CHANNELS {
- reg = MCHBAR32(0x1810 + channel * 0x100); //ch0
+ reg = MCHBAR32(CRCOMPOFST1_ch(channel)); //ch0
reg = (reg & ~0xe00) | (1 << 9); //odt
reg = (reg & ~0xe00000) | (1 << 21); //clk drive up
reg = (reg & ~0x38000000) | (1 << 27); //ctl drive up
- MCHBAR32(0x1810 + channel * 0x100) = reg;
+ MCHBAR32(CRCOMPOFST1_ch(channel)) = reg;
}
printram("COMP1 done\n");
printram("FORCE RCOMP and wait 20us...");
- MCHBAR32(0x5f08) |= 0x100;
+ MCHBAR32(M_COMP) |= 0x100;
udelay(20);
printram("done\n");
}
@@ -656,7 +656,7 @@ int try_init_dram_ddr3_ivy(ramctr_timing *ctrl, int fast_boot,
}
/* Set version register */
- MCHBAR32(0x5034) = 0xC04EB002;
+ MCHBAR32(MRC_REVISION) = 0xC04EB002;
/* Enable crossover */
dram_xover(ctrl);
@@ -665,16 +665,16 @@ int try_init_dram_ddr3_ivy(ramctr_timing *ctrl, int fast_boot,
dram_timing_regs(ctrl);
/* Power mode preset */
- MCHBAR32(0x4e80) = 0x5500;
+ MCHBAR32(PM_THML_STAT) = 0x5500;
- /* Set scheduler parameters */
- MCHBAR32(0x4c20) = 0x10100005;
+ /* Set scheduler chicken bits */
+ MCHBAR32(SCHED_CBIT) = 0x10100005;
/* Set CPU specific register */
set_4f8c();
/* Clear IO reset bit */
- MCHBAR32(0x5030) &= ~0x20;
+ MCHBAR32(MC_INIT_STATE_G) &= ~0x20;
/* Set MAD-DIMM registers */
dram_dimm_set_mapping(ctrl);
diff --git a/src/northbridge/intel/sandybridge/raminit_mrc.c b/src/northbridge/intel/sandybridge/raminit_mrc.c
index 959ea4128d..8daa9aaad1 100644
--- a/src/northbridge/intel/sandybridge/raminit_mrc.c
+++ b/src/northbridge/intel/sandybridge/raminit_mrc.c
@@ -249,7 +249,7 @@ void sdram_initialize(struct pei_data *pei_data)
/* For reference print the System Agent version
* after executing the UEFI PEI stage.
*/
- u32 version = MCHBAR32(0x5034);
+ u32 version = MCHBAR32(MRC_REVISION);
printk(BIOS_DEBUG, "System Agent Version %d.%d.%d Build %d\n",
version >> 24, (version >> 16) & 0xff,
(version >> 8) & 0xff, version & 0xff);
diff --git a/src/northbridge/intel/sandybridge/raminit_sandy.c b/src/northbridge/intel/sandybridge/raminit_sandy.c
index 1191754b71..8417c2fea4 100644
--- a/src/northbridge/intel/sandybridge/raminit_sandy.c
+++ b/src/northbridge/intel/sandybridge/raminit_sandy.c
@@ -200,7 +200,7 @@ static void dram_timing(ramctr_timing *ctrl)
ctrl->timC_offset[0] = 18;
ctrl->timC_offset[1] = 7;
ctrl->timC_offset[2] = 7;
- ctrl->reg_320c_range_threshold = 13;
+ ctrl->pi_coding_threshold = 13;
} else if (ctrl->tCK == TCK_933MHZ) {
ctrl->edge_offset[0] = 14;
ctrl->edge_offset[1] = 6;
@@ -208,7 +208,7 @@ static void dram_timing(ramctr_timing *ctrl)
ctrl->timC_offset[0] = 15;
ctrl->timC_offset[1] = 6;
ctrl->timC_offset[2] = 6;
- ctrl->reg_320c_range_threshold = 15;
+ ctrl->pi_coding_threshold = 15;
} else if (ctrl->tCK == TCK_800MHZ) {
ctrl->edge_offset[0] = 13;
ctrl->edge_offset[1] = 5;
@@ -216,7 +216,7 @@ static void dram_timing(ramctr_timing *ctrl)
ctrl->timC_offset[0] = 14;
ctrl->timC_offset[1] = 5;
ctrl->timC_offset[2] = 5;
- ctrl->reg_320c_range_threshold = 15;
+ ctrl->pi_coding_threshold = 15;
} else if (ctrl->tCK == TCK_666MHZ) {
ctrl->edge_offset[0] = 10;
ctrl->edge_offset[1] = 4;
@@ -224,7 +224,7 @@ static void dram_timing(ramctr_timing *ctrl)
ctrl->timC_offset[0] = 11;
ctrl->timC_offset[1] = 4;
ctrl->timC_offset[2] = 4;
- ctrl->reg_320c_range_threshold = 16;
+ ctrl->pi_coding_threshold = 16;
} else if (ctrl->tCK == TCK_533MHZ) {
ctrl->edge_offset[0] = 8;
ctrl->edge_offset[1] = 3;
@@ -232,7 +232,7 @@ static void dram_timing(ramctr_timing *ctrl)
ctrl->timC_offset[0] = 9;
ctrl->timC_offset[1] = 3;
ctrl->timC_offset[2] = 3;
- ctrl->reg_320c_range_threshold = 17;
+ ctrl->pi_coding_threshold = 17;
} else {
ctrl->tCK = TCK_400MHZ;
ctrl->edge_offset[0] = 6;
@@ -241,14 +241,14 @@ static void dram_timing(ramctr_timing *ctrl)
ctrl->timC_offset[0] = 6;
ctrl->timC_offset[1] = 2;
ctrl->timC_offset[2] = 2;
- ctrl->reg_320c_range_threshold = 17;
+ ctrl->pi_coding_threshold = 17;
}
/* Initial phase between CLK/CMD pins */
- ctrl->reg_c14_offset = (256000 / ctrl->tCK) / 66;
+ ctrl->pi_code_offset = (256000 / ctrl->tCK) / 66;
/* DLL_CONFIG_MDLL_W_TIMER */
- ctrl->reg_5064b0 = (128000 / ctrl->tCK) + 3;
+ ctrl->mdll_wake_delay = (128000 / ctrl->tCK) + 3;
if (ctrl->tCWL)
ctrl->CWL = DIV_ROUND_UP(ctrl->tCWL, ctrl->tCK);
@@ -301,7 +301,7 @@ static void dram_timing(ramctr_timing *ctrl)
ctrl->tAONPD = get_AONPD(ctrl->tCK);
}
-static void dram_freq(ramctr_timing * ctrl)
+static void dram_freq(ramctr_timing *ctrl)
{
if (ctrl->tCK > TCK_400MHZ) {
@@ -353,7 +353,7 @@ static void dram_freq(ramctr_timing * ctrl)
}
}
-static void dram_ioregs(ramctr_timing * ctrl)
+static void dram_ioregs(ramctr_timing *ctrl)
{
u32 reg, comp2;
@@ -361,12 +361,12 @@ static void dram_ioregs(ramctr_timing * ctrl)
// IO clock
FOR_ALL_CHANNELS {
- MCHBAR32(0xc00 + channel * 0x100) = ctrl->rankmap[channel];
+ MCHBAR32(GDCRCLKRANKSUSED_ch(channel)) = ctrl->rankmap[channel];
}
// IO command
FOR_ALL_CHANNELS {
- MCHBAR32(0x3200 + channel * 0x100) = ctrl->rankmap[channel];
+ MCHBAR32(GDCRCTLRANKSUSED_ch(channel)) = ctrl->rankmap[channel];
}
// IO control
@@ -378,27 +378,27 @@ static void dram_ioregs(ramctr_timing * ctrl)
printram("RCOMP...");
reg = 0;
while (reg == 0) {
- reg = MCHBAR32(0x5084) & 0x10000;
+ reg = MCHBAR32(RCOMP_TIMER) & 0x10000;
}
printram("done\n");
// Set comp2
comp2 = get_COMP2(ctrl->tCK);
- MCHBAR32(0x3714) = comp2;
+ MCHBAR32(CRCOMPOFST2) = comp2;
printram("COMP2 done\n");
// Set comp1
FOR_ALL_POPULATED_CHANNELS {
- reg = MCHBAR32(0x1810 + channel * 0x100); //ch0
+ reg = MCHBAR32(CRCOMPOFST1_ch(channel)); //ch0
reg = (reg & ~0xe00) | (1 << 9); //odt
reg = (reg & ~0xe00000) | (1 << 21); //clk drive up
reg = (reg & ~0x38000000) | (1 << 27); //ctl drive up
- MCHBAR32(0x1810 + channel * 0x100) = reg;
+ MCHBAR32(CRCOMPOFST1_ch(channel)) = reg;
}
printram("COMP1 done\n");
printram("FORCE RCOMP and wait 20us...");
- MCHBAR32(0x5f08) |= 0x100;
+ MCHBAR32(M_COMP) |= 0x100;
udelay(20);
printram("done\n");
}
@@ -427,7 +427,7 @@ int try_init_dram_ddr3_sandy(ramctr_timing *ctrl, int fast_boot,
}
/* Set version register */
- MCHBAR32(0x5034) = 0xC04EB002;
+ MCHBAR32(MRC_REVISION) = 0xC04EB002;
/* Enable crossover */
dram_xover(ctrl);
@@ -436,16 +436,16 @@ int try_init_dram_ddr3_sandy(ramctr_timing *ctrl, int fast_boot,
dram_timing_regs(ctrl);
/* Power mode preset */
- MCHBAR32(0x4e80) = 0x5500;
+ MCHBAR32(PM_THML_STAT) = 0x5500;
- /* Set scheduler parameters */
- MCHBAR32(0x4c20) = 0x10100005;
+ /* Set scheduler chicken bits */
+ MCHBAR32(SCHED_CBIT) = 0x10100005;
/* Set CPU specific register */
set_4f8c();
/* Clear IO reset bit */
- MCHBAR32(0x5030) &= ~0x20;
+ MCHBAR32(MC_INIT_STATE_G) &= ~0x20;
/* Set MAD-DIMM registers */
dram_dimm_set_mapping(ctrl);
diff --git a/src/northbridge/intel/sandybridge/sandybridge.h b/src/northbridge/intel/sandybridge/sandybridge.h
index 52d4c5bda7..d2f76e04b6 100644
--- a/src/northbridge/intel/sandybridge/sandybridge.h
+++ b/src/northbridge/intel/sandybridge/sandybridge.h
@@ -127,22 +127,217 @@ enum platform_type {
#define MCHBAR32_AND(x, and) (MCHBAR32(x) = (MCHBAR32(x) & (and)))
#define MCHBAR32_AND_OR(x, and, or) (MCHBAR32(x) = (MCHBAR32(x) & (and)) | (or))
-#define TC_DBP_C0 0x4000 /* Timing of DDR - bin parameters */
-#define TC_RAP_C0 0x4004 /* Timing of DDR - regular access parameters */
-#define SC_IO_LATENCY_C0 0x4028 /* IO Latency Configuration */
-#define TC_RFP_C0 0x4294 /* Refresh Parameters */
-#define TC_RFTP_C0 0x4298 /* Refresh Timing Parameters */
-#define PM_PDWN_CONFIG 0x4cb0
-#define MAD_CHNL 0x5000 /* Address Decoder Channel Configuration */
-#define MAD_DIMM_CH0 0x5004 /* Address Decode Channel 0 */
-#define MAD_DIMM_CH1 0x5008 /* Address Decode Channel 1 */
+/* Indexed register helper macros */
+#define Gz(r, z) ((r) + ((z) * 0x100))
+#define Ly(r, y) ((r) + ((y) * 4))
+#define Cx(r, x) ((r) + ((x) * 0x400))
+
+/* FIXME: These two are equivalent, but had to be split for reproducibility reasons. */
+#define CxLy(r, x, y) ((r) + ((x) * 0x400) + ((y) * 4))
+#define LyCx(r, x, y) ((r) + ((y) * 4) + ((x) * 0x400))
+
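A minimal standalone sketch of how these indexed helpers expand; the expected offsets are computed from the definitions just above, and the register names in the messages are only examples:

/* Standalone illustration of the indexed helper macros (copies of the definitions above). */
#define Gz(r, z)	((r) + ((z) * 0x100))
#define Cx(r, x)	((r) + ((x) * 0x400))
#define CxLy(r, x, y)	((r) + ((x) * 0x400) + ((y) * 4))
#define LyCx(r, x, y)	((r) + ((y) * 4) + ((x) * 0x400))

_Static_assert(Gz(0x0c00, 1) == 0x0d00, "GDCRCLKRANKSUSED, channel 1");
_Static_assert(Cx(0x4000, 1) == 0x4400, "TC_DBP, channel 1");
_Static_assert(CxLy(0x4040, 1, 2) == 0x4448, "IOSAV bit-wise error, channel 1, byte lane 2");
_Static_assert(CxLy(0x4040, 1, 2) == LyCx(0x4040, 1, 2), "CxLy and LyCx yield the same offset");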
+/* Register definitions */
+#define GDCRCLKRANKSUSED_ch(ch) Gz(0x0c00, ch) /* Indicates which rank is populated */
+#define GDCRCLKCOMP_ch(ch) Gz(0x0c04, ch) /* RCOMP result register */
+#define GDCRCKPICODE_ch(ch) Gz(0x0c14, ch) /* PI coding for DDR CLK pins */
+#define GDCRCKLOGICDELAY_ch(ch) Gz(0x0c18, ch) /* Logic delay of 1 QCLK in CLK slice */
+#define GDDLLFUSE_ch(ch) Gz(0x0c20, ch) /* Used for fuse download to the DLLs */
+#define GDCRCLKDEBUGMUXCFG_ch(ch) Gz(0x0c3c, ch) /* Debug MUX control */
+
+#define GDCRCMDDEBUGMUXCFG_Cz_S(ch) Gz(0x0e3c, ch) /* Debug MUX control */
+
+#define CRCOMPOFST1_ch(ch) Gz(0x1810, ch) /* DQ, CTL and CLK Offset values */
+
+#define GDCRTRAININGMOD_ch(ch) Gz(0x3000, ch) /* Data training mode control */
+#define GDCRTRAININGRESULT1_ch(ch) Gz(0x3004, ch) /* Training results according to PI */
+#define GDCRTRAININGRESULT2_ch(ch) Gz(0x3008, ch)
+
+#define GDCRCTLRANKSUSED_ch(ch) Gz(0x3200, ch) /* Indicates which rank is populated */
+#define GDCRCMDCOMP_ch(ch) Gz(0x3204, ch) /* COMP values register */
+#define GDCRCMDCTLCOMP_ch(ch) Gz(0x3208, ch) /* COMP values register */
+#define GDCRCMDPICODING_ch(ch) Gz(0x320c, ch) /* Command and control PI coding */
+
+#define GDCRTRAININGMOD 0x3400 /* Data training mode control register */
+#define GDCRDATACOMP 0x340c /* COMP values register */
+
+#define CRCOMPOFST2 0x3714 /* CMD DRV, SComp and Static Leg controls */
+
+/* MC per-channel registers */
+#define TC_DBP_ch(ch) Cx(0x4000, ch) /* Timings: BIN */
+#define TC_RAP_ch(ch) Cx(0x4004, ch) /* Timings: Regular access */
+#define TC_RWP_ch(ch) Cx(0x4008, ch) /* Timings: Read / Write */
+#define TC_OTHP_ch(ch) Cx(0x400c, ch) /* Timings: Other parameters */
+#define SCHED_SECOND_CBIT_ch(ch) Cx(0x401c, ch) /* More chicken bits */
+#define SCHED_CBIT_ch(ch) Cx(0x4020, ch) /* Chicken bits in scheduler */
+#define SC_ROUNDT_LAT_ch(ch) Cx(0x4024, ch) /* Round-trip latency per rank */
+#define SC_IO_LATENCY_ch(ch) Cx(0x4028, ch) /* IO Latency Configuration */
+#define SCRAMBLING_SEED_1_ch(ch) Cx(0x4034, ch) /* Scrambling seed 1 */
+#define SCRAMBLING_SEED_2_LOW_ch(ch) Cx(0x4038, ch) /* Scrambling seed 2 low */
+#define SCRAMBLING_SEED_2_HIGH_ch(ch) Cx(0x403c, ch) /* Scrambling seed 2 high */
+
+/* IOSAV Bytelane Bit-wise error */
+#define IOSAV_By_BW_SERROR_ch(ch, y) CxLy(0x4040, ch, y)
+
+/* IOSAV Bytelane Bit-wise compare mask */
+#define IOSAV_By_BW_MASK_ch(ch, y) CxLy(0x4080, ch, y)
+
+/*
+ * Defines the number of transactions (non-VC1 RD CAS commands) between two priority ticks.
+ * There are separate counters for transactions issued by the ring agents (core or GT) and
+ * for transactions issued by the SA.
+ */
+#define SC_PR_CNT_CONFIG_ch(ch) Cx(0x40a8, ch)
+#define SC_PCIT_ch(ch) Cx(0x40ac, ch) /* Page-close idle timer setup - 8 bits */
+#define PM_PDWN_CONFIG_ch(ch) Cx(0x40b0, ch) /* Power-down (CKE-off) operation config */
+#define ECC_INJECT_COUNT_ch(ch) Cx(0x40b4, ch) /* ECC error injection count */
+#define ECC_DFT_ch(ch) Cx(0x40b8, ch) /* ECC DFT features (ECC4ANA, error inject) */
+#define SC_WR_ADD_DELAY_ch(ch) Cx(0x40d0, ch) /* Extra WR delay to overcome WR-flyby issue */
+
+#define IOSAV_By_BW_SERROR_C_ch(ch, y) CxLy(0x4140, ch, y) /* IOSAV Bytelane Bit-wise error */
+
+/* IOSAV sub-sequence control registers */
+#define IOSAV_n_SP_CMD_ADDR_ch(ch, y) LyCx(0x4200, ch, y) /* Special command address. */
+#define IOSAV_n_ADDR_UPD_ch(ch, y) LyCx(0x4210, ch, y) /* Address update control */
+#define IOSAV_n_SP_CMD_CTL_ch(ch, y) LyCx(0x4220, ch, y) /* Control of command signals */
+#define IOSAV_n_SUBSEQ_CTL_ch(ch, y) LyCx(0x4230, ch, y) /* Sub-sequence controls */
+#define IOSAV_n_ADDRESS_LFSR_ch(ch, y) LyCx(0x4240, ch, y) /* 23-bit LFSR state value */
+
+#define PM_THML_STAT_ch(ch) Cx(0x4280, ch) /* Thermal status of each rank */
+#define IOSAV_SEQ_CTL_ch(ch) Cx(0x4284, ch) /* IOSAV sequence level control */
+#define IOSAV_DATA_CTL_ch(ch) Cx(0x4288, ch) /* Data control in IOSAV mode */
+#define IOSAV_STATUS_ch(ch) Cx(0x428c, ch) /* State of the IOSAV sequence machine */
+#define TC_ZQCAL_ch(ch) Cx(0x4290, ch) /* ZQCAL control register */
+#define TC_RFP_ch(ch) Cx(0x4294, ch) /* Refresh Parameters */
+#define TC_RFTP_ch(ch) Cx(0x4298, ch) /* Refresh Timing Parameters */
+#define TC_MR2_SHADOW_ch(ch) Cx(0x429c, ch) /* MR2 shadow - copy of DDR configuration */
+#define MC_INIT_STATE_ch(ch) Cx(0x42a0, ch) /* IOSAV mode control */
+#define TC_SRFTP_ch(ch) Cx(0x42a4, ch) /* Self-refresh timing parameters */
+#define IOSAV_ERROR_ch(ch) Cx(0x42ac, ch) /* Data vector count of the first error */
+#define IOSAV_DC_MASK_ch(ch) Cx(0x42b0, ch) /* IOSAV data check masking */
+
+#define IOSAV_By_ERROR_COUNT_ch(ch, y) CxLy(0x4340, ch, y) /* Per-byte 16-bit error count */
+#define IOSAV_G_ERROR_COUNT_ch(ch) Cx(0x4364, ch) /* Global 16-bit error count */
+
+#define PM_TRML_M_CONFIG_ch(ch) Cx(0x4380, ch) /* Thermal mode configuration */
+#define PM_CMD_PWR_ch(ch) Cx(0x4384, ch) /* Power contribution of commands */
+#define PM_BW_LIMIT_CONFIG_ch(ch) Cx(0x4388, ch) /* Bandwidth throttling on overtemp */
+#define SC_WDBWM_ch(ch) Cx(0x438c, ch) /* Watermarks and starvation counter */
+
+/* MC Channel Broadcast registers */
+#define TC_DBP 0x4c00 /* Timings: BIN */
+#define TC_RAP 0x4c04 /* Timings: Regular access */
+#define TC_RWP 0x4c08 /* Timings: Read / Write */
+#define TC_OTHP 0x4c0c /* Timings: Other parameters */
+#define SCHED_SECOND_CBIT 0x4c1c /* More chicken bits */
+#define SCHED_CBIT 0x4c20 /* Chicken bits in scheduler */
+#define SC_ROUNDT_LAT 0x4c24 /* Round-trip latency per rank */
+#define SC_IO_LATENCY 0x4c28 /* IO Latency Configuration */
+#define SCRAMBLING_SEED_1 0x4c34 /* Scrambling seed 1 */
+#define SCRAMBLING_SEED_2_LOW 0x4c38 /* Scrambling seed 2 low */
+#define SCRAMBLING_SEED_2_HIGH 0x4c3c /* Scrambling seed 2 high */
+
+#define IOSAV_By_BW_SERROR(y) Ly(0x4c40, y) /* IOSAV Bytelane Bit-wise error */
+#define IOSAV_By_BW_MASK(y) Ly(0x4c80, y) /* IOSAV Bytelane Bit-wise compare mask */
+
+/*
+ * Defines the number of transactions (non-VC1 RD CAS commands) between two priority ticks.
+ * There are separate counters for transactions issued by the ring agents (core or GT) and
+ * for transactions issued by the SA.
+ */
+#define SC_PR_CNT_CONFIG 0x4ca8
+#define SC_PCIT 0x4cac /* Page-close idle timer setup - 8 bits */
+#define PM_PDWN_CONFIG 0x4cb0 /* Power-down (CKE-off) operation config */
+#define ECC_INJECT_COUNT 0x4cb4 /* ECC error injection count */
+#define ECC_DFT 0x4cb8 /* ECC DFT features (ECC4ANA, error inject) */
+#define SC_WR_ADD_DELAY 0x4cd0 /* Extra WR delay to overcome WR-flyby issue */
+
+/* Opportunistic reads configuration during write-major-mode (WMM) */
+#define WMM_READ_CONFIG 0x4cd4 /** WARNING: Only exists on IVB! */
+
+#define IOSAV_By_BW_SERROR_C(y) Ly(0x4d40, y) /* IOSAV Bytelane Bit-wise error */
+
+#define IOSAV_n_SP_CMD_ADDR(n) Ly(0x4e00, n) /* Sub-sequence special command address */
+#define IOSAV_n_ADDR_UPD(n) Ly(0x4e10, n) /* Address update after command execution */
+#define IOSAV_n_SP_CMD_CTL(n) Ly(0x4e20, n) /* Command signals in sub-sequence command */
+#define IOSAV_n_SUBSEQ_CTL(n) Ly(0x4e30, n) /* Sub-sequence command parameter control */
+#define IOSAV_n_ADDRESS_LFSR(n) Ly(0x4e40, n) /* 23-bit LFSR value of the sequence */
+
+#define PM_THML_STAT 0x4e80 /* Thermal status of each rank */
+#define IOSAV_SEQ_CTL 0x4e84 /* IOSAV sequence level control */
+#define IOSAV_DATA_CTL 0x4e88 /* Data control in IOSAV mode */
+#define IOSAV_STATUS 0x4e8c /* State of the IOSAV sequence machine */
+#define TC_ZQCAL 0x4e90 /* ZQCAL control register */
+#define TC_RFP 0x4e94 /* Refresh Parameters */
+#define TC_RFTP 0x4e98 /* Refresh Timing Parameters */
+#define TC_MR2_SHADOW 0x4e9c /* MR2 shadow - copy of DDR configuration */
+#define MC_INIT_STATE 0x4ea0 /* IOSAV mode control */
+#define TC_SRFTP 0x4ea4 /* Self-refresh timing parameters */
+
+/*
+ * Auxiliary register in the mcmnts synthesis FUB (Functional Unit Block). It is also used
+ * to enable the IOSAV_n_SP_CMD_ADDR optimization on Ivy Bridge.
+ */
+#define MCMNTS_SPARE 0x4ea8 /** WARNING: Reserved, use only on IVB! */
+
+#define IOSAV_ERROR 0x4eac /* Data vector count of the first error */
+#define IOSAV_DC_MASK 0x4eb0 /* IOSAV data check masking */
+
+#define IOSAV_By_ERROR_COUNT(y) Ly(0x4f40, y) /* Per-byte 16-bit error counter */
+#define IOSAV_G_ERROR_COUNT 0x4f64 /* Global 16-bit error counter */
+
+#define PM_TRML_M_CONFIG 0x4f80 /* Thermal mode configuration */
+#define PM_CMD_PWR 0x4f84 /* Power contribution of commands */
+#define PM_BW_LIMIT_CONFIG 0x4f88 /* Bandwidth throttling on overtemperature */
+#define SC_WDBWM 0x4f8c /* Watermarks and starvation counter config */
+
+#define MAD_CHNL 0x5000 /* Address Decoder Channel Configuration */
+#define MAD_DIMM_CH0 0x5004 /* Address Decode Channel 0 */
+#define MAD_DIMM_CH1 0x5008 /* Address Decode Channel 1 */
+#define MAD_DIMM_CH2 0x500c /* Address Decode Channel 2 (unused on SNB) */
+#define MAD_ZR 0x5014 /* Address Decode Zones */
+#define MCDECS_SPARE 0x5018 /* Spare register in mcdecs synthesis FUB */
+#define MCDECS_CBIT 0x501c /* Chicken bits in mcdecs synthesis FUB */
+
+#define CHANNEL_HASH 0x5024 /** WARNING: Only exists on IVB! */
+
+#define MC_INIT_STATE_G 0x5030 /* High-level behavior in IOSAV mode */
+#define MRC_REVISION 0x5034 /* MRC Revision */
+#define PM_DLL_CONFIG 0x5064 /* Memory Controller I/O DLL config */
+#define RCOMP_TIMER 0x5084 /* RCOMP evaluation timer register */
+
+#define MC_LOCK 0x50fc /* Memory Controller Lock register */
+
+#define VTD1_BASE 0x5400 /* Base address for IGD */
+#define VTD2_BASE 0x5410 /* Base address for PEG, USB, SATA, etc. */
+#define PAIR_CTL 0x5418 /* Power Aware Interrupt Routing Control */
+
+/* PAVP control register, undocumented. Different from PAVPC on PCI config space. */
+#define MMIO_PAVP_CTL 0x5500 /* Bit 0 locks PAVP settings */
+
#define MEM_TRML_ESTIMATION_CONFIG 0x5880
#define MEM_TRML_THRESHOLDS_CONFIG 0x5888
#define MEM_TRML_INTERRUPT 0x58a8
-#define MC_BIOS_REQ 0x5e00
-#define MC_BIOS_DATA 0x5e04
-#define SSKPD 0x5d14 /* 16bit (scratchpad) */
-#define BIOS_RESET_CPL 0x5da8 /* 8bit */
+
+#define MC_TURBO_PL1 0x59a0 /* Turbo Power Limit 1 parameters */
+#define MC_TURBO_PL2 0x59a4 /* Turbo Power Limit 2 parameters */
+
+#define SSKPD_OK 0x5d10 /* 64-bit scratchpad register */
+#define SSKPD 0x5d14 /* 16-bit (scratchpad) */
+#define BIOS_RESET_CPL 0x5da8 /* 8-bit */
+
+/* PCODE will sample SAPM-related registers at the end of Phase 4. */
+#define MC_BIOS_REQ 0x5e00 /* Memory frequency request register */
+#define MC_BIOS_DATA 0x5e04 /* Miscellaneous information for BIOS */
+#define SAPMCTL 0x5f00 /* Bit 3 enables DDR EPG (C7i) on IVB */
+#define M_COMP 0x5f08 /* Memory COMP control */
+#define SAPMTIMERS 0x5f10 /* SAPM timers in 10ns (100 MHz) units */
+
+/* WARNING: Only applies to Sandy Bridge! */
+#define BANDTIMERS_SNB 0x5f18 /* MPLL and PPLL time to do self-banding */
+
+/** WARNING: Only applies to Ivy Bridge! */
+#define SAPMTIMERS2_IVB 0x5f18 /** Extra latency for DDRIO EPG exit (C7i) */
+#define BANDTIMERS_IVB 0x5f20 /** MPLL and PPLL time to do self-banding */
/*
* EPBAR - Egress Port Root Complex Register Block