Diffstat (limited to 'src/soc')
 -rw-r--r--  src/soc/intel/tigerlake/Kconfig                |  13
 -rw-r--r--  src/soc/intel/tigerlake/include/soc/meminit.h  | 212
 -rw-r--r--  src/soc/intel/tigerlake/meminit.c              | 557
3 files changed, 242 insertions(+), 540 deletions(-)
diff --git a/src/soc/intel/tigerlake/Kconfig b/src/soc/intel/tigerlake/Kconfig
index 9d073fa5f9..8211d5650b 100644
--- a/src/soc/intel/tigerlake/Kconfig
+++ b/src/soc/intel/tigerlake/Kconfig
@@ -54,6 +54,7 @@ config CPU_SPECIFIC_OPTIONS
select SOC_INTEL_COMMON_BLOCK_GPIO_IOSTANDBY
select SOC_INTEL_COMMON_BLOCK_GSPI_VERSION_2
select SOC_INTEL_COMMON_BLOCK_HDA
+ select SOC_INTEL_COMMON_BLOCK_MEMINIT
select SOC_INTEL_COMMON_BLOCK_PCIE_RTD3
select SOC_INTEL_COMMON_BLOCK_SA
select SOC_INTEL_COMMON_BLOCK_SMM
@@ -250,4 +251,16 @@ config EARLY_TCSS_DISPLAY
help
Enable displays to be detected over Type-C ports during boot.
+config DATA_BUS_WIDTH
+ int
+ default 128
+
+config DIMMS_PER_CHANNEL
+ int
+ default 2
+
+config MRC_CHANNEL_WIDTH
+ int
+ default 16
+
endif
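
The three new Kconfig values describe the memory geometry consumed by the common meminit block: a 128-bit data bus, up to 2 DIMMs per channel, and 16-bit MRC channels. A minimal sketch of how they are expected to combine follows; CHANNEL_COUNT() and MRC_CHANNELS come from <intelblocks/meminit.h>, which is not part of this diff, so the definitions below are assumptions inferred from how meminit.c uses them.

/*
 * Sketch (not part of the patch): assumed derivation of channel counts from
 * the new Kconfig symbols. The real macros live in <intelblocks/meminit.h>.
 */
#define DATA_BUS_WIDTH      128                               /* CONFIG_DATA_BUS_WIDTH */
#define MRC_CHANNEL_WIDTH    16                               /* CONFIG_MRC_CHANNEL_WIDTH */
#define CHANNEL_COUNT(w)    (DATA_BUS_WIDTH / (w))            /* channels for a given width */
#define MRC_CHANNELS        CHANNEL_COUNT(MRC_CHANNEL_WIDTH)  /* 8 MRC channels */

/* With the per-technology widths defined in meminit.c below:
 *   LPDDR4x: CHANNEL_COUNT(LP4X_CH_WIDTH = 16) == 8 physical channels
 *   DDR4:    CHANNEL_COUNT(DDR4_CH_WIDTH = 64) == 2 physical channels
 */
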
diff --git a/src/soc/intel/tigerlake/include/soc/meminit.h b/src/soc/intel/tigerlake/include/soc/meminit.h
index 4a52298b7a..8583e669d0 100644
--- a/src/soc/intel/tigerlake/include/soc/meminit.h
+++ b/src/soc/intel/tigerlake/include/soc/meminit.h
@@ -5,151 +5,115 @@
#include <stddef.h>
#include <stdint.h>
+#include <types.h>
#include <fsp/soc_binding.h>
+#include <intelblocks/meminit.h>
-#define BITS_PER_BYTE 8
-
-#define LPDDR4X_CHANNELS 8
-#define LPDDR4X_BYTES_PER_CHANNEL 2
+enum mem_type {
+ MEM_TYPE_DDR4,
+ MEM_TYPE_LP4X,
+};
-#define DDR4_CHANNELS 2
-#define DDR4_BYTES_PER_CHANNEL 8
+struct ddr4_dq {
+ uint8_t dq0[BITS_PER_BYTE];
+ uint8_t dq1[BITS_PER_BYTE];
+ uint8_t dq2[BITS_PER_BYTE];
+ uint8_t dq3[BITS_PER_BYTE];
+ uint8_t dq4[BITS_PER_BYTE];
+ uint8_t dq5[BITS_PER_BYTE];
+ uint8_t dq6[BITS_PER_BYTE];
+ uint8_t dq7[BITS_PER_BYTE];
+};
-enum mem_topology {
- MEMORY_DOWN, /* Supports reading SPD from CBFS or in-memory pointer. */
- SODIMM, /* Supports reading SPD using SMBus (only for DDR4). */
- MIXED, /* CH0 = MD, CH1 = SODIMM (only for DDR4). */
+struct ddr4_dqs {
+ uint8_t dqs0;
+ uint8_t dqs1;
+ uint8_t dqs2;
+ uint8_t dqs3;
+ uint8_t dqs4;
+ uint8_t dqs5;
+ uint8_t dqs6;
+ uint8_t dqs7;
};
-enum ddr_memtype {
- MEMTYPE_DDR4, /* Uses DDR4 memory */
- MEMTYPE_LPDDR4X, /* Uses LPDDR4x memory */
+struct ddr4_dq_map {
+ struct ddr4_dq ddr0;
+ struct ddr4_dq ddr1;
};
-enum md_spd_loc {
- /* Read SPD from pointer provided to memory location. */
- SPD_MEMPTR,
- /* Read SPD using index into spd.bin in CBFS. */
- SPD_CBFS,
+struct ddr4_dqs_map {
+ struct ddr4_dqs ddr0;
+ struct ddr4_dqs ddr1;
};
-struct spd_info {
- enum mem_topology topology;
+struct lp4x_dq {
+ uint8_t dq0[BITS_PER_BYTE];
+ uint8_t dq1[BITS_PER_BYTE];
+};
- /* SPD info for Memory down topology */
- enum md_spd_loc md_spd_loc;
- union {
- /* Used for SPD_CBFS */
- uint8_t cbfs_index;
-
- struct {
- /* Used for SPD_MEMPTR */
- uintptr_t data_ptr;
- size_t data_len;
- };
- };
+struct lp4x_dqs {
+ uint8_t dqs0;
+ uint8_t dqs1;
+};
- /*
- * SPD info for SODIMM topology.
- * Leave addr_dimmN as 0 for any DIMMs that are not populated.
- */
- struct {
- /* SMBus address for DIMM0 within the channel. */
- uint8_t addr_dimm0;
- /* SMBus address for DIMM1 within the channel. */
- uint8_t addr_dimm1;
- } smbus_info[DDR4_CHANNELS];
+struct lp4x_dq_map {
+ struct lp4x_dq ddr0;
+ struct lp4x_dq ddr1;
+ struct lp4x_dq ddr2;
+ struct lp4x_dq ddr3;
+ struct lp4x_dq ddr4;
+ struct lp4x_dq ddr5;
+ struct lp4x_dq ddr6;
+ struct lp4x_dq ddr7;
};
-/* Board-specific memory configuration information */
-struct lpddr4x_cfg {
- /*
- * DQ CPU<>DRAM map:
- * LPDDR4x memory interface has 2 DQs per channel. Each DQ consists of 8 bits(1
- * byte). Thus, dq_map is represented as DDR[7-0]_DQ[1-0][7:0], where
- * DDR[7-0] : LPDDR4x channel #
- * DQ[1-0] : DQ # within the channel
- * [7:0] : Bits within the DQ
- *
- * Index of the array represents DQ pin# on the CPU, whereas value in
- * the array represents DQ pin# on the memory part.
- */
- uint8_t dq_map[LPDDR4X_CHANNELS][LPDDR4X_BYTES_PER_CHANNEL][BITS_PER_BYTE];
-
- /*
- * DQS CPU<>DRAM map:
- * LPDDR4x memory interface has 2 DQS pairs(P/N) per channel. Thus, dqs_map is
- * represented as DDR[7-0]_DQS[1:0], where
- * DDR[7-0] : LPDDR4x channel #
- * DQS[1-0] : DQS # within the channel
- *
- * Index of the array represents DQS pin# on the CPU, whereas value in
- * the array represents DQ pin# on the memory part.
- */
- uint8_t dqs_map[LPDDR4X_CHANNELS][LPDDR4X_BYTES_PER_CHANNEL];
- /*
- * Early Command Training Enable/Disable Control
- * 1 = enable, 0 = disable
- */
- uint8_t ect;
+struct lp4x_dqs_map {
+ struct lp4x_dqs ddr0;
+ struct lp4x_dqs ddr1;
+ struct lp4x_dqs ddr2;
+ struct lp4x_dqs ddr3;
+ struct lp4x_dqs ddr4;
+ struct lp4x_dqs ddr5;
+ struct lp4x_dqs ddr6;
+ struct lp4x_dqs ddr7;
};
-/* Board-specific memory configuration information for DDR4 memory variant */
-struct mb_ddr4_cfg {
- /*
- * DQ CPU<>DRAM map:
- * DDR4 memory interface has 8 DQs per channel. Each DQ consists of 8 bits(1
- * byte). Thus, dq_map is represented as DDR[1-0]_DQ[7-0][7:0], where
- * DDR[1-0] : DDR4 channel #
- * DQ[7-0] : DQ # within the channel
- * [7:0] : Bits within the DQ
- *
- * Index of the array represents DQ pin# on the CPU, whereas value in
- * the array represents DQ pin# on the memory part.
- */
- uint8_t dq_map[DDR4_CHANNELS][DDR4_BYTES_PER_CHANNEL][BITS_PER_BYTE];
- /*
- * DQS CPU<>DRAM map:
- * DDR4 memory interface has 8 DQS pairs per channel. Thus, dqs_map is represented as
- * DDR[1-0]_DQS[7-0], where
- * DDR[1-0] : DDR4 channel #
- * DQS[7-0] : DQS # within the channel
- *
- * Index of the array represents DQS pin# on the CPU, whereas value in
- * the array represents DQS pin# on the memory part.
- */
- uint8_t dqs_map[DDR4_CHANNELS][DDR4_BYTES_PER_CHANNEL];
- /*
- * Indicates whether memory is interleaved.
- * Set to 1 for an interleaved design,
- * set to 0 for non-interleaved design.
- */
- uint8_t dq_pins_interleaved;
- /*
- * Early Command Training Enable/Disable Control
- * 1 = enable, 0 = disable
- */
- uint8_t ect;
+struct mem_ddr4_config {
+ bool dq_pins_interleaved;
};
-/* DDR Memory Information - Supports DDR4 and LPDDR4x */
-struct ddr_memory_cfg {
- enum ddr_memtype mem_type;
+struct mb_cfg {
+ enum mem_type type;
+
union {
- const struct mb_ddr4_cfg *ddr4_cfg;
- const struct lpddr4x_cfg *lpddr4_cfg;
+ /*
+ * DQ CPU<>DRAM map:
+ * Index of the array represents DQ# on the CPU and the value represents DQ# on
+ * the DRAM part.
+ */
+ uint8_t dq_map[CONFIG_DATA_BUS_WIDTH];
+ struct lp4x_dq_map lp4x_dq_map;
+ struct ddr4_dq_map ddr4_dq_map;
};
-};
-/* Initialize LPDDR4x memory configurations */
-void meminit_lpddr4x(FSP_M_CONFIG *mem_cfg, const struct lpddr4x_cfg *board_cfg,
- const struct spd_info *spd, bool half_populated);
+ union {
+ /*
+ * DQS CPU<>DRAM map:
+ * Index of the array represents DQS# on the CPU and the value represents DQS#
+ * on the DRAM part.
+ */
+ uint8_t dqs_map[CONFIG_DATA_BUS_WIDTH/BITS_PER_BYTE];
+ struct lp4x_dqs_map lp4x_dqs_map;
+ struct ddr4_dqs_map ddr4_dqs_map;
+ };
+
+ /* Early Command Training Enable/Disable Control */
+ bool ect;
+
+ struct mem_ddr4_config ddr4_config;
+};
-/* Initialize DDR4 memory configurations */
-void meminit_ddr4(FSP_M_CONFIG *mem_cfg, const struct mb_ddr4_cfg *board_cfg,
- const struct spd_info *spd, const bool half_populated);
+void memcfg_init(FSP_M_CONFIG *mem_cfg, const struct mb_cfg *mb_cfg,
+ const struct mem_spd *spd_info, bool half_populated);
-/* Determine which DDR memory is used and call appropriate init routine */
-void meminit_ddr(FSP_M_CONFIG *mem_cfg, const struct ddr_memory_cfg *board_cfg,
- const struct spd_info *info, bool half_populated);
#endif /* _SOC_TIGERLAKE_MEMINIT_H_ */
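
With the per-technology structs above, a board provides a single struct mb_cfg instead of the old lpddr4x_cfg/mb_ddr4_cfg pair. Below is a minimal sketch of an LPDDR4x configuration using only fields defined in this header; the DQ/DQS values are made up for illustration and a real board takes them from its schematic's CPU<->DRAM routing.

/* Sketch (not part of the patch): an LPDDR4x mainboard config using struct mb_cfg. */
#include <soc/meminit.h>

static const struct mb_cfg board_memcfg = {
	.type = MEM_TYPE_LP4X,

	/* DQ map: index = DQ# on the CPU, value = DQ# on the DRAM part. */
	.lp4x_dq_map = {
		.ddr0 = {
			.dq0 = {  0,  1,  2,  3,  4,  5,  6,  7 },
			.dq1 = { 12, 13, 14, 15,  8,  9, 10, 11 },
		},
		.ddr1 = {
			.dq0 = {  7,  6,  5,  4,  3,  2,  1,  0 },
			.dq1 = { 15, 14, 13, 12, 11, 10,  9,  8 },
		},
		/* .ddr2 .. .ddr7 follow the same pattern. */
	},

	/* DQS map: index = DQS# on the CPU, value = DQS# on the DRAM part. */
	.lp4x_dqs_map = {
		.ddr0 = { .dqs0 = 0, .dqs1 = 1 },
		.ddr1 = { .dqs0 = 1, .dqs1 = 0 },
		/* .ddr2 .. .ddr7 follow the same pattern. */
	},

	/* Early Command Training */
	.ect = true,
};
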
diff --git a/src/soc/intel/tigerlake/meminit.c b/src/soc/intel/tigerlake/meminit.c
index 7e830f051f..c030bb80ac 100644
--- a/src/soc/intel/tigerlake/meminit.c
+++ b/src/soc/intel/tigerlake/meminit.c
@@ -4,449 +4,174 @@
#include <console/console.h>
#include <fsp/util.h>
#include <soc/meminit.h>
-#include <spd_bin.h>
#include <string.h>
-/* If memory is half-populated, then upper half of the channels need to be left empty. */
-#define LPDDR4X_CHANNEL_UNPOPULATED(ch, half_populated) \
- ((half_populated) && ((ch) >= (LPDDR4X_CHANNELS / 2)))
-
-/*
- * Translate DDR4 channel # to FSP UPD index # for the channel.
- * Channel 0 -> Index 0
- * Channel 1 -> Index 4
- * Index 1-3 and 5-7 are unused.
- */
-#define DDR4_FSP_UPD_CHANNEL_IDX(x) ((x) * 4)
-
-enum dimm_enable_options {
- ENABLE_BOTH_DIMMS = 0,
- DISABLE_DIMM0 = 1,
- DISABLE_DIMM1 = 2,
- DISABLE_BOTH_DIMMS = 3
+#define LP4X_CH_WIDTH 16
+#define LP4X_CHANNELS CHANNEL_COUNT(LP4X_CH_WIDTH)
+
+#define DDR4_CH_WIDTH 64
+#define DDR4_CHANNELS CHANNEL_COUNT(DDR4_CH_WIDTH)
+
+static const struct soc_mem_cfg soc_mem_cfg[] = {
+ [MEM_TYPE_DDR4] = {
+ .num_phys_channels = DDR4_CHANNELS,
+ .phys_to_mrc_map = {
+ [0] = 0,
+ [1] = 4,
+ },
+ .md_phy_masks = {
+ /*
+ * Only physical channel 0 is populated in case of half-populated
+ * configuration.
+ */
+ .half_channel = BIT(0),
+ /* In mixed topologies, channel 0 is always memory-down. */
+ .mixed_topo = BIT(0),
+ },
+ },
+ [MEM_TYPE_LP4X] = {
+ .num_phys_channels = LP4X_CHANNELS,
+ .phys_to_mrc_map = {
+ [0] = 0,
+ [1] = 1,
+ [2] = 2,
+ [3] = 3,
+ [4] = 4,
+ [5] = 5,
+ [6] = 6,
+ [7] = 7,
+ },
+ .md_phy_masks = {
+ /*
+ * Physical channels 0, 1, 2 and 3 are populated in case of
+ * half-populated configurations.
+ */
+ .half_channel = BIT(0) | BIT(1) | BIT(2) | BIT(3),
+ /* LP4x does not support mixed topologies. */
+ },
+ },
};
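
The phys_to_mrc_map tables above are the key difference between the two technologies: LPDDR4x channels map 1:1 onto the MRC's eight channels, while DDR4's two 64-bit physical channels land on MRC channels 0 and 4. A small sketch of the consequence, assuming mem_populate_channel_data() in the common block applies this map when filling struct mem_channel_data:

/*
 * Sketch (not part of the patch): DDR4 physical channel 1 is presented to
 * the MRC as channel 4, so its SPD pointers end up in MemorySpdPtr08/09 and
 * its DQ/DQS data in the *Ch4 UPDs filled by the functions below.
 */
int phys_ch = 1;
int mrc_ch = soc_mem_cfg[MEM_TYPE_DDR4].phys_to_mrc_map[phys_ch];   /* == 4 */
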
-static uint8_t get_dimm_cfg(uintptr_t dimm0, uintptr_t dimm1)
-{
- if (dimm0 && dimm1)
- return ENABLE_BOTH_DIMMS;
- if (!dimm0 && !dimm1)
- return DISABLE_BOTH_DIMMS;
- if (!dimm1)
- return DISABLE_DIMM1;
- if (!dimm0)
- die("Disabling of only dimm0 is not supported!\n");
-
- return DISABLE_BOTH_DIMMS;
-}
-
-static void init_spd_upds(FSP_M_CONFIG *mem_cfg, int channel, uintptr_t spd_dimm0,
- uintptr_t spd_dimm1)
-{
- uint8_t dimm_cfg = get_dimm_cfg(spd_dimm0, spd_dimm1);
-
- switch (channel) {
- case 0:
- mem_cfg->DisableDimmCh0 = dimm_cfg;
- mem_cfg->MemorySpdPtr00 = spd_dimm0;
- mem_cfg->MemorySpdPtr01 = spd_dimm1;
- break;
-
- case 1:
- mem_cfg->DisableDimmCh1 = dimm_cfg;
- mem_cfg->MemorySpdPtr02 = spd_dimm0;
- mem_cfg->MemorySpdPtr03 = spd_dimm1;
- break;
-
- case 2:
- mem_cfg->DisableDimmCh2 = dimm_cfg;
- mem_cfg->MemorySpdPtr04 = spd_dimm0;
- mem_cfg->MemorySpdPtr05 = spd_dimm1;
- break;
-
- case 3:
- mem_cfg->DisableDimmCh3 = dimm_cfg;
- mem_cfg->MemorySpdPtr06 = spd_dimm0;
- mem_cfg->MemorySpdPtr07 = spd_dimm1;
- break;
-
- case 4:
- mem_cfg->DisableDimmCh4 = dimm_cfg;
- mem_cfg->MemorySpdPtr08 = spd_dimm0;
- mem_cfg->MemorySpdPtr09 = spd_dimm1;
- break;
-
- case 5:
- mem_cfg->DisableDimmCh5 = dimm_cfg;
- mem_cfg->MemorySpdPtr10 = spd_dimm0;
- mem_cfg->MemorySpdPtr11 = spd_dimm1;
- break;
-
- case 6:
- mem_cfg->DisableDimmCh6 = dimm_cfg;
- mem_cfg->MemorySpdPtr12 = spd_dimm0;
- mem_cfg->MemorySpdPtr13 = spd_dimm1;
- break;
-
- case 7:
- mem_cfg->DisableDimmCh7 = dimm_cfg;
- mem_cfg->MemorySpdPtr14 = spd_dimm0;
- mem_cfg->MemorySpdPtr15 = spd_dimm1;
- break;
-
- default:
- die("Invalid channel: %d\n", channel);
- }
-}
-
-static inline void init_spd_upds_empty(FSP_M_CONFIG *mem_cfg, int channel)
-{
- init_spd_upds(mem_cfg, channel, 0, 0);
-}
-
-static inline void init_spd_upds_dimm0(FSP_M_CONFIG *mem_cfg, int channel, uintptr_t spd_dimm0)
-{
- init_spd_upds(mem_cfg, channel, spd_dimm0, 0);
-}
-
-static void init_dq_upds(FSP_M_CONFIG *mem_cfg, int byte_pair, const uint8_t *dq_byte0,
- const uint8_t *dq_byte1)
-{
- uint8_t *dq_upd;
-
- switch (byte_pair) {
- case 0:
- dq_upd = mem_cfg->DqMapCpu2DramCh0;
- break;
- case 1:
- dq_upd = mem_cfg->DqMapCpu2DramCh1;
- break;
- case 2:
- dq_upd = mem_cfg->DqMapCpu2DramCh2;
- break;
- case 3:
- dq_upd = mem_cfg->DqMapCpu2DramCh3;
- break;
- case 4:
- dq_upd = mem_cfg->DqMapCpu2DramCh4;
- break;
- case 5:
- dq_upd = mem_cfg->DqMapCpu2DramCh5;
- break;
- case 6:
- dq_upd = mem_cfg->DqMapCpu2DramCh6;
- break;
- case 7:
- dq_upd = mem_cfg->DqMapCpu2DramCh7;
- break;
- default:
- die("Invalid byte_pair: %d\n", byte_pair);
- }
-
- if (dq_byte0 && dq_byte1) {
- memcpy(dq_upd, dq_byte0, BITS_PER_BYTE);
- memcpy(dq_upd + BITS_PER_BYTE, dq_byte1, BITS_PER_BYTE);
- } else {
- memset(dq_upd, 0, BITS_PER_BYTE * 2);
- }
-}
-
-static inline void init_dq_upds_empty(FSP_M_CONFIG *mem_cfg, int byte_pair)
-{
- init_dq_upds(mem_cfg, byte_pair, NULL, NULL);
-}
-
-static void init_dqs_upds(FSP_M_CONFIG *mem_cfg, int byte_pair, uint8_t dqs_byte0,
- uint8_t dqs_byte1)
+static void mem_init_spd_upds(FSP_M_CONFIG *mem_cfg, const struct mem_channel_data *data)
{
- uint8_t *dqs_upd;
-
- switch (byte_pair) {
- case 0:
- dqs_upd = mem_cfg->DqsMapCpu2DramCh0;
- break;
- case 1:
- dqs_upd = mem_cfg->DqsMapCpu2DramCh1;
- break;
- case 2:
- dqs_upd = mem_cfg->DqsMapCpu2DramCh2;
- break;
- case 3:
- dqs_upd = mem_cfg->DqsMapCpu2DramCh3;
- break;
- case 4:
- dqs_upd = mem_cfg->DqsMapCpu2DramCh4;
- break;
- case 5:
- dqs_upd = mem_cfg->DqsMapCpu2DramCh5;
- break;
- case 6:
- dqs_upd = mem_cfg->DqsMapCpu2DramCh6;
- break;
- case 7:
- dqs_upd = mem_cfg->DqsMapCpu2DramCh7;
- break;
- default:
- die("Invalid byte_pair: %d\n", byte_pair);
- }
-
- dqs_upd[0] = dqs_byte0;
- dqs_upd[1] = dqs_byte1;
-}
-
-static inline void init_dqs_upds_empty(FSP_M_CONFIG *mem_cfg, int byte_pair)
-{
- init_dqs_upds(mem_cfg, byte_pair, 0, 0);
-}
-
-static void read_spd_from_cbfs(uint8_t index, uintptr_t *data, size_t *len)
-{
- struct region_device spd_rdev;
-
- printk(BIOS_DEBUG, "SPD INDEX = %u\n", index);
- if (get_spd_cbfs_rdev(&spd_rdev, index) < 0)
- die("spd.bin not found or incorrect index\n");
-
- /* Memory leak is ok since we have memory mapped boot media */
- assert(CONFIG(BOOT_DEVICE_MEMORY_MAPPED));
-
- *len = region_device_sz(&spd_rdev);
- *data = (uintptr_t)rdev_mmap_full(&spd_rdev);
-}
-
-static void read_md_spd(const struct spd_info *info, uintptr_t *data, size_t *len)
-{
- if (info->md_spd_loc == SPD_MEMPTR) {
- *data = info->data_ptr;
- *len = info->data_len;
- } else if (info->md_spd_loc == SPD_CBFS) {
- read_spd_from_cbfs(info->cbfs_index, data, len);
- } else {
- die("Not a valid location(%d) for Memory-down SPD!\n", info->md_spd_loc);
+ uint32_t *spd_upds[MRC_CHANNELS][CONFIG_DIMMS_PER_CHANNEL] = {
+ [0] = { &mem_cfg->MemorySpdPtr00, &mem_cfg->MemorySpdPtr01, },
+ [1] = { &mem_cfg->MemorySpdPtr02, &mem_cfg->MemorySpdPtr03, },
+ [2] = { &mem_cfg->MemorySpdPtr04, &mem_cfg->MemorySpdPtr05, },
+ [3] = { &mem_cfg->MemorySpdPtr06, &mem_cfg->MemorySpdPtr07, },
+ [4] = { &mem_cfg->MemorySpdPtr08, &mem_cfg->MemorySpdPtr09, },
+ [5] = { &mem_cfg->MemorySpdPtr10, &mem_cfg->MemorySpdPtr11, },
+ [6] = { &mem_cfg->MemorySpdPtr12, &mem_cfg->MemorySpdPtr13, },
+ [7] = { &mem_cfg->MemorySpdPtr14, &mem_cfg->MemorySpdPtr15, },
+ };
+ uint8_t *disable_dimm_upds[MRC_CHANNELS] = {
+ &mem_cfg->DisableDimmCh0,
+ &mem_cfg->DisableDimmCh1,
+ &mem_cfg->DisableDimmCh2,
+ &mem_cfg->DisableDimmCh3,
+ &mem_cfg->DisableDimmCh4,
+ &mem_cfg->DisableDimmCh5,
+ &mem_cfg->DisableDimmCh6,
+ &mem_cfg->DisableDimmCh7,
+ };
+ int ch, dimm;
+
+ mem_cfg->MemorySpdDataLen = data->spd_len;
+
+ for (ch = 0; ch < MRC_CHANNELS; ch++) {
+ uint8_t *disable_dimm_ptr = disable_dimm_upds[ch];
+ *disable_dimm_ptr = 0;
+
+ for (dimm = 0; dimm < CONFIG_DIMMS_PER_CHANNEL; dimm++) {
+ uint32_t *spd_ptr = spd_upds[ch][dimm];
+
+ *spd_ptr = data->spd[ch][dimm];
+ if (!*spd_ptr)
+ *disable_dimm_ptr |= BIT(dimm);
+ }
}
-
- print_spd_info((uint8_t *) *data);
}
-void meminit_lpddr4x(FSP_M_CONFIG *mem_cfg, const struct lpddr4x_cfg *board_cfg,
- const struct spd_info *info, bool half_populated)
-
+static void mem_init_dq_dqs_upds(void *upds[MRC_CHANNELS], const void *map, size_t upd_size,
+ const struct mem_channel_data *data)
{
- size_t spd_len;
- uintptr_t spd_data;
- int i;
+ size_t i;
- if (info->topology != MEMORY_DOWN)
- die("LPDDR4x only support memory-down topology.\n");
-
- /* LPDDR4x does not allow interleaved memory */
- mem_cfg->DqPinsInterleaved = 0;
- mem_cfg->ECT = board_cfg->ect;
-
- read_md_spd(info, &spd_data, &spd_len);
- mem_cfg->MemorySpdDataLen = spd_len;
-
- for (i = 0; i < LPDDR4X_CHANNELS; i++) {
- if (LPDDR4X_CHANNEL_UNPOPULATED(i, half_populated))
- init_spd_upds_empty(mem_cfg, i);
+ for (i = 0; i < MRC_CHANNELS; i++, map += upd_size) {
+ if (channel_is_populated(i, MRC_CHANNELS, data->ch_population_flags))
+ memcpy(upds[i], map, upd_size);
else
- init_spd_upds_dimm0(mem_cfg, i, spd_data);
- }
-
- /*
- * LPDDR4x memory interface has 2 DQs per channel. Each DQ consists of 8 bits (1
- * byte). However, FSP UPDs for DQ Map expect a DQ pair (i.e. mapping for 2 bytes) in
- * each UPD.
- *
- * Thus, init_dq_upds() needs to be called for dq pair of each channel.
- * DqMapCpu2DramCh0 --> dq_map[CHAN=0][0-1]
- * DqMapCpu2DramCh1 --> dq_map[CHAN=1][0-1]
- * DqMapCpu2DramCh2 --> dq_map[CHAN=2][0-1]
- * DqMapCpu2DramCh3 --> dq_map[CHAN=3][0-1]
- * DqMapCpu2DramCh4 --> dq_map[CHAN=4][0-1]
- * DqMapCpu2DramCh5 --> dq_map[CHAN=5][0-1]
- * DqMapCpu2DramCh6 --> dq_map[CHAN=6][0-1]
- * DqMapCpu2DramCh7 --> dq_map[CHAN=7][0-1]
- */
- for (i = 0; i < LPDDR4X_CHANNELS; i++) {
- if (LPDDR4X_CHANNEL_UNPOPULATED(i, half_populated))
- init_dq_upds_empty(mem_cfg, i);
- else
- init_dq_upds(mem_cfg, i, board_cfg->dq_map[i][0],
- board_cfg->dq_map[i][1]);
- }
-
- /*
- * LPDDR4x memory interface has 2 DQS pairs per channel. FSP UPDs for DQS Map expect a
- * pair in each UPD.
- *
- * Thus, init_dqs_upds() needs to be called for dqs pair of each channel.
- * DqsMapCpu2DramCh0 --> dqs_map[CHAN=0][0-1]
- * DqsMapCpu2DramCh1 --> dqs_map[CHAN=1][0-1]
- * DqsMapCpu2DramCh2 --> dqs_map[CHAN=2][0-1]
- * DqsMapCpu2DramCh3 --> dqs_map[CHAN=3][0-1]
- * DqsMapCpu2DramCh4 --> dqs_map[CHAN=4][0-1]
- * DqsMapCpu2DramCh5 --> dqs_map[CHAN=5][0-1]
- * DqsMapCpu2DramCh6 --> dqs_map[CHAN=6][0-1]
- * DqsMapCpu2DramCh7 --> dqs_map[CHAN=7][0-1]
- */
- for (i = 0; i < LPDDR4X_CHANNELS; i++) {
- if (LPDDR4X_CHANNEL_UNPOPULATED(i, half_populated))
- init_dqs_upds_empty(mem_cfg, i);
- else
- init_dqs_upds(mem_cfg, i, board_cfg->dqs_map[i][0],
- board_cfg->dqs_map[i][1]);
+ memset(upds[i], 0, upd_size);
}
}
-static void read_sodimm_spd(const struct spd_info *info, struct spd_block *blk)
+static void mem_init_dq_upds(FSP_M_CONFIG *mem_cfg, const struct mem_channel_data *data,
+ const struct mb_cfg *mb_cfg)
{
- unsigned int i;
-
- blk->addr_map[0] = info->smbus_info[0].addr_dimm0;
- blk->addr_map[1] = info->smbus_info[0].addr_dimm1;
- blk->addr_map[2] = info->smbus_info[1].addr_dimm0;
- blk->addr_map[3] = info->smbus_info[1].addr_dimm1;
-
- get_spd_smbus(blk);
-
- /*
- * SPD gets printed only if:
- * a) mainboard provides a non-zero SMBus address and
- * b) SPD is successfully read using the SMBus address
- */
- for (i = 0; i < ARRAY_SIZE(blk->addr_map); i++) {
- if (blk->spd_array[i] != NULL)
- print_spd_info((uint8_t *)blk->spd_array[i]);
- }
+ void *dq_upds[MRC_CHANNELS] = {
+ &mem_cfg->DqMapCpu2DramCh0,
+ &mem_cfg->DqMapCpu2DramCh1,
+ &mem_cfg->DqMapCpu2DramCh2,
+ &mem_cfg->DqMapCpu2DramCh3,
+ &mem_cfg->DqMapCpu2DramCh4,
+ &mem_cfg->DqMapCpu2DramCh5,
+ &mem_cfg->DqMapCpu2DramCh6,
+ &mem_cfg->DqMapCpu2DramCh7,
+ };
+
+ const size_t upd_size = sizeof(mem_cfg->DqMapCpu2DramCh0);
+
+ _Static_assert(upd_size == CONFIG_MRC_CHANNEL_WIDTH, "Incorrect DQ UPD size!");
+
+ mem_init_dq_dqs_upds(dq_upds, mb_cfg->dq_map, upd_size, data);
}
-static void ddr4_get_spd(unsigned int channel, const uintptr_t *spd_md_data,
- const struct spd_block *spd_sodimm_blk,
- const struct spd_info *info,
- const bool half_populated, uintptr_t *spd_dimm0,
- uintptr_t *spd_dimm1)
+static void mem_init_dqs_upds(FSP_M_CONFIG *mem_cfg, const struct mem_channel_data *data,
+ const struct mb_cfg *mb_cfg)
{
- if (channel == 0) {
- /* For mixed topology, channel 0 can only be Memory_Down */
- if ((info->topology == MEMORY_DOWN) || (info->topology == MIXED)) {
- *spd_dimm0 = *spd_md_data;
- *spd_dimm1 = 0;
- } else if (info->topology == SODIMM) {
- *spd_dimm0 = (uintptr_t)spd_sodimm_blk->spd_array[0];
- *spd_dimm1 = (uintptr_t)spd_sodimm_blk->spd_array[1];
- } else
- die("Undefined memory topology on Channel 0.\n");
- } else if (channel == 1) {
- if (half_populated) {
- *spd_dimm0 = *spd_dimm1 = 0;
- } else if (info->topology == MEMORY_DOWN) {
- *spd_dimm0 = *spd_md_data;
- *spd_dimm1 = 0;
- /* For mixed topology, channel 1 can only be SODIMM */
- } else if ((info->topology == SODIMM) || (info->topology == MIXED)) {
- *spd_dimm0 = (uintptr_t)spd_sodimm_blk->spd_array[2];
- *spd_dimm1 = (uintptr_t)spd_sodimm_blk->spd_array[3];
- } else
- die("Undefined memory topology on channel 1.\n");
- } else
- die("Unsupported channels.\n");
+ void *dqs_upds[MRC_CHANNELS] = {
+ &mem_cfg->DqsMapCpu2DramCh0,
+ &mem_cfg->DqsMapCpu2DramCh1,
+ &mem_cfg->DqsMapCpu2DramCh2,
+ &mem_cfg->DqsMapCpu2DramCh3,
+ &mem_cfg->DqsMapCpu2DramCh4,
+ &mem_cfg->DqsMapCpu2DramCh5,
+ &mem_cfg->DqsMapCpu2DramCh6,
+ &mem_cfg->DqsMapCpu2DramCh7,
+ };
+
+ const size_t upd_size = sizeof(mem_cfg->DqsMapCpu2DramCh0);
+
+ _Static_assert(upd_size == CONFIG_MRC_CHANNEL_WIDTH / 8, "Incorrect DQS UPD size!");
+
+ mem_init_dq_dqs_upds(dqs_upds, mb_cfg->dqs_map, upd_size, data);
}
-/* Initialize DDR4 memory configurations */
-void meminit_ddr4(FSP_M_CONFIG *mem_cfg, const struct mb_ddr4_cfg *board_cfg,
- const struct spd_info *info, const bool half_populated)
+void memcfg_init(FSP_M_CONFIG *mem_cfg, const struct mb_cfg *mb_cfg,
+ const struct mem_spd *spd_info, bool half_populated)
{
- uintptr_t spd_md_data;
- size_t spd_md_len;
- uintptr_t spd_dimm0 = 0;
- uintptr_t spd_dimm1 = 0;
- struct spd_block spd_sodimm_blk;
- unsigned int i;
- unsigned int index = 0;
+ struct mem_channel_data data;
- /* Early Command Training Enabled */
- mem_cfg->ECT = board_cfg->ect;
- mem_cfg->DqPinsInterleaved = board_cfg->dq_pins_interleaved;
+ if (mb_cfg->type >= ARRAY_SIZE(soc_mem_cfg))
+ die("Invalid memory type(%x)!\n", mb_cfg->type);
- if ((info->topology == MEMORY_DOWN) || (info->topology == MIXED)) {
- read_md_spd(info, &spd_md_data, &spd_md_len);
- mem_cfg->MemorySpdDataLen = spd_md_len;
- }
-
- if ((info->topology == SODIMM) || (info->topology == MIXED)) {
- read_sodimm_spd(info, &spd_sodimm_blk);
- if ((info->topology == MIXED) &&
- (mem_cfg->MemorySpdDataLen != spd_sodimm_blk.len))
- die("Mixed topology has incorrect length.\n");
- else
- mem_cfg->MemorySpdDataLen = spd_sodimm_blk.len;
- }
-
- for (i = 0; i < DDR4_CHANNELS; i++) {
- ddr4_get_spd(i, &spd_md_data, &spd_sodimm_blk, info,
- half_populated, &spd_dimm0, &spd_dimm1);
- init_spd_upds(mem_cfg, DDR4_FSP_UPD_CHANNEL_IDX(i), spd_dimm0, spd_dimm1);
- }
-
- /*
- * DDR4 memory interface has 8 DQs per channel. Each DQ consists of 8 bits (1
- * byte). However, FSP UPDs for DQ Map expect a DQ pair (i.e. mapping for 2 bytes) in
- * each UPD.
- *
- * Thus, init_dq_upds() needs to be called for every dq pair of each channel.
- * DqMapCpu2DramCh0 --> dq_map[CHAN=0][0-1]
- * DqMapCpu2DramCh1 --> dq_map[CHAN=0][2-3]
- * DqMapCpu2DramCh2 --> dq_map[CHAN=0][4-5]
- * DqMapCpu2DramCh3 --> dq_map[CHAN=0][6-7]
- * DqMapCpu2DramCh4 --> dq_map[CHAN=1][0-1]
- * DqMapCpu2DramCh5 --> dq_map[CHAN=1][2-3]
- * DqMapCpu2DramCh6 --> dq_map[CHAN=1][4-5]
- * DqMapCpu2DramCh7 --> dq_map[CHAN=1][6-7]
- */
+ mem_populate_channel_data(&soc_mem_cfg[mb_cfg->type], spd_info, half_populated, &data);
+ mem_init_spd_upds(mem_cfg, &data);
+ mem_init_dq_upds(mem_cfg, &data, mb_cfg);
+ mem_init_dqs_upds(mem_cfg, &data, mb_cfg);
- /*
- * DDR4 memory interface has 8 DQS pairs per channel. FSP UPDs for DQS Map expect a
- * pair in each UPD.
- *
- * Thus, init_dqs_upds() needs to be called for every dqs pair of each channel.
- * DqsMapCpu2DramCh0 --> dqs_map[CHAN=0][0-1]
- * DqsMapCpu2DramCh1 --> dqs_map[CHAN=0][2-3]
- * DqsMapCpu2DramCh2 --> dqs_map[CHAN=0][4-5]
- * DqsMapCpu2DramCh3 --> dqs_map[CHAN=0][6-7]
- * DqsMapCpu2DramCh4 --> dqs_map[CHAN=1][0-1]
- * DqsMapCpu2DramCh5 --> dqs_map[CHAN=1][2-3]
- * DqsMapCpu2DramCh6 --> dqs_map[CHAN=1][4-5]
- * DqsMapCpu2DramCh7 --> dqs_map[CHAN=1][6-7]
- */
+ mem_cfg->ECT = mb_cfg->ect;
- for (i = 0; i < DDR4_CHANNELS; i++) {
- for (int b = 0; b < DDR4_BYTES_PER_CHANNEL; b += 2) {
- if (half_populated && (i == 1)) {
- init_dq_upds_empty(mem_cfg, index);
- init_dqs_upds_empty(mem_cfg, index);
- } else {
- init_dq_upds(mem_cfg, index, board_cfg->dq_map[i][b],
- board_cfg->dq_map[i][b+1]);
- init_dqs_upds(mem_cfg, index, board_cfg->dqs_map[i][b],
- board_cfg->dqs_map[i][b+1]);
- }
- index++;
- }
- }
-}
-
-void meminit_ddr(FSP_M_CONFIG *mem_cfg, const struct ddr_memory_cfg *board_cfg,
- const struct spd_info *info, bool half_populated)
-{
- switch (board_cfg->mem_type) {
- case MEMTYPE_DDR4:
- meminit_ddr4(mem_cfg, board_cfg->ddr4_cfg, info, half_populated);
+ switch (mb_cfg->type) {
+ case MEM_TYPE_DDR4:
+ mem_cfg->DqPinsInterleaved = mb_cfg->ddr4_config.dq_pins_interleaved;
break;
- case MEMTYPE_LPDDR4X:
- meminit_lpddr4x(mem_cfg, board_cfg->lpddr4_cfg, info, half_populated);
+ case MEM_TYPE_LP4X:
+ /* LPDDR4x does not allow interleaved memory */
+ mem_cfg->DqPinsInterleaved = 0;
break;
default:
- die("Unsupported memory type = %d!\n", board_cfg->mem_type);
+ die("Unsupported memory type(%d)\n", mb_cfg->type);
}
+
}
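
Taken together, the new entry point collapses the old meminit_ddr4()/meminit_lpddr4x()/meminit_ddr() trio into a single call. A sketch of how a Tiger Lake mainboard would use it from its romstage FSP-M hook follows; mainboard_memory_init_params() and FspmConfig are the standard coreboot/FSP names, but the struct mem_spd layout lives in the common block header (not shown in this diff), so its initializer is only indicated by a comment.

/*
 * Sketch (not part of the patch): calling the unified memcfg_init() from a
 * board's romstage. board_memcfg is the struct mb_cfg shown in the header
 * example above.
 */
#include <fsp/api.h>
#include <soc/meminit.h>
#include <soc/romstage.h>

extern const struct mb_cfg board_memcfg;  /* defined in the board's memory config */

void mainboard_memory_init_params(FSPM_UPD *memupd)
{
	/* SPD source (memory-down CBFS index or SODIMM SMBus addresses); the
	   field names come from the common block's struct mem_spd. */
	const struct mem_spd spd_info = { 0 };
	const bool half_populated = false;

	memcfg_init(&memupd->FspmConfig, &board_memcfg, &spd_info, half_populated);
}
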