author:    Rudolf Marek <r.marek@assembler.cz>      2010-12-18 23:30:59 +0000
committer: Stefan Reinauer <stepan@openbios.org>    2010-12-18 23:30:59 +0000
commit:    2c3662710ae4f2ef063098c5dfb58dbe74fe121e (patch)
tree:      788cfb8e3aa7ee01aa0a029c1c07735ae0f9b315 /src
parent:    cadc54583877db65f33d2db11088d5fae1b77b74 (diff)
download:  coreboot-2c3662710ae4f2ef063098c5dfb58dbe74fe121e.tar.xz
SMM on AMD K8 Part 2/2
Signed-off-by: Rudolf Marek <r.marek@assembler.cz>
Acked-by: Stefan Reinauer <stepan@coresystems.de>
git-svn-id: svn://svn.coreboot.org/coreboot/trunk@6202 2b7e53f0-3cfb-0310-b3e9-8179ed1497e1
Diffstat (limited to 'src')

 -rw-r--r--  src/cpu/amd/smm/smm_init.c               | 48
 -rw-r--r--  src/southbridge/via/vt8237r/smihandler.c |  4

 2 files changed, 19 insertions(+), 33 deletions(-)
diff --git a/src/cpu/amd/smm/smm_init.c b/src/cpu/amd/smm/smm_init.c
index d6f728bbba..ad1c112ed0 100644
--- a/src/cpu/amd/smm/smm_init.c
+++ b/src/cpu/amd/smm/smm_init.c
@@ -57,13 +57,13 @@ void smm_init(void)
 
 	smm_handler_copied = 1;
 
-	/* MTRR changes don't like an enabled cache */
-	disable_cache();
-
 	/* Back up MSRs for later restore */
 	syscfg_orig = rdmsr(SYSCFG_MSR);
 	mtrr_aseg_orig = rdmsr(MTRRfix16K_A0000_MSR);
 
+	/* MTRR changes don't like an enabled cache */
+	disable_cache();
+
 	msr = syscfg_orig;
 	/* Allow changes to MTRR extended attributes */
 	msr.lo |= SYSCFG_MSR_MtrrFixDramModEn;
@@ -78,60 +78,46 @@ void smm_init(void)
 	msr.lo = 0x18181818;
 	msr.hi = 0x18181818;
 	wrmsr(MTRRfix16K_A0000_MSR, msr);
-	enable_cache();
 
-	/* disable the extended features */
+	/* enable the extended features */
 	msr = syscfg_orig;
 	msr.lo |= SYSCFG_MSR_MtrrFixDramModEn;
 	msr.lo |= SYSCFG_MSR_MtrrFixDramEn;
 	wrmsr(SYSCFG_MSR, msr);
 
-	/* enable the SMM memory window */
-	// TODO does "Enable ASEG SMRAM Range" have to happen on
-	// every CPU core?
-	msr = rdmsr(SMM_MASK_MSR);
-	msr.lo |= (1 << 0); // Enable ASEG SMRAM Range
-	msr.lo &= ~(1 << 2); // Open ASEG SMRAM Range
-	wrmsr(SMM_MASK_MSR, msr);
-
+	enable_cache();
 	/* copy the real SMM handler */
 	memcpy((void *)SMM_BASE, &_binary_smm_start, (size_t)&_binary_smm_size);
 	wbinvd();
 
-	msr = rdmsr(SMM_MASK_MSR);
-	msr.lo |= ~(1 << 2); // Close ASEG SMRAM Range
-	wrmsr(SMM_MASK_MSR, msr);
-
-	/* Change SYSCFG so we can restore the MTRR */
-	msr = syscfg_orig;
-	msr.lo |= SYSCFG_MSR_MtrrFixDramModEn;
-	msr.lo &= ~SYSCFG_MSR_MtrrFixDramEn;
-	wrmsr(SYSCFG_MSR, msr);
-
 	/* Restore MTRR */
 	disable_cache();
-	wrmsr(MTRRfix16K_A0000_MSR, mtrr_aseg_orig);
 
 	/* Restore SYSCFG */
 	wrmsr(SYSCFG_MSR, syscfg_orig);
+
+	wrmsr(MTRRfix16K_A0000_MSR, mtrr_aseg_orig);
 	enable_cache();
 	}
 
+	/* But set SMM base address on all CPUs/cores */
 	msr = rdmsr(SMM_BASE_MSR);
 	msr.lo = SMM_BASE - (lapicid() * 0x400);
 	wrmsr(SMM_BASE_MSR, msr);
-}
-
-void smm_lock(void)
-{
-	// TODO I think this should be running on each CPU
-	msr_t msr;
 
-	printk(BIOS_DEBUG, "Locking SMM.\n");
+	/* enable the SMM memory window */
+	msr = rdmsr(SMM_MASK_MSR);
+	msr.lo |= (1 << 0); // Enable ASEG SMRAM Range
+	wrmsr(SMM_MASK_MSR, msr);
 
 	/* Set SMMLOCK to avoid exploits messing with SMM */
 	msr = rdmsr(HWCR_MSR);
 	msr.lo |= (1 << 0);
 	wrmsr(HWCR_MSR, msr);
 }
+
+void smm_lock(void)
+{
+	/* We lock SMM per CPU core */
+}
diff --git a/src/southbridge/via/vt8237r/smihandler.c b/src/southbridge/via/vt8237r/smihandler.c
index 344293dfa8..0c8ff2f7c7 100644
--- a/src/southbridge/via/vt8237r/smihandler.c
+++ b/src/southbridge/via/vt8237r/smihandler.c
@@ -208,9 +208,9 @@ smi_handler_t southbridge_smi[32] = {
 	NULL,			// [1]
 	NULL,			// [2]
 	NULL,			// [3]
-	southbridge_smi_cmd,	// [4]
+	NULL,			// [4]
 	NULL,			// [5]
-	NULL,			// [6]
+	southbridge_smi_cmd,	// [6]
 	NULL,			// [7]
 	NULL,			// [8]
 	NULL,			// [9]
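
A note on the 0x18181818 value written to MTRRfix16K_A0000_MSR: with
SYSCFG[MtrrFixDramModEn] set, each attribute byte of a K8 fixed-range MTRR
carries RdDram/WrDram steering bits in addition to the memory type, so 0x18
per 16 KiB slice routes both reads and writes of the A0000 segment to DRAM
instead of the VGA MMIO hole, which is what lets memcpy() land the handler in
ASEG. The decoding below follows the K8 BKDG's bit layout; the macro names
are illustrative and do not appear in the patch:

	/* Hedged decoding of the fixed-MTRR attribute bytes, per the K8
	 * BKDG (bit names are assumptions from the manual, not from this
	 * patch). Only meaningful while SYSCFG[MtrrFixDramModEn] is set. */
	#define MTRR_RD_DRAM (1 << 3)  /* reads of this 16 KiB slice hit DRAM  */
	#define MTRR_WR_DRAM (1 << 4)  /* writes of this slice hit DRAM, too   */
	#define MTRR_TYPE_UC 0x00      /* memory type in bits 2:0: uncacheable */

	/* One attribute byte per slice, four slices in msr.lo and four in
	 * msr.hi, so replicating 0x18 across all bytes covers all of ASEG. */
	#define ASEG_SLICE   (MTRR_RD_DRAM | MTRR_WR_DRAM | MTRR_TYPE_UC) /* 0x18       */
	#define ASEG_PATTERN (ASEG_SLICE * 0x01010101u)                   /* 0x18181818 */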
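
The per-core SMM base line deserves a worked example. On K8 each core saves
its registers in a save-state area at SMBASE + 0xfe00 (512 bytes, per the
AMD manuals), so staggering SMBASE down by 0x400 per LAPIC ID keeps those
areas disjoint while every core still runs the single handler copied into
ASEG. A standalone sketch, assuming SMM_BASE is the 0xa0000 ASEG base used
elsewhere in this tree (the diff itself does not restate the value):

	#include <stdio.h>

	/* Sketch of the SMBASE stagger from smm_init(); SMM_BASE == 0xa0000
	 * is an assumption about the surrounding code, and 0xfe00 is the K8
	 * save-state offset from the AMD manuals, not from this patch. */
	int main(void)
	{
		const unsigned int smm_base = 0xa0000;

		for (unsigned int lapicid = 0; lapicid < 4; lapicid++) {
			unsigned int smbase = smm_base - lapicid * 0x400;

			/* 0x400 of stagger exceeds the 0x200-byte save
			 * state, so no two cores' areas can overlap. */
			printf("core %u: SMBASE 0x%05x, save state at 0x%05x\n",
			       lapicid, smbase, smbase + 0xfe00);
		}
		return 0;
	}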
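
On the vt8237r side, southbridge_smi[] is indexed by SMI status bit, so
moving southbridge_smi_cmd from slot 4 to slot 6 means the command/APM SMI
on this southbridge is reported in bit 6 of the SMI status register. The
loop below is the usual coreboot shape for consuming such a table; the
typedef, get_smi_status(), and dispatch_smi() are hypothetical stand-ins,
since the real vt8237r dispatch code is not part of this diff:

	/* Hedged sketch of the dispatch that consumes southbridge_smi[]:
	 * one handler slot per SMI status bit. */
	typedef void (*smi_handler_t)(unsigned int node, void *state_save);

	extern smi_handler_t southbridge_smi[32];

	unsigned int get_smi_status(void); /* hypothetical chipset accessor */

	void dispatch_smi(unsigned int node, void *state_save)
	{
		unsigned int smi_sts = get_smi_status();

		for (int i = 0; i < 32; i++) {
			/* status bit i set and a handler registered? */
			if ((smi_sts & (1u << i)) && southbridge_smi[i])
				southbridge_smi[i](node, state_save);
		}
	}

With that shape in mind, the patch is simply correcting which status bit the
command handler is attached to; no handler code changes.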