author     zbao <fishbaozi@gmail.com>                      2012-04-05 13:20:50 +0800
committer  Stefan Reinauer <stefan.reinauer@coreboot.org>  2012-04-12 00:17:35 +0200
commit     392562263858011ef898e377477124f5f66b1302 (patch)
tree       8ac8b9437dbfb6f5f56af42f0163a006b0f745e8 /src/vendorcode/amd/agesa/f14/Proc
parent     9bcdbf8eaa0c73d130ba555163f89fa1759c8c99 (diff)
S3 code in vendorcode folder.
Change ExecuteFinalHltInstruction to assembly code so we can make sure the code runs stackless.

Change-Id: I783ced6cf7c5bc29c12a37aef29077e610d8957d
Signed-off-by: Zheng Bao <zheng.bao@amd.com>
Signed-off-by: zbao <fishbaozi@gmail.com>
Reviewed-on: http://review.coreboot.org/622
Tested-by: build bot (Jenkins)
Reviewed-by: Stefan Reinauer <stefan.reinauer@coreboot.org>
Diffstat (limited to 'src/vendorcode/amd/agesa/f14/Proc')
 -rw-r--r--  src/vendorcode/amd/agesa/f14/Proc/CPU/cahalt.c        141
 -rw-r--r--  src/vendorcode/amd/agesa/f14/Proc/CPU/cahaltasm.S     203
 -rw-r--r--  src/vendorcode/amd/agesa/f14/Proc/Common/AmdS3Save.c    2
 -rw-r--r--  src/vendorcode/amd/agesa/f14/Proc/Mem/Feat/S3/mfs3.c   10
4 files changed, 213 insertions(+), 143 deletions(-)
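
The commit rewrites ExecuteFinalHltInstruction in assembly so it can keep running after cache-as-RAM (and with it the stack) is torn down; its C-visible interface is unchanged. As an illustration only (the HALT_FLAG_* names are invented here, not AGESA's), the HaltFlags handling of the new routine amounts to:

    #include <stdint.h>

    /* Illustration only (not AGESA code): what the new assembly routine does
     * with its first argument.  Bit names are invented for clarity. */
    #define HALT_FLAG_PRIMARY_CORE  (1u << 0)  /* this core programs the MTRRs         */
    #define HALT_FLAG_CACHE_ENABLE  (1u << 1)  /* leave the cache on (clear CR0.CD/NW) */

    void describe_final_hlt(uint32_t halt_flags)
    {
        if (halt_flags & HALT_FLAG_CACHE_ENABLE) {
            /* assembly: set CU_CFG3[CombineCr0Cd], clear CR0.CD and CR0.NW */
        } else {
            /* assembly: set CR0.CD and CR0.NW, disabling the cache */
        }
        if (halt_flags & HALT_FLAG_PRIMARY_CORE) {
            /* assembly: program fixed/variable MTRRs and apply IBV overrides */
        }
        /* finally: AMD_DISABLE_STACK_FAMILY_HOOK, then a cli; hlt loop */
    }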
diff --git a/src/vendorcode/amd/agesa/f14/Proc/CPU/cahalt.c b/src/vendorcode/amd/agesa/f14/Proc/CPU/cahalt.c
index ae9f3c7fd8..938300ae93 100644
--- a/src/vendorcode/amd/agesa/f14/Proc/CPU/cahalt.c
+++ b/src/vendorcode/amd/agesa/f14/Proc/CPU/cahalt.c
@@ -106,149 +106,16 @@ ExecuteWbinvdInstruction (
*/
-//----------------------------------------------------------------------------
-
-STATIC
-VOID
-PrimaryCoreFunctions (AP_MTRR_SETTINGS *ApMtrrSettingsList)
- {
- UINT64 data;
- UINT32 msrno;
- // Configure the MTRRs on the AP so
- // when it runs remote code it will execute
- // out of RAM instead of ROM.
- // Disable MTRRs and turn on modification enable bit
-
- data = __readmsr (0xC0010010); // MTRR_SYS_CFG
- data &= ~(1 << 18); // MtrrFixDramEn
- data &= ~(1 << 20); // MtrrVarDramEn
- data |= (1 << 19); // MtrrFixDramModEn
- data |= (1 << 17); // SysUcLockEn
-
-
- __writemsr (0xC0010010, data);
-
- // Set 7FFFh-00000h and 9FFFFh-80000h as WB DRAM
- __writemsr (0x250, 0x1E1E1E1E1E1E1E1E); // AMD_MTRR_FIX64k_00000
- __writemsr (0x258, 0x1E1E1E1E1E1E1E1E); // AMD_MTRR_FIX16k_80000
-
- // Set BFFFFh-A0000h, DFFFFh-C0000h as Uncacheable Memory-mapped IO
- __writemsr (0x259, 0); // AMD_AP_MTRR_FIX16k_A0000
- __writemsr (0x268, 0); // AMD_MTRR_FIX4k_C0000
- __writemsr (0x269, 0); // AMD_MTRR_FIX4k_C8000
- __writemsr (0x26A, 0); // AMD_MTRR_FIX4k_D0000
- __writemsr (0x26B, 0); // AMD_MTRR_FIX4k_D8000
-
- // Set FFFFFh-E0000h as Uncacheable Memory
- for (msrno = 0x26C; msrno <= 0x26F; msrno++)
- __writemsr (msrno, 0x1818181818181818);
-
- // If IBV provided settings for Fixed-Sized MTRRs,
- // overwrite the default settings.
- if ((uintptr_t) ApMtrrSettingsList != 0 && (uintptr_t) ApMtrrSettingsList != 0xFFFFFFFF)
- {
- int index;
- for (index = 0; ApMtrrSettingsList [index].MsrAddr != CPU_LIST_TERMINAL; index++)
- __writemsr (ApMtrrSettingsList [index].MsrAddr, ApMtrrSettingsList [index].MsrData);
- }
-
- // restore variable MTTR6 and MTTR7 to default states
- for (msrno = 0x20F; msrno <= 0x20C; msrno--) // decrement so that the pair is disable before the base is cleared
- __writemsr (msrno, 0);
-
- // Enable fixed-range and variable-range MTRRs
- // Set Fixed-Range Enable (FE) and MTRR Enable (E) bits
- __writemsr (0x2FF, __readmsr (0x2FF) | 0xC00);
-
- // Enable Top-of-Memory setting
- // Enable use of RdMem/WrMem bits attributes
- data = __readmsr (0xC0010010); // MTRR_SYS_CFG
- data |= (1 << 18); // MtrrFixDramEn
- data |= (1 << 20); // MtrrVarDramEn
- data &= ~(1 << 19); // MtrrFixDramModEn
- __writemsr (0xC0010010, data);
- }
-
-//----------------------------------------------------------------------------
-
+/* see cahaltasm.S
VOID
ExecuteFinalHltInstruction (
- IN UINT32 SharedCore,
+ IN UINT32 HaltFlags,
IN AP_MTRR_SETTINGS *ApMtrrSettingsList,
IN AMD_CONFIG_PARAMS *StdHeader
)
{
- int abcdRegs [4];
- UINT32 cr0val;
- UINT64 data;
-
- cr0val = __readcr0 ();
- if (SharedCore & 2)
- {
- // set CombineCr0Cd and enable cache in CR0
- __writemsr (MSR_CU_CFG3, __readmsr (MSR_CU_CFG3) | 1ULL << 49);
- __writecr0 (cr0val & ~0x60000000);
- }
- else
- __writecr0 (cr0val | 0x60000000);
-
- if (SharedCore & 1) PrimaryCoreFunctions (ApMtrrSettingsList);
-
- // Make sure not to touch any Shared MSR from this point on
-
- // Restore settings that were temporarily overridden for the cache as ram phase
- data = __readmsr (0xC0011022); // MSR_DC_CFG
- data &= ~(1 << 4); // DC_DIS_SPEC_TLB_RLD
- data &= ~(1 << 8); // DIS_CLR_WBTOL2_SMC_HIT
- data &= ~(1 << 13); // DIS_HW_PF
- __writemsr (0xC0011022, data);
-
- data = __readmsr (0xC0011021); // MSR_IC_CFG - C001_1021
- data &= ~(1 << 9); // IC_DIS_SPEC_TLB_RLD
- __writemsr (0xC0011021, data);
-
- // AMD_DISABLE_STACK_FAMILY_HOOK
- __cpuid (abcdRegs, 1);
- if ((abcdRegs [0] >> 20) == 1) //-----family 10h (Hydra) only-----
- {
- data = __readmsr (0xC0011022);
- data &= ~(1 << 4);
- data &= ~(1 << 8);
- data &= ~(1 << 13);
- __writemsr (0xC0011022, data);
-
- data = __readmsr (0xC0011021);
- data &= ~(1 << 14);
- data &= ~(1 << 9);
- __writemsr (0xC0011021, data);
-
- data = __readmsr (0xC001102A);
- data &= ~(1 << 15);
- data &= ~(1ull << 35);
- __writemsr (0xC001102A, data);
- }
- else if ((abcdRegs [0] >> 20) == 6) //-----family 15h (Orochi) only-----
- {
- data = __readmsr (0xC0011020);
- data &= ~(1 << 28);
- __writemsr (0xC0011020, data);
-
- data = __readmsr (0xC0011021);
- data &= ~(1 << 9);
- __writemsr (0xC0011021, data);
-
- data = __readmsr (0xC0011022);
- data &= ~(1 << 4);
- data &= ~(1l << 13);
- __writemsr (0xC0011022, data);
- }
-
- for (;;)
- {
- _disable ();
- __halt ();
- }
- }
+}
+*/
//----------------------------------------------------------------------------
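
One quirk worth noting in the removed C above: the loop meant to restore variable MTRRs 6 and 7 (for (msrno = 0x20F; msrno <= 0x20C; msrno--)) never executes, because 0x20F <= 0x20C is already false on entry; the replacement assembly appears to carry the same quirk (the block ending at label 6: compares %ecx against decimal 10 and has no loop-back jump). A corrected sketch of the intended logic, using a stand-in for the __writemsr intrinsic the old C file relied on:

    #include <stdint.h>

    /* Hypothetical stand-in for the compiler intrinsic used by the old C file. */
    extern void __writemsr(uint32_t msr, uint64_t value);

    /* Clear MTRRphysBase6/Mask6 and MTRRphysBase7/Mask7 (MSRs 0x20C-0x20F).
     * Decrement so each mask (with its valid bit) is cleared before its base. */
    static void reset_variable_mtrr_6_7(void)
    {
        uint32_t msrno;
        for (msrno = 0x20F; msrno >= 0x20C; msrno--)
            __writemsr(msrno, 0);
    }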
diff --git a/src/vendorcode/amd/agesa/f14/Proc/CPU/cahaltasm.S b/src/vendorcode/amd/agesa/f14/Proc/CPU/cahaltasm.S
new file mode 100644
index 0000000000..48ee5d0238
--- /dev/null
+++ b/src/vendorcode/amd/agesa/f14/Proc/CPU/cahaltasm.S
@@ -0,0 +1,203 @@
+/*
+ * Copyright (c) 2011, Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of Advanced Micro Devices, Inc. nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL ADVANCED MICRO DEVICES, INC. BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+.include "src/vendorcode/amd/agesa/f14/gcccar.inc"
+
+.code32
+.align 4
+.globl ExecuteFinalHltInstruction
+ .type ExecuteFinalHltInstruction, @function
+/* ExecuteFinalHltInstruction (
+ IN UINT32 HaltFlags,
+ IN AP_MTRR_SETTINGS *ApMtrrSettingsList,
+ IN AMD_CONFIG_PARAMS *StdHeader
+ )
+*/
+
+/* This function disables CAR. We don't care about the stack on this CPU */
+ExecuteFinalHltInstruction:
+ movl 4(%esp), %esi /* HaltFlags*/
+ movl 8(%esp), %edi /* ApMtrrSettingList */
+
+/* Do these special steps in case if the core is part of a compute unit
+ * Note: The following bits are family specific flags, that gets set during build time,
+ * and indicates things like "family cache control methodology", etc.
+ * esi bit0 = 0 -> not a Primary core
+ * esi bit0 = 1 -> Primary core
+ * esi bit1 = 0 -> Cache disable
+ * esi bit1 = 1 -> Cache enable
+ */
+
+ bt $1, %esi /* .if (esi & 2h) */
+ jz 0f
+ /* Set CombineCr0Cd bit */
+ movl $CU_CFG3, %ecx
+ rdmsr
+ bts $(COMBINE_CR0_CD - 32), %edx
+ wrmsr
+ /* Clear the CR0.CD bit */
+ movl %cr0, %eax /* Make sure cache is enabled for all APs */
+ btr $CR0_CD, %eax
+ btr $CR0_NW, %eax
+ mov %eax, %cr0 /* Write back to CR0 */
+ jmp 1f /* .else */
+0:
+ movl %cr0, %eax /* Make sure cache is disabled for all APs */
+ bts $CR0_CD, %eax /* Disable cache */
+ bts $CR0_NW, %eax
+ movl %eax, %cr0 /* Write back to CR0 */
+1: /* .endif */
+
+ bt $0, %esi /* .if (esi & 1h) */
+ jz 2f
+ /* This core is a primary core and needs to do all the MTRRs, including shared MTRRs. */
+ movl %edi, %esi /* Get ApMtrrSettingList */
+
+ /* Configure the MTRRs on the AP so
+ * when it runs remote code it will execute
+ * out of RAM instead of ROM.
+ */
+
+ /* Disable MTRRs and turn on modification enable bit */
+ movl $MTRR_SYS_CFG, %ecx
+ rdmsr
+ btr $MTRR_VAR_DRAM_EN, %eax /* Disable */
+ bts $MTRR_FIX_DRAM_MOD_EN, %eax /* Enable */
+ btr $MTRR_FIX_DRAM_EN, %eax /* Disable */
+ bts $SYS_UC_LOCK_EN, %eax
+ wrmsr
+
+ /* Setup default values for Fixed-Sized MTRRs */
+ /* Set 7FFFh-00000h as WB */
+ movl $AMD_AP_MTRR_FIX64k_00000, %ecx
+ movl $0x1E1E1E1E, %eax
+ movl %eax, %edx
+ wrmsr
+
+ /* Set 9FFFFh-80000h also as WB */
+ movl $AMD_AP_MTRR_FIX16k_80000, %ecx
+ wrmsr
+
+ /* Set BFFFFh-A0000h as Uncacheable Memory-mapped IO */
+ movl $AMD_AP_MTRR_FIX16k_A0000, %ecx
+ xorl %eax, %eax
+ xorl %edx, %edx
+ wrmsr
+
+ /* Set DFFFFh-C0000h as Uncacheable Memory-mapped IO */
+ xorl %eax, %eax
+ xorl %edx, %edx
+ movl $AMD_AP_MTRR_FIX4k_C0000, %ecx
+
+CDLoop:
+ wrmsr
+ inc %ecx
+ cmp $AMD_AP_MTRR_FIX4k_D8000, %ecx
+ jbe CDLoop
+
+ /* Set FFFFFh-E0000h as Uncacheable Memory */
+ movl $0x18181818, %eax
+ movl %eax, %edx
+
+ mov $AMD_AP_MTRR_FIX4k_E0000, %ecx
+
+EFLoop:
+ wrmsr
+ inc %ecx
+ cmp $AMD_AP_MTRR_FIX4k_F8000, %ecx
+ jbe EFLoop
+
+ /* If IBV provided settings for Fixed-Sized MTRRs,
+ * overwrite the default settings. */
+ cmp $0, %esi /*.if ((esi != 0) && (esi != 0FFFFFFFFh)) */
+ jz 4f
+ cmp $0xFFFFFFFF, %esi
+ jz 4f
+ 5:
+ mov (%esi), %ecx /* (AP_MTRR_SETTINGS ptr [esi]).MsrAddr */
+ /* While we are not at the end of the list */
+ cmp $CPU_LIST_TERMINAL, %ecx /* .while (ecx != CPU_LIST_TERMINAL)*/
+ je 4f
+ /* TODO - coreboot isn't checking for valid data.
+ * Ensure that the MSR address is valid for Fixed-Sized MTRRs */
+ /*.if ( ((ecx >= AMD_AP_MTRR_FIX4k_C0000) && (ecx <= AMD_AP_MTRR_FIX4k_F8000)) || \
+ (ecx == AMD_AP_MTRR_FIX64k_00000) || (ecx == AMD_AP_MTRR_FIX16k_80000 ) || \
+ (ecx == AMD_AP_MTRR_FIX16k_A0000))
+ */
+ mov 4(%esi), %eax /* MsrData */
+ mov 8(%esi), %edx /* MsrData */
+ wrmsr
+ /* .endif */
+ add $12, %esi /* sizeof (AP_MTRR_SETTINGS) */
+ jmp 5b /* .endw */
+ 4: /* .endif */
+
+ /* restore variable MTTR6 and MTTR7 to default states */
+ movl $AMD_MTRR_VARIABLE_BASE6, %ecx /* clear MTRRPhysBase6 MTRRPhysMask6 */
+ xor %eax, %eax /* and MTRRPhysBase7 MTRRPhysMask7 */
+ xor %edx, %edx
+ cmp $10, %ecx /* .while (cl < 010h) */
+ jge 6f
+ wrmsr
+ inc %ecx
+ 6: /* .endw */
+
+ /* Enable fixed-range and variable-range MTRRs */
+ mov $AMD_MTRR_DEFTYPE, %ecx
+ rdmsr
+ bts $MTRR_DEF_TYPE_EN, %eax /* MtrrDefTypeEn */
+ bts $MTRR_DEF_TYPE_FIX_EN, %eax /* MtrrDefTypeFixEn */
+ wrmsr
+
+ /* Enable Top-of-Memory setting */
+ /* Enable use of RdMem/WrMem bits attributes */
+ mov $MTRR_SYS_CFG, %ecx
+ rdmsr
+ bts $MTRR_VAR_DRAM_EN, %eax /* Enable */
+ btr $MTRR_FIX_DRAM_MOD_EN, %eax /* Disable */
+ bts $MTRR_FIX_DRAM_EN, %eax /* Enable */
+ wrmsr
+
+ bts $FLAG_IS_PRIMARY, %esi
+ jmp 3f /* .else ; end if primary core */
+ 2:
+ xor %esi, %esi
+ 3: /* .endif*/
+
+ /* Make sure not to touch any Shared MSR from this point on */
+
+ AMD_DISABLE_STACK_FAMILY_HOOK
+
+ xor %eax, %eax
+
+7:
+ cli
+ hlt
+ jmp 7b /* ExecuteHltInstruction */
+
+ .size ExecuteFinalHltInstruction, .-ExecuteFinalHltInstruction
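
For readers comparing the two versions: the fixed-range MTRR defaults programmed by the assembly above are the same values the removed PrimaryCoreFunctions() wrote. A compact C restatement (MSR numbers and values taken from the removed code; __writemsr again stands in for the compiler intrinsic):

    #include <stdint.h>

    extern void __writemsr(uint32_t msr, uint64_t value);  /* intrinsic stand-in */

    static void program_fixed_mtrr_defaults(void)
    {
        uint32_t msr;

        /* 00000h-9FFFFh: write-back DRAM (byte pattern 0x1E) */
        __writemsr(0x250, 0x1E1E1E1E1E1E1E1EULL);   /* MTRRfix64K_00000 */
        __writemsr(0x258, 0x1E1E1E1E1E1E1E1EULL);   /* MTRRfix16K_80000 */

        /* A0000h-DFFFFh: uncacheable memory-mapped I/O */
        __writemsr(0x259, 0);                        /* MTRRfix16K_A0000 */
        for (msr = 0x268; msr <= 0x26B; msr++)       /* MTRRfix4K_C0000..D8000 */
            __writemsr(msr, 0);

        /* E0000h-FFFFFh: uncacheable memory (byte pattern 0x18) */
        for (msr = 0x26C; msr <= 0x26F; msr++)       /* MTRRfix4K_E0000..F8000 */
            __writemsr(msr, 0x1818181818181818ULL);
    }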
diff --git a/src/vendorcode/amd/agesa/f14/Proc/Common/AmdS3Save.c b/src/vendorcode/amd/agesa/f14/Proc/Common/AmdS3Save.c
index f994c87f66..448d0f9313 100644
--- a/src/vendorcode/amd/agesa/f14/Proc/Common/AmdS3Save.c
+++ b/src/vendorcode/amd/agesa/f14/Proc/Common/AmdS3Save.c
@@ -245,7 +245,7 @@ AmdS3Save (
HeapStatus = AmdS3SaveParams->StdHeader.HeapStatus;
AmdS3SaveParams->StdHeader.HeapStatus = HEAP_S3_RESUME;
- AmdS3SaveParams->StdHeader.HeapBasePtr = (UINT64) HeapPtr;
+ AmdS3SaveParams->StdHeader.HeapBasePtr = (VOID *) HeapPtr;
for (i = 0; i < S3LATE_TABLE_SIZE; i++) {
if (HeapPtrs[i] != NULL) {
diff --git a/src/vendorcode/amd/agesa/f14/Proc/Mem/Feat/S3/mfs3.c b/src/vendorcode/amd/agesa/f14/Proc/Mem/Feat/S3/mfs3.c
index df2071f5bc..eee980484d 100644
--- a/src/vendorcode/amd/agesa/f14/Proc/Mem/Feat/S3/mfs3.c
+++ b/src/vendorcode/amd/agesa/f14/Proc/Mem/Feat/S3/mfs3.c
@@ -259,30 +259,30 @@ MemFS3GetDeviceList (
(*DeviceBlockHdrPtr)->RelativeOrMaskOffset = (UINT16) AllocHeapParams.RequestedBufferSize;
// Copy device list on the stack to the heap.
- BufferOffset = sizeof (DEVICE_BLOCK_HEADER) + (UINT64) AllocHeapParams.BufferPtr;
+ BufferOffset = sizeof (DEVICE_BLOCK_HEADER) + (UINT64) (UINT32) AllocHeapParams.BufferPtr;
for (Die = 0; Die < DieCount; Die ++) {
for (i = PRESELFREF; i <= POSTSELFREF; i ++) {
// Copy PCI device descriptor to the heap if it exists.
if (DeviceDescript[Die].PCIDevice[i].RegisterListID != 0xFFFFFFFF) {
- LibAmdMemCopy ((VOID *) BufferOffset, &(DeviceDescript[Die].PCIDevice[i]), sizeof (PCI_DEVICE_DESCRIPTOR), StdHeader);
+ LibAmdMemCopy ((VOID *)(UINT32) BufferOffset, &(DeviceDescript[Die].PCIDevice[i]), sizeof (PCI_DEVICE_DESCRIPTOR), StdHeader);
(*DeviceBlockHdrPtr)->NumDevices ++;
BufferOffset += sizeof (PCI_DEVICE_DESCRIPTOR);
}
// Copy conditional PCI device descriptor to the heap if it exists.
if (DeviceDescript[Die].CPCIDevice[i].RegisterListID != 0xFFFFFFFF) {
- LibAmdMemCopy ((VOID *) BufferOffset, &(DeviceDescript[Die].CPCIDevice[i]), sizeof (CONDITIONAL_PCI_DEVICE_DESCRIPTOR), StdHeader);
+ LibAmdMemCopy ((VOID *)(UINT32) BufferOffset, &(DeviceDescript[Die].CPCIDevice[i]), sizeof (CONDITIONAL_PCI_DEVICE_DESCRIPTOR), StdHeader);
(*DeviceBlockHdrPtr)->NumDevices ++;
BufferOffset += sizeof (CONDITIONAL_PCI_DEVICE_DESCRIPTOR);
}
// Copy MSR device descriptor to the heap if it exists.
if (DeviceDescript[Die].MSRDevice[i].RegisterListID != 0xFFFFFFFF) {
- LibAmdMemCopy ((VOID *) BufferOffset, &(DeviceDescript[Die].MSRDevice[i]), sizeof (MSR_DEVICE_DESCRIPTOR), StdHeader);
+ LibAmdMemCopy ((VOID *)(UINT32) BufferOffset, &(DeviceDescript[Die].MSRDevice[i]), sizeof (MSR_DEVICE_DESCRIPTOR), StdHeader);
(*DeviceBlockHdrPtr)->NumDevices ++;
BufferOffset += sizeof (MSR_DEVICE_DESCRIPTOR);
}
// Copy conditional MSR device descriptor to the heap if it exists.
if (DeviceDescript[Die].CMSRDevice[i].RegisterListID != 0xFFFFFFFF) {
- LibAmdMemCopy ((VOID *) BufferOffset, &(DeviceDescript[Die].PCIDevice[i]), sizeof (CONDITIONAL_MSR_DEVICE_DESCRIPTOR), StdHeader);
+ LibAmdMemCopy ((VOID *)(UINT32) BufferOffset, &(DeviceDescript[Die].PCIDevice[i]), sizeof (CONDITIONAL_MSR_DEVICE_DESCRIPTOR), StdHeader);
(*DeviceBlockHdrPtr)->NumDevices ++;
BufferOffset += sizeof (CONDITIONAL_MSR_DEVICE_DESCRIPTOR);
}
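
The mfs3.c hunks above narrow the 64-bit BufferOffset through UINT32 before forming a pointer; on a 32-bit coreboot build a direct UINT64-to-pointer cast draws GCC's "cast to pointer from integer of different size" warning. A minimal illustration with standard types (names here are mine, not AGESA's):

    #include <stdint.h>
    #include <string.h>

    /* Illustration: with 32-bit pointers, a 64-bit offset must be narrowed
     * explicitly before it can be used as an address. */
    static void copy_to_offset(uint64_t buffer_offset, const void *src, size_t len)
    {
        void *dest = (void *)(uint32_t)buffer_offset;  /* explicit narrowing */
        memcpy(dest, src, len);
    }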