author | Angel Pons <th3fanbus@gmail.com> | 2020-08-28 00:59:14 +0200
committer | Philipp Deppenwiese <zaolin.daisuki@gmail.com> | 2020-10-17 09:32:47 +0000
commit | 578a4d2b6a0ac96d70ea3b8490872a21dcf19df2 (patch)
tree | b10325e23598ba62bc225614a90cec3a544a8927 /src
parent | 038cef9dffdd0df89e50799826e521b1e26b3081 (diff)
download | coreboot-578a4d2b6a0ac96d70ea3b8490872a21dcf19df2.tar.xz
security/intel/txt: Improve MTRR setup for GETSEC[ENTERACCS]
The BIOS ACM will check that enabled variable MTRRs do not cover more
than the ACM's size, rounded up to 4 KiB. If that is not the case,
launching the ACM will result in a lovely TXT reset. How boring.
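In other words, the constraint is roughly the following (a hedged sketch with made-up names, not the ACM's actual code):

```c
#include <stdbool.h>
#include <stdint.h>

/* The ACM tolerates cached memory up to its size rounded up to a 4 KiB page. */
static bool acm_mtrr_coverage_ok(uint64_t bytes_covered_by_var_mtrrs,
				 uint64_t acm_size)
{
	uint64_t limit = (acm_size + 0xfff) & ~(uint64_t)0xfff;
	return bytes_covered_by_var_mtrrs <= limit;
}
```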
The new algorithm simply performs a reverse bit scan in a loop, and
allocates one MTRR for each set bit in the rounded-up size to cache.
Before allocating anything, it checks if there are enough variable
MTRRs; if not, it will refuse to cache anything. This will result in
another TXT reset, initiated by the processor, with error type 5:
Load memory type error in Authenticated Code Execution Area.
This can only happen if the ACM has specific caching requirements that
the current code does not know about, or something has been compromised.
Therefore, causing a TXT reset should be a reasonable enough approach.
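As an illustration only, here is a minimal C model of that allocation strategy (the authoritative implementation is the assembly in getsec_enteraccs.S shown below; struct var_mtrr and setup_acm_var_mtrrs are made-up names, not coreboot APIs):

```c
#include <stdint.h>

#define MTRR_TYPE_WRBACK	6
#define MTRR_PHYS_MASK_VALID	(1u << 11)

struct var_mtrr {
	uint32_t base;	/* value for MTRR_PHYS_BASE(i), low 32 bits only */
	uint32_t mask;	/* value for MTRR_PHYS_MASK(i), low 32 bits only */
};

/* Returns the number of MTRRs used, or 0 if there are not enough of them. */
static unsigned int setup_acm_var_mtrrs(uint32_t acmbase, uint32_t acmsize,
					unsigned int num_var_mtrrs,
					struct var_mtrr *out)
{
	/* Round the ACM size up to a 4 KiB page */
	uint32_t remaining = (acmsize + 0xfff) & ~0xfffu;
	uint32_t base = acmbase;
	unsigned int used = 0;

	/* One variable MTRR is needed per set bit of the rounded-up size */
	if ((unsigned int)__builtin_popcount(remaining) > num_var_mtrrs)
		return 0;	/* refuse to cache anything (TXT reset, error type 5) */

	while (remaining) {
		/* Reverse bit scan: carve off the largest remaining power of two */
		uint32_t size = 1u << (31 - __builtin_clz(remaining));

		out[used].base = base | MTRR_TYPE_WRBACK;
		out[used].mask = ~(size - 1) | MTRR_PHYS_MASK_VALID;

		base += size;
		remaining -= size;
		used++;
	}
	return used;
}
```

Here __builtin_clz plays the role of the BSR instruction in the assembly, and the popcount check corresponds to the "enough variable MTRRs" precondition.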
Also, disable all MTRRs before clearing the variable MTRRs and only
enable them again once they have been set up with the new values.
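A rough sketch of that ordering, assuming coreboot's msr_t/rdmsr()/wrmsr() helpers from <cpu/x86/msr.h> (the actual code is assembly, since it cannot rely on RAM for locals at this point):

```c
#include <cpu/x86/msr.h>
#include <cpu/x86/mtrr.h>

static void reprogram_var_mtrrs(void)
{
	msr_t msr = { .lo = 0, .hi = 0 };

	/* Disable all MTRRs (clears both the fixed and variable enable bits) */
	wrmsr(MTRR_DEF_TYPE_MSR, msr);

	/* ... clear and reprogram the variable MTRRs here ... */

	/* Re-enable MTRRs only once the new values are in place */
	msr = rdmsr(MTRR_DEF_TYPE_MSR);
	msr.lo |= MTRR_DEF_TYPE_EN;
	wrmsr(MTRR_DEF_TYPE_MSR, msr);
}
```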
Tested on Asrock B85M Pro4 with a BIOS ACM whose size is 101504 bytes.
Without this patch, launching the ACM would result in a TXT reset. This
no longer happens when this patch is applied.
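Working through those numbers as an illustration (not part of the original message): 101504 bytes is 0x18C80, which rounds up to 0x19000 (100 KiB). The set bits of 0x19000 are 0x10000, 0x8000 and 0x1000, so the new code programs three variable MTRRs of 64 KiB, 32 KiB and 4 KiB, covering the ACM exactly. The old power-of-two rounding would have used a single 128 KiB MTRR, caching roughly 28 KiB beyond the rounded-up ACM size and thus triggering the TXT reset described above.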
Change-Id: I8d411f6450928357544be20250262c2005d1e75d
Signed-off-by: Angel Pons <th3fanbus@gmail.com>
Reviewed-on: https://review.coreboot.org/c/coreboot/+/44880
Reviewed-by: Arthur Heymans <arthur@aheymans.xyz>
Reviewed-by: Christian Walter <christian.walter@9elements.com>
Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Diffstat (limited to 'src')
-rw-r--r-- | src/security/intel/txt/getsec_enteraccs.S | 103
1 file changed, 86 insertions(+), 17 deletions(-)
diff --git a/src/security/intel/txt/getsec_enteraccs.S b/src/security/intel/txt/getsec_enteraccs.S
index 563dc08a15..be038b0607 100644
--- a/src/security/intel/txt/getsec_enteraccs.S
+++ b/src/security/intel/txt/getsec_enteraccs.S
@@ -108,10 +108,10 @@ cond_push_var_mtrrs:
 	orl	$(CR0_CD | CR0_NW), %eax
 	movl	%eax, %cr0
 
-	/* Disable fixed MTRRs */
+	/* Disable all MTRRs */
 	movl	$(MTRR_DEF_TYPE_MSR), %ecx
-	rdmsr
-	andl	$(~MTRR_DEF_TYPE_FIX_EN), %eax
+	xorl	%eax, %eax
+	xorl	%edx, %edx
 	wrmsr
 
 	/*
@@ -165,24 +165,93 @@ cond_clear_var_mtrrs:
 	 * Chapter A.1.1
 	 * Intel TXT Software Development Guide (Document: 315168-015)
 	 */
-	movl	$(MTRR_PHYS_BASE(0)), %ecx
+
+	/*
+	 * Important note: The MTRRs must cache less than a page (4 KiB)
+	 * of unused memory after the BIOS ACM. Failure to do so will
+	 * result in a TXT reset with Class Code 5, Major Error Code 2.
+	 *
+	 * The caller must have checked that there are enough variable
+	 * MTRRs to cache the ACM size prior to invoking this routine.
+	 */
+
+	/* Determine size of AC module */
 	movl	12(%ebp), %eax		/* %eax = acmbase */
-	orl	$(6), %eax		/* MTRR_TYPE_WB */
-	movl	$0, %edx
+	movl	$1, %ebx
+	movl	16(%ebp), %ebx		/* %ebx = acmsize */
+
+	/* Round up to page size */
+	addl	$(0xfff), %ebx
+	andl	$(~0xfff), %ebx		/* Aligned to a page (4 KiB) */
+
+	/*
+	 * Use XMM to store local variables. This code will need to be
+	 * used in romstage, and CAR will have been torn down by then.
+	 */
+	movd	%eax, %xmm0		/* XMM0: Base address of next MTRR */
+	movd	%ebx, %xmm1		/* XMM1: Remaining size to cache */
+
+	/* Get the number of variable MTRRs */
+	movl	$(MTRR_CAP_MSR), %ecx
+	rdmsr
+	andl	$(0xff), %eax
+
+	/* Initialize ECX */
+	movl	$(MTRR_PHYS_BASE(0)), %ecx
+
+	jmp	cond_allocate_var_mtrrs
+
+body_allocate_var_mtrrs:
+
+	/* Program MTRR base */
+	xorl	%edx, %edx
+	movd	%xmm0, %eax
+	orl	$(MTRR_TYPE_WRBACK), %eax
 	wrmsr
+	incl	%ecx			/* Move index to MTRR_PHYS_MASK */
 
-	/* Round acmsize to next power of two. Required for MTRR programming. */
-	movl	$1, %ebx
-	movl	16(%ebp), %ecx		/* %ebx = acmsize */
-	dec	%ecx
-	bsr	%ecx, %ecx		/* find MSB */
-	inc	%ecx
-	shl	%cl, %ebx
-	movl	$(MTRR_PHYS_MASK(0)), %ecx
-	xorl	%eax, %eax
-	subl	%ebx, %eax		/* %eax = 4GIB - log2_ceil(ACM SIZE) */
-	orl	$((1 << 11)), %eax	/* MTRR_PHYS_MASK_VALID */
+	/* Temporarily transfer MSR index to EDX so that CL can be used */
+	movl	%ecx, %edx
+
+	/* Determine next size to cache */
+	bsr	%ebx, %ecx
+	movl	$(1), %ebx
+	shl	%cl, %ebx		/* Can only use CL here */
+
+	/* Restore ECX */
+	movl	%edx, %ecx
+
+	/* Update saved base address */
+	addl	%ebx, %eax
+	movd	%eax, %xmm0
+
+	/* Update saved remaining size */
+	movd	%xmm1, %eax
+	subl	%ebx, %eax
+	movd	%eax, %xmm1
+
+	/* Program MTRR mask */
 	movl	MTRR_HIGH_MASK, %edx
+	xorl	%eax, %eax
+	subl	%ebx, %eax		/* %eax = 4GIB - size to cache */
+	orl	$(MTRR_PHYS_MASK_VALID), %eax
+	wrmsr
+	incl	%ecx			/* Move index to next MTRR_PHYS_BASE */
+
+cond_allocate_var_mtrrs:
+
+	/* Check if we still need to cache something */
+	movd	%xmm1, %ebx
+	andl	%ebx, %ebx
+
+	jnz	body_allocate_var_mtrrs
+
+	/*
+	 * Now that the variable MTRRs have been set up, enable them.
+	 */
+	movl	$(MTRR_DEF_TYPE_MSR), %ecx
+	rdmsr
+	orl	$(MTRR_DEF_TYPE_EN), %eax
 	wrmsr
 
 	/* Enable cache - GPF# if not done */