/*
 * This file is part of the coreboot project.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <arch/cpu.h>
#include <program_loading.h>
#include <commonlib/region.h>
#include <console/console.h>
#include <cpu/x86/mtrr.h>

/* For now this is a good lowest common denominator for the total CPU cache.
   TODO: fetch the total amount of cache from CPUID leaf 2. */
#define MAX_CPU_CACHE (256 * KiB)

/* This makes the worst-case assumption that all cachelines covered by
   the MTRRs, no matter the caching type, are filled and do not overlap. */
static uint32_t max_cache_used(void)
{
	msr_t msr = rdmsr(MTRR_CAP_MSR);
	int i, total_mtrrs = msr.lo & MTRR_CAP_VCNT;
	uint32_t total_cache = 0;

	for (i = 0; i < total_mtrrs; i++) {
		msr_t mtrr = rdmsr(MTRR_PHYS_MASK(i));
		if (!(mtrr.lo & MTRR_PHYS_MASK_VALID))
			continue;
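		/* A variable MTRR's size follows from its mask: within the
		   32-bit address space, size = ~mask + 1 (two's complement).
		   E.g. a mask of 0xfff00000 yields 0x00100000 (1MiB). Only
		   the low 32 bits matter here, as XIP stages live below
		   4GiB. */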
		total_cache += ~(mtrr.lo & 0xfffff000) + 1;
	}
	return total_cache;
}

void platform_prog_run(struct prog *prog)
{
	const uint32_t base = region_device_offset(&prog->rdev);
	const uint32_t size = region_device_sz(&prog->rdev);
	const uint32_t end = base + size;
	const uint32_t cache_used = max_cache_used();
	/* This will accumulate MTRRs as XIP stages are run.
	   For now this includes the bootblock (which sets up its own
	   caching elsewhere), verstage and romstage. */
	int mtrr_num = get_free_var_mtrr();
	uint32_t mtrr_base;
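	/* 4KiB is the minimum size (and alignment) of a variable MTRR. */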
	uint32_t mtrr_size = 4 * KiB;
	struct cpuinfo_x86 cpu_info;

	get_fms(&cpu_info, cpuid_eax(1));
	/*
	 * An unidentified combination of speculative reads and branch
	 * predictions inside WRPROT-cacheable memory can cause invalidation
	 * of cachelines and loss of stack on models based on NetBurst
	 * microarchitecture. Therefore disable WRPROT region entirely for
	 * all family F models.
	 */
	if (cpu_info.x86 == 0xf) {
		printk(BIOS_NOTICE,
		       "PROG_RUN: CPU does not support caching ROM\n"
		       "The next stage will run slowly!\n");
		return;
	}

	if (mtrr_num == -1) {
		printk(BIOS_NOTICE,
		       "PROG_RUN: No MTRR available to cache ROM!\n"
		       "The next stage will run slowly!\n");
		return;
	}

	if (cache_used + mtrr_size > MAX_CPU_CACHE) {
		printk(BIOS_NOTICE,
		       "PROG_RUN: No more cache available for the next stage\n"
		       "The next stage will run slowly!\n");
		return;
	}

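	/* Variable MTRRs must be power-of-two sized and naturally aligned,
	   so double mtrr_size until the aligned-down window covers the end
	   of the stage, or until doubling would exceed the assumed total
	   cache size. */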
	while (1) {
		if (ALIGN_DOWN(base, mtrr_size) + mtrr_size >= end)
			break;
		if (cache_used + mtrr_size * 2 > MAX_CPU_CACHE)
			break;
		mtrr_size *= 2;
	}

	mtrr_base = ALIGN_DOWN(base, mtrr_size);
	if (mtrr_base + mtrr_size < end) {
		printk(BIOS_NOTICE, "PROG_RUN: Limiting XIP cache to %uKiB!\n",
		       mtrr_size / KiB);
		/* Check if we can cover a bigger range by aligning up. */
		const uint32_t alt_base = ALIGN_UP(base, mtrr_size);
		const uint32_t lower_coverage = mtrr_base + mtrr_size - base;
		const uint32_t upper_coverage = MIN(alt_base + mtrr_size, end) - alt_base;
		if (upper_coverage > lower_coverage)
			mtrr_base = alt_base;
	}

	printk(BIOS_DEBUG,
	       "PROG_RUN: Setting MTRR to cache XIP stage. base: 0x%08x, size: 0x%08x\n",
	       mtrr_base, mtrr_size);

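	/* MTRR_TYPE_WRPROT caches reads while writes are propagated to the
	   bus, which suits execute-in-place code in read-only flash. */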
	set_var_mtrr(mtrr_num, mtrr_base, mtrr_size, MTRR_TYPE_WRPROT);
}