/* src/cpu/x86/mtrr/earlymtrr.c */
#ifndef EARLYMTRR_C
#define EARLYMTRR_C
#include <cpu/x86/cache.h>
#include <cpu/x86/mtrr.h>
#include <cpu/amd/mtrr.h>
#include <cpu/x86/msr.h>

#if 0
static void disable_var_mtrr(unsigned reg)
{
	/* The Valid bit is kept in the mask register, so we simply
	 * clear the relevant mask register to disable a
	 * range.
	 */
	msr_t zero;
	zero.lo = zero.hi = 0;
	wrmsr(MTRRphysMask_MSR(reg), zero);
}
#endif

static void set_var_mtrr(
	unsigned reg, unsigned base, unsigned size, unsigned type)
{
	/* Bits 32-35 of MTRRphysMask should be set to 1 */
	/* FIXME: this only supports ranges below 4 GiB */
	msr_t basem, maskm;
	basem.lo = base | type;
	basem.hi = 0;
	wrmsr(MTRRphysBase_MSR(reg), basem);
	/* Mask for the power-of-two size, with the Valid bit (bit 11) set */
	maskm.lo = ~(size - 1) | 0x800;
	maskm.hi = (1<<(CONFIG_CPU_ADDR_BITS-32))-1;
	wrmsr(MTRRphysMask_MSR(reg), maskm);
}
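
/* Worked example (illustrative, not part of the original file): cover
 * the first 1 MiB with write-back caching using variable MTRR 0.
 * With CONFIG_CPU_ADDR_BITS == 40 the call below writes
 *	MTRRphysBase0 = 0x0000000000000006  (base 0, type 6 == WB)
 *	MTRRphysMask0 = 0x000000FFFFF00800  (1 MiB mask, Valid bit 11 set)
 * The size must be a power of two and the base size-aligned for the
 * mask arithmetic to describe a single contiguous range.
 */
#if 0
static inline void example_cache_first_mib(void)
{
	set_var_mtrr(0, 0x00000000, 0x00100000, MTRR_TYPE_WRBACK);
}
#endif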

#if 0
static void set_var_mtrr_x(
	unsigned reg, uint32_t base_lo, uint32_t base_hi, uint32_t size_lo, uint32_t size_hi, unsigned type)
{
	/* Bits 32-35 of MTRRphysMask should be set to 1 */
	msr_t basem, maskm;
	basem.lo = (base_lo & 0xfffff000) | type;
	basem.hi = base_hi & ((1<<(CONFIG_CPU_ADDR_BITS-32))-1);
	wrmsr(MTRRphysBase_MSR(reg), basem);
	maskm.hi = (1<<(CONFIG_CPU_ADDR_BITS-32))-1;
	if(size_lo) {
		maskm.lo = ~(size_lo - 1) | 0x800;
	} else {
		/* Range is a multiple of 4 GiB: the size is carried in size_hi */
		maskm.lo = 0x800;
		maskm.hi &= ~(size_hi - 1);
	}
	wrmsr(MTRRphysMask_MSR(reg), maskm);
}
#endif
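
/* Illustrative example for the disabled helper above (not part of the
 * original file): a range at or above 4 GiB is expressed through the
 * _hi words.  With CONFIG_CPU_ADDR_BITS == 40 the call below covers
 * 4 GiB - 8 GiB, yielding MTRRphysBase1 = 0x0000000100000006 and
 * MTRRphysMask1 = 0x000000FF00000800, i.e. only address bits 32-39
 * participate in the match, selecting a 4 GiB-aligned 4 GiB window.
 */
#if 0
static inline void example_cache_above_4g(void)
{
	set_var_mtrr_x(1, 0x00000000, 0x1, 0x00000000, 0x1, MTRR_TYPE_WRBACK);
}
#endif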

static inline void cache_lbmem(int type)
{
	/* Enable caching for 0 - CONFIG_RAMTOP using variable MTRR 0 */
	disable_cache();
	set_var_mtrr(0, 0x00000000, CONFIG_RAMTOP, type);
	enable_cache();
}
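
/* Typical call (hedged; exact call sites vary by board): romstage code
 * generally invokes cache_lbmem(MTRR_TYPE_WRBACK) once RAM below
 * CONFIG_RAMTOP has been initialized.
 */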

#if !defined(CONFIG_CACHE_AS_RAM) || (CONFIG_CACHE_AS_RAM == 0)
/* The fixed and variable MTRRs power up with undefined values;
 * clear them to MTRR_TYPE_UNCACHEABLE for safety.
 */
static void do_early_mtrr_init(const unsigned long *mtrr_msrs)
{
	/* Precondition:
	 *   The cache is not enabled in cr0 nor in MTRRdefType_MSR
	 *   entry32.inc ensures the cache is not enabled in cr0
	 */
	msr_t msr;
	const unsigned long *msr_addr;

	/* Initialize all of the relevant MSRs to 0 */
	msr.lo = 0;
	msr.hi = 0;
	unsigned long msr_nr;
	for(msr_addr = mtrr_msrs; (msr_nr = *msr_addr); msr_addr++) {
		wrmsr(msr_nr, msr);
	}

#if defined(CONFIG_XIP_ROM_SIZE)
	/* Enable write-back caching of the flash ROM so we can
	 * execute in place (XIP) out of it.
	 */
	set_var_mtrr(1, REAL_XIP_ROM_BASE, CONFIG_XIP_ROM_SIZE, MTRR_TYPE_WRBACK);
#endif

	/* Set the default memory type to uncacheable and enable the
	 * variable-range MTRRs: MTRRdefType bits [7:0] hold the default
	 * type (0 == uncacheable), bit 11 (E) enables the MTRRs, and
	 * bit 10 (FE), which would enable the fixed ranges, is left clear.
	 */
	msr.hi = 0x00000000;
	msr.lo = 0x00000800;
	wrmsr(MTRRdefType_MSR, msr);
}

static inline void early_mtrr_init(void)
{
	static const unsigned long mtrr_msrs[] = {
		/* fixed MTRRs: MTRRfix64K_00000, MTRRfix16K_80000/A0000,
		 * MTRRfix4K_C0000 - MTRRfix4K_F8000 */
		0x250, 0x258, 0x259,
		0x268, 0x269, 0x26A,
		0x26B, 0x26C, 0x26D,
		0x26E, 0x26F,
		/* variable MTRRs: MTRRphysBase0/Mask0 - MTRRphysBase7/Mask7 */
		0x200, 0x201, 0x202, 0x203,
		0x204, 0x205, 0x206, 0x207,
		0x208, 0x209, 0x20A, 0x20B,
		0x20C, 0x20D, 0x20E, 0x20F,
		/* NULL end of table */
		0
	};
	disable_cache();
	do_early_mtrr_init(mtrr_msrs);
	enable_cache();
}
#endif

static inline int early_mtrr_init_detected(void)
{
	msr_t msr;
	/* See if MTRRs are enabled.
	 * A #RESET disables them while an #INIT
	 * preserves their state.  This works
	 * on both Intel and AMD CPUs, at least
	 * according to the documentation.
	 */
	msr = rdmsr(MTRRdefType_MSR);
	return msr.lo & 0x00000800; /* MTRRdefType.E (MTRR enable) */
}
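
/* Typical usage (hypothetical sketch, not from this file): boards
 * without cache-as-RAM run the early MTRR setup once, using the
 * detection helper above to avoid redoing work preserved across #INIT.
 */
#if 0
static inline void example_early_mtrr_setup(void)
{
	if (!early_mtrr_init_detected())
		early_mtrr_init();
}
#endif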

#endif /* EARLYMTRR_C */