#ifndef CPU_X86_MTRR_H
#define CPU_X86_MTRR_H
/* These are the region types */
#define MTRR_TYPE_UNCACHEABLE 0
#define MTRR_TYPE_WRCOMB 1
/*#define MTRR_TYPE_ 2*/
/*#define MTRR_TYPE_ 3*/
#define MTRR_TYPE_WRTHROUGH 4
#define MTRR_TYPE_WRPROT 5
#define MTRR_TYPE_WRBACK 6
#define MTRR_NUM_TYPES 7
#define MTRRcap_MSR 0x0fe
#define MTRRdefType_MSR 0x2ff
#define MTRRdefTypeEn (1 << 11)
#define MTRRdefTypeFixEn (1 << 10)
#define SMRRphysBase_MSR 0x1f2
#define SMRRphysMask_MSR 0x1f3
#define MTRRphysBase_MSR(reg) (0x200 + 2 * (reg))
#define MTRRphysMask_MSR(reg) (0x200 + 2 * (reg) + 1)
#define MTRRphysMaskValid (1 << 11)
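/*
 * Each variable MTRR is a base/mask MSR pair. For example (illustrative),
 * variable MTRR 2 lives at MSRs 0x204/0x205:
 *
 *   MTRRphysBase_MSR(2)  ->  0x200 + 2 * 2      ==  0x204
 *   MTRRphysMask_MSR(2)  ->  0x200 + 2 * 2 + 1  ==  0x205
 *
 * A variable range takes effect only when MTRRphysMaskValid is set in the
 * mask MSR.
 */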
#define NUM_FIXED_RANGES 88
#define RANGES_PER_FIXED_MTRR 8
#define MTRRfix64K_00000_MSR 0x250
#define MTRRfix16K_80000_MSR 0x258
#define MTRRfix16K_A0000_MSR 0x259
#define MTRRfix4K_C0000_MSR 0x268
#define MTRRfix4K_C8000_MSR 0x269
#define MTRRfix4K_D0000_MSR 0x26a
#define MTRRfix4K_D8000_MSR 0x26b
#define MTRRfix4K_E0000_MSR 0x26c
#define MTRRfix4K_E8000_MSR 0x26d
#define MTRRfix4K_F0000_MSR 0x26e
#define MTRRfix4K_F8000_MSR 0x26f
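/*
 * The eleven fixed-range MSRs above each describe RANGES_PER_FIXED_MTRR (8)
 * sub-ranges, giving NUM_FIXED_RANGES == 11 * 8 == 88: one 64KiB-granular
 * MSR for 0x00000-0x7ffff, two 16KiB-granular MSRs for 0x80000-0xbffff and
 * eight 4KiB-granular MSRs for 0xc0000-0xfffff.
 */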
#if !defined(__ASSEMBLER__) && !defined(__PRE_RAM__)
/*
 * The MTRR code has some side effects that the callers should be aware of.
 * 1. The call sequence matters. x86_setup_mtrrs() calls
 *    x86_setup_fixed_mtrrs_no_enable() then enable_fixed_mtrr() (the
 *    equivalent of x86_setup_fixed_mtrrs()) then x86_setup_var_mtrrs(). If
 *    the callers want to call the components of x86_setup_mtrrs() separately
 *    because of other requirements, the ordering should still be preserved.
 * 2. enable_fixed_mtrr() will enable both variable and fixed MTRRs because
 *    of the nature of the global MTRR enable flag. Therefore, all direct
 *    or indirect callers of enable_fixed_mtrr() should ensure that the
 *    variable MTRR MSRs do not contain bad ranges.
 * 3. If CONFIG_CACHE_ROM is selected an MTRR is allocated for enabling
 *    the caching of the ROM. However, it is set to uncacheable (UC). It
 *    is the responsibility of the caller to enable it by calling
 *    x86_mtrr_enable_rom_caching().
 */
void x86_setup_mtrrs(void);
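/*
 * Illustrative sketch only: if the pieces are called separately, the order
 * must match what x86_setup_mtrrs() does internally, e.g.
 *
 *   x86_setup_fixed_mtrrs_no_enable();
 *   enable_fixed_mtrr();
 *   x86_setup_var_mtrrs(cpuid_eax(0x80000008) & 0xff, 2);
 *
 * where querying CPUID leaf 0x80000008 for the physical address width is an
 * assumption about how address_bits is obtained; it is not mandated here.
 */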
/*
 * x86_setup_var_mtrrs() parameters:
 * address_bits - number of physical address bits supported by the CPU.
 * above4gb     - 2 means dynamically detect the number of variable MTRRs
 *                available; any non-zero value means handle memory ranges
 *                above 4GiB; 0 means ignore memory ranges above 4GiB.
 */
void x86_setup_var_mtrrs(unsigned int address_bits, unsigned int above4gb);
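/*
 * Example (illustrative): on a CPU reporting 36 physical address bits,
 *
 *   x86_setup_var_mtrrs(36, 2);
 *
 * detects the number of available variable MTRRs (an assumption: the
 * implementation reads this from MTRRcap_MSR) and also covers memory above
 * 4GiB, whereas
 *
 *   x86_setup_var_mtrrs(36, 0);
 *
 * would ignore memory ranges above 4GiB.
 */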
void enable_fixed_mtrr(void);
void x86_setup_fixed_mtrrs(void);
/* Set up fixed MTRRs but do not enable them. */
void x86_setup_fixed_mtrrs_no_enable(void);
int x86_mtrr_check(void);
/* ROM caching can be used after variable MTRRs are set up. Beware that
 * enabling CONFIG_CACHE_ROM will eat through quite a few MTRRs based on
 * one's IO hole size and WRCOMB resources. Be sure to check the console
 * log when enabling CONFIG_CACHE_ROM or adding WRCOMB resources. Beware
 * that on CPUs with core-scoped MTRR registers, such as hyperthreaded
 * CPUs, ROM caching will be disabled if all threads run the MTRR code.
 * Therefore, one needs to call x86_mtrr_enable_rom_caching() after all
 * threads of the same core have run the MTRR code; see the usage sketch
 * after these declarations. */
#if CONFIG_CACHE_ROM
void x86_mtrr_enable_rom_caching(void);
void x86_mtrr_disable_rom_caching(void);
/* Return the variable range MTRR index of the ROM cache. */
long x86_mtrr_rom_cache_var_index(void);
#else
static inline void x86_mtrr_enable_rom_caching(void) {}
static inline void x86_mtrr_disable_rom_caching(void) {}
static inline long x86_mtrr_rom_cache_var_index(void) { return -1; }
#endif /* CONFIG_CACHE_ROM */
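/*
 * Illustrative usage (assuming CONFIG_CACHE_ROM is selected): once every
 * thread of a core has run the MTRR code, one thread per core can turn the
 * pre-allocated UC ROM range cacheable:
 *
 *   x86_mtrr_enable_rom_caching();
 *
 * x86_mtrr_disable_rom_caching() undoes this; calling it before handing
 * control to a payload or OS is an assumed usage pattern, not something
 * mandated by this header.
 */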
#endif
#if !defined(__ASSEMBLER__) && defined(__PRE_RAM__) && !defined(__ROMCC__)
void set_var_mtrr(unsigned reg, unsigned base, unsigned size, unsigned type);
#endif
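/*
 * Illustrative pre-RAM example (the base and size are assumptions for a
 * hypothetical board): mark 64KiB at CONFIG_DCACHE_RAM_BASE as write-back
 * using variable MTRR 0:
 *
 *   set_var_mtrr(0, CONFIG_DCACHE_RAM_BASE, 64 * 1024, MTRR_TYPE_WRBACK);
 */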
/* Align up to the next power of 2, in a form suitable for ROMCC and the
 * assembler too. A result range of 256KiB to 128MiB is good enough here.
 */
#define _POW2_MASK(x) ((x>>1)|(x>>2)|(x>>3)|(x>>4)|(x>>5)| \
(x>>6)|(x>>7)|(x>>8)|((1<<18)-1))
#define _ALIGN_UP_POW2(x) ((x + _POW2_MASK(x)) & ~_POW2_MASK(x))
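/*
 * Worked example: a 5MiB value rounds up to 8MiB.
 *
 *   _POW2_MASK(0x500000)     == 0x3fffff
 *   _ALIGN_UP_POW2(0x500000) == (0x500000 + 0x3fffff) & ~0x3fffff == 0x800000
 */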
#if !defined(CONFIG_RAMTOP)
# error "CONFIG_RAMTOP not defined"
#endif
#if ((CONFIG_XIP_ROM_SIZE & (CONFIG_XIP_ROM_SIZE -1)) != 0)
# error "CONFIG_XIP_ROM_SIZE is not a power of 2"
#endif
/* Select CACHE_ROM_SIZE to use with MTRR setup. For most cases this
* resolves to a suitable CONFIG_ROM_SIZE but some odd cases need to
* use CONFIG_CACHE_ROM_SIZE_OVERRIDE in the mainboard Kconfig.
*/
#if (CONFIG_CACHE_ROM_SIZE_OVERRIDE != 0)
# define CACHE_ROM_SIZE CONFIG_CACHE_ROM_SIZE_OVERRIDE
#else
# if ((CONFIG_ROM_SIZE & (CONFIG_ROM_SIZE-1)) == 0)
# define CACHE_ROM_SIZE CONFIG_ROM_SIZE
# else
# define CACHE_ROM_SIZE _ALIGN_UP_POW2(CONFIG_ROM_SIZE)
# if (CACHE_ROM_SIZE < CONFIG_ROM_SIZE) || (CACHE_ROM_SIZE >= (2 * CONFIG_ROM_SIZE))
# error "CACHE_ROM_SIZE is not optimal."
# endif
# endif
#endif
#if ((CACHE_ROM_SIZE & (CACHE_ROM_SIZE-1)) != 0)
# error "CACHE_ROM_SIZE is not a power of 2."
#endif
#define CACHE_ROM_BASE (((1<<20) - (CACHE_ROM_SIZE>>12))<<12)
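/*
 * Worked example (assuming CACHE_ROM_SIZE resolves to 8MiB, i.e. 0x800000):
 * the cached ROM window ends at 4GiB, so
 *
 *   CACHE_ROM_BASE == ((1 << 20) - (0x800000 >> 12)) << 12 == 0xff800000
 */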
#if (CONFIG_RAMTOP & (CONFIG_RAMTOP - 1)) != 0
# error "CONFIG_RAMTOP must be a power of 2"
#endif
#endif /* CPU_X86_MTRR_H */