summaryrefslogtreecommitdiff
path: root/src/include/cpu/x86/msr.h
blob: c761bc04b622c17a2567c772c5a620f08910af9d (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
#ifndef CPU_X86_MSR_H
#define CPU_X86_MSR_H

/* Intel SDM: Table 2-1
 * IA-32 architectural MSR: Extended Feature Enable Register
 *
 * AMD64 Programmers Manual vol2 Revision 3.30 and/or the device's BKDG
 */

#define IA32_EFER	0xC0000080
#define  EFER_NXE	(1 << 11)	/* No-Execute Enable */
#define  EFER_LMA	(1 << 10)	/* Long Mode Active (read-only status bit) */
#define  EFER_LME	(1 << 8)	/* Long Mode Enable */
#define  EFER_SCE	(1 << 0)	/* SYSCALL/SYSRET Enable */

/* Assorted architectural MSR indices and their flag bits.
 * (IA32_PAT itself is defined further below, at 0x277.) */
#define TSC_MSR				0x10
#define IA32_PLATFORM_ID		0x17
#define IA32_APIC_BASE_MSR_INDEX	0x1B
#define IA32_FEATURE_CONTROL		0x3a
#define  FEATURE_CONTROL_LOCK_BIT	(1 << 0)
#define  FEATURE_ENABLE_VMX		(1 << 2)
#define  SMRR_ENABLE			(1 << 3)
#define  CPUID_VMX			(1 << 5)
#define  CPUID_SMX			(1 << 6)
#define  CPUID_DCA			(1 << 18)
#define  CPUID_AES			(1 << 25)
#define  SGX_GLOBAL_ENABLE		(1 << 18) /* NOTE(review): same bit value as CPUID_DCA above — confirm both are intended for this register */
#define  PLATFORM_INFO_SET_TDP		(1 << 29)
#define IA32_BIOS_UPDT_TRIG		0x79
#define IA32_BIOS_SIGN_ID		0x8b
#define IA32_MPERF			0xe7
#define IA32_APERF			0xe8
/* STM (SMI Transfer Monitor) */
#define IA32_SMM_MONITOR_CTL_MSR	0x9B
#define SMBASE_RO_MSR			0x98
#define  IA32_SMM_MONITOR_VALID		(1 << 0)
#define IA32_MCG_CAP			0x179
#define  MCG_CTL_P			(1 << 3)
#define  MCA_BANKS_MASK			0xff
#define IA32_PERF_STATUS		0x198
#define IA32_PERF_CTL			0x199
#define IA32_THERM_INTERRUPT		0x19b
#define IA32_MISC_ENABLE		0x1a0
#define  FAST_STRINGS_ENABLE_BIT	(1 << 0)
#define  SPEED_STEP_ENABLE_BIT		(1 << 16)
#define IA32_ENERGY_PERF_BIAS		0x1b0
#define  ENERGY_POLICY_PERFORMANCE	0
#define  ENERGY_POLICY_NORMAL		6
#define  ENERGY_POLICY_POWERSAVE	15
#define IA32_PACKAGE_THERM_INTERRUPT	0x1b2
/* Fix: IA32_PLATFORM_DCA_CAP was defined twice (identically); keep one copy. */
#define IA32_PLATFORM_DCA_CAP		0x1f8
#define SMRR_PHYSBASE_MSR		0x1F2
#define SMRR_PHYSMASK_MSR		0x1F3
#define IA32_PAT			0x277
#define IA32_MC0_CTL			0x400
#define IA32_MC0_STATUS			0x401
/* Flag bits held in the high dword (bits 63:32) of MC[i]_STATUS */
#define  MCA_STATUS_HI_VAL		(1UL << (63 - 32))
#define  MCA_STATUS_HI_OVERFLOW		(1UL << (62 - 32))
#define  MCA_STATUS_HI_UC		(1UL << (61 - 32))
#define  MCA_STATUS_HI_EN		(1UL << (60 - 32))
#define  MCA_STATUS_HI_MISCV		(1UL << (59 - 32))
#define  MCA_STATUS_HI_ADDRV		(1UL << (58 - 32))
#define  MCA_STATUS_HI_PCC		(1UL << (57 - 32))
#define  MCA_STATUS_HI_COREID_VAL	(1UL << (56 - 32))
#define  MCA_STATUS_HI_CECC		(1UL << (46 - 32))
#define  MCA_STATUS_HI_UECC		(1UL << (45 - 32))
#define  MCA_STATUS_HI_DEFERRED		(1UL << (44 - 32))
#define  MCA_STATUS_HI_POISON		(1UL << (43 - 32))
#define  MCA_STATUS_HI_SUBLINK		(1UL << (41 - 32))
#define  MCA_STATUS_HI_ERRCOREID_MASK	(0xf << 0)
/* Error code fields in the low dword (bits 31:0) of MC[i]_STATUS */
#define  MCA_STATUS_LO_ERRCODE_EXT_SH	16
#define  MCA_STATUS_LO_ERRCODE_EXT_MASK	(0x3f << MCA_STATUS_LO_ERRCODE_EXT_SH)
#define  MCA_STATUS_LO_ERRCODE_MASK	(0xffff << 0)
#define IA32_VMX_BASIC_MSR              0x480
#define  VMX_BASIC_HI_DUAL_MONITOR      (1UL << (49 - 32))
#define IA32_VMX_MISC_MSR               0x485
#define MC0_ADDR			0x402
#define MC0_MISC			0x403
#define MC0_CTL_MASK			0xC0010044

/* HWP (Hardware P-states) MSRs */
#define IA32_PM_ENABLE			0x770
#define IA32_HWP_CAPABILITIES		0x771
#define IA32_HWP_REQUEST		0x774
#define IA32_HWP_STATUS			0x777
#define IA32_PQR_ASSOC			0xc8f
/* MSR bits 33:32 encode slot number 0-3 */
/* NOTE(review): the mask below covers the two low bits of a dword — presumably
 * meant to be applied to the high half (msr.hi) to select bits 33:32; confirm
 * at the call sites. */
#define   IA32_PQR_ASSOC_MASK	(1 << 0 | 1 << 1)
#define IA32_L3_MASK_1			0xc91
#define IA32_L3_MASK_2			0xc92

#ifndef __ASSEMBLER__
#include <types.h>

/* A 64-bit MSR value split into the two 32-bit halves used by the
 * rdmsr/wrmsr instructions: lo = EAX (bits 31:0), hi = EDX (bits 63:32). */
typedef struct msr_struct {
	unsigned int lo;
	unsigned int hi;
} msr_t;

/* An (MSR index, value) pair, e.g. for tables of MSRs to program at init. */
typedef struct msrinit_struct {
	unsigned int index;
	msr_t msr;
} msrinit_t;

#if CONFIG(SOC_SETS_MSRS)
msr_t soc_msr_read(unsigned int index);
void soc_msr_write(unsigned int index, msr_t msr);

/* Handle MSR references in the other source code */
/* With SOC_SETS_MSRS, rdmsr() is routed through the SoC-provided hook
 * soc_msr_read() instead of executing the instruction directly. */
static __always_inline msr_t rdmsr(unsigned int index)
{
	return soc_msr_read(index);
}

/* With SOC_SETS_MSRS, wrmsr() is routed through the SoC-provided hook
 * soc_msr_write() instead of executing the instruction directly. */
static __always_inline void wrmsr(unsigned int index, msr_t msr)
{
	soc_msr_write(index, msr);
}
#else /* CONFIG_SOC_SETS_MSRS */

/* The following functions require the __always_inline due to AMD
 * function STOP_CAR_AND_CPU that disables cache as
 * RAM, the cache as RAM stack can no longer be used. Called
 * functions must be inlined to avoid stack usage. Also, the
 * compiler must keep local variables register based and not
 * allocated them from the stack. With gcc 4.5.0, some functions
 * declared as inline are not being inlined. This patch forces
 * these functions to always be inlined by adding the qualifier
 * __always_inline to their declaration.
 */
/* Read the MSR selected by 'index' (loaded into ECX); the CPU returns the
 * 64-bit value in EDX:EAX, captured here as result.hi:result.lo. */
static __always_inline msr_t rdmsr(unsigned int index)
{
	msr_t result;
	__asm__ __volatile__ (
		"rdmsr"
		: "=a" (result.lo), "=d" (result.hi)
		: "c" (index)
		);
	return result;
}

/* Write the 64-bit value msr (EDX:EAX = msr.hi:msr.lo) to the MSR selected
 * by 'index' (loaded into ECX). */
static __always_inline void wrmsr(unsigned int index, msr_t msr)
{
	__asm__ __volatile__ (
		"wrmsr"
		: /* No outputs */
		: "c" (index), "a" (msr.lo), "d" (msr.hi)
		);
}

#endif /* CONFIG_SOC_SETS_MSRS */

/* Helpers for interpreting MC[i]_STATUS */

/* Each accessor below returns 0 or 1 for one MC[i]_STATUS flag; all of
 * these flags live in the high dword (msr.hi) of the register. */

/* VAL: the bank holds a valid error */
static inline int mca_valid(msr_t msr)
{
	return (msr.hi & MCA_STATUS_HI_VAL) != 0;
}

/* OVERFLOW: an error was lost because the bank was already full */
static inline int mca_over(msr_t msr)
{
	return (msr.hi & MCA_STATUS_HI_OVERFLOW) != 0;
}

/* UC: uncorrected error */
static inline int mca_uc(msr_t msr)
{
	return (msr.hi & MCA_STATUS_HI_UC) != 0;
}

/* EN: error reporting was enabled for this error */
static inline int mca_en(msr_t msr)
{
	return (msr.hi & MCA_STATUS_HI_EN) != 0;
}

/* MISCV: MC[i]_MISC contains additional information */
static inline int mca_miscv(msr_t msr)
{
	return (msr.hi & MCA_STATUS_HI_MISCV) != 0;
}

/* ADDRV: MC[i]_ADDR contains the error address */
static inline int mca_addrv(msr_t msr)
{
	return (msr.hi & MCA_STATUS_HI_ADDRV) != 0;
}

/* PCC: processor context may be corrupt */
static inline int mca_pcc(msr_t msr)
{
	return (msr.hi & MCA_STATUS_HI_PCC) != 0;
}

/* COREID_VAL: the core-ID field is valid */
static inline int mca_idv(msr_t msr)
{
	return (msr.hi & MCA_STATUS_HI_COREID_VAL) != 0;
}

/* CECC: corrected ECC error */
static inline int mca_cecc(msr_t msr)
{
	return (msr.hi & MCA_STATUS_HI_CECC) != 0;
}

/* UECC: uncorrected ECC error */
static inline int mca_uecc(msr_t msr)
{
	return (msr.hi & MCA_STATUS_HI_UECC) != 0;
}

/* DEFERRED: deferred error */
static inline int mca_defd(msr_t msr)
{
	return (msr.hi & MCA_STATUS_HI_DEFERRED) != 0;
}

/* POISON: poisoned data was consumed */
static inline int mca_poison(msr_t msr)
{
	return (msr.hi & MCA_STATUS_HI_POISON) != 0;
}

/* SUBLINK: error on a sublink */
static inline int mca_sublink(msr_t msr)
{
	return (msr.hi & MCA_STATUS_HI_SUBLINK) != 0;
}

/* Extract the 16-bit MCA error code from the low dword of MC[i]_STATUS. */
static inline uint16_t mca_err_code(msr_t reg)
{
	const uint32_t code = reg.lo & MCA_STATUS_LO_ERRCODE_MASK;
	return (uint16_t)code;
}

/* Extract the extended error code field (bits 21:16 of MC[i]_STATUS lo).
 *
 * Fix: the previous implementation returned the masked-but-unshifted value
 * as uint16_t; since the field sits entirely above bit 15, the truncation
 * made this function always return 0.  Shift the field down instead.
 */
static inline uint16_t mca_err_extcode(msr_t reg)
{
	return (reg.lo & MCA_STATUS_LO_ERRCODE_EXT_MASK) >> MCA_STATUS_LO_ERRCODE_EXT_SH;
}

/* Machine Check errors may be categorized by type, as determined by the
 * Error Code field of MC[i]_STATUS.  The definitions below can typically
 * be found by searching the BKDG for a table called "Error Code Types".
 */
/* TLB Errors 0000 0000 0001 TTLL */
/* The *_DETECT values mask everything above a type's variable fields and are
 * used by mca_err_type() below to classify an error code. */
#define MCA_ERRCODE_TLB_DETECT		0xfff0
#define MCA_ERRCODE_TLB_TT_SH		2 /* Transaction Type */
#define MCA_ERRCODE_TLB_TT_MASK		(0x3 << MCA_ERRCODE_TLB_TT_SH)
#define MCA_ERRCODE_TLB_LL_SH		0 /* Cache Level */
#define MCA_ERRCODE_TLB_LL_MASK		(0x3 << MCA_ERRCODE_TLB_LL_SH)

/* Memory Errors 0000 0001 RRRR TTLL */
#define MCA_ERRCODE_MEM_DETECT		0xff00
#define MCA_ERRCODE_MEM_RRRR_SH		4 /* Memory Transaction Type */
/* Fix: this mask previously shifted by MCA_ERRCODE_MEM_RRRR_MASK (a
 * self-reference that cannot compile when expanded) instead of ..._RRRR_SH. */
#define MCA_ERRCODE_MEM_RRRR_MASK	(0xf << MCA_ERRCODE_MEM_RRRR_SH)
#define MCA_ERRCODE_MEM_TT_SH		2 /* Transaction Type */
#define MCA_ERRCODE_MEM_TT_MASK		(0x3 << MCA_ERRCODE_MEM_TT_SH)
#define MCA_ERRCODE_MEM_LL_SH		0 /* Cache Level */
#define MCA_ERRCODE_MEM_LL_MASK		(0x3 << MCA_ERRCODE_MEM_LL_SH)

/* Bus Errors 0000 1PPT RRRR IILL */
#define MCA_ERRCODE_BUS_DETECT		0xf800
#define MCA_ERRCODE_BUS_PP_SH		9 /* Participation Processor */
#define MCA_ERRCODE_BUS_PP_MASK		(0x3 << MCA_ERRCODE_BUS_PP_SH)
#define MCA_ERRCODE_BUS_T_SH		8 /* Timeout */
#define MCA_ERRCODE_BUS_T_MASK		(0x1 << MCA_ERRCODE_BUS_T_SH)
#define MCA_ERRCODE_BUS_RRRR_SH		4 /* Memory Transaction Type */
#define MCA_ERRCODE_BUS_RRRR_MASK	(0xf << MCA_ERRCODE_BUS_RRRR_SH)
#define MCA_ERRCODE_BUS_II_SH		2 /* Memory or IO */
#define MCA_ERRCODE_BUS_II_MASK		(0x3 << MCA_ERRCODE_BUS_II_SH)
#define MCA_ERRCODE_BUS_LL_SH		0 /* Cache Level */
#define MCA_ERRCODE_BUS_LL_MASK		(0x3 << MCA_ERRCODE_BUS_LL_SH)

/* Int. Unclassified Errors 0000 01UU 0000 0000 */
#define MCA_ERRCODE_INT_DETECT		0xfc00
#define MCA_ERRCODE_INT_UU_SH		8 /* Internal Error Type */
#define MCA_ERRCODE_INT_UU_MASK		(0x3 << MCA_ERRCODE_INT_UU_SH)

/* Machine-check bank numbers.  NOTE(review): bank layout is family-specific;
 * presumably matches the AMD BKDG referenced at the top of this file —
 * confirm against the target device's documentation. */
#define MCA_BANK_LS 0 /* Load-store, including DC */
#define MCA_BANK_IF 1 /* Instruction Fetch, including IC */
#define MCA_BANK_CU 2 /* Combined Unit, including L2 */
/* bank 3 reserved */
#define MCA_BANK_NB 4 /* Northbridge, including IO link */
#define MCA_BANK_EX 5 /* Execution Unit */
#define MCA_BANK_FP 6 /* Floating Point */

/* Error classes recognized by mca_err_type() below. */
enum mca_err_code_types {
	MCA_ERRTYPE_UNKNOWN,
	MCA_ERRTYPE_TLB,
	MCA_ERRTYPE_MEM,
	MCA_ERRTYPE_BUS,
	MCA_ERRTYPE_INT
};

/* Classify an MC[i]_STATUS error code by type.
 *
 * The *_DETECT masks are nested (0xf800 is a subset of 0xfc00, which is a
 * subset of 0xff00, which is a subset of 0xfff0), so the tests must run
 * from the narrowest mask (BUS) to the widest (TLB): a code matching an
 * earlier test would also match every later one.
 */
static inline enum mca_err_code_types mca_err_type(msr_t reg)
{
	uint16_t error = mca_err_code(reg);
	if (error & MCA_ERRCODE_BUS_DETECT) /* this order must be maintained */
		return MCA_ERRTYPE_BUS;
	if (error & MCA_ERRCODE_INT_DETECT)
		return MCA_ERRTYPE_INT;
	if (error & MCA_ERRCODE_MEM_DETECT)
		return MCA_ERRTYPE_MEM;
	if (error & MCA_ERRCODE_TLB_DETECT)
		return MCA_ERRTYPE_TLB;
	return MCA_ERRTYPE_UNKNOWN;
}


/* Helper for setting single MSR bits */
/* Set bit 'bit' (0-63) in the MSR at index 'reg', skipping the write
 * entirely when the bit is already set.
 *
 * Fix: use an unsigned constant for the shifted bit.  The previous
 * "1 << bit" form left-shifts a signed int into the sign bit when
 * bit == 31 (or bit == 63 for the high half), which is undefined
 * behavior in C.
 */
static inline void msr_set_bit(unsigned int reg, unsigned int bit)
{
	msr_t msr = rdmsr(reg);

	if (bit < 32) {
		const unsigned int mask = 1u << bit;
		if (msr.lo & mask)
			return;
		msr.lo |= mask;
	} else {
		const unsigned int mask = 1u << (bit - 32);
		if (msr.hi & mask)
			return;
		msr.hi |= mask;
	}

	wrmsr(reg, msr);
}


#endif /* __ASSEMBLER__ */
#endif /* CPU_X86_MSR_H */