/*
* Copyright (c) 2013 Andreas Sandberg
* All rights reserved
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met: redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer;
* redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution;
* neither the name of the copyright holders nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* Authors: Andreas Sandberg
*/
#ifndef __CPU_KVM_X86_CPU_HH__
#define __CPU_KVM_X86_CPU_HH__
#include "cpu/kvm/base.hh"
#include "cpu/kvm/vm.hh"
#include "params/X86KvmCPU.hh"
/**
* x86 implementation of a KVM-based hardware virtualized CPU.
*/
class X86KvmCPU : public BaseKvmCPU
{
public:
X86KvmCPU(X86KvmCPUParams *params);
virtual ~X86KvmCPU();
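/**
* Initialize the vCPU when the simulated system starts up.
*
* @note Hedged description: in addition to BaseKvmCPU::startup(),
* this is where x86-specific guest state such as CPUID is expected
* to be set up (see updateCPUID()); the corresponding .cc file is
* authoritative.
*/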
void startup();
/** @{ */
void dump();
void dumpFpuRegs() const;
void dumpIntRegs() const;
void dumpSpecRegs() const;
void dumpDebugRegs() const;
void dumpXCRs() const;
void dumpXSave() const;
void dumpVCpuEvents() const;
void dumpMSRs() const;
/** @} */
protected:
typedef std::vector<struct kvm_msr_entry> KvmMSRVector;
Tick kvmRun(Tick ticks);
/**
* Run the virtual CPU until draining completes.
*
* In addition to the base functionality provided by
* BaseKvmCPU::kvmRunDrain(), this method handles x86-specific
* cases where there are pending interrupt events in the virtual
* CPU. These are handled by requesting an interrupt window if
* interrupts are pending (causing the vCPU to execute until
* interrupts can be delivered again).
*
* @see BaseKvmCPU::kvmRunDrain()
* @see archIsDrained()
*
* @return Number of ticks executed
*/
Tick kvmRunDrain();
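/*
* Rough sketch of the drain behavior documented above. This is an
* assumption for illustration only, not the authoritative
* implementation; requestInterruptWindow() and tickBudget are
* hypothetical names:
*
*   while (!archIsDrained()) {
*       // Interrupts are still pending in the vCPU: ask KVM for an
*       // interrupt window so the guest runs until they can be
*       // delivered.
*       requestInterruptWindow();
*       ticks += kvmRunWrapper(tickBudget);
*   }
*/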
/** Wrapper that synchronizes state in kvm_run */
Tick kvmRunWrapper(Tick ticks);
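/** Get the host cycle counter (overrides BaseKvmCPU::getHostCycles()). */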
uint64_t getHostCycles() const;
/**
* Methods to access CPUID information using the extended
* API. Only available if Kvm::capExtendedCPUID() is true.
*
* @{
*/
void setCPUID(const struct kvm_cpuid2 &cpuid);
void setCPUID(const Kvm::CPUIDVector &cpuid);
/** @} */
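/*
* Example (sketch): populating the guest's CPUID table, assuming
* Kvm::CPUIDVector holds kvm_cpuid_entry2 entries. The function
* number and register values below are made up for illustration:
*
*   Kvm::CPUIDVector entries;
*   struct kvm_cpuid_entry2 e = {};
*   e.function = 0x1;
*   e.eax = 0x000306A9;  // hypothetical family/model/stepping
*   entries.push_back(e);
*   setCPUID(entries);
*/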
/**
* Methods to access MSRs in the guest.
*
* @{
*/
void setMSRs(const struct kvm_msrs &msrs);
void setMSRs(const KvmMSRVector &msrs);
void getMSRs(struct kvm_msrs &msrs) const;
void setMSR(uint32_t index, uint64_t value);
uint64_t getMSR(uint32_t index) const;
/** @} */
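/*
* Example (sketch): reading and updating a single MSR by index.
* 0x10 is the architectural IA32_TIME_STAMP_COUNTER index; the
* offset is made up for illustration:
*
*   uint64_t tsc = getMSR(0x10);
*   setMSR(0x10, tsc + 1000);
*/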
/**
* Get a list of MSRs supported by both gem5 and KVM.
*
* @note This method uses an internal cache and only generates the
* MSR list once.
*
* @return Reference to a list of MSR indices
*/
const Kvm::MSRIndexVector &getMsrIntersection() const;
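/*
* Caching sketch based on the note above (an assumption; the .cc
* file is authoritative). computeIntersection() is a hypothetical
* helper:
*
*   if (cachedMsrIntersection.empty())
*       cachedMsrIntersection = computeIntersection();
*   return cachedMsrIntersection;
*
* This is also why cachedMsrIntersection is declared mutable: the
* cache may be filled in from this const method.
*/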
/**
* Wrappers around KVM's state transfer methods.
*
* @{
*/
void getDebugRegisters(struct kvm_debugregs &regs) const;
void setDebugRegisters(const struct kvm_debugregs &regs);
void getXCRs(struct kvm_xcrs &regs) const;
void setXCRs(const struct kvm_xcrs &regs);
void getXSave(struct kvm_xsave &xsave) const;
void setXSave(const struct kvm_xsave &xsave);
void getVCpuEvents(struct kvm_vcpu_events &events) const;
void setVCpuEvents(const struct kvm_vcpu_events &events);
/** @} */
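/**
* Copy the full architectural state (integer registers, control and
* segment registers, FPU/SIMD state, and MSRs) from gem5's thread
* context into the virtual CPU, using the updateKvmState*() helpers
* below.
*/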
void updateKvmState();
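/**
* Copy the architectural state from the virtual CPU back into
* gem5's thread context, using the updateThreadContext*() helpers
* below.
*/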
void updateThreadContext();
/**
* Inject pending interrupts from gem5 into the virtual CPU.
*/
void deliverInterrupts();
/**
* Handle x86 legacy IO (in/out)
*/
Tick handleKvmExitIO();
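/**
* Handle a KVM exit caused by an interrupt window opening, i.e.,
* the guest has reached a point where pending interrupts can be
* delivered again (see kvmRunDrain()).
*/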
Tick handleKvmExitIRQWindowOpen();
/**
* Check if there are pending events in the vCPU that prevent it
* from being drained.
*
* There are cases after interrupt injection where the interrupt
* is still pending in the guest. This method detects such cases
* and requests additional draining.
*
* @return False if there are pending events in the guest, True
* otherwise.
*/
bool archIsDrained() const;
private:
/**
* Support routines to update the state of the KVM CPU from gem5's
* state representation.
*
* @{
*/
/** Update integer registers */
void updateKvmStateRegs();
/** Update control registers (CRx, segments, etc.) */
void updateKvmStateSRegs();
/**
* Update FPU and SIMD registers
*
* This method uses the appropriate (depending on availability and
* user configuration) kernel API by calling
* updateKvmStateFPULegacy() or updateKvmStateFPUXSave().
*
* @see updateKvmStateFPULegacy()
* @see updateKvmStateFPUXSave()
*/
void updateKvmStateFPU();
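/*
* Dispatch sketch based on the description above (an assumption;
* the .cc file is authoritative):
*
*   if (useXSave)
*       updateKvmStateFPUXSave();
*   else
*       updateKvmStateFPULegacy();
*/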
/**
* Update FPU and SIMD registers using the legacy API
*
* @note This method should normally only be called by
* updateKvmStateFPU() which automatically chooses between
* available APIs.
*/
void updateKvmStateFPULegacy();
/**
* Update FPU and SIMD registers using the XSave API
*
* @note This method should normally only be called by
* updateKvmStateFPU() which automatically chooses between
* available APIs.
*/
void updateKvmStateFPUXSave();
/** Update MSR registers */
void updateKvmStateMSRs();
/** @} */
/**
* Support routines to update the state of gem5's thread context from
* KVM's state representation.
*
* @{
*/
/** Update integer registers */
void updateThreadContextRegs();
/** Update control registers (CRx, segments, etc.) */
void updateThreadContextSRegs();
/** Update FPU and SIMD registers using the legacy API */
void updateThreadContextFPU();
/** Update FPU and SIMD registers using the XSave API */
void updateThreadContextXSave();
/** Update MSR registers */
void updateThreadContextMSRs();
/** @} */
/** Transfer gem5's CPUID values into the virtual CPU. */
void updateCPUID();
/**
* Handle a 32-bit IO access that should be mapped to a MiscReg.
*
* @note This method can only be called when handling IO after
* a KVM_EXIT_IO.
*
* @param miscreg Register to map the current IO access to.
*/
void handleIOMiscReg32(int miscreg);
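/*
* Example (sketch): mapping an IO port access to a MiscReg from
* handleKvmExitIO(). The port number and register name below are
* hypothetical:
*
*   case 0xABCD:                             // hypothetical port
*       handleIOMiscReg32(MISCREG_EXAMPLE);  // hypothetical register
*       break;
*/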
/** Reusable IO request */
Request io_req;
/** Cached intersection of supported MSRs */
mutable Kvm::MSRIndexVector cachedMsrIntersection;
/** @{ */
/** Kvm::capDebugRegs() available? */
bool haveDebugRegs;
/** Kvm::capXSave() available? */
bool haveXSave;
/**
* Should the XSave interface be used to sync the FPU and SIMD
* registers?
*/
bool useXSave;
/** Kvm::capXCRs() available? */
bool haveXCRs;
/** @} */
};
#endif