Updated to 2.6.32.27
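This appears to fold in the upstream fixes for the fs/gs reload oops with an invalid LDT and for host GDT.LIMIT corruption after VM exit: the home-grown kvm_read_fs()/kvm_read_gs()/kvm_load_fs()/kvm_load_gs() helpers are replaced by the kernel's safe segment accessors (savesegment(), loadsegment(), load_gs_index()), the save/restore of the KERNEL_GS_BASE MSR is reordered around the gs reload, and each CPU's GDT descriptor is saved at VMXON time so it can be reloaded with its proper limit when host state is restored.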
arch/x86/kvm/vmx.c
blob:6a28d5d95e8fe88f7a5ee7f90ac0b2ae950ec55a -> blob:d9c4fb6f0ff37545f95c29c520c3c9dde17c06c5
--- arch/x86/kvm/vmx.c
+++ arch/x86/kvm/vmx.c
@@ -130,6 +130,7 @@ static u64 construct_eptp(unsigned long
static DEFINE_PER_CPU(struct vmcs *, vmxarea);
static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
static DEFINE_PER_CPU(struct list_head, vcpus_on_cpu);
+static DEFINE_PER_CPU(struct desc_ptr, host_gdt);
static unsigned long *vmx_io_bitmap_a;
static unsigned long *vmx_io_bitmap_b;
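The new host_gdt per-CPU variable holds each physical CPU's GDT descriptor (base address and limit). struct desc_ptr is the x86 pseudo-descriptor operand used by the SGDT/LGDT instructions; a sketch of its definition, paraphrased from 2.6.32-era arch/x86/include/asm/desc_defs.h:

	/* Pseudo-descriptor operand for SGDT/LGDT (asm/desc_defs.h, paraphrased). */
	struct desc_ptr {
		unsigned short size;	/* GDT limit: size in bytes minus 1 */
		unsigned long address;	/* linear base address of the table */
	} __attribute__((packed));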
@@ -628,7 +629,7 @@ static void vmx_save_host_state(struct k
*/
vmx->host_state.ldt_sel = kvm_read_ldt();
vmx->host_state.gs_ldt_reload_needed = vmx->host_state.ldt_sel;
- vmx->host_state.fs_sel = kvm_read_fs();
+ savesegment(fs, vmx->host_state.fs_sel);
if (!(vmx->host_state.fs_sel & 7)) {
vmcs_write16(HOST_FS_SELECTOR, vmx->host_state.fs_sel);
vmx->host_state.fs_reload_needed = 0;
@@ -636,7 +637,7 @@ static void vmx_save_host_state(struct k
vmcs_write16(HOST_FS_SELECTOR, 0);
vmx->host_state.fs_reload_needed = 1;
}
- vmx->host_state.gs_sel = kvm_read_gs();
+ savesegment(gs, vmx->host_state.gs_sel);
if (!(vmx->host_state.gs_sel & 7))
vmcs_write16(HOST_GS_SELECTOR, vmx->host_state.gs_sel);
else {
@@ -653,10 +654,7 @@ static void vmx_save_host_state(struct k
#endif
#ifdef CONFIG_X86_64
- if (is_long_mode(&vmx->vcpu))
- save_msrs(vmx->host_msrs +
- vmx->msr_offset_kernel_gs_base, 1);
-
+ save_msrs(vmx->host_msrs + vmx->msr_offset_kernel_gs_base, 1);
#endif
load_msrs(vmx->guest_msrs, vmx->save_nmsrs);
load_transition_efer(vmx);
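savesegment() replaces the kvm_read_fs()/kvm_read_gs() wrappers with the accessor the rest of arch/x86 already uses; reading a segment register is a single mov and needs no fault handling. The low three bits of a selector are the RPL (bits 1:0) and the TI flag (bit 2), and the VMX host-state selector fields require both to be zero, so the existing "sel & 7" test asks whether the hardware can restore the selector by itself on VM exit; any other value forces HOST_FS_SELECTOR/HOST_GS_SELECTOR to 0 and a manual reload in __vmx_load_host_state(). Note also that the host's KERNEL_GS_BASE MSR slot is now saved unconditionally rather than only for long-mode guests. A sketch of the accessor, paraphrased from 2.6.32-era asm/system.h:

	/* Copy segment register 'seg' into 'value' (asm/system.h, paraphrased). */
	#define savesegment(seg, value) \
		asm("mov %%" #seg ",%0" : "=r" (value) : : "memory")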
@@ -664,32 +662,36 @@ static void vmx_save_host_state(struct k
static void __vmx_load_host_state(struct vcpu_vmx *vmx)
{
- unsigned long flags;
-
if (!vmx->host_state.loaded)
return;
++vmx->vcpu.stat.host_state_reload;
vmx->host_state.loaded = 0;
if (vmx->host_state.fs_reload_needed)
- kvm_load_fs(vmx->host_state.fs_sel);
+ loadsegment(fs, vmx->host_state.fs_sel);
+#ifdef CONFIG_X86_64
+ if (is_long_mode(&vmx->vcpu))
+ save_msrs(vmx->guest_msrs + vmx->msr_offset_kernel_gs_base, 1);
+#endif
if (vmx->host_state.gs_ldt_reload_needed) {
kvm_load_ldt(vmx->host_state.ldt_sel);
- /*
- * If we have to reload gs, we must take care to
- * preserve our gs base.
- */
- local_irq_save(flags);
- kvm_load_gs(vmx->host_state.gs_sel);
#ifdef CONFIG_X86_64
- wrmsrl(MSR_GS_BASE, vmcs_readl(HOST_GS_BASE));
+ load_gs_index(vmx->host_state.gs_sel);
+#else
+ loadsegment(gs, vmx->host_state.gs_sel);
#endif
- local_irq_restore(flags);
}
reload_tss();
+#ifdef CONFIG_X86_64
+ save_msrs(vmx->guest_msrs, vmx->msr_offset_kernel_gs_base);
+ save_msrs(vmx->guest_msrs + vmx->msr_offset_kernel_gs_base + 1,
+ vmx->save_nmsrs - vmx->msr_offset_kernel_gs_base - 1);
+#else
save_msrs(vmx->guest_msrs, vmx->save_nmsrs);
+#endif
load_msrs(vmx->host_msrs, vmx->save_nmsrs);
reload_host_efer(vmx);
+ load_gdt(&__get_cpu_var(host_gdt));
}
static void vmx_load_host_state(struct vcpu_vmx *vmx)
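loadsegment() and load_gs_index() are the kernel's fault-safe segment loaders: both carry an exception-table fixup that falls back to a null selector if the saved value no longer names a valid descriptor, for example because userspace rewrote its LDT while the guest was running, which is exactly where the removed blind kvm_load_gs() could oops. On x86-64, load_gs_index() also brackets the %gs write with a swapgs pair while interrupts are off, so the kernel's GS base survives the reload; that is why the removed local_irq_save()/wrmsrl(MSR_GS_BASE, ...) dance is no longer needed. It is also why the guest's KERNEL_GS_BASE MSR is now saved *before* the gs reload: at this point the MSR still holds the guest value, and the swapgs pair inside load_gs_index() would clobber it. The split save_msrs() calls afterwards simply skip the kernel_gs_base slot that was already saved. A behavioral C-level sketch of load_gs_index(), which is really assembly in entry_64.S; names and details here are illustrative only:

	/* Rough behavioral sketch of native_load_gs_index() (entry_64.S). */
	void load_gs_index_sketch(unsigned int selector)
	{
		unsigned long flags;

		local_irq_save(flags);	/* no interrupt between the swapgs pair */
		native_swapgs();	/* expose user GS base to the mov below */
		asm volatile("movl %0, %%gs" : : "r" (selector));
		/* (real code: a fault here is fixed up by loading a null selector) */
		native_swapgs();	/* kernel GS base back in place */
		local_irq_restore(flags);
	}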
@@ -1176,6 +1178,8 @@ static void hardware_enable(void *garbag
asm volatile (ASM_VMX_VMXON_RAX
: : "a"(&phys_addr), "m"(phys_addr)
: "memory", "cc");
+
+ store_gdt(&__get_cpu_var(host_gdt));
}
static void vmclear_local_vcpus(void)
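hardware_enable() now snapshots the CPU's GDT descriptor into host_gdt when VMX is turned on, and __vmx_load_host_state() (above) reloads it after guest exit. A VM exit restores the GDT base from the VMCS host-state area but forces the GDT limit to 0xffff, so without the explicit lgdt the host keeps running with an oversized limit. The accessors are thin wrappers around SGDT/LGDT; paraphrased from 2.6.32-era asm/desc.h:

	/* Paraphrased from arch/x86/include/asm/desc.h (2.6.32 era). */
	static inline void native_store_gdt(struct desc_ptr *dtr)
	{
		asm volatile("sgdt %0" : "=m" (*dtr));
	}

	static inline void native_load_gdt(const struct desc_ptr *dtr)
	{
		asm volatile("lgdt %0" : : "m" (*dtr));
	}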
@@ -2338,8 +2342,8 @@ static int vmx_vcpu_setup(struct vcpu_vm
vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */
- vmcs_write16(HOST_FS_SELECTOR, kvm_read_fs()); /* 22.2.4 */
- vmcs_write16(HOST_GS_SELECTOR, kvm_read_gs()); /* 22.2.4 */
+ vmcs_write16(HOST_FS_SELECTOR, 0); /* 22.2.4 */
+ vmcs_write16(HOST_GS_SELECTOR, 0); /* 22.2.4 */
vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
#ifdef CONFIG_X86_64
rdmsrl(MSR_FS_BASE, a);
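Since vmx_save_host_state() now writes the real fs/gs selectors into the VMCS before every guest entry, the values programmed once at vCPU setup no longer matter; initializing HOST_FS_SELECTOR and HOST_GS_SELECTOR to 0 also sidesteps storing a selector with TI or RPL bits set, which would not be a valid host-state field value, on whatever CPU happened to run the setup.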