Commit 7501a53

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm

Pull kvm fixes from Paolo Bonzini:
 "A small set of x86 fixes. The most serious is an SRCU lockdep fix. A
  bit late - needed some time to test the SRCU fix, which only came in
  on Friday"

* tag 'for-linus' of git://git.kernel.org/pub/scm/virt/kvm/kvm:
  KVM: vmx: defer load of APIC access page address during reset
  KVM: nVMX: Disable preemption while reading from shadow VMCS
  KVM: x86: Fix far-jump to non-canonical check
  KVM: emulator: fix execution close to the segment limit
  KVM: emulator: fix error code for __linearize

2 parents: 7e05b80 + a73896c

2 files changed (+45, -16 lines)

arch/x86/kvm/emulate.c (40 additions, 15 deletions)
@@ -574,12 +574,14 @@ static inline int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
 	case 4:
 		ctxt->_eip = (u32)dst;
 		break;
+#ifdef CONFIG_X86_64
 	case 8:
 		if ((cs_l && is_noncanonical_address(dst)) ||
-		    (!cs_l && (dst & ~(u32)-1)))
+		    (!cs_l && (dst >> 32) != 0))
 			return emulate_gp(ctxt, 0);
 		ctxt->_eip = dst;
 		break;
+#endif
 	default:
 		WARN(1, "unsupported eip assignment size\n");
 	}
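Note on this hunk ("KVM: x86: Fix far-jump to non-canonical check"): the old 32-bit test was a no-op, because `~(u32)-1` evaluates to 0 after the cast (`(u32)-1` is 0xffffffff, and its complement as a u32 is zero), so `dst & 0` could never be nonzero; the new `(dst >> 32) != 0` actually rejects targets above 4 GiB. For the 64-bit case, here is a stand-alone sketch of a canonical-address test, assuming 48 implemented virtual-address bits; it uses the same sign-extension trick the __linearize hunk below applies, and the function name is ours, not the kernel's:

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative only: an address is canonical when bits 63:48 are a
     * sign-extension of bit 47, so shifting the low 48 bits up and then
     * arithmetically back down must reproduce the original value. */
    static bool is_noncanonical_sketch(uint64_t va)
    {
            return (uint64_t)((int64_t)(va << 16) >> 16) != va;
    }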
@@ -641,7 +643,8 @@ static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
 
 static int __linearize(struct x86_emulate_ctxt *ctxt,
 		       struct segmented_address addr,
-		       unsigned size, bool write, bool fetch,
+		       unsigned *max_size, unsigned size,
+		       bool write, bool fetch,
 		       ulong *linear)
 {
 	struct desc_struct desc;
@@ -652,10 +655,15 @@ static int __linearize(struct x86_emulate_ctxt *ctxt,
 	unsigned cpl;
 
 	la = seg_base(ctxt, addr.seg) + addr.ea;
+	*max_size = 0;
 	switch (ctxt->mode) {
 	case X86EMUL_MODE_PROT64:
 		if (((signed long)la << 16) >> 16 != la)
 			return emulate_gp(ctxt, 0);
+
+		*max_size = min_t(u64, ~0u, (1ull << 48) - la);
+		if (size > *max_size)
+			goto bad;
 		break;
 	default:
 		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
@@ -673,20 +681,25 @@ static int __linearize(struct x86_emulate_ctxt *ctxt,
 		if ((ctxt->mode == X86EMUL_MODE_REAL) && !fetch &&
 		    (ctxt->d & NoBigReal)) {
 			/* la is between zero and 0xffff */
-			if (la > 0xffff || (u32)(la + size - 1) > 0xffff)
+			if (la > 0xffff)
 				goto bad;
+			*max_size = 0x10000 - la;
 		} else if ((desc.type & 8) || !(desc.type & 4)) {
 			/* expand-up segment */
-			if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
+			if (addr.ea > lim)
 				goto bad;
+			*max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
 		} else {
 			/* expand-down segment */
-			if (addr.ea <= lim || (u32)(addr.ea + size - 1) <= lim)
+			if (addr.ea <= lim)
 				goto bad;
 			lim = desc.d ? 0xffffffff : 0xffff;
-			if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
+			if (addr.ea > lim)
 				goto bad;
+			*max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
 		}
+		if (size > *max_size)
+			goto bad;
 		cpl = ctxt->ops->cpl(ctxt);
 		if (!(desc.type & 8)) {
 			/* data segment */
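The three branches above stop bounding `size` directly and instead publish `*max_size`, the number of bytes accessible from `addr.ea` up to the end of the segment, with one common `size > *max_size` check afterwards. A minimal sketch of the expand-up computation under the same assumptions as the diff (the function name is illustrative, not the kernel's):

    #include <stdint.h>

    /* For an expand-up segment the valid effective addresses are
     * [0, lim], so lim + 1 - ea bytes remain after ea.  The u64
     * arithmetic and the clamp to UINT32_MAX mirror the diff's
     * min_t(u64, ~0u, ...), which matters when lim == 0xffffffff
     * and ea == 0 (the count would not fit in 32 bits). */
    static uint32_t expand_up_max_size(uint32_t ea, uint32_t lim)
    {
            uint64_t left = (uint64_t)lim + 1 - ea; /* caller ensured ea <= lim */
            return left > UINT32_MAX ? UINT32_MAX : (uint32_t)left;
    }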
@@ -711,17 +724,18 @@ static int __linearize(struct x86_emulate_ctxt *ctxt,
 	return X86EMUL_CONTINUE;
 bad:
 	if (addr.seg == VCPU_SREG_SS)
-		return emulate_ss(ctxt, sel);
+		return emulate_ss(ctxt, 0);
 	else
-		return emulate_gp(ctxt, sel);
+		return emulate_gp(ctxt, 0);
 }
 
 static int linearize(struct x86_emulate_ctxt *ctxt,
 		     struct segmented_address addr,
 		     unsigned size, bool write,
 		     ulong *linear)
 {
-	return __linearize(ctxt, addr, size, write, false, linear);
+	unsigned max_size;
+	return __linearize(ctxt, addr, &max_size, size, write, false, linear);
 }
 
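Note on the error-code change in the `bad:` path ("KVM: emulator: fix error code for __linearize"): #GP and #SS carry a zero error code when they arise from an ordinary operand or instruction access; non-zero selector-format error codes are reserved for faults raised while loading a segment descriptor, so passing `sel` here was wrong. For reference, a sketch of the helper shape being called (simplified; treat the exact signature as an assumption, not a quote from emulate.c):

    /* simplified sketch - emulate_gp()/emulate_ss() wrap a common
     * exception helper, and the second argument becomes the error
     * code injected with the fault, hence the 0 above */
    static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
    {
            return emulate_exception(ctxt, GP_VECTOR, err, true);
    }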

@@ -746,17 +760,27 @@ static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
 static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
 {
 	int rc;
-	unsigned size;
+	unsigned size, max_size;
 	unsigned long linear;
 	int cur_size = ctxt->fetch.end - ctxt->fetch.data;
 	struct segmented_address addr = { .seg = VCPU_SREG_CS,
 					   .ea = ctxt->eip + cur_size };
 
-	size = 15UL ^ cur_size;
-	rc = __linearize(ctxt, addr, size, false, true, &linear);
+	/*
+	 * We do not know exactly how many bytes will be needed, and
+	 * __linearize is expensive, so fetch as much as possible.  We
+	 * just have to avoid going beyond the 15 byte limit, the end
+	 * of the segment, or the end of the page.
+	 *
+	 * __linearize is called with size 0 so that it does not do any
+	 * boundary check itself.  Instead, we use max_size to check
+	 * against op_size.
+	 */
+	rc = __linearize(ctxt, addr, &max_size, 0, false, true, &linear);
 	if (unlikely(rc != X86EMUL_CONTINUE))
 		return rc;
 
+	size = min_t(unsigned, 15UL ^ cur_size, max_size);
 	size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));
 
 	/*
@@ -766,7 +790,8 @@ static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
 	 * still, we must have hit the 15-byte boundary.
 	 */
 	if (unlikely(size < op_size))
-		return X86EMUL_UNHANDLEABLE;
+		return emulate_gp(ctxt, 0);
+
 	rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
 			      size, &ctxt->exception);
 	if (unlikely(rc != X86EMUL_CONTINUE))
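Two details of the fetch path are worth spelling out. First, running out of fetchable bytes now raises #GP instead of returning X86EMUL_UNHANDLEABLE: an instruction extending past the segment limit is an architectural fault the guest should observe, not an emulator shortcoming. Second, since `cur_size` can never exceed 15 (the architectural instruction-length limit), `15UL ^ cur_size` is just `15 - cur_size` computed without a borrow. A stand-alone check of that identity:

    #include <assert.h>

    int main(void)
    {
            /* XOR with 0b1111 flips only the low four bits, which
             * equals subtraction from 15 whenever no higher bits of
             * the operand are set */
            for (unsigned long cur_size = 0; cur_size <= 15; cur_size++)
                    assert((15UL ^ cur_size) == 15UL - cur_size);
            return 0;
    }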
@@ -2012,7 +2037,7 @@ static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
 
 	rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
 	if (rc != X86EMUL_CONTINUE) {
-		WARN_ON(!ctxt->mode != X86EMUL_MODE_PROT64);
+		WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
 		/* assigning eip failed; restore the old cs */
 		ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
 		return rc;
@@ -2109,7 +2134,7 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
 		return rc;
 	rc = assign_eip_far(ctxt, eip, new_desc.l);
 	if (rc != X86EMUL_CONTINUE) {
-		WARN_ON(!ctxt->mode != X86EMUL_MODE_PROT64);
+		WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
 		ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
 	}
 	return rc;
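Both WARN_ON fixes drop a stray `!`: because `!` binds tighter than `!=`, the old expression compared the boolean `!ctxt->mode` against the enum constant and so fired on essentially every failure path. assign_eip_far can only fail in 64-bit mode (the non-canonical check in the first hunk), which is exactly what the corrected assertion states. A minimal illustration (the enum value is made up for the demo):

    #include <stdio.h>

    enum { X86EMUL_MODE_PROT64 = 8 };       /* illustrative value only */

    int main(void)
    {
            int mode = X86EMUL_MODE_PROT64;
            /* old, buggy: parsed as (!mode) != PROT64, i.e. 0 != 8,
             * so the warning fired even in 64-bit mode, where failure
             * is legitimate */
            printf("old: %d\n", !mode != X86EMUL_MODE_PROT64);  /* 1 */
            /* new: fires only when we are NOT in 64-bit mode */
            printf("new: %d\n", mode != X86EMUL_MODE_PROT64);   /* 0 */
            return 0;
    }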

arch/x86/kvm/vmx.c (5 additions, 1 deletion)
@@ -4579,7 +4579,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 		vmcs_write32(TPR_THRESHOLD, 0);
 	}
 
-	kvm_vcpu_reload_apic_access_page(vcpu);
+	kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
 
 	if (vmx_vm_has_apicv(vcpu->kvm))
 		memset(&vmx->pi_desc, 0, sizeof(struct pi_desc));
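This is the SRCU lockdep fix the merge message mentions: vmx_vcpu_reset() can be reached without the kvm->srcu lock held, so reloading the APIC access page there is unsafe. The fix instead records a request bit that the vcpu entry path, which does hold the lock, consumes before the next vmentry. Roughly (a simplified sketch of the deferred-request pattern, not a quote from the tree):

    /* at reset time, just set the request bit: */
    kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);

    /* later, in the vcpu run loop with kvm->srcu held, the request
     * is checked and the deferred work performed: */
    if (kvm_check_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu))
            kvm_vcpu_reload_apic_access_page(vcpu);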
@@ -6426,6 +6426,8 @@ static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
 	const unsigned long *fields = shadow_read_write_fields;
 	const int num_fields = max_shadow_read_write_fields;
 
+	preempt_disable();
+
 	vmcs_load(shadow_vmcs);
 
 	for (i = 0; i < num_fields; i++) {
@@ -6449,6 +6451,8 @@ static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
 
 	vmcs_clear(shadow_vmcs);
 	vmcs_load(vmx->loaded_vmcs->vmcs);
+
+	preempt_enable();
 }
 
 static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
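The preempt_disable()/preempt_enable() pair matters because VMREAD and VMWRITE operate on whichever VMCS is current on the executing CPU: if the task were preempted, and possibly migrated, between vmcs_load(shadow_vmcs) and the restoring vmcs_load(), the field reads could target the wrong VMCS or fail outright. The shape of the resulting critical section, condensed from the two hunks above:

    preempt_disable();

    vmcs_load(shadow_vmcs);             /* shadow VMCS becomes current */
    /* ... vmcs_read*() each shadowed field into vmcs12 ... */
    vmcs_clear(shadow_vmcs);
    vmcs_load(vmx->loaded_vmcs->vmcs);  /* restore the regular VMCS */

    preempt_enable();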
