 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 * Copyright(C) 2015 Intel Corporation.
 *   Yaniv Kamay <yaniv@qumranet.com>
 *   Avi Kivity <avi@qumranet.com>
 *   Marcelo Tosatti <mtosatti@redhat.com>
 *   Paolo Bonzini <pbonzini@redhat.com>
 *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
#include <linux/kvm_host.h>

#define IA32_MTRR_DEF_TYPE_E		(1ULL << 11)
#define IA32_MTRR_DEF_TYPE_FE		(1ULL << 10)
#define IA32_MTRR_DEF_TYPE_TYPE_MASK	(0xff)
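
/*
 * IA32_MTRR_DEF_TYPE layout (Intel SDM): bit 11 (E) enables the MTRRs as
 * a whole, bit 10 (FE) enables the fixed-range MTRRs, and the low byte
 * holds the default memory type used for addresses not covered by any
 * MTRR.
 */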
static bool msr_mtrr_valid(unsigned msr)
	case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
	case MSR_MTRRfix64K_00000:
	case MSR_MTRRfix16K_80000:
	case MSR_MTRRfix16K_A0000:
	case MSR_MTRRfix4K_C0000:
	case MSR_MTRRfix4K_C8000:
	case MSR_MTRRfix4K_D0000:
	case MSR_MTRRfix4K_D8000:
	case MSR_MTRRfix4K_E0000:
	case MSR_MTRRfix4K_E8000:
	case MSR_MTRRfix4K_F0000:
	case MSR_MTRRfix4K_F8000:
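	/*
	 * The 0x200 case range at the top matches the variable-range MTRRs,
	 * which are exposed as consecutive base/mask MSR pairs starting at
	 * MTRRphysBase0 (0x200), hence the 2 * KVM_NR_VAR_MTRR span.
	 */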
static bool valid_pat_type(unsigned t)
	return t < 8 && (1 << t) & 0xf3;	/* 0, 1, 4, 5, 6, 7 */

static bool valid_mtrr_type(unsigned t)
	return t < 8 && (1 << t) & 0x73;	/* 0, 1, 4, 5, 6 */
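
/*
 * Memory type encodings (Intel SDM): 0 = UC, 1 = WC, 4 = WT, 5 = WP,
 * 6 = WB.  Type 7 (UC-) is architecturally valid only in the PAT, which
 * is why valid_pat_type() additionally accepts bit 7 (mask 0xf3 vs 0x73).
 */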
bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
	if (!msr_mtrr_valid(msr))

	if (msr == MSR_IA32_CR_PAT) {
		for (i = 0; i < 8; i++)
			if (!valid_pat_type((data >> (i * 8)) & 0xff))
	} else if (msr == MSR_MTRRdefType) {
		return valid_mtrr_type(data & 0xff);
	} else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {
		for (i = 0; i < 8; i++)
			if (!valid_mtrr_type((data >> (i * 8)) & 0xff))

	WARN_ON(!(msr >= 0x200 && msr < 0x200 + 2 * KVM_NR_VAR_MTRR));

	mask = (~0ULL) << cpuid_maxphyaddr(vcpu);

	if (!valid_mtrr_type(data & 0xff))

	kvm_inject_gp(vcpu, 0);

EXPORT_SYMBOL_GPL(kvm_mtrr_valid);
static bool mtrr_is_enabled(struct kvm_mtrr *mtrr_state)
	return !!(mtrr_state->deftype & IA32_MTRR_DEF_TYPE_E);

static bool fixed_mtrr_is_enabled(struct kvm_mtrr *mtrr_state)
	return !!(mtrr_state->deftype & IA32_MTRR_DEF_TYPE_FE);

static u8 mtrr_default_type(struct kvm_mtrr *mtrr_state)
	return mtrr_state->deftype & IA32_MTRR_DEF_TYPE_TYPE_MASK;
/*
 * Three terms are used in the following code:
 * - segment: one of the address segments covered by the fixed MTRRs.
 * - unit: a single MSR within a segment.
 * - range: the block of addresses described by one byte of a unit; each
 *   range is mapped to exactly one memory cache type.
 */
struct fixed_mtrr_segment {
	/* the start position in kvm_mtrr.fixed_ranges[]. */

static struct fixed_mtrr_segment fixed_seg_table[] = {
	/* MSR_MTRRfix64K_00000, 1 unit. 64K fixed mtrr. */
		.range_shift = 16, /* 64K */
	 * MSR_MTRRfix16K_80000 ... MSR_MTRRfix16K_A0000, 2 units,
		.range_shift = 14, /* 16K */
	 * MSR_MTRRfix4K_C0000 ... MSR_MTRRfix4K_F8000, 8 units,
		.range_shift = 12, /* 4K */
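
/*
 * With the standard fixed-MTRR layout, the three segments cover the low
 * 1MB as follows: the 64K segment spans 0x00000-0x7FFFF (1 MSR * 8 ranges
 * of 64K), the 16K segment spans 0x80000-0xBFFFF (2 MSRs * 8 ranges of
 * 16K) and the 4K segment spans 0xC0000-0xFFFFF (8 MSRs * 8 ranges of
 * 4K), for a total of 8 + 16 + 64 = 88 fixed ranges.
 */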
/*
 * Each unit is covered by one MSR, and an MSR always holds 8 ranges,
 * so the unit size is always 8 * 2^range_shift.
 */
static u64 fixed_mtrr_seg_unit_size(int seg)
	return 8 << fixed_seg_table[seg].range_shift;
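
/*
 * For example, fixed_mtrr_seg_unit_size() yields 8 << 16 = 512K for the
 * 64K segment, 8 << 14 = 128K for the 16K segment and 8 << 12 = 32K for
 * the 4K segment.
 */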
static bool fixed_msr_to_seg_unit(u32 msr, int *seg, int *unit)
	case MSR_MTRRfix64K_00000:

	case MSR_MTRRfix16K_80000 ... MSR_MTRRfix16K_A0000:
		*unit = msr - MSR_MTRRfix16K_80000;

	case MSR_MTRRfix4K_C0000 ... MSR_MTRRfix4K_F8000:
		*unit = msr - MSR_MTRRfix4K_C0000;
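		/*
		 * As an example, MSR_MTRRfix4K_E0000 lands in the 4K segment
		 * with unit = MSR_MTRRfix4K_E0000 - MSR_MTRRfix4K_C0000 = 4,
		 * because the fixed 4K MSRs are numbered consecutively.
		 */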
static void fixed_mtrr_seg_unit_range(int seg, int unit, u64 *start, u64 *end)
	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];
	u64 unit_size = fixed_mtrr_seg_unit_size(seg);

	*start = mtrr_seg->start + unit * unit_size;
	*end = *start + unit_size;
	WARN_ON(*end > mtrr_seg->end);

static int fixed_mtrr_seg_unit_range_index(int seg, int unit)
	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];

	WARN_ON(mtrr_seg->start + unit * fixed_mtrr_seg_unit_size(seg)

	/* each unit has 8 ranges. */
	return mtrr_seg->range_start + 8 * unit;

static int fixed_mtrr_seg_end_range_index(int seg)
	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];

	n = (mtrr_seg->end - mtrr_seg->start) >> mtrr_seg->range_shift;
	return mtrr_seg->range_start + n - 1;
static bool fixed_msr_to_range(u32 msr, u64 *start, u64 *end)
	if (!fixed_msr_to_seg_unit(msr, &seg, &unit))

	fixed_mtrr_seg_unit_range(seg, unit, start, end);

static int fixed_msr_to_range_index(u32 msr)
	if (!fixed_msr_to_seg_unit(msr, &seg, &unit))

	return fixed_mtrr_seg_unit_range_index(seg, unit);
static int fixed_mtrr_addr_to_seg(u64 addr)
	struct fixed_mtrr_segment *mtrr_seg;
	int seg, seg_num = ARRAY_SIZE(fixed_seg_table);

	for (seg = 0; seg < seg_num; seg++) {
		mtrr_seg = &fixed_seg_table[seg];
		if (mtrr_seg->start <= addr && addr < mtrr_seg->end)
static int fixed_mtrr_addr_seg_to_range_index(u64 addr, int seg)
	struct fixed_mtrr_segment *mtrr_seg;

	mtrr_seg = &fixed_seg_table[seg];
	index = mtrr_seg->range_start;
	index += (addr - mtrr_seg->start) >> mtrr_seg->range_shift;

static u64 fixed_mtrr_range_end_addr(int seg, int index)
	struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];
	int pos = index - mtrr_seg->range_start;

	return mtrr_seg->start + ((pos + 1) << mtrr_seg->range_shift);
static void var_mtrr_range(struct kvm_mtrr_range *range, u64 *start, u64 *end)
	*start = range->base & PAGE_MASK;

	mask = range->mask & PAGE_MASK;
	mask |= ~0ULL << boot_cpu_data.x86_phys_bits;

	/* This cannot overflow because writing to the reserved bits of
	 * variable MTRRs causes a #GP.
	 */
	*end = (*start | ~mask) + 1;
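
/*
 * As a worked example, assume boot_cpu_data.x86_phys_bits == 36 and a
 * variable MTRR with base = 0xc0000000 and mask = 0xfc0000000 (plus the
 * valid bit): after OR-ing in the bits above the physical address width,
 * mask becomes 0xffffffffc0000000, so ~mask = 0x3fffffff and
 * *end = 0xc0000000 + 0x40000000 = 0x100000000, i.e. the range covers
 * [3G, 4G).
 */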
static void update_mtrr(struct kvm_vcpu *vcpu, u32 msr)
	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;

	if (msr == MSR_IA32_CR_PAT || !tdp_enabled ||
	    !kvm_arch_has_noncoherent_dma(vcpu->kvm))

	if (!mtrr_is_enabled(mtrr_state) && msr != MSR_MTRRdefType)

	if (fixed_msr_to_range(msr, &start, &end)) {
		if (!fixed_mtrr_is_enabled(mtrr_state))
	} else if (msr == MSR_MTRRdefType) {
		/* variable range MTRRs. */
		index = (msr - 0x200) / 2;
		var_mtrr_range(&mtrr_state->var_ranges[index], &start, &end);
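
	/*
	 * Changing an MTRR can change the effective memory type of the
	 * affected GPA range.  With TDP and noncoherent DMA the guest
	 * memory type is baked into the SPTEs, so zap the range and let it
	 * be rebuilt with the new type on the next fault.
	 */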
	kvm_zap_gfn_range(vcpu->kvm, gpa_to_gfn(start), gpa_to_gfn(end));
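
/*
 * Bit 11 of the MTRRphysMask MSR is the V (valid) flag; a variable range
 * participates in address matching only while this bit is set.
 */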
static bool var_mtrr_range_is_valid(struct kvm_mtrr_range *range)
	return (range->mask & (1 << 11)) != 0;
static void set_var_mtrr_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
	struct kvm_mtrr_range *tmp, *cur;
	int index, is_mtrr_mask;

	index = (msr - 0x200) / 2;
	is_mtrr_mask = msr - 0x200 - 2 * index;
	cur = &mtrr_state->var_ranges[index];

	/* remove the entry if it's in the list. */
	if (var_mtrr_range_is_valid(cur))
		list_del(&mtrr_state->var_ranges[index].node);

	/* add it to the list if it's enabled. */
	if (var_mtrr_range_is_valid(cur)) {
		list_for_each_entry(tmp, &mtrr_state->head, node)
			if (cur->base >= tmp->base)
		list_add_tail(&cur->node, &tmp->node);
int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
	if (!kvm_mtrr_valid(vcpu, msr, data))

	index = fixed_msr_to_range_index(msr);
		*(u64 *)&vcpu->arch.mtrr_state.fixed_ranges[index] = data;
	else if (msr == MSR_MTRRdefType)
		vcpu->arch.mtrr_state.deftype = data;
	else if (msr == MSR_IA32_CR_PAT)
		vcpu->arch.pat = data;
		set_var_mtrr_msr(vcpu, msr, data);

	update_mtrr(vcpu, msr);
int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
	/* MSR_MTRRcap is a readonly MSR. */
	if (msr == MSR_MTRRcap) {
		 * VCNT = KVM_NR_VAR_MTRR
		*pdata = 0x500 | KVM_NR_VAR_MTRR;
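
		/*
		 * In IA32_MTRRcap, bits 7:0 report VCNT (the number of
		 * variable ranges), bit 8 reports fixed-range support and
		 * bit 10 reports write-combining support, so 0x500 above
		 * advertises FIX = 1 and WC = 1 with VCNT = KVM_NR_VAR_MTRR.
		 */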
	if (!msr_mtrr_valid(msr))

	index = fixed_msr_to_range_index(msr);
		*pdata = *(u64 *)&vcpu->arch.mtrr_state.fixed_ranges[index];
	else if (msr == MSR_MTRRdefType)
		*pdata = vcpu->arch.mtrr_state.deftype;
	else if (msr == MSR_IA32_CR_PAT)
		*pdata = vcpu->arch.pat;
	else {	/* Variable MTRRs */
		index = (msr - 0x200) / 2;
		is_mtrr_mask = msr - 0x200 - 2 * index;
			*pdata = vcpu->arch.mtrr_state.var_ranges[index].base;
			*pdata = vcpu->arch.mtrr_state.var_ranges[index].mask;
void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu)
	INIT_LIST_HEAD(&vcpu->arch.mtrr_state.head);

	struct kvm_mtrr *mtrr_state;

	/* is [start, end) not fully covered by MTRRs? */

	/* private fields. */
	/* used for fixed MTRRs. */

	/* used for var MTRRs. */
	struct kvm_mtrr_range *range;
	/* the highest address covered so far by variable MTRRs. */
static bool mtrr_lookup_fixed_start(struct mtrr_iter *iter)
	if (!fixed_mtrr_is_enabled(iter->mtrr_state))

	seg = fixed_mtrr_addr_to_seg(iter->start);

	index = fixed_mtrr_addr_seg_to_range_index(iter->start, seg);
static bool match_var_range(struct mtrr_iter *iter,
			    struct kvm_mtrr_range *range)
	var_mtrr_range(range, &start, &end);
	if (!(start >= iter->end || end <= iter->start)) {
		/*
		 * This function is called while walking kvm_mtrr.head, so
		 * range is the candidate with the lowest base address that
		 * overlaps [iter->start_max, iter->end).
		 */
		iter->partial_map |= iter->start_max < start;

		/* update the highest address covered so far. */
		iter->start_max = max(iter->start_max, end);
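
		/*
		 * For example, if the only matching variable range covers
		 * [0xa0000000, 0xc0000000) and the iterator was initialized
		 * for [0x9f000000, 0xa1000000), then start_max (0x9f000000)
		 * is below the range's start, so partial_map becomes true:
		 * part of the looked-up region falls back to the default
		 * memory type.
		 */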
static void __mtrr_lookup_var_next(struct mtrr_iter *iter)
	struct kvm_mtrr *mtrr_state = iter->mtrr_state;

	list_for_each_entry_continue(iter->range, &mtrr_state->head, node)
		if (match_var_range(iter, iter->range))

	iter->partial_map |= iter->start_max < iter->end;

static void mtrr_lookup_var_start(struct mtrr_iter *iter)
	struct kvm_mtrr *mtrr_state = iter->mtrr_state;

	iter->start_max = iter->start;
	iter->range = list_prepare_entry(iter->range, &mtrr_state->head, node);

	__mtrr_lookup_var_next(iter);
static void mtrr_lookup_fixed_next(struct mtrr_iter *iter)
	/* terminate the lookup once the current range reaches iter->end. */
	if (fixed_mtrr_range_end_addr(iter->seg, iter->index) >= iter->end) {

	/* we have looked up all fixed MTRRs. */
	if (iter->index >= ARRAY_SIZE(iter->mtrr_state->fixed_ranges))
		return mtrr_lookup_var_start(iter);

	/* switch to the next segment. */
	if (iter->index > fixed_mtrr_seg_end_range_index(iter->seg))
static void mtrr_lookup_var_next(struct mtrr_iter *iter)
	__mtrr_lookup_var_next(iter);

static void mtrr_lookup_start(struct mtrr_iter *iter)
	if (!mtrr_is_enabled(iter->mtrr_state)) {
		iter->partial_map = true;

	if (!mtrr_lookup_fixed_start(iter))
		mtrr_lookup_var_start(iter);

static void mtrr_lookup_init(struct mtrr_iter *iter,
			     struct kvm_mtrr *mtrr_state, u64 start, u64 end)
	iter->mtrr_state = mtrr_state;
	iter->partial_map = false;

	mtrr_lookup_start(iter);

static bool mtrr_lookup_okay(struct mtrr_iter *iter)
		iter->mem_type = iter->mtrr_state->fixed_ranges[iter->index];

		iter->mem_type = iter->range->base & 0xff;

static void mtrr_lookup_next(struct mtrr_iter *iter)
		mtrr_lookup_fixed_next(iter);
		mtrr_lookup_var_next(iter);

#define mtrr_for_each_mem_type(_iter_, _mtrr_, _gpa_start_, _gpa_end_) \
	for (mtrr_lookup_init(_iter_, _mtrr_, _gpa_start_, _gpa_end_); \
	     mtrr_lookup_okay(_iter_); mtrr_lookup_next(_iter_))
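
/*
 * mtrr_for_each_mem_type() walks the memory types that apply to
 * [_gpa_start_, _gpa_end_): fixed ranges are visited first (when the
 * region starts below 1MB and fixed MTRRs are enabled), then the matching
 * variable ranges as they appear on kvm_mtrr.head; iter.partial_map
 * reports whether any part of the region was left uncovered.
 */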
u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
	struct mtrr_iter iter;

	const int wt_wb_mask = (1 << MTRR_TYPE_WRBACK)
			       | (1 << MTRR_TYPE_WRTHROUGH);

	start = gfn_to_gpa(gfn);
	end = start + PAGE_SIZE;

	mtrr_for_each_mem_type(&iter, mtrr_state, start, end) {
		int curr_type = iter.mem_type;
		 * Please refer to Intel SDM Volume 3: 11.11.4.1 MTRR

		 * If two or more variable memory ranges match and the
		 * memory types are identical, then that memory type is
		if (type == curr_type)

		 * If two or more variable memory ranges match and one of
		 * the memory types is UC, the UC memory type is used.
		if (curr_type == MTRR_TYPE_UNCACHABLE)
			return MTRR_TYPE_UNCACHABLE;

		 * If two or more variable memory ranges match and the
		 * memory types are WT and WB, the WT memory type is used.
		if (((1 << type) & wt_wb_mask) &&
		    ((1 << curr_type) & wt_wb_mask)) {
			type = MTRR_TYPE_WRTHROUGH;

		 * For overlaps not defined by the above rules, processor
		 * behavior is undefined.

		/* We use WB for this undefined behavior. :( */
		return MTRR_TYPE_WRBACK;
	/* It is not covered by MTRRs. */
	if (iter.partial_map) {
		 * We just check one page, partially covered by MTRRs is
		type = mtrr_default_type(mtrr_state);

EXPORT_SYMBOL_GPL(kvm_mtrr_get_guest_memory_type);
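
/*
 * The consistency check below is used by the MMU when deciding whether a
 * range can be mapped with a huge page: a large mapping is only safe when
 * every small page inside the range would get the same guest memory type,
 * so a range that mixes types (or whose uncovered part would use a
 * different default type) is reported as inconsistent.
 */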
bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
					  int page_num)
	struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
	struct mtrr_iter iter;

	start = gfn_to_gpa(gfn);
	end = gfn_to_gpa(gfn + page_num);
	mtrr_for_each_mem_type(&iter, mtrr_state, start, end) {
		type = iter.mem_type;

		if (type != iter.mem_type)

	if (!iter.partial_map)

	return type == mtrr_default_type(mtrr_state);