root/arch/x86/kvm/mtrr.c

DEFINITIONS

This source file includes the following definitions:
  1. msr_mtrr_valid
  2. valid_mtrr_type
  3. kvm_mtrr_valid
  4. mtrr_is_enabled
  5. fixed_mtrr_is_enabled
  6. mtrr_default_type
  7. mtrr_disabled_type
  8. fixed_mtrr_seg_unit_size
  9. fixed_msr_to_seg_unit
  10. fixed_mtrr_seg_unit_range
  11. fixed_mtrr_seg_unit_range_index
  12. fixed_mtrr_seg_end_range_index
  13. fixed_msr_to_range
  14. fixed_msr_to_range_index
  15. fixed_mtrr_addr_to_seg
  16. fixed_mtrr_addr_seg_to_range_index
  17. fixed_mtrr_range_end_addr
  18. var_mtrr_range
  19. update_mtrr
  20. var_mtrr_range_is_valid
  21. set_var_mtrr_msr
  22. kvm_mtrr_set_msr
  23. kvm_mtrr_get_msr
  24. kvm_vcpu_mtrr_init
  25. mtrr_lookup_fixed_start
  26. match_var_range
  27. __mtrr_lookup_var_next
  28. mtrr_lookup_var_start
  29. mtrr_lookup_fixed_next
  30. mtrr_lookup_var_next
  31. mtrr_lookup_start
  32. mtrr_lookup_init
  33. mtrr_lookup_okay
  34. mtrr_lookup_next
  35. kvm_mtrr_get_guest_memory_type
  36. kvm_mtrr_check_gfn_range_consistency

   1 // SPDX-License-Identifier: GPL-2.0-only
   2 /*
   3  * vMTRR implementation
   4  *
   5  * Copyright (C) 2006 Qumranet, Inc.
   6  * Copyright 2010 Red Hat, Inc. and/or its affiliates.
   7  * Copyright(C) 2015 Intel Corporation.
   8  *
   9  * Authors:
  10  *   Yaniv Kamay  <yaniv@qumranet.com>
  11  *   Avi Kivity   <avi@qumranet.com>
  12  *   Marcelo Tosatti <mtosatti@redhat.com>
  13  *   Paolo Bonzini <pbonzini@redhat.com>
  14  *   Xiao Guangrong <guangrong.xiao@linux.intel.com>
  15  */
  16 
  17 #include <linux/kvm_host.h>
  18 #include <asm/mtrr.h>
  19 
  20 #include "cpuid.h"
  21 #include "mmu.h"
  22 
  23 #define IA32_MTRR_DEF_TYPE_E            (1ULL << 11)
  24 #define IA32_MTRR_DEF_TYPE_FE           (1ULL << 10)
  25 #define IA32_MTRR_DEF_TYPE_TYPE_MASK    (0xff)
  26 
  27 static bool msr_mtrr_valid(unsigned msr)
  28 {
  29         switch (msr) {
  30         case 0x200 ... 0x200 + 2 * KVM_NR_VAR_MTRR - 1:
  31         case MSR_MTRRfix64K_00000:
  32         case MSR_MTRRfix16K_80000:
  33         case MSR_MTRRfix16K_A0000:
  34         case MSR_MTRRfix4K_C0000:
  35         case MSR_MTRRfix4K_C8000:
  36         case MSR_MTRRfix4K_D0000:
  37         case MSR_MTRRfix4K_D8000:
  38         case MSR_MTRRfix4K_E0000:
  39         case MSR_MTRRfix4K_E8000:
  40         case MSR_MTRRfix4K_F0000:
  41         case MSR_MTRRfix4K_F8000:
  42         case MSR_MTRRdefType:
  43         case MSR_IA32_CR_PAT:
  44                 return true;
  45         }
  46         return false;
  47 }
  48 
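/*
 * Only memory types 0 (UC), 1 (WC), 4 (WT), 5 (WP) and 6 (WB) are
 * architecturally defined for MTRRs; 0x73 below is simply the bitmask
 * with those five bits set.
 */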
  49 static bool valid_mtrr_type(unsigned t)
  50 {
  51         return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */
  52 }
  53 
  54 bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
  55 {
  56         int i;
  57         u64 mask;
  58 
  59         if (!msr_mtrr_valid(msr))
  60                 return false;
  61 
  62         if (msr == MSR_IA32_CR_PAT) {
  63                 return kvm_pat_valid(data);
  64         } else if (msr == MSR_MTRRdefType) {
  65                 if (data & ~0xcff)
  66                         return false;
  67                 return valid_mtrr_type(data & 0xff);
  68         } else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {
  69                 for (i = 0; i < 8 ; i++)
  70                         if (!valid_mtrr_type((data >> (i * 8)) & 0xff))
  71                                 return false;
  72                 return true;
  73         }
  74 
  75         /* variable MTRRs */
  76         WARN_ON(!(msr >= 0x200 && msr < 0x200 + 2 * KVM_NR_VAR_MTRR));
  77 
  78         mask = (~0ULL) << cpuid_maxphyaddr(vcpu);
  79         if ((msr & 1) == 0) {
  80                 /* MTRR base */
  81                 if (!valid_mtrr_type(data & 0xff))
  82                         return false;
  83                 mask |= 0xf00;
  84         } else
  85                 /* MTRR mask */
  86                 mask |= 0x7ff;
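        /*
         * As an illustration: with cpuid_maxphyaddr() == 36 the reserved-bit
         * mask is 0xfffffff000000000, plus 0xf00 for a base MSR (bits 8-11
         * are reserved) or 0x7ff for a mask MSR (bits 0-10 are reserved;
         * bit 11 is the valid bit and may be set).
         */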
  87         if (data & mask) {
  88                 kvm_inject_gp(vcpu, 0);
  89                 return false;
  90         }
  91 
  92         return true;
  93 }
  94 EXPORT_SYMBOL_GPL(kvm_mtrr_valid);
  95 
  96 static bool mtrr_is_enabled(struct kvm_mtrr *mtrr_state)
  97 {
  98         return !!(mtrr_state->deftype & IA32_MTRR_DEF_TYPE_E);
  99 }
 100 
 101 static bool fixed_mtrr_is_enabled(struct kvm_mtrr *mtrr_state)
 102 {
 103         return !!(mtrr_state->deftype & IA32_MTRR_DEF_TYPE_FE);
 104 }
 105 
 106 static u8 mtrr_default_type(struct kvm_mtrr *mtrr_state)
 107 {
 108         return mtrr_state->deftype & IA32_MTRR_DEF_TYPE_TYPE_MASK;
 109 }
 110 
 111 static u8 mtrr_disabled_type(struct kvm_vcpu *vcpu)
 112 {
 113         /*
 114          * Intel SDM 11.11.2.2: all MTRRs are disabled when
 115          * IA32_MTRR_DEF_TYPE.E bit is cleared, and the UC
 116          * memory type is applied to all of physical memory.
 117          *
 118          * However, virtual machines can be run with CPUID such that
 119          * there are no MTRRs.  In that case, the firmware will never
 120          * enable MTRRs and it is obviously undesirable to run the
  121          * guest entirely with UC memory, so WB is used instead.
 122          */
 123         if (guest_cpuid_has(vcpu, X86_FEATURE_MTRR))
 124                 return MTRR_TYPE_UNCACHABLE;
 125         else
 126                 return MTRR_TYPE_WRBACK;
 127 }
 128 
  129 /*
  130  * Three terms are used in the following code:
  131  * - segment: an address segment covered by the fixed MTRRs.
  132  * - unit: one MSR entry within a segment.
  133  * - range: a contiguous region mapped to a single memory cache type.
  134  */
 135 struct fixed_mtrr_segment {
 136         u64 start;
 137         u64 end;
 138 
 139         int range_shift;
 140 
 141         /* the start position in kvm_mtrr.fixed_ranges[]. */
 142         int range_start;
 143 };
 144 
 145 static struct fixed_mtrr_segment fixed_seg_table[] = {
 146         /* MSR_MTRRfix64K_00000, 1 unit. 64K fixed mtrr. */
 147         {
 148                 .start = 0x0,
 149                 .end = 0x80000,
 150                 .range_shift = 16, /* 64K */
 151                 .range_start = 0,
 152         },
 153 
 154         /*
 155          * MSR_MTRRfix16K_80000 ... MSR_MTRRfix16K_A0000, 2 units,
 156          * 16K fixed mtrr.
 157          */
 158         {
 159                 .start = 0x80000,
 160                 .end = 0xc0000,
 161                 .range_shift = 14, /* 16K */
 162                 .range_start = 8,
 163         },
 164 
 165         /*
 166          * MSR_MTRRfix4K_C0000 ... MSR_MTRRfix4K_F8000, 8 units,
 167          * 4K fixed mtrr.
 168          */
 169         {
 170                 .start = 0xc0000,
 171                 .end = 0x100000,
  172                 .range_shift = 12, /* 4K */
 173                 .range_start = 24,
 174         }
 175 };
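/*
 * A worked example of the mapping above: MSR_MTRRfix16K_A0000 is segment 1,
 * unit 1.  The unit size is 8 << 14 = 128KB, so that unit covers physical
 * addresses [0xa0000, 0xc0000) and occupies fixed_ranges[16..23]
 * (range_start 8 + 8 * unit).  In total the three segments describe
 * 8 + 16 + 64 = 88 fixed ranges.
 */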
 176 
 177 /*
  178  * One MSR covers exactly one unit.  Each MSR entry contains 8 ranges,
  179  * so the unit size is always 8 * 2^range_shift.
 180  */
 181 static u64 fixed_mtrr_seg_unit_size(int seg)
 182 {
 183         return 8 << fixed_seg_table[seg].range_shift;
 184 }
 185 
 186 static bool fixed_msr_to_seg_unit(u32 msr, int *seg, int *unit)
 187 {
 188         switch (msr) {
 189         case MSR_MTRRfix64K_00000:
 190                 *seg = 0;
 191                 *unit = 0;
 192                 break;
 193         case MSR_MTRRfix16K_80000 ... MSR_MTRRfix16K_A0000:
 194                 *seg = 1;
 195                 *unit = array_index_nospec(
 196                         msr - MSR_MTRRfix16K_80000,
 197                         MSR_MTRRfix16K_A0000 - MSR_MTRRfix16K_80000 + 1);
 198                 break;
 199         case MSR_MTRRfix4K_C0000 ... MSR_MTRRfix4K_F8000:
 200                 *seg = 2;
 201                 *unit = array_index_nospec(
 202                         msr - MSR_MTRRfix4K_C0000,
 203                         MSR_MTRRfix4K_F8000 - MSR_MTRRfix4K_C0000 + 1);
 204                 break;
 205         default:
 206                 return false;
 207         }
 208 
 209         return true;
 210 }
 211 
 212 static void fixed_mtrr_seg_unit_range(int seg, int unit, u64 *start, u64 *end)
 213 {
 214         struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];
 215         u64 unit_size = fixed_mtrr_seg_unit_size(seg);
 216 
 217         *start = mtrr_seg->start + unit * unit_size;
 218         *end = *start + unit_size;
 219         WARN_ON(*end > mtrr_seg->end);
 220 }
 221 
 222 static int fixed_mtrr_seg_unit_range_index(int seg, int unit)
 223 {
 224         struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];
 225 
 226         WARN_ON(mtrr_seg->start + unit * fixed_mtrr_seg_unit_size(seg)
 227                 > mtrr_seg->end);
 228 
 229         /* each unit has 8 ranges. */
 230         return mtrr_seg->range_start + 8 * unit;
 231 }
 232 
 233 static int fixed_mtrr_seg_end_range_index(int seg)
 234 {
 235         struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];
 236         int n;
 237 
 238         n = (mtrr_seg->end - mtrr_seg->start) >> mtrr_seg->range_shift;
 239         return mtrr_seg->range_start + n - 1;
 240 }
 241 
 242 static bool fixed_msr_to_range(u32 msr, u64 *start, u64 *end)
 243 {
 244         int seg, unit;
 245 
 246         if (!fixed_msr_to_seg_unit(msr, &seg, &unit))
 247                 return false;
 248 
 249         fixed_mtrr_seg_unit_range(seg, unit, start, end);
 250         return true;
 251 }
 252 
 253 static int fixed_msr_to_range_index(u32 msr)
 254 {
 255         int seg, unit;
 256 
 257         if (!fixed_msr_to_seg_unit(msr, &seg, &unit))
 258                 return -1;
 259 
 260         return fixed_mtrr_seg_unit_range_index(seg, unit);
 261 }
 262 
 263 static int fixed_mtrr_addr_to_seg(u64 addr)
 264 {
 265         struct fixed_mtrr_segment *mtrr_seg;
 266         int seg, seg_num = ARRAY_SIZE(fixed_seg_table);
 267 
 268         for (seg = 0; seg < seg_num; seg++) {
 269                 mtrr_seg = &fixed_seg_table[seg];
 270                 if (mtrr_seg->start <= addr && addr < mtrr_seg->end)
 271                         return seg;
 272         }
 273 
 274         return -1;
 275 }
 276 
 277 static int fixed_mtrr_addr_seg_to_range_index(u64 addr, int seg)
 278 {
 279         struct fixed_mtrr_segment *mtrr_seg;
 280         int index;
 281 
 282         mtrr_seg = &fixed_seg_table[seg];
 283         index = mtrr_seg->range_start;
 284         index += (addr - mtrr_seg->start) >> mtrr_seg->range_shift;
 285         return index;
 286 }
 287 
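/* e.g. for seg 0, index 7 this returns 0x80000, the end of the 64K segment. */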
 288 static u64 fixed_mtrr_range_end_addr(int seg, int index)
 289 {
 290         struct fixed_mtrr_segment *mtrr_seg = &fixed_seg_table[seg];
 291         int pos = index - mtrr_seg->range_start;
 292 
 293         return mtrr_seg->start + ((pos + 1) << mtrr_seg->range_shift);
 294 }
 295 
 296 static void var_mtrr_range(struct kvm_mtrr_range *range, u64 *start, u64 *end)
 297 {
 298         u64 mask;
 299 
 300         *start = range->base & PAGE_MASK;
 301 
 302         mask = range->mask & PAGE_MASK;
 303 
 304         /* This cannot overflow because writing to the reserved bits of
 305          * variable MTRRs causes a #GP.
 306          */
 307         *end = (*start | ~mask) + 1;
 308 }
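/*
 * Worked example (assuming a guest MAXPHYADDR of 36 bits): for a variable
 * MTRR programmed with base = 0x80000006 (WB at 2GB) and mask = 0xfc0000800
 * (valid bit set), set_var_mtrr_msr() stores the mask as 0xffffffffc0000800,
 * so var_mtrr_range() yields start = 0x80000000 and
 * end = (0x80000000 | 0x3fffffff) + 1 = 0xc0000000, i.e. a 1GB region.
 */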
 309 
 310 static void update_mtrr(struct kvm_vcpu *vcpu, u32 msr)
 311 {
 312         struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
 313         gfn_t start, end;
 314         int index;
 315 
 316         if (msr == MSR_IA32_CR_PAT || !tdp_enabled ||
 317               !kvm_arch_has_noncoherent_dma(vcpu->kvm))
 318                 return;
 319 
 320         if (!mtrr_is_enabled(mtrr_state) && msr != MSR_MTRRdefType)
 321                 return;
 322 
 323         /* fixed MTRRs. */
 324         if (fixed_msr_to_range(msr, &start, &end)) {
 325                 if (!fixed_mtrr_is_enabled(mtrr_state))
 326                         return;
 327         } else if (msr == MSR_MTRRdefType) {
 328                 start = 0x0;
 329                 end = ~0ULL;
 330         } else {
 331                 /* variable range MTRRs. */
 332                 index = (msr - 0x200) / 2;
 333                 var_mtrr_range(&mtrr_state->var_ranges[index], &start, &end);
 334         }
 335 
 336         kvm_zap_gfn_range(vcpu->kvm, gpa_to_gfn(start), gpa_to_gfn(end));
 337 }
 338 
 339 static bool var_mtrr_range_is_valid(struct kvm_mtrr_range *range)
 340 {
 341         return (range->mask & (1 << 11)) != 0;
 342 }
 343 
 344 static void set_var_mtrr_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 345 {
 346         struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
 347         struct kvm_mtrr_range *tmp, *cur;
 348         int index, is_mtrr_mask;
 349 
 350         index = (msr - 0x200) / 2;
 351         is_mtrr_mask = msr - 0x200 - 2 * index;
 352         cur = &mtrr_state->var_ranges[index];
 353 
 354         /* remove the entry if it's in the list. */
 355         if (var_mtrr_range_is_valid(cur))
 356                 list_del(&mtrr_state->var_ranges[index].node);
 357 
 358         /* Extend the mask with all 1 bits to the left, since those
 359          * bits must implicitly be 0.  The bits are then cleared
 360          * when reading them.
 361          */
 362         if (!is_mtrr_mask)
 363                 cur->base = data;
 364         else
 365                 cur->mask = data | (-1LL << cpuid_maxphyaddr(vcpu));
 366 
 367         /* add it to the list if it's enabled. */
 368         if (var_mtrr_range_is_valid(cur)) {
 369                 list_for_each_entry(tmp, &mtrr_state->head, node)
 370                         if (cur->base >= tmp->base)
 371                                 break;
 372                 list_add_tail(&cur->node, &tmp->node);
 373         }
 374 }
 375 
 376 int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 377 {
 378         int index;
 379 
 380         if (!kvm_mtrr_valid(vcpu, msr, data))
 381                 return 1;
 382 
 383         index = fixed_msr_to_range_index(msr);
 384         if (index >= 0)
 385                 *(u64 *)&vcpu->arch.mtrr_state.fixed_ranges[index] = data;
 386         else if (msr == MSR_MTRRdefType)
 387                 vcpu->arch.mtrr_state.deftype = data;
 388         else if (msr == MSR_IA32_CR_PAT)
 389                 vcpu->arch.pat = data;
 390         else
 391                 set_var_mtrr_msr(vcpu, msr, data);
 392 
 393         update_mtrr(vcpu, msr);
 394         return 0;
 395 }
 396 
 397 int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 398 {
 399         int index;
 400 
 401         /* MSR_MTRRcap is a readonly MSR. */
 402         if (msr == MSR_MTRRcap) {
 403                 /*
 404                  * SMRR = 0
 405                  * WC = 1
 406                  * FIX = 1
 407                  * VCNT = KVM_NR_VAR_MTRR
 408                  */
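                /*
                 * With KVM_NR_VAR_MTRR == 8 this yields 0x508: WC (bit 10)
                 * and fixed MTRRs (bit 8) supported, 8 variable MTRRs.
                 */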
 409                 *pdata = 0x500 | KVM_NR_VAR_MTRR;
 410                 return 0;
 411         }
 412 
 413         if (!msr_mtrr_valid(msr))
 414                 return 1;
 415 
 416         index = fixed_msr_to_range_index(msr);
 417         if (index >= 0)
 418                 *pdata = *(u64 *)&vcpu->arch.mtrr_state.fixed_ranges[index];
 419         else if (msr == MSR_MTRRdefType)
 420                 *pdata = vcpu->arch.mtrr_state.deftype;
 421         else if (msr == MSR_IA32_CR_PAT)
 422                 *pdata = vcpu->arch.pat;
 423         else {  /* Variable MTRRs */
 424                 int is_mtrr_mask;
 425 
 426                 index = (msr - 0x200) / 2;
 427                 is_mtrr_mask = msr - 0x200 - 2 * index;
 428                 if (!is_mtrr_mask)
 429                         *pdata = vcpu->arch.mtrr_state.var_ranges[index].base;
 430                 else
 431                         *pdata = vcpu->arch.mtrr_state.var_ranges[index].mask;
 432 
 433                 *pdata &= (1ULL << cpuid_maxphyaddr(vcpu)) - 1;
 434         }
 435 
 436         return 0;
 437 }
 438 
 439 void kvm_vcpu_mtrr_init(struct kvm_vcpu *vcpu)
 440 {
 441         INIT_LIST_HEAD(&vcpu->arch.mtrr_state.head);
 442 }
 443 
 444 struct mtrr_iter {
 445         /* input fields. */
 446         struct kvm_mtrr *mtrr_state;
 447         u64 start;
 448         u64 end;
 449 
 450         /* output fields. */
 451         int mem_type;
 452         /* mtrr is completely disabled? */
 453         bool mtrr_disabled;
 454         /* [start, end) is not fully covered in MTRRs? */
 455         bool partial_map;
 456 
 457         /* private fields. */
 458         union {
 459                 /* used for fixed MTRRs. */
 460                 struct {
 461                         int index;
 462                         int seg;
 463                 };
 464 
 465                 /* used for var MTRRs. */
 466                 struct {
 467                         struct kvm_mtrr_range *range;
  468                         /* the max address covered by var MTRRs so far. */
 469                         u64 start_max;
 470                 };
 471         };
 472 
 473         bool fixed;
 474 };
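/*
 * The lookup below starts in the fixed ranges when the start address lies
 * below 1MB and fixed MTRRs are enabled (iter->fixed == true), and then
 * hands over to the variable ranges on kvm_mtrr.head; the walk terminates
 * once iter->fixed is false and iter->range is NULL, which is exactly what
 * mtrr_lookup_okay() tests.
 */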
 475 
 476 static bool mtrr_lookup_fixed_start(struct mtrr_iter *iter)
 477 {
 478         int seg, index;
 479 
 480         if (!fixed_mtrr_is_enabled(iter->mtrr_state))
 481                 return false;
 482 
 483         seg = fixed_mtrr_addr_to_seg(iter->start);
 484         if (seg < 0)
 485                 return false;
 486 
 487         iter->fixed = true;
 488         index = fixed_mtrr_addr_seg_to_range_index(iter->start, seg);
 489         iter->index = index;
 490         iter->seg = seg;
 491         return true;
 492 }
 493 
 494 static bool match_var_range(struct mtrr_iter *iter,
 495                             struct kvm_mtrr_range *range)
 496 {
 497         u64 start, end;
 498 
 499         var_mtrr_range(range, &start, &end);
 500         if (!(start >= iter->end || end <= iter->start)) {
 501                 iter->range = range;
 502 
 503                 /*
  504                  * This function is called while walking kvm_mtrr.head.
  505                  * The range found here has the minimum base address that
  506                  * overlaps [iter->start_max, iter->end).
 507                  */
 508                 iter->partial_map |= iter->start_max < start;
 509 
  510                 /* update the max address covered so far. */
 511                 iter->start_max = max(iter->start_max, end);
 512                 return true;
 513         }
 514 
 515         return false;
 516 }
 517 
 518 static void __mtrr_lookup_var_next(struct mtrr_iter *iter)
 519 {
 520         struct kvm_mtrr *mtrr_state = iter->mtrr_state;
 521 
 522         list_for_each_entry_continue(iter->range, &mtrr_state->head, node)
 523                 if (match_var_range(iter, iter->range))
 524                         return;
 525 
 526         iter->range = NULL;
 527         iter->partial_map |= iter->start_max < iter->end;
 528 }
 529 
 530 static void mtrr_lookup_var_start(struct mtrr_iter *iter)
 531 {
 532         struct kvm_mtrr *mtrr_state = iter->mtrr_state;
 533 
 534         iter->fixed = false;
 535         iter->start_max = iter->start;
 536         iter->range = NULL;
 537         iter->range = list_prepare_entry(iter->range, &mtrr_state->head, node);
 538 
 539         __mtrr_lookup_var_next(iter);
 540 }
 541 
 542 static void mtrr_lookup_fixed_next(struct mtrr_iter *iter)
 543 {
 544         /* terminate the lookup. */
 545         if (fixed_mtrr_range_end_addr(iter->seg, iter->index) >= iter->end) {
 546                 iter->fixed = false;
 547                 iter->range = NULL;
 548                 return;
 549         }
 550 
 551         iter->index++;
 552 
  553         /* all fixed MTRRs have been looked up. */
 554         if (iter->index >= ARRAY_SIZE(iter->mtrr_state->fixed_ranges))
 555                 return mtrr_lookup_var_start(iter);
 556 
 557         /* switch to next segment. */
 558         if (iter->index > fixed_mtrr_seg_end_range_index(iter->seg))
 559                 iter->seg++;
 560 }
 561 
 562 static void mtrr_lookup_var_next(struct mtrr_iter *iter)
 563 {
 564         __mtrr_lookup_var_next(iter);
 565 }
 566 
 567 static void mtrr_lookup_start(struct mtrr_iter *iter)
 568 {
 569         if (!mtrr_is_enabled(iter->mtrr_state)) {
 570                 iter->mtrr_disabled = true;
 571                 return;
 572         }
 573 
 574         if (!mtrr_lookup_fixed_start(iter))
 575                 mtrr_lookup_var_start(iter);
 576 }
 577 
 578 static void mtrr_lookup_init(struct mtrr_iter *iter,
 579                              struct kvm_mtrr *mtrr_state, u64 start, u64 end)
 580 {
 581         iter->mtrr_state = mtrr_state;
 582         iter->start = start;
 583         iter->end = end;
 584         iter->mtrr_disabled = false;
 585         iter->partial_map = false;
 586         iter->fixed = false;
 587         iter->range = NULL;
 588 
 589         mtrr_lookup_start(iter);
 590 }
 591 
 592 static bool mtrr_lookup_okay(struct mtrr_iter *iter)
 593 {
 594         if (iter->fixed) {
 595                 iter->mem_type = iter->mtrr_state->fixed_ranges[iter->index];
 596                 return true;
 597         }
 598 
 599         if (iter->range) {
 600                 iter->mem_type = iter->range->base & 0xff;
 601                 return true;
 602         }
 603 
 604         return false;
 605 }
 606 
 607 static void mtrr_lookup_next(struct mtrr_iter *iter)
 608 {
 609         if (iter->fixed)
 610                 mtrr_lookup_fixed_next(iter);
 611         else
 612                 mtrr_lookup_var_next(iter);
 613 }
 614 
 615 #define mtrr_for_each_mem_type(_iter_, _mtrr_, _gpa_start_, _gpa_end_) \
 616         for (mtrr_lookup_init(_iter_, _mtrr_, _gpa_start_, _gpa_end_); \
 617              mtrr_lookup_okay(_iter_); mtrr_lookup_next(_iter_))
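/*
 * A minimal usage sketch of the iterator (iter.mem_type is only meaningful
 * while mtrr_lookup_okay() returns true, i.e. inside the loop body):
 *
 *	struct mtrr_iter iter;
 *
 *	mtrr_for_each_mem_type(&iter, mtrr_state, start, end)
 *		pr_debug("memory type %d\n", iter.mem_type);
 *
 *	if (iter.mtrr_disabled || iter.partial_map)
 *		... fall back to a default type ...
 */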
 618 
 619 u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
 620 {
 621         struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
 622         struct mtrr_iter iter;
 623         u64 start, end;
 624         int type = -1;
 625         const int wt_wb_mask = (1 << MTRR_TYPE_WRBACK)
 626                                | (1 << MTRR_TYPE_WRTHROUGH);
 627 
 628         start = gfn_to_gpa(gfn);
 629         end = start + PAGE_SIZE;
 630 
 631         mtrr_for_each_mem_type(&iter, mtrr_state, start, end) {
 632                 int curr_type = iter.mem_type;
 633 
 634                 /*
 635                  * Please refer to Intel SDM Volume 3: 11.11.4.1 MTRR
 636                  * Precedences.
 637                  */
 638 
 639                 if (type == -1) {
 640                         type = curr_type;
 641                         continue;
 642                 }
 643 
 644                 /*
 645                  * If two or more variable memory ranges match and the
 646                  * memory types are identical, then that memory type is
 647                  * used.
 648                  */
 649                 if (type == curr_type)
 650                         continue;
 651 
 652                 /*
 653                  * If two or more variable memory ranges match and one of
  654                  * the memory types is UC, the UC memory type is used.
 655                  */
 656                 if (curr_type == MTRR_TYPE_UNCACHABLE)
 657                         return MTRR_TYPE_UNCACHABLE;
 658 
 659                 /*
 660                  * If two or more variable memory ranges match and the
 661                  * memory types are WT and WB, the WT memory type is used.
 662                  */
 663                 if (((1 << type) & wt_wb_mask) &&
 664                       ((1 << curr_type) & wt_wb_mask)) {
 665                         type = MTRR_TYPE_WRTHROUGH;
 666                         continue;
 667                 }
 668 
 669                 /*
 670                  * For overlaps not defined by the above rules, processor
 671                  * behavior is undefined.
 672                  */
 673 
 674                 /* We use WB for this undefined behavior. :( */
 675                 return MTRR_TYPE_WRBACK;
 676         }
 677 
 678         if (iter.mtrr_disabled)
 679                 return mtrr_disabled_type(vcpu);
 680 
 681         /* not contained in any MTRRs. */
 682         if (type == -1)
 683                 return mtrr_default_type(mtrr_state);
 684 
 685         /*
  686          * We only checked a single page, so it cannot be partially
  687          * covered by MTRRs.
 688          */
 689         WARN_ON(iter.partial_map);
 690 
 691         return type;
 692 }
 693 EXPORT_SYMBOL_GPL(kvm_mtrr_get_guest_memory_type);
 694 
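/*
 * Checks that every gfn in [gfn, gfn + page_num) resolves to the same guest
 * memory type, so that e.g. a huge-page mapping of the range would not mix
 * memory types.
 */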
 695 bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
 696                                           int page_num)
 697 {
 698         struct kvm_mtrr *mtrr_state = &vcpu->arch.mtrr_state;
 699         struct mtrr_iter iter;
 700         u64 start, end;
 701         int type = -1;
 702 
 703         start = gfn_to_gpa(gfn);
 704         end = gfn_to_gpa(gfn + page_num);
 705         mtrr_for_each_mem_type(&iter, mtrr_state, start, end) {
 706                 if (type == -1) {
 707                         type = iter.mem_type;
 708                         continue;
 709                 }
 710 
 711                 if (type != iter.mem_type)
 712                         return false;
 713         }
 714 
 715         if (iter.mtrr_disabled)
 716                 return true;
 717 
 718         if (!iter.partial_map)
 719                 return true;
 720 
 721         if (type == -1)
 722                 return true;
 723 
 724         return type == mtrr_default_type(mtrr_state);
 725 }
