arch/x86/kvm/ioapic.c


DEFINITIONS

This source file includes the following definitions.
  1. ioapic_read_indirect
  2. rtc_irq_eoi_tracking_reset
  3. rtc_status_pending_eoi_check_valid
  4. __rtc_irq_eoi_tracking_restore_one
  5. kvm_rtc_eoi_tracking_restore_one
  6. kvm_rtc_eoi_tracking_restore_all
  7. rtc_irq_eoi
  8. rtc_irq_check_coalesced
  9. ioapic_set_irq
  10. kvm_ioapic_inject_all
  11. kvm_ioapic_scan_entry
  12. kvm_arch_post_irq_ack_notifier_list_update
  13. ioapic_write_indirect
  14. ioapic_service
  15. kvm_ioapic_set_irq
  16. kvm_ioapic_clear_all
  17. kvm_ioapic_eoi_inject_work
  18. __kvm_ioapic_update_eoi
  19. kvm_ioapic_update_eoi
  20. to_ioapic
  21. ioapic_in_range
  22. ioapic_mmio_read
  23. ioapic_mmio_write
  24. kvm_ioapic_reset
  25. kvm_ioapic_init
  26. kvm_ioapic_destroy
  27. kvm_get_ioapic
  28. kvm_set_ioapic

/*
 *  Copyright (C) 2001  MandrakeSoft S.A.
 *  Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 *    MandrakeSoft S.A.
 *    43, rue d'Aboukir
 *    75002 Paris - France
 *    http://www.linux-mandrake.com/
 *    http://www.mandrakesoft.com/
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 *  Yunhong Jiang <yunhong.jiang@intel.com>
 *  Yaozu (Eddie) Dong <eddie.dong@intel.com>
 *  Based on Xen 3.1 code.
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/smp.h>
#include <linux/hrtimer.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/nospec.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/current.h>
#include <trace/events/kvm.h>

#include "ioapic.h"
#include "lapic.h"
#include "irq.h"

static int ioapic_service(struct kvm_ioapic *vioapic, int irq,
		bool line_status);

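/*
 * Read the register currently selected by IOREGSEL.  Each 64-bit
 * redirection table entry is exposed as two consecutive 32-bit
 * windows starting at indirect register 0x10; out-of-range indirect
 * reads return all ones.
 */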
static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic,
                                          unsigned long addr,
                                          unsigned long length)
{
        unsigned long result = 0;

        switch (ioapic->ioregsel) {
        case IOAPIC_REG_VERSION:
                result = ((((IOAPIC_NUM_PINS - 1) & 0xff) << 16)
                          | (IOAPIC_VERSION_ID & 0xff));
                break;

        case IOAPIC_REG_APIC_ID:
        case IOAPIC_REG_ARB_ID:
                result = ((ioapic->id & 0xf) << 24);
                break;

        default:
                {
                        u32 redir_index = (ioapic->ioregsel - 0x10) >> 1;
                        u64 redir_content = ~0ULL;

                        if (redir_index < IOAPIC_NUM_PINS) {
                                u32 index = array_index_nospec(
                                        redir_index, IOAPIC_NUM_PINS);

                                redir_content = ioapic->redirtbl[index].bits;
                        }

                        result = (ioapic->ioregsel & 0x1) ?
                            (redir_content >> 32) & 0xffffffff :
                            redir_content & 0xffffffff;
                        break;
                }
        }

        return result;
}

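/*
 * RTC EOI tracking: pending_eoi counts the vCPUs that still owe an
 * EOI for the last RTC interrupt, and dest_map records which ones
 * they are.  This lets coalesced RTC interrupts be reported to
 * userspace (see ioapic_set_irq).
 */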
static void rtc_irq_eoi_tracking_reset(struct kvm_ioapic *ioapic)
{
        ioapic->rtc_status.pending_eoi = 0;
        bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPU_ID);
}

static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic);

static void rtc_status_pending_eoi_check_valid(struct kvm_ioapic *ioapic)
{
        if (WARN_ON(ioapic->rtc_status.pending_eoi < 0))
                kvm_rtc_eoi_tracking_restore_all(ioapic);
}

static void __rtc_irq_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
{
        bool new_val, old_val;
        struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
        struct dest_map *dest_map = &ioapic->rtc_status.dest_map;
        union kvm_ioapic_redirect_entry *e;

        e = &ioapic->redirtbl[RTC_GSI];
        if (!kvm_apic_match_dest(vcpu, NULL, 0, e->fields.dest_id,
                                e->fields.dest_mode))
                return;

        new_val = kvm_apic_pending_eoi(vcpu, e->fields.vector);
        old_val = test_bit(vcpu->vcpu_id, dest_map->map);

        if (new_val == old_val)
                return;

        if (new_val) {
                __set_bit(vcpu->vcpu_id, dest_map->map);
                dest_map->vectors[vcpu->vcpu_id] = e->fields.vector;
                ioapic->rtc_status.pending_eoi++;
        } else {
                __clear_bit(vcpu->vcpu_id, dest_map->map);
                ioapic->rtc_status.pending_eoi--;
                rtc_status_pending_eoi_check_valid(ioapic);
        }
}

void kvm_rtc_eoi_tracking_restore_one(struct kvm_vcpu *vcpu)
{
        struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;

        spin_lock(&ioapic->lock);
        __rtc_irq_eoi_tracking_restore_one(vcpu);
        spin_unlock(&ioapic->lock);
}

static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic)
{
        struct kvm_vcpu *vcpu;
        int i;

        if (RTC_GSI >= IOAPIC_NUM_PINS)
                return;

        rtc_irq_eoi_tracking_reset(ioapic);
        kvm_for_each_vcpu(i, vcpu, ioapic->kvm)
            __rtc_irq_eoi_tracking_restore_one(vcpu);
}

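/*
 * A destination vCPU has EOIed the RTC vector: drop it from the
 * destination map and decrement the outstanding-EOI count.
 */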
static void rtc_irq_eoi(struct kvm_ioapic *ioapic, struct kvm_vcpu *vcpu)
{
        if (test_and_clear_bit(vcpu->vcpu_id,
                               ioapic->rtc_status.dest_map.map)) {
                --ioapic->rtc_status.pending_eoi;
                rtc_status_pending_eoi_check_valid(ioapic);
        }
}

static bool rtc_irq_check_coalesced(struct kvm_ioapic *ioapic)
{
        if (ioapic->rtc_status.pending_eoi > 0)
                return true; /* coalesced */

        return false;
}

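/*
 * Latch a new line level into the IRR and deliver the interrupt if
 * needed.  Returns 0 when the interrupt is coalesced, 1 when the line
 * is simply deasserted, and the ioapic_service() result otherwise.
 */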
static int ioapic_set_irq(struct kvm_ioapic *ioapic, unsigned int irq,
                int irq_level, bool line_status)
{
        union kvm_ioapic_redirect_entry entry;
        u32 mask = 1 << irq;
        u32 old_irr;
        int edge, ret;

        entry = ioapic->redirtbl[irq];
        edge = (entry.fields.trig_mode == IOAPIC_EDGE_TRIG);

        if (!irq_level) {
                ioapic->irr &= ~mask;
                ret = 1;
                goto out;
        }

        /*
         * Return 0 for coalesced interrupts; for edge-triggered interrupts,
         * this only happens if a previous edge has not been delivered due
         * to masking.  For level interrupts, the remote_irr field tells
         * us if the interrupt is waiting for an EOI.
         *
         * RTC is special: it is edge-triggered, but userspace likes to know
         * if it has already been ack-ed via EOI because coalesced RTC
         * interrupts lead to time drift in Windows guests.  So we track
         * EOI manually for the RTC interrupt.
         */
        if (irq == RTC_GSI && line_status &&
                rtc_irq_check_coalesced(ioapic)) {
                ret = 0;
                goto out;
        }

        old_irr = ioapic->irr;
        ioapic->irr |= mask;
        if (edge) {
                ioapic->irr_delivered &= ~mask;
                if (old_irr == ioapic->irr) {
                        ret = 0;
                        goto out;
                }
        }

        ret = ioapic_service(ioapic, irq, line_status);

out:
        trace_kvm_ioapic_set_irq(entry.bits, irq, ret == 0);
        return ret;
}

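/*
 * Re-inject every interrupt pending in @irr and rebuild the RTC EOI
 * tracking state afterwards.  Called when userspace restores the
 * IOAPIC (see kvm_set_ioapic).
 */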
static void kvm_ioapic_inject_all(struct kvm_ioapic *ioapic, unsigned long irr)
{
        u32 idx;

        rtc_irq_eoi_tracking_reset(ioapic);
        for_each_set_bit(idx, &irr, IOAPIC_NUM_PINS)
                ioapic_set_irq(ioapic, idx, 1, true);

        kvm_rtc_eoi_tracking_restore_all(ioapic);
}

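/*
 * Compute, for @vcpu, the set of vectors whose EOI must be forwarded
 * to the IOAPIC: level-triggered entries, entries with an ack
 * notifier, and the specially tracked RTC pin.
 */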
void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, ulong *ioapic_handled_vectors)
{
        struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
        struct dest_map *dest_map = &ioapic->rtc_status.dest_map;
        union kvm_ioapic_redirect_entry *e;
        int index;

        spin_lock(&ioapic->lock);

        /* Make sure we see any missing RTC EOI */
        if (test_bit(vcpu->vcpu_id, dest_map->map))
                __set_bit(dest_map->vectors[vcpu->vcpu_id],
                          ioapic_handled_vectors);

        for (index = 0; index < IOAPIC_NUM_PINS; index++) {
                e = &ioapic->redirtbl[index];
                if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG ||
                    kvm_irq_has_notifier(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index) ||
                    index == RTC_GSI) {
                        if (kvm_apic_match_dest(vcpu, NULL, 0,
                                     e->fields.dest_id, e->fields.dest_mode) ||
                            kvm_apic_pending_eoi(vcpu, e->fields.vector))
                                __set_bit(e->fields.vector,
                                          ioapic_handled_vectors);
                }
        }
        spin_unlock(&ioapic->lock);
}

void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm)
{
        if (!ioapic_in_kernel(kvm))
                return;
        kvm_make_scan_ioapic_request(kvm);
}

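/*
 * Write the register currently selected by IOREGSEL.  Redirection
 * table writes preserve the read-only Remote IRR and Delivery Status
 * bits and immediately service a level-triggered interrupt that is
 * still pending in the IRR.
 */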
static void ioapic_write_indirect(struct kvm_ioapic *ioapic, u32 val)
{
        unsigned index;
        bool mask_before, mask_after;
        int old_remote_irr, old_delivery_status;
        union kvm_ioapic_redirect_entry *e;

        switch (ioapic->ioregsel) {
        case IOAPIC_REG_VERSION:
                /* Writes are ignored. */
                break;

        case IOAPIC_REG_APIC_ID:
                ioapic->id = (val >> 24) & 0xf;
                break;

        case IOAPIC_REG_ARB_ID:
                break;

        default:
                index = (ioapic->ioregsel - 0x10) >> 1;

                if (index >= IOAPIC_NUM_PINS)
                        return;
                index = array_index_nospec(index, IOAPIC_NUM_PINS);
                e = &ioapic->redirtbl[index];
                mask_before = e->fields.mask;
                /* Preserve read-only fields */
                old_remote_irr = e->fields.remote_irr;
                old_delivery_status = e->fields.delivery_status;
                if (ioapic->ioregsel & 1) {
                        e->bits &= 0xffffffff;
                        e->bits |= (u64) val << 32;
                } else {
                        e->bits &= ~0xffffffffULL;
                        e->bits |= (u32) val;
                }
                e->fields.remote_irr = old_remote_irr;
                e->fields.delivery_status = old_delivery_status;

                /*
                 * Some OSes (Linux, Xen) assume that the Remote IRR bit will
                 * be cleared by IOAPIC hardware when the entry is configured
                 * as edge-triggered. This behavior is used to simulate an
                 * explicit EOI on IOAPICs that don't have the EOI register.
                 */
                if (e->fields.trig_mode == IOAPIC_EDGE_TRIG)
                        e->fields.remote_irr = 0;

                mask_after = e->fields.mask;
                if (mask_before != mask_after)
                        kvm_fire_mask_notifiers(ioapic->kvm, KVM_IRQCHIP_IOAPIC, index, mask_after);
                if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG
                    && ioapic->irr & (1 << index))
                        ioapic_service(ioapic, index, false);
                kvm_make_scan_ioapic_request(ioapic->kvm);
                break;
        }
}

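/*
 * Deliver the interrupt programmed into one redirection table entry.
 * Returns -1 if the entry is masked or still awaiting an EOI,
 * otherwise the result of kvm_irq_delivery_to_apic().
 */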
static int ioapic_service(struct kvm_ioapic *ioapic, int irq, bool line_status)
{
        union kvm_ioapic_redirect_entry *entry = &ioapic->redirtbl[irq];
        struct kvm_lapic_irq irqe;
        int ret;

        if (entry->fields.mask ||
            (entry->fields.trig_mode == IOAPIC_LEVEL_TRIG &&
            entry->fields.remote_irr))
                return -1;

        irqe.dest_id = entry->fields.dest_id;
        irqe.vector = entry->fields.vector;
        irqe.dest_mode = entry->fields.dest_mode;
        irqe.trig_mode = entry->fields.trig_mode;
        irqe.delivery_mode = entry->fields.delivery_mode << 8;
        irqe.level = 1;
        irqe.shorthand = 0;
        irqe.msi_redir_hint = false;

        if (irqe.trig_mode == IOAPIC_EDGE_TRIG)
                ioapic->irr_delivered |= 1 << irq;

        if (irq == RTC_GSI && line_status) {
                /*
                 * pending_eoi cannot ever become negative (see
                 * rtc_status_pending_eoi_check_valid) and the caller
                 * ensures that this is only reached when it is zero,
                 * namely when rtc_irq_check_coalesced returns false.
                 */
                BUG_ON(ioapic->rtc_status.pending_eoi != 0);
                ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe,
                                               &ioapic->rtc_status.dest_map);
                ioapic->rtc_status.pending_eoi = (ret < 0 ? 0 : ret);
        } else
                ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe, NULL);

        if (ret && irqe.trig_mode == IOAPIC_LEVEL_TRIG)
                entry->fields.remote_irr = 1;

        return ret;
}

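/*
 * External entry point for driving an IOAPIC pin.  The per-pin
 * irq_states word collects the level asserted by each interrupt
 * source, so a shared line only drops once every source has released
 * it.
 */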
int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int irq_source_id,
                       int level, bool line_status)
{
        int ret, irq_level;

        BUG_ON(irq < 0 || irq >= IOAPIC_NUM_PINS);

        spin_lock(&ioapic->lock);
        irq_level = __kvm_irq_line_state(&ioapic->irq_states[irq],
                                         irq_source_id, level);
        ret = ioapic_set_irq(ioapic, irq, irq_level, line_status);

        spin_unlock(&ioapic->lock);

        return ret;
}

void kvm_ioapic_clear_all(struct kvm_ioapic *ioapic, int irq_source_id)
{
        int i;

        spin_lock(&ioapic->lock);
        for (i = 0; i < KVM_IOAPIC_NUM_PINS; i++)
                __clear_bit(irq_source_id, &ioapic->irq_states[i]);
        spin_unlock(&ioapic->lock);
}

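/*
 * Delayed work used to break interrupt storms: re-service any
 * level-triggered interrupt that is still pending and no longer
 * awaiting an EOI.
 */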
static void kvm_ioapic_eoi_inject_work(struct work_struct *work)
{
        int i;
        struct kvm_ioapic *ioapic = container_of(work, struct kvm_ioapic,
                                                 eoi_inject.work);
        spin_lock(&ioapic->lock);
        for (i = 0; i < IOAPIC_NUM_PINS; i++) {
                union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];

                if (ent->fields.trig_mode != IOAPIC_LEVEL_TRIG)
                        continue;

                if (ioapic->irr & (1 << i) && !ent->fields.remote_irr)
                        ioapic_service(ioapic, i, false);
        }
        spin_unlock(&ioapic->lock);
}

#define IOAPIC_SUCCESSIVE_IRQ_MAX_COUNT 10000

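/*
 * Handle an EOI for @vector: run the ack notifiers for every matching
 * pin and, for level-triggered entries, clear Remote IRR and
 * re-deliver the interrupt if the line is still asserted.
 */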
static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu,
                        struct kvm_ioapic *ioapic, int vector, int trigger_mode)
{
        struct dest_map *dest_map = &ioapic->rtc_status.dest_map;
        struct kvm_lapic *apic = vcpu->arch.apic;
        int i;

        /* RTC special handling */
        if (test_bit(vcpu->vcpu_id, dest_map->map) &&
            vector == dest_map->vectors[vcpu->vcpu_id])
                rtc_irq_eoi(ioapic, vcpu);

        for (i = 0; i < IOAPIC_NUM_PINS; i++) {
                union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];

                if (ent->fields.vector != vector)
                        continue;

                /*
                 * We are dropping the lock while calling the ack notifiers
                 * because ack notifier callbacks for assigned devices call
                 * into the IOAPIC recursively.  Since remote_irr is cleared
                 * only after the notifiers run, if the same vector is
                 * delivered while the lock is dropped it will be latched in
                 * irr and delivered after the ack notifier returns.
                 */
                spin_unlock(&ioapic->lock);
                kvm_notify_acked_irq(ioapic->kvm, KVM_IRQCHIP_IOAPIC, i);
                spin_lock(&ioapic->lock);

                if (trigger_mode != IOAPIC_LEVEL_TRIG ||
                    kvm_lapic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI)
                        continue;

                ASSERT(ent->fields.trig_mode == IOAPIC_LEVEL_TRIG);
                ent->fields.remote_irr = 0;
                if (!ent->fields.mask && (ioapic->irr & (1 << i))) {
                        ++ioapic->irq_eoi[i];
                        if (ioapic->irq_eoi[i] == IOAPIC_SUCCESSIVE_IRQ_MAX_COUNT) {
                                /*
                                 * Real hardware does not deliver the interrupt
                                 * immediately during eoi broadcast, and this
                                 * lets a buggy guest make slow progress
                                 * even if it does not correctly handle a
                                 * level-triggered interrupt.  Emulate this
                                 * behavior if we detect an interrupt storm.
                                 */
                                schedule_delayed_work(&ioapic->eoi_inject, HZ / 100);
                                ioapic->irq_eoi[i] = 0;
                                trace_kvm_ioapic_delayed_eoi_inj(ent->bits);
                        } else {
                                ioapic_service(ioapic, i, false);
                        }
                } else {
                        ioapic->irq_eoi[i] = 0;
                }
        }
}

void kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu, int vector, int trigger_mode)
{
        struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;

        spin_lock(&ioapic->lock);
        __kvm_ioapic_update_eoi(vcpu, ioapic, vector, trigger_mode);
        spin_unlock(&ioapic->lock);
}

static inline struct kvm_ioapic *to_ioapic(struct kvm_io_device *dev)
{
        return container_of(dev, struct kvm_ioapic, dev);
}

static inline int ioapic_in_range(struct kvm_ioapic *ioapic, gpa_t addr)
{
        return (addr >= ioapic->base_address &&
                addr < ioapic->base_address + IOAPIC_MEM_LENGTH);
}

static int ioapic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
                                gpa_t addr, int len, void *val)
{
        struct kvm_ioapic *ioapic = to_ioapic(this);
        u32 result;

        if (!ioapic_in_range(ioapic, addr))
                return -EOPNOTSUPP;

        ASSERT(!(addr & 0xf));  /* check alignment */

        addr &= 0xff;
        spin_lock(&ioapic->lock);
        switch (addr) {
        case IOAPIC_REG_SELECT:
                result = ioapic->ioregsel;
                break;

        case IOAPIC_REG_WINDOW:
                result = ioapic_read_indirect(ioapic, addr, len);
                break;

        default:
                result = 0;
                break;
        }
        spin_unlock(&ioapic->lock);

        switch (len) {
        case 8:
                *(u64 *) val = result;
                break;
        case 1:
        case 2:
        case 4:
                memcpy(val, (char *)&result, len);
                break;
        default:
                printk(KERN_WARNING "ioapic: wrong length %d\n", len);
        }
        return 0;
}

static int ioapic_mmio_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
                                 gpa_t addr, int len, const void *val)
{
        struct kvm_ioapic *ioapic = to_ioapic(this);
        u32 data;

        if (!ioapic_in_range(ioapic, addr))
                return -EOPNOTSUPP;

        ASSERT(!(addr & 0xf));  /* check alignment */

        switch (len) {
        case 8:
        case 4:
                data = *(u32 *) val;
                break;
        case 2:
                data = *(u16 *) val;
                break;
        case 1:
                data = *(u8  *) val;
                break;
        default:
                printk(KERN_WARNING "ioapic: Unsupported size %d\n", len);
                return 0;
        }

        addr &= 0xff;
        spin_lock(&ioapic->lock);
        switch (addr) {
        case IOAPIC_REG_SELECT:
                ioapic->ioregsel = data & 0xFF; /* 8-bit register */
                break;

        case IOAPIC_REG_WINDOW:
                ioapic_write_indirect(ioapic, data);
                break;

        default:
                break;
        }
        spin_unlock(&ioapic->lock);
        return 0;
}

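/*
 * Return the IOAPIC to its power-on state: all pins masked, all
 * registers cleared, and RTC EOI tracking reset.
 */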
static void kvm_ioapic_reset(struct kvm_ioapic *ioapic)
{
        int i;

        cancel_delayed_work_sync(&ioapic->eoi_inject);
        for (i = 0; i < IOAPIC_NUM_PINS; i++)
                ioapic->redirtbl[i].fields.mask = 1;
        ioapic->base_address = IOAPIC_DEFAULT_BASE_ADDRESS;
        ioapic->ioregsel = 0;
        ioapic->irr = 0;
        ioapic->irr_delivered = 0;
        ioapic->id = 0;
        memset(ioapic->irq_eoi, 0x00, sizeof(ioapic->irq_eoi));
        rtc_irq_eoi_tracking_reset(ioapic);
}

static const struct kvm_io_device_ops ioapic_mmio_ops = {
        .read     = ioapic_mmio_read,
        .write    = ioapic_mmio_write,
};

int kvm_ioapic_init(struct kvm *kvm)
{
        struct kvm_ioapic *ioapic;
        int ret;

        ioapic = kzalloc(sizeof(struct kvm_ioapic), GFP_KERNEL_ACCOUNT);
        if (!ioapic)
                return -ENOMEM;
        spin_lock_init(&ioapic->lock);
        INIT_DELAYED_WORK(&ioapic->eoi_inject, kvm_ioapic_eoi_inject_work);
        kvm->arch.vioapic = ioapic;
        kvm_ioapic_reset(ioapic);
        kvm_iodevice_init(&ioapic->dev, &ioapic_mmio_ops);
        ioapic->kvm = kvm;
        mutex_lock(&kvm->slots_lock);
        ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, ioapic->base_address,
                                      IOAPIC_MEM_LENGTH, &ioapic->dev);
        mutex_unlock(&kvm->slots_lock);
        if (ret < 0) {
                kvm->arch.vioapic = NULL;
                kfree(ioapic);
        }

        return ret;
}

void kvm_ioapic_destroy(struct kvm *kvm)
{
        struct kvm_ioapic *ioapic = kvm->arch.vioapic;

        if (!ioapic)
                return;

        cancel_delayed_work_sync(&ioapic->eoi_inject);
        mutex_lock(&kvm->slots_lock);
        kvm_io_bus_unregister_dev(kvm, KVM_MMIO_BUS, &ioapic->dev);
        mutex_unlock(&kvm->slots_lock);
        kvm->arch.vioapic = NULL;
        kfree(ioapic);
}

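/*
 * Save/restore for userspace.  On save, edge interrupts that were
 * already delivered (irr_delivered) are hidden from the reported IRR;
 * on restore, the saved IRR is re-injected via kvm_ioapic_inject_all().
 */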
void kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
{
        struct kvm_ioapic *ioapic = kvm->arch.vioapic;

        spin_lock(&ioapic->lock);
        memcpy(state, ioapic, sizeof(struct kvm_ioapic_state));
        state->irr &= ~ioapic->irr_delivered;
        spin_unlock(&ioapic->lock);
}

void kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state)
{
        struct kvm_ioapic *ioapic = kvm->arch.vioapic;

        spin_lock(&ioapic->lock);
        memcpy(ioapic, state, sizeof(struct kvm_ioapic_state));
        ioapic->irr = 0;
        ioapic->irr_delivered = 0;
        kvm_make_scan_ioapic_request(kvm);
        kvm_ioapic_inject_all(ioapic, state->irr);
        spin_unlock(&ioapic->lock);
}
