root/arch/s390/kvm/priv.c


DEFINITIONS

This source file includes the following definitions:
  1. handle_ri
  2. kvm_s390_handle_aa
  3. handle_gs
  4. kvm_s390_handle_e3
  5. handle_set_clock
  6. handle_set_prefix
  7. handle_store_prefix
  8. handle_store_cpu_address
  9. kvm_s390_skey_check_enable
  10. try_handle_skey
  11. handle_iske
  12. handle_rrbe
  13. handle_sske
  14. handle_ipte_interlock
  15. handle_test_block
  16. handle_tpi
  17. handle_tsch
  18. handle_io_inst
  19. handle_pqap
  20. handle_stfl
  21. is_valid_psw
  22. kvm_s390_handle_lpsw
  23. handle_lpswe
  24. handle_stidp
  25. handle_stsi_3_2_2
  26. insert_stsi_usr_data
  27. handle_stsi
  28. kvm_s390_handle_b2
  29. handle_epsw
  30. handle_pfmf
  31. __do_essa
  32. handle_essa
  33. kvm_s390_handle_b9
  34. kvm_s390_handle_lctl
  35. kvm_s390_handle_stctl
  36. handle_lctlg
  37. handle_stctg
  38. kvm_s390_handle_eb
  39. handle_tprot
  40. kvm_s390_handle_e5
  41. handle_sckpf
  42. handle_ptff
  43. kvm_s390_handle_01

// SPDX-License-Identifier: GPL-2.0
/*
 * handling privileged instructions
 *
 * Copyright IBM Corp. 2008, 2018
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/compat.h>
#include <linux/mm_types.h>

#include <asm/asm-offsets.h>
#include <asm/facility.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include <asm/pgtable.h>
#include <asm/page-states.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/io.h>
#include <asm/ptrace.h>
#include <asm/sclp.h>
#include <asm/ap.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

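/*
 * Runtime instrumentation (facility 64) is enabled lazily: the first RI
 * instruction traps, we flag RI as interpretable in the SIE control block
 * and retry the instruction, so SIE handles it without further exits.
 */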
static int handle_ri(struct kvm_vcpu *vcpu)
{
        vcpu->stat.instruction_ri++;

        if (test_kvm_facility(vcpu->kvm, 64)) {
                VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (lazy)");
                vcpu->arch.sie_block->ecb3 |= ECB3_RI;
                kvm_s390_retry_instr(vcpu);
                return 0;
        } else
                return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
}

int kvm_s390_handle_aa(struct kvm_vcpu *vcpu)
{
        if ((vcpu->arch.sie_block->ipa & 0xf) <= 4)
                return handle_ri(vcpu);
        else
                return -EOPNOTSUPP;
}

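/*
 * Guarded storage (facility 133) is enabled lazily as well: load the
 * guest's GS control block on the host, flag GS and host-register
 * management as interpretable in the SIE block, then retry the
 * intercepted instruction.
 */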
static int handle_gs(struct kvm_vcpu *vcpu)
{
        vcpu->stat.instruction_gs++;

        if (test_kvm_facility(vcpu->kvm, 133)) {
                VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (lazy)");
                preempt_disable();
                __ctl_set_bit(2, 4);
                current->thread.gs_cb = (struct gs_cb *)&vcpu->run->s.regs.gscb;
                restore_gs_cb(current->thread.gs_cb);
                preempt_enable();
                vcpu->arch.sie_block->ecb |= ECB_GS;
                vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
                vcpu->arch.gs_enabled = 1;
                kvm_s390_retry_instr(vcpu);
                return 0;
        } else
                return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
}

int kvm_s390_handle_e3(struct kvm_vcpu *vcpu)
{
        int code = vcpu->arch.sie_block->ipb & 0xff;

        if (code == 0x49 || code == 0x4d)
                return handle_gs(vcpu);
        else
                return -EOPNOTSUPP;
}

/* Handle SCK (SET CLOCK) interception */
static int handle_set_clock(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_vm_tod_clock gtod = { 0 };
        int rc;
        u8 ar;
        u64 op2;

        vcpu->stat.instruction_sck++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        op2 = kvm_s390_get_base_disp_s(vcpu, &ar);
        if (op2 & 7)    /* Operand must be on a doubleword boundary */
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        rc = read_guest(vcpu, op2, ar, &gtod.tod, sizeof(gtod.tod));
        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);

        VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", gtod.tod);
        kvm_s390_set_tod_clock(vcpu->kvm, &gtod);

        kvm_s390_set_psw_cc(vcpu, 0);
        return 0;
}

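/*
 * SET PREFIX: fetch the new prefix from the second operand, mask it down
 * to the 8k-aligned prefix value, and verify that it designates accessible
 * guest memory before making it visible to the vCPU.
 */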
static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
        u64 operand2;
        u32 address;
        int rc;
        u8 ar;

        vcpu->stat.instruction_spx++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

        /* must be word boundary */
        if (operand2 & 3)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        /* get the value */
        rc = read_guest(vcpu, operand2, ar, &address, sizeof(address));
        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);

        address &= 0x7fffe000u;

        /*
         * Make sure the new value is valid memory. We only need to check the
         * first page, since address is 8k aligned and memory pieces are always
         * at least 1MB aligned and have at least a size of 1MB.
         */
        if (kvm_is_error_gpa(vcpu->kvm, address))
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

        kvm_s390_set_prefix(vcpu, address);
        trace_kvm_s390_handle_prefix(vcpu, 1, address);
        return 0;
}

static int handle_store_prefix(struct kvm_vcpu *vcpu)
{
        u64 operand2;
        u32 address;
        int rc;
        u8 ar;

        vcpu->stat.instruction_stpx++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

        /* must be word boundary */
        if (operand2 & 3)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        address = kvm_s390_get_prefix(vcpu);

        /* store the value */
        rc = write_guest(vcpu, operand2, ar, &address, sizeof(address));
        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);

        VCPU_EVENT(vcpu, 3, "STPX: storing prefix 0x%x into 0x%llx", address, operand2);
        trace_kvm_s390_handle_prefix(vcpu, 0, address);
        return 0;
}

static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
        u16 vcpu_id = vcpu->vcpu_id;
        u64 ga;
        int rc;
        u8 ar;

        vcpu->stat.instruction_stap++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        ga = kvm_s390_get_base_disp_s(vcpu, &ar);

        if (ga & 1)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        rc = write_guest(vcpu, ga, ar, &vcpu_id, sizeof(vcpu_id));
        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);

        VCPU_EVENT(vcpu, 3, "STAP: storing cpu address (%u) to 0x%llx", vcpu_id, ga);
        trace_kvm_s390_handle_stap(vcpu, ga);
        return 0;
}

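/*
 * Storage keys stay disabled until a guest first uses them: the mm is then
 * converted via s390_enable_skey(), after which the key instructions are
 * either interpreted by SIE (storage-key facility available) or kept
 * intercepted so we emulate them here.
 */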
int kvm_s390_skey_check_enable(struct kvm_vcpu *vcpu)
{
        int rc;

        trace_kvm_s390_skey_related_inst(vcpu);
        /* Already enabled? */
        if (vcpu->arch.skey_enabled)
                return 0;

        rc = s390_enable_skey();
        VCPU_EVENT(vcpu, 3, "enabling storage keys for guest: %d", rc);
        if (rc)
                return rc;

        if (kvm_s390_test_cpuflags(vcpu, CPUSTAT_KSS))
                kvm_s390_clear_cpuflags(vcpu, CPUSTAT_KSS);
        if (!vcpu->kvm->arch.use_skf)
                vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
        else
                vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
        vcpu->arch.skey_enabled = true;
        return 0;
}

static int try_handle_skey(struct kvm_vcpu *vcpu)
{
        int rc;

        rc = kvm_s390_skey_check_enable(vcpu);
        if (rc)
                return rc;
        if (vcpu->kvm->arch.use_skf) {
                /* with storage-key facility, SIE interprets it for us */
                kvm_s390_retry_instr(vcpu);
                VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
                return -EAGAIN;
        }
        return 0;
}

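/*
 * ISKE: translate the guest address (logical -> effective -> absolute) to
 * a host virtual address, read the storage key from the host mapping, and
 * fault the page in and retry if it is not mapped yet.
 */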
static int handle_iske(struct kvm_vcpu *vcpu)
{
        unsigned long gaddr, vmaddr;
        unsigned char key;
        int reg1, reg2;
        bool unlocked;
        int rc;

        vcpu->stat.instruction_iske++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        rc = try_handle_skey(vcpu);
        if (rc)
                return rc != -EAGAIN ? rc : 0;

        kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

        gaddr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
        gaddr = kvm_s390_logical_to_effective(vcpu, gaddr);
        gaddr = kvm_s390_real_to_abs(vcpu, gaddr);
        vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(gaddr));
        if (kvm_is_error_hva(vmaddr))
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
retry:
        unlocked = false;
        down_read(&current->mm->mmap_sem);
        rc = get_guest_storage_key(current->mm, vmaddr, &key);

        if (rc) {
                rc = fixup_user_fault(current, current->mm, vmaddr,
                                      FAULT_FLAG_WRITE, &unlocked);
                if (!rc) {
                        up_read(&current->mm->mmap_sem);
                        goto retry;
                }
        }
        up_read(&current->mm->mmap_sem);
        if (rc == -EFAULT)
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        if (rc < 0)
                return rc;
        vcpu->run->s.regs.gprs[reg1] &= ~0xff;
        vcpu->run->s.regs.gprs[reg1] |= key;
        return 0;
}

static int handle_rrbe(struct kvm_vcpu *vcpu)
{
        unsigned long vmaddr, gaddr;
        int reg1, reg2;
        bool unlocked;
        int rc;

        vcpu->stat.instruction_rrbe++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        rc = try_handle_skey(vcpu);
        if (rc)
                return rc != -EAGAIN ? rc : 0;

        kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

        gaddr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
        gaddr = kvm_s390_logical_to_effective(vcpu, gaddr);
        gaddr = kvm_s390_real_to_abs(vcpu, gaddr);
        vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(gaddr));
        if (kvm_is_error_hva(vmaddr))
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
retry:
        unlocked = false;
        down_read(&current->mm->mmap_sem);
        rc = reset_guest_reference_bit(current->mm, vmaddr);
        if (rc < 0) {
                rc = fixup_user_fault(current, current->mm, vmaddr,
                                      FAULT_FLAG_WRITE, &unlocked);
                if (!rc) {
                        up_read(&current->mm->mmap_sem);
                        goto retry;
                }
        }
        up_read(&current->mm->mmap_sem);
        if (rc == -EFAULT)
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        if (rc < 0)
                return rc;
        kvm_s390_set_psw_cc(vcpu, rc);
        return 0;
}

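/*
 * SSKE honours the optional m3 bits: MR/MC make the key update conditional,
 * NQ skips quiescing, and MB processes all pages up to the next segment
 * boundary. Bits for facilities the guest does not have are ignored.
 */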
#define SSKE_NQ 0x8
#define SSKE_MR 0x4
#define SSKE_MC 0x2
#define SSKE_MB 0x1
static int handle_sske(struct kvm_vcpu *vcpu)
{
        unsigned char m3 = vcpu->arch.sie_block->ipb >> 28;
        unsigned long start, end;
        unsigned char key, oldkey;
        int reg1, reg2;
        bool unlocked;
        int rc;

        vcpu->stat.instruction_sske++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        rc = try_handle_skey(vcpu);
        if (rc)
                return rc != -EAGAIN ? rc : 0;

        if (!test_kvm_facility(vcpu->kvm, 8))
                m3 &= ~SSKE_MB;
        if (!test_kvm_facility(vcpu->kvm, 10))
                m3 &= ~(SSKE_MC | SSKE_MR);
        if (!test_kvm_facility(vcpu->kvm, 14))
                m3 &= ~SSKE_NQ;

        kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

        key = vcpu->run->s.regs.gprs[reg1] & 0xfe;
        start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
        start = kvm_s390_logical_to_effective(vcpu, start);
        if (m3 & SSKE_MB) {
                /* start already designates an absolute address */
                end = (start + _SEGMENT_SIZE) & ~(_SEGMENT_SIZE - 1);
        } else {
                start = kvm_s390_real_to_abs(vcpu, start);
                end = start + PAGE_SIZE;
        }

        while (start != end) {
                unsigned long vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));
                unlocked = false;

                if (kvm_is_error_hva(vmaddr))
                        return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

                down_read(&current->mm->mmap_sem);
                rc = cond_set_guest_storage_key(current->mm, vmaddr, key, &oldkey,
                                                m3 & SSKE_NQ, m3 & SSKE_MR,
                                                m3 & SSKE_MC);

                if (rc < 0) {
                        rc = fixup_user_fault(current, current->mm, vmaddr,
                                              FAULT_FLAG_WRITE, &unlocked);
                        rc = !rc ? -EAGAIN : rc;
                }
                up_read(&current->mm->mmap_sem);
                if (rc == -EFAULT)
                        return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                if (rc < 0)
                        return rc;
                start += PAGE_SIZE;
        }

        if (m3 & (SSKE_MC | SSKE_MR)) {
                if (m3 & SSKE_MB) {
                        /* skey in reg1 is unpredictable */
                        kvm_s390_set_psw_cc(vcpu, 3);
                } else {
                        kvm_s390_set_psw_cc(vcpu, rc);
                        vcpu->run->s.regs.gprs[reg1] &= ~0xff00UL;
                        vcpu->run->s.regs.gprs[reg1] |= (u64) oldkey << 8;
                }
        }
        if (m3 & SSKE_MB) {
                if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT)
                        vcpu->run->s.regs.gprs[reg2] &= ~PAGE_MASK;
                else
                        vcpu->run->s.regs.gprs[reg2] &= ~0xfffff000UL;
                end = kvm_s390_logical_to_effective(vcpu, end);
                vcpu->run->s.regs.gprs[reg2] |= end;
        }
        return 0;
}

static int handle_ipte_interlock(struct kvm_vcpu *vcpu)
{
        vcpu->stat.instruction_ipte_interlock++;
        if (psw_bits(vcpu->arch.sie_block->gpsw).pstate)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
        wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu));
        kvm_s390_retry_instr(vcpu);
        VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation");
        return 0;
}

static int handle_test_block(struct kvm_vcpu *vcpu)
{
        gpa_t addr;
        int reg2;

        vcpu->stat.instruction_tb++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        kvm_s390_get_regs_rre(vcpu, NULL, &reg2);
        addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
        addr = kvm_s390_logical_to_effective(vcpu, addr);
        if (kvm_s390_check_low_addr_prot_real(vcpu, addr))
                return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
        addr = kvm_s390_real_to_abs(vcpu, addr);

        if (kvm_is_error_gpa(vcpu->kvm, addr))
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        /*
         * We don't expect errors on modern systems, and do not care
         * about storage keys (yet), so let's just clear the page.
         */
        if (kvm_clear_guest(vcpu->kvm, addr, PAGE_SIZE))
                return -EFAULT;
        kvm_s390_set_psw_cc(vcpu, 0);
        vcpu->run->s.regs.gprs[0] = 0;
        return 0;
}

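/*
 * TPI: dequeue a pending I/O interrupt. With a non-zero operand address
 * the two-word interruption code is stored there; with a zero address the
 * three-word code goes to the lowcore. On store failure the interrupt is
 * reinjected.
 */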
static int handle_tpi(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_interrupt_info *inti;
        unsigned long len;
        u32 tpi_data[3];
        int rc;
        u64 addr;
        u8 ar;

        vcpu->stat.instruction_tpi++;

        addr = kvm_s390_get_base_disp_s(vcpu, &ar);
        if (addr & 3)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0);
        if (!inti) {
                kvm_s390_set_psw_cc(vcpu, 0);
                return 0;
        }

        tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr;
        tpi_data[1] = inti->io.io_int_parm;
        tpi_data[2] = inti->io.io_int_word;
        if (addr) {
                /*
                 * Store the two-word I/O interruption code into the
                 * provided area.
                 */
                len = sizeof(tpi_data) - 4;
                rc = write_guest(vcpu, addr, ar, &tpi_data, len);
                if (rc) {
                        rc = kvm_s390_inject_prog_cond(vcpu, rc);
                        goto reinject_interrupt;
                }
        } else {
                /*
                 * Store the three-word I/O interruption code into
                 * the appropriate lowcore area.
                 */
                len = sizeof(tpi_data);
                if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) {
                        /* failed writes to the low core are not recoverable */
                        rc = -EFAULT;
                        goto reinject_interrupt;
                }
        }

        /* irq was successfully handed to the guest */
        kfree(inti);
        kvm_s390_set_psw_cc(vcpu, 1);
        return 0;
reinject_interrupt:
        /*
         * If we encounter a problem storing the interruption code, the
         * instruction is suppressed from the guest's view: reinject the
         * interrupt.
         */
        if (kvm_s390_reinject_io_int(vcpu->kvm, inti)) {
                kfree(inti);
                rc = -EFAULT;
        }
        /* don't set the cc, a pgm irq was injected or we drop to user space */
        return rc ? -EFAULT : 0;
}

static int handle_tsch(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_interrupt_info *inti = NULL;
        const u64 isc_mask = 0xffUL << 24; /* all iscs set */

        vcpu->stat.instruction_tsch++;

        /* a valid schid has at least one bit set */
        if (vcpu->run->s.regs.gprs[1])
                inti = kvm_s390_get_io_int(vcpu->kvm, isc_mask,
                                           vcpu->run->s.regs.gprs[1]);

        /*
         * Prepare exit to userspace.
         * We indicate whether we dequeued a pending I/O interrupt
         * so that userspace can re-inject it if the instruction gets
         * a program check. While this may re-order the pending I/O
         * interrupts, this is no problem since the priority is kept
         * intact.
         */
        vcpu->run->exit_reason = KVM_EXIT_S390_TSCH;
        vcpu->run->s390_tsch.dequeued = !!inti;
        if (inti) {
                vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id;
                vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr;
                vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm;
                vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word;
        }
        vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;
        kfree(inti);
        return -EREMOTE;
}

static int handle_io_inst(struct kvm_vcpu *vcpu)
{
        VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        if (vcpu->kvm->arch.css_support) {
                /*
                 * Most I/O instructions will be handled by userspace.
                 * Exceptions are tpi and the interrupt portion of tsch.
                 */
                if (vcpu->arch.sie_block->ipa == 0xb236)
                        return handle_tpi(vcpu);
                if (vcpu->arch.sie_block->ipa == 0xb235)
                        return handle_tsch(vcpu);
                /* Handle in userspace. */
                vcpu->stat.instruction_io_other++;
                return -EOPNOTSUPP;
        } else {
                /*
                 * Set condition code 3 to stop the guest from issuing channel
                 * I/O instructions.
                 */
                kvm_s390_set_psw_cc(vcpu, 3);
                return 0;
        }
}

/*
 * handle_pqap: Handling pqap interception
 * @vcpu: the vcpu that issued the pqap instruction
 *
 * We now support PQAP/AQIC instructions and we need to correctly
 * answer the guest even if no dedicated driver's hook is available.
 *
 * The intercepting code calls a dedicated callback for this instruction
 * if a driver did register one in the CRYPTO satellite of the
 * SIE block.
 *
 * If no callback is available, the queues are not available: return the
 * "queues not available" response code to the guest and set CC to 3.
 * Else return the response code returned by the callback.
 */
static int handle_pqap(struct kvm_vcpu *vcpu)
{
        struct ap_queue_status status = {};
        unsigned long reg0;
        int ret;
        uint8_t fc;

        /* Verify that the AP instructions are available */
        if (!ap_instructions_available())
                return -EOPNOTSUPP;
        /* Verify that the guest is allowed to use AP instructions */
        if (!(vcpu->arch.sie_block->eca & ECA_APIE))
                return -EOPNOTSUPP;
        /*
         * The only possibly intercepted functions when AP instructions are
         * available for the guest are AQIC and TAPQ with the t bit set.
         * Since we do not set IC.3 (FIII), we currently will only intercept
         * the AQIC function code.
         * Note: running nested under z/VM can result in intercepts for other
         * function codes, e.g. PQAP(QCI). We do not support this and bail out.
         */
        reg0 = vcpu->run->s.regs.gprs[0];
        fc = (reg0 >> 24) & 0xff;
        if (fc != 0x03)
                return -EOPNOTSUPP;

        /* PQAP instruction is allowed for guest kernel only */
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        /* Common PQAP instruction specification exceptions */
        /* bits 41-47 must all be zeros */
        if (reg0 & 0x007f0000UL)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        /* APFT not installed and T bit set */
        if (!test_kvm_facility(vcpu->kvm, 15) && (reg0 & 0x00800000UL))
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        /* APXA not installed and APID greater than 64 or APQI greater than 16 */
        if (!(vcpu->kvm->arch.crypto.crycbd & 0x02) && (reg0 & 0x0000c0f0UL))
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        /* AQIC function code specific exception */
        /* facility 65 not present for AQIC function code */
        if (!test_kvm_facility(vcpu->kvm, 65))
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        /*
         * Verify that the hook callback is registered, lock the owner
         * and call the hook.
         */
        if (vcpu->kvm->arch.crypto.pqap_hook) {
                if (!try_module_get(vcpu->kvm->arch.crypto.pqap_hook->owner))
                        return -EOPNOTSUPP;
                ret = vcpu->kvm->arch.crypto.pqap_hook->hook(vcpu);
                module_put(vcpu->kvm->arch.crypto.pqap_hook->owner);
                if (!ret && vcpu->run->s.regs.gprs[1] & 0x00ff0000)
                        kvm_s390_set_psw_cc(vcpu, 3);
                return ret;
        }
        /*
         * A vfio_driver must register a hook.
         * No hook means no driver to enable the SIE CRYCB and no queues.
         * We send this response to the guest.
         */
        status.response_code = 0x01;
        memcpy(&vcpu->run->s.regs.gprs[1], &status, sizeof(status));
        kvm_s390_set_psw_cc(vcpu, 3);
        return 0;
}

static int handle_stfl(struct kvm_vcpu *vcpu)
{
        int rc;
        unsigned int fac;

        vcpu->stat.instruction_stfl++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        /*
         * We need to shift the lower 32 facility bits (bit 0-31) from a u64
         * into a u32 memory representation. They will remain bits 0-31.
         */
        fac = *vcpu->kvm->arch.model.fac_list >> 32;
        rc = write_guest_lc(vcpu, offsetof(struct lowcore, stfl_fac_list),
                            &fac, sizeof(fac));
        if (rc)
                return rc;
        VCPU_EVENT(vcpu, 3, "STFL: store facility list 0x%x", fac);
        trace_kvm_s390_handle_stfl(vcpu, fac);
        return 0;
}

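/*
 * A PSW is rejected if it sets unassigned mask bits, selects the invalid
 * EA-without-BA addressing mode, carries an address that does not fit the
 * selected 24/31-bit mode, or has the odd address bit set.
 */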
#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
#define PSW_ADDR_24 0x0000000000ffffffUL
#define PSW_ADDR_31 0x000000007fffffffUL

int is_valid_psw(psw_t *psw)
{
        if (psw->mask & PSW_MASK_UNASSIGNED)
                return 0;
        if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) {
                if (psw->addr & ~PSW_ADDR_31)
                        return 0;
        }
        if (!(psw->mask & PSW_MASK_ADDR_MODE) && (psw->addr & ~PSW_ADDR_24))
                return 0;
        if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_EA)
                return 0;
        if (psw->addr & 1)
                return 0;
        return 1;
}

int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
{
        psw_t *gpsw = &vcpu->arch.sie_block->gpsw;
        psw_compat_t new_psw;
        u64 addr;
        int rc;
        u8 ar;

        vcpu->stat.instruction_lpsw++;

        if (gpsw->mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        addr = kvm_s390_get_base_disp_s(vcpu, &ar);
        if (addr & 7)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);
        if (!(new_psw.mask & PSW32_MASK_BASE))
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32;
        gpsw->mask |= new_psw.addr & PSW32_ADDR_AMODE;
        gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE;
        if (!is_valid_psw(gpsw))
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        return 0;
}

static int handle_lpswe(struct kvm_vcpu *vcpu)
{
        psw_t new_psw;
        u64 addr;
        int rc;
        u8 ar;

        vcpu->stat.instruction_lpswe++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        addr = kvm_s390_get_base_disp_s(vcpu, &ar);
        if (addr & 7)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);
        vcpu->arch.sie_block->gpsw = new_psw;
        if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        return 0;
}

static int handle_stidp(struct kvm_vcpu *vcpu)
{
        u64 stidp_data = vcpu->kvm->arch.model.cpuid;
        u64 operand2;
        int rc;
        u8 ar;

        vcpu->stat.instruction_stidp++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

        if (operand2 & 7)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        rc = write_guest(vcpu, operand2, ar, &stidp_data, sizeof(stidp_data));
        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);

        VCPU_EVENT(vcpu, 3, "STIDP: store cpu id 0x%llx", stidp_data);
        return 0;
}

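/*
 * For STSI 3.2.2 we forward the host's hypervisor list and prepend an
 * entry for this KVM instance (online vCPU count, "KVMguest" name),
 * shifting the existing entries down by one slot.
 */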
static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
{
        int cpus = 0;
        int n;

        cpus = atomic_read(&vcpu->kvm->online_vcpus);

        /* deal with other level 3 hypervisors */
        if (stsi(mem, 3, 2, 2))
                mem->count = 0;
        if (mem->count < 8)
                mem->count++;
        for (n = mem->count - 1; n > 0; n--)
                memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));

        memset(&mem->vm[0], 0, sizeof(mem->vm[0]));
        mem->vm[0].cpus_total = cpus;
        mem->vm[0].cpus_configured = cpus;
        mem->vm[0].cpus_standby = 0;
        mem->vm[0].cpus_reserved = 0;
        mem->vm[0].caf = 1000;
        memcpy(mem->vm[0].name, "KVMguest", 8);
        ASCEBC(mem->vm[0].name, 8);
        memcpy(mem->vm[0].cpi, "KVM/Linux       ", 16);
        ASCEBC(mem->vm[0].cpi, 16);
}

static void insert_stsi_usr_data(struct kvm_vcpu *vcpu, u64 addr, u8 ar,
                                 u8 fc, u8 sel1, u16 sel2)
{
        vcpu->run->exit_reason = KVM_EXIT_S390_STSI;
        vcpu->run->s390_stsi.addr = addr;
        vcpu->run->s390_stsi.ar = ar;
        vcpu->run->s390_stsi.fc = fc;
        vcpu->run->s390_stsi.sel1 = sel1;
        vcpu->run->s390_stsi.sel2 = sel2;
}

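/*
 * STSI: function codes 1 and 2 are satisfied by the real stsi instruction,
 * fc 3 is synthesized by handle_stsi_3_2_2(), and the resulting page is
 * copied to the guest. With user_stsi set we also exit to userspace so it
 * can post-process the data.
 */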
static int handle_stsi(struct kvm_vcpu *vcpu)
{
        int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
        int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
        int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
        unsigned long mem = 0;
        u64 operand2;
        int rc = 0;
        u8 ar;

        vcpu->stat.instruction_stsi++;
        VCPU_EVENT(vcpu, 3, "STSI: fc: %u sel1: %u sel2: %u", fc, sel1, sel2);

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        if (fc > 3) {
                kvm_s390_set_psw_cc(vcpu, 3);
                return 0;
        }

        if (vcpu->run->s.regs.gprs[0] & 0x0fffff00
            || vcpu->run->s.regs.gprs[1] & 0xffff0000)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        if (fc == 0) {
                vcpu->run->s.regs.gprs[0] = 3 << 28;
                kvm_s390_set_psw_cc(vcpu, 0);
                return 0;
        }

        operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

        if (operand2 & 0xfff)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        switch (fc) {
        case 1: /* same handling for 1 and 2 */
        case 2:
                mem = get_zeroed_page(GFP_KERNEL);
                if (!mem)
                        goto out_no_data;
                if (stsi((void *) mem, fc, sel1, sel2))
                        goto out_no_data;
                break;
        case 3:
                if (sel1 != 2 || sel2 != 2)
                        goto out_no_data;
                mem = get_zeroed_page(GFP_KERNEL);
                if (!mem)
                        goto out_no_data;
                handle_stsi_3_2_2(vcpu, (void *) mem);
                break;
        }

        rc = write_guest(vcpu, operand2, ar, (void *)mem, PAGE_SIZE);
        if (rc) {
                rc = kvm_s390_inject_prog_cond(vcpu, rc);
                goto out;
        }
        if (vcpu->kvm->arch.user_stsi) {
                insert_stsi_usr_data(vcpu, operand2, ar, fc, sel1, sel2);
                rc = -EREMOTE;
        }
        trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
        free_page(mem);
        kvm_s390_set_psw_cc(vcpu, 0);
        vcpu->run->s.regs.gprs[0] = 0;
        return rc;
out_no_data:
        kvm_s390_set_psw_cc(vcpu, 3);
out:
        free_page(mem);
        return rc;
}

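/* Dispatch 0xb2xx intercepts on the low byte of the instruction code. */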
int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
{
        switch (vcpu->arch.sie_block->ipa & 0x00ff) {
        case 0x02:
                return handle_stidp(vcpu);
        case 0x04:
                return handle_set_clock(vcpu);
        case 0x10:
                return handle_set_prefix(vcpu);
        case 0x11:
                return handle_store_prefix(vcpu);
        case 0x12:
                return handle_store_cpu_address(vcpu);
        case 0x14:
                return kvm_s390_handle_vsie(vcpu);
        case 0x21:
        case 0x50:
                return handle_ipte_interlock(vcpu);
        case 0x29:
                return handle_iske(vcpu);
        case 0x2a:
                return handle_rrbe(vcpu);
        case 0x2b:
                return handle_sske(vcpu);
        case 0x2c:
                return handle_test_block(vcpu);
        case 0x30:
        case 0x31:
        case 0x32:
        case 0x33:
        case 0x34:
        case 0x35:
        case 0x36:
        case 0x37:
        case 0x38:
        case 0x39:
        case 0x3a:
        case 0x3b:
        case 0x3c:
        case 0x5f:
        case 0x74:
        case 0x76:
                return handle_io_inst(vcpu);
        case 0x56:
                return handle_sthyi(vcpu);
        case 0x7d:
                return handle_stsi(vcpu);
        case 0xaf:
                return handle_pqap(vcpu);
        case 0xb1:
                return handle_stfl(vcpu);
        case 0xb2:
                return handle_lpswe(vcpu);
        default:
                return -EOPNOTSUPP;
        }
}

static int handle_epsw(struct kvm_vcpu *vcpu)
{
        int reg1, reg2;

        vcpu->stat.instruction_epsw++;

        kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

        /* This basically extracts the mask half of the psw. */
        vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000UL;
        vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
        if (reg2) {
                vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000UL;
                vcpu->run->s.regs.gprs[reg2] |=
                        vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffffUL;
        }
        return 0;
}

#define PFMF_RESERVED   0xfffc0101UL
#define PFMF_SK         0x00020000UL
#define PFMF_CF         0x00010000UL
#define PFMF_UI         0x00008000UL
#define PFMF_FSC        0x00007000UL
#define PFMF_NQ         0x00000800UL
#define PFMF_MR         0x00000400UL
#define PFMF_MC         0x00000200UL
#define PFMF_KEY        0x000000feUL

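/*
 * PFMF: operate on a 4k, 1M or 2G frame depending on the frame-size code,
 * optionally clearing each page (CF) and/or setting its storage key (SK).
 * For the 1M/2G variants the address of the next frame is written back to
 * reg2 on completion.
 */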
static int handle_pfmf(struct kvm_vcpu *vcpu)
{
        bool mr = false, mc = false, nq;
        int reg1, reg2;
        unsigned long start, end;
        unsigned char key;

        vcpu->stat.instruction_pfmf++;

        kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

        if (!test_kvm_facility(vcpu->kvm, 8))
                return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        /* Only provide non-quiescing support if enabled for the guest */
        if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ &&
            !test_kvm_facility(vcpu->kvm, 14))
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        /* Only provide conditional-SSKE support if enabled for the guest */
        if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK &&
            test_kvm_facility(vcpu->kvm, 10)) {
                mr = vcpu->run->s.regs.gprs[reg1] & PFMF_MR;
                mc = vcpu->run->s.regs.gprs[reg1] & PFMF_MC;
        }

        nq = vcpu->run->s.regs.gprs[reg1] & PFMF_NQ;
        key = vcpu->run->s.regs.gprs[reg1] & PFMF_KEY;
        start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
        start = kvm_s390_logical_to_effective(vcpu, start);

        if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
                if (kvm_s390_check_low_addr_prot_real(vcpu, start))
                        return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
        }

        switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
        case 0x00000000:
                /* only 4k frames specify a real address */
                start = kvm_s390_real_to_abs(vcpu, start);
                end = (start + PAGE_SIZE) & ~(PAGE_SIZE - 1);
                break;
        case 0x00001000:
                end = (start + _SEGMENT_SIZE) & ~(_SEGMENT_SIZE - 1);
                break;
        case 0x00002000:
                /*
                 * only support 2G frame size if EDAT2 is available and we are
                 * not in 24-bit addressing mode
                 */
                if (!test_kvm_facility(vcpu->kvm, 78) ||
                    psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_24BIT)
                        return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
                end = (start + _REGION3_SIZE) & ~(_REGION3_SIZE - 1);
                break;
        default:
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        }

        while (start != end) {
                unsigned long vmaddr;
                bool unlocked = false;

                /* Translate guest address to host address */
                vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));
                if (kvm_is_error_hva(vmaddr))
                        return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

                if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
                        if (kvm_clear_guest(vcpu->kvm, start, PAGE_SIZE))
                                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                }

                if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
                        int rc = kvm_s390_skey_check_enable(vcpu);

                        if (rc)
                                return rc;
                        down_read(&current->mm->mmap_sem);
                        rc = cond_set_guest_storage_key(current->mm, vmaddr,
                                                        key, NULL, nq, mr, mc);
                        if (rc < 0) {
                                rc = fixup_user_fault(current, current->mm, vmaddr,
                                                      FAULT_FLAG_WRITE, &unlocked);
                                rc = !rc ? -EAGAIN : rc;
                        }
                        up_read(&current->mm->mmap_sem);
                        if (rc == -EFAULT)
                                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                        if (rc == -EAGAIN)
                                continue;
                        if (rc < 0)
                                return rc;
                }
                start += PAGE_SIZE;
        }
        if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
                if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT) {
                        vcpu->run->s.regs.gprs[reg2] = end;
                } else {
                        vcpu->run->s.regs.gprs[reg2] &= ~0xffffffffUL;
                        end = kvm_s390_logical_to_effective(vcpu, end);
                        vcpu->run->s.regs.gprs[reg2] |= end;
                }
        }
        return 0;
}

/*
 * Must be called with relevant read locks held (kvm->mm->mmap_sem, kvm->srcu)
 */
static inline int __do_essa(struct kvm_vcpu *vcpu, const int orc)
{
        int r1, r2, nappended, entries;
        unsigned long gfn, hva, res, pgstev, ptev;
        unsigned long *cbrlo;

        /*
         * We don't need to set SD.FPF.SK to 1 here, because if we have a
         * machine check here we either handle it or crash
         */

        kvm_s390_get_regs_rre(vcpu, &r1, &r2);
        gfn = vcpu->run->s.regs.gprs[r2] >> PAGE_SHIFT;
        hva = gfn_to_hva(vcpu->kvm, gfn);
        entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;

        if (kvm_is_error_hva(hva))
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

        nappended = pgste_perform_essa(vcpu->kvm->mm, hva, orc, &ptev, &pgstev);
        if (nappended < 0) {
                res = orc ? 0x10 : 0;
                vcpu->run->s.regs.gprs[r1] = res; /* Exception Indication */
                return 0;
        }
        res = (pgstev & _PGSTE_GPS_USAGE_MASK) >> 22;
        /*
         * Set the block-content state part of the result. 0 means resident, so
         * nothing to do if the page is valid. 2 is for preserved pages
         * (non-present and non-zero), and 3 for zero pages (non-present and
         * zero).
         */
        if (ptev & _PAGE_INVALID) {
                res |= 2;
                if (pgstev & _PGSTE_GPS_ZERO)
                        res |= 1;
        }
        if (pgstev & _PGSTE_GPS_NODAT)
                res |= 0x20;
        vcpu->run->s.regs.gprs[r1] = res;
        /*
         * It is possible that all the normal 511 slots were full, in which case
         * we will now write in the 512th slot, which is reserved for host use.
         * In both cases we let the normal essa handling code process all the
         * slots, including the reserved one, if needed.
         */
        if (nappended > 0) {
                cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo & PAGE_MASK);
                cbrlo[entries] = gfn << PAGE_SHIFT;
        }

        if (orc) {
                struct kvm_memory_slot *ms = gfn_to_memslot(vcpu->kvm, gfn);

                /* Increment only if we are really flipping the bit */
                if (ms && !test_and_set_bit(gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms)))
                        atomic64_inc(&vcpu->kvm->arch.cmma_dirty_pages);
        }

        return nappended;
}

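/*
 * ESSA: outside of migration mode we only need CMMA enabled in the SIE
 * block and in the mm_context so that hardware can interpret the
 * instruction. In migration mode we emulate it via __do_essa() so that
 * CMMA state changes are tracked in the dirty bitmap, then zap the
 * collected block-usage entries.
 */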
static int handle_essa(struct kvm_vcpu *vcpu)
{
        /* entries expected to be 0x1ff */
        int entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;
        unsigned long *cbrlo;
        struct gmap *gmap;
        int i, orc;

        VCPU_EVENT(vcpu, 4, "ESSA: release %d pages", entries);
        gmap = vcpu->arch.gmap;
        vcpu->stat.instruction_essa++;
        if (!vcpu->kvm->arch.use_cmma)
                return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
        /* Check for invalid operation request code */
        orc = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28;
        /* ORCs 0-6 are always valid */
        if (orc > (test_kvm_facility(vcpu->kvm, 147) ? ESSA_SET_STABLE_NODAT
                                                : ESSA_SET_STABLE_IF_RESIDENT))
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        if (!vcpu->kvm->arch.migration_mode) {
                /*
                 * CMMA is enabled in the KVM settings, but is disabled in
                 * the SIE block and in the mm_context, and we are not doing
                 * a migration. Enable CMMA in the mm_context.
                 * Since we need to take a write lock to write to the context
                 * to avoid races with storage keys handling, we check if the
                 * value really needs to be written to; if the value is
                 * already correct, we do nothing and avoid the lock.
                 */
                if (vcpu->kvm->mm->context.uses_cmm == 0) {
                        down_write(&vcpu->kvm->mm->mmap_sem);
                        vcpu->kvm->mm->context.uses_cmm = 1;
                        up_write(&vcpu->kvm->mm->mmap_sem);
                }
                /*
                 * If we are here, we are supposed to have CMMA enabled in
                 * the SIE block. Enabling CMMA works on a per-CPU basis,
                 * while the context use_cmma flag is per process.
                 * It's possible that the context flag is enabled and the
                 * SIE flag is not, so we set the flag always; if it was
                 * already set, nothing changes, otherwise we enable it
                 * on this CPU too.
                 */
                vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
                /* Retry the ESSA instruction */
                kvm_s390_retry_instr(vcpu);
        } else {
                int srcu_idx;

                down_read(&vcpu->kvm->mm->mmap_sem);
                srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
                i = __do_essa(vcpu, orc);
                srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
                up_read(&vcpu->kvm->mm->mmap_sem);
                if (i < 0)
                        return i;
                /* Account for the possible extra cbrl entry */
                entries += i;
        }
        vcpu->arch.sie_block->cbrlo &= PAGE_MASK;       /* reset nceo */
        cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo);
        down_read(&gmap->mm->mmap_sem);
        for (i = 0; i < entries; ++i)
                __gmap_zap(gmap, cbrlo[i]);
        up_read(&gmap->mm->mmap_sem);
        return 0;
}

int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
{
        switch (vcpu->arch.sie_block->ipa & 0x00ff) {
        case 0x8a:
        case 0x8e:
        case 0x8f:
                return handle_ipte_interlock(vcpu);
        case 0x8d:
                return handle_epsw(vcpu);
        case 0xab:
                return handle_essa(vcpu);
        case 0xaf:
                return handle_pfmf(vcpu);
        default:
                return -EOPNOTSUPP;
        }
}

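/*
 * LCTL/LCTLG and STCTL/STCTG walk the control registers from r1 to r3,
 * wrapping around at 16, and move 32-bit or 64-bit register images to or
 * from guest memory. Loads force a TLB flush, since control registers
 * steer address translation.
 */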
int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
{
        int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
        int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
        int reg, rc, nr_regs;
        u32 ctl_array[16];
        u64 ga;
        u8 ar;

        vcpu->stat.instruction_lctl++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        ga = kvm_s390_get_base_disp_rs(vcpu, &ar);

        if (ga & 3)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        VCPU_EVENT(vcpu, 4, "LCTL: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
        trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga);

        nr_regs = ((reg3 - reg1) & 0xf) + 1;
        rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);
        reg = reg1;
        nr_regs = 0;
        do {
                vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
                vcpu->arch.sie_block->gcr[reg] |= ctl_array[nr_regs++];
                if (reg == reg3)
                        break;
                reg = (reg + 1) % 16;
        } while (1);
        kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
        return 0;
}

int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
{
        int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
        int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
        int reg, rc, nr_regs;
        u32 ctl_array[16];
        u64 ga;
        u8 ar;

        vcpu->stat.instruction_stctl++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        ga = kvm_s390_get_base_disp_rs(vcpu, &ar);

        if (ga & 3)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        VCPU_EVENT(vcpu, 4, "STCTL r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
        trace_kvm_s390_handle_stctl(vcpu, 0, reg1, reg3, ga);

        reg = reg1;
        nr_regs = 0;
        do {
                ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
                if (reg == reg3)
                        break;
                reg = (reg + 1) % 16;
        } while (1);
        rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
        return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}

static int handle_lctlg(struct kvm_vcpu *vcpu)
{
        int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
        int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
        int reg, rc, nr_regs;
        u64 ctl_array[16];
        u64 ga;
        u8 ar;

        vcpu->stat.instruction_lctlg++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);

        if (ga & 7)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        VCPU_EVENT(vcpu, 4, "LCTLG: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
        trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga);

        nr_regs = ((reg3 - reg1) & 0xf) + 1;
        rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);
        reg = reg1;
        nr_regs = 0;
        do {
                vcpu->arch.sie_block->gcr[reg] = ctl_array[nr_regs++];
                if (reg == reg3)
                        break;
                reg = (reg + 1) % 16;
        } while (1);
        kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
        return 0;
}

static int handle_stctg(struct kvm_vcpu *vcpu)
{
        int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
        int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
        int reg, rc, nr_regs;
        u64 ctl_array[16];
        u64 ga;
        u8 ar;

        vcpu->stat.instruction_stctg++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);

        if (ga & 7)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        VCPU_EVENT(vcpu, 4, "STCTG r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
        trace_kvm_s390_handle_stctl(vcpu, 1, reg1, reg3, ga);

        reg = reg1;
        nr_regs = 0;
        do {
                ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
                if (reg == reg3)
                        break;
                reg = (reg + 1) % 16;
        } while (1);
        rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
        return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}

int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
{
        switch (vcpu->arch.sie_block->ipb & 0x000000ff) {
        case 0x25:
                return handle_stctg(vcpu);
        case 0x2f:
                return handle_lctlg(vcpu);
        case 0x60:
        case 0x61:
        case 0x62:
                return handle_ri(vcpu);
        default:
                return -EOPNOTSUPP;
        }
}

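/*
 * TPROT: we only emulate the access-key-0 probe that Linux uses for memory
 * detection; CC 0 means writable, CC 1 read-only and CC 3 not accessible.
 * Requests with a non-zero access key go to userspace.
 */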
static int handle_tprot(struct kvm_vcpu *vcpu)
{
        u64 address1, address2;
        unsigned long hva, gpa;
        int ret = 0, cc = 0;
        bool writable;
        u8 ar;

        vcpu->stat.instruction_tprot++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        kvm_s390_get_base_disp_sse(vcpu, &address1, &address2, &ar, NULL);

        /*
         * We only handle the Linux memory detection case:
         * access key == 0
         * everything else goes to userspace.
         */
        if (address2 & 0xf0)
                return -EOPNOTSUPP;
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
                ipte_lock(vcpu);
        ret = guest_translate_address(vcpu, address1, ar, &gpa, GACC_STORE);
        if (ret == PGM_PROTECTION) {
                /* Write protected? Try again with read-only... */
                cc = 1;
                ret = guest_translate_address(vcpu, address1, ar, &gpa,
                                              GACC_FETCH);
        }
        if (ret) {
                if (ret == PGM_ADDRESSING || ret == PGM_TRANSLATION_SPEC) {
                        ret = kvm_s390_inject_program_int(vcpu, ret);
                } else if (ret > 0) {
                        /* Translation not available */
                        kvm_s390_set_psw_cc(vcpu, 3);
                        ret = 0;
                }
                goto out_unlock;
        }

        hva = gfn_to_hva_prot(vcpu->kvm, gpa_to_gfn(gpa), &writable);
        if (kvm_is_error_hva(hva)) {
                ret = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        } else {
                if (!writable)
                        cc = 1;         /* Write not permitted ==> read-only */
                kvm_s390_set_psw_cc(vcpu, cc);
                /* Note: CC2 only occurs for storage keys (not supported yet) */
        }
out_unlock:
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
                ipte_unlock(vcpu);
        return ret;
}

int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
{
        switch (vcpu->arch.sie_block->ipa & 0x00ff) {
        case 0x01:
                return handle_tprot(vcpu);
        default:
                return -EOPNOTSUPP;
        }
}

static int handle_sckpf(struct kvm_vcpu *vcpu)
{
        u32 value;

        vcpu->stat.instruction_sckpf++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
                return kvm_s390_inject_program_int(vcpu,
                                                   PGM_SPECIFICATION);

        value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
        vcpu->arch.sie_block->todpr = value;

        return 0;
}

static int handle_ptff(struct kvm_vcpu *vcpu)
{
        vcpu->stat.instruction_ptff++;

        /* we don't emulate any control instructions yet */
        kvm_s390_set_psw_cc(vcpu, 3);
        return 0;
}

int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
{
        switch (vcpu->arch.sie_block->ipa & 0x00ff) {
        case 0x04:
                return handle_ptff(vcpu);
        case 0x07:
                return handle_sckpf(vcpu);
        default:
                return -EOPNOTSUPP;
        }
}
