1/* 2 * CXL Flash Device Driver 3 * 4 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation 5 * Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation 6 * 7 * Copyright (C) 2015 IBM Corporation 8 * 9 * This program is free software; you can redistribute it and/or 10 * modify it under the terms of the GNU General Public License 11 * as published by the Free Software Foundation; either version 12 * 2 of the License, or (at your option) any later version. 13 */ 14 15#include <linux/delay.h> 16#include <linux/file.h> 17#include <linux/syscalls.h> 18#include <misc/cxl.h> 19#include <asm/unaligned.h> 20 21#include <scsi/scsi.h> 22#include <scsi/scsi_host.h> 23#include <scsi/scsi_cmnd.h> 24#include <scsi/scsi_eh.h> 25#include <uapi/scsi/cxlflash_ioctl.h> 26 27#include "sislite.h" 28#include "common.h" 29#include "vlun.h" 30#include "superpipe.h" 31 32struct cxlflash_global global; 33 34/** 35 * marshal_rele_to_resize() - translate release to resize structure 36 * @rele: Source structure from which to translate/copy. 37 * @resize: Destination structure for the translate/copy. 38 */ 39static void marshal_rele_to_resize(struct dk_cxlflash_release *release, 40 struct dk_cxlflash_resize *resize) 41{ 42 resize->hdr = release->hdr; 43 resize->context_id = release->context_id; 44 resize->rsrc_handle = release->rsrc_handle; 45} 46 47/** 48 * marshal_det_to_rele() - translate detach to release structure 49 * @detach: Destination structure for the translate/copy. 50 * @rele: Source structure from which to translate/copy. 51 */ 52static void marshal_det_to_rele(struct dk_cxlflash_detach *detach, 53 struct dk_cxlflash_release *release) 54{ 55 release->hdr = detach->hdr; 56 release->context_id = detach->context_id; 57} 58 59/** 60 * cxlflash_free_errpage() - frees resources associated with global error page 61 */ 62void cxlflash_free_errpage(void) 63{ 64 65 mutex_lock(&global.mutex); 66 if (global.err_page) { 67 __free_page(global.err_page); 68 global.err_page = NULL; 69 } 70 mutex_unlock(&global.mutex); 71} 72 73/** 74 * cxlflash_stop_term_user_contexts() - stops/terminates known user contexts 75 * @cfg: Internal structure associated with the host. 76 * 77 * When the host needs to go down, all users must be quiesced and their 78 * memory freed. This is accomplished by putting the contexts in error 79 * state which will notify the user and let them 'drive' the tear down. 80 * Meanwhile, this routine camps until all user contexts have been removed. 81 */ 82void cxlflash_stop_term_user_contexts(struct cxlflash_cfg *cfg) 83{ 84 struct device *dev = &cfg->dev->dev; 85 int i, found; 86 87 cxlflash_mark_contexts_error(cfg); 88 89 while (true) { 90 found = false; 91 92 for (i = 0; i < MAX_CONTEXT; i++) 93 if (cfg->ctx_tbl[i]) { 94 found = true; 95 break; 96 } 97 98 if (!found && list_empty(&cfg->ctx_err_recovery)) 99 return; 100 101 dev_dbg(dev, "%s: Wait for user contexts to quiesce...\n", 102 __func__); 103 wake_up_all(&cfg->reset_waitq); 104 ssleep(1); 105 } 106} 107 108/** 109 * find_error_context() - locates a context by cookie on the error recovery list 110 * @cfg: Internal structure associated with the host. 111 * @rctxid: Desired context by id. 112 * @file: Desired context by file. 113 * 114 * Return: Found context on success, NULL on failure 115 */ 116static struct ctx_info *find_error_context(struct cxlflash_cfg *cfg, u64 rctxid, 117 struct file *file) 118{ 119 struct ctx_info *ctxi; 120 121 list_for_each_entry(ctxi, &cfg->ctx_err_recovery, list) 122 if ((ctxi->ctxid == rctxid) || (ctxi->file == file)) 123 return ctxi; 124 125 return NULL; 126} 127 128/** 129 * get_context() - obtains a validated and locked context reference 130 * @cfg: Internal structure associated with the host. 131 * @rctxid: Desired context (raw, un-decoded format). 132 * @arg: LUN information or file associated with request. 133 * @ctx_ctrl: Control information to 'steer' desired lookup. 134 * 135 * NOTE: despite the name pid, in linux, current->pid actually refers 136 * to the lightweight process id (tid) and can change if the process is 137 * multi threaded. The tgid remains constant for the process and only changes 138 * when the process of fork. For all intents and purposes, think of tgid 139 * as a pid in the traditional sense. 140 * 141 * Return: Validated context on success, NULL on failure 142 */ 143struct ctx_info *get_context(struct cxlflash_cfg *cfg, u64 rctxid, 144 void *arg, enum ctx_ctrl ctx_ctrl) 145{ 146 struct device *dev = &cfg->dev->dev; 147 struct ctx_info *ctxi = NULL; 148 struct lun_access *lun_access = NULL; 149 struct file *file = NULL; 150 struct llun_info *lli = arg; 151 u64 ctxid = DECODE_CTXID(rctxid); 152 int rc; 153 pid_t pid = current->tgid, ctxpid = 0; 154 155 if (ctx_ctrl & CTX_CTRL_FILE) { 156 lli = NULL; 157 file = (struct file *)arg; 158 } 159 160 if (ctx_ctrl & CTX_CTRL_CLONE) 161 pid = current->parent->tgid; 162 163 if (likely(ctxid < MAX_CONTEXT)) { 164 while (true) { 165 mutex_lock(&cfg->ctx_tbl_list_mutex); 166 ctxi = cfg->ctx_tbl[ctxid]; 167 if (ctxi) 168 if ((file && (ctxi->file != file)) || 169 (!file && (ctxi->ctxid != rctxid))) 170 ctxi = NULL; 171 172 if ((ctx_ctrl & CTX_CTRL_ERR) || 173 (!ctxi && (ctx_ctrl & CTX_CTRL_ERR_FALLBACK))) 174 ctxi = find_error_context(cfg, rctxid, file); 175 if (!ctxi) { 176 mutex_unlock(&cfg->ctx_tbl_list_mutex); 177 goto out; 178 } 179 180 /* 181 * Need to acquire ownership of the context while still 182 * under the table/list lock to serialize with a remove 183 * thread. Use the 'try' to avoid stalling the 184 * table/list lock for a single context. 185 * 186 * Note that the lock order is: 187 * 188 * cfg->ctx_tbl_list_mutex -> ctxi->mutex 189 * 190 * Therefore release ctx_tbl_list_mutex before retrying. 191 */ 192 rc = mutex_trylock(&ctxi->mutex); 193 mutex_unlock(&cfg->ctx_tbl_list_mutex); 194 if (rc) 195 break; /* got the context's lock! */ 196 } 197 198 if (ctxi->unavail) 199 goto denied; 200 201 ctxpid = ctxi->pid; 202 if (likely(!(ctx_ctrl & CTX_CTRL_NOPID))) 203 if (pid != ctxpid) 204 goto denied; 205 206 if (lli) { 207 list_for_each_entry(lun_access, &ctxi->luns, list) 208 if (lun_access->lli == lli) 209 goto out; 210 goto denied; 211 } 212 } 213 214out: 215 dev_dbg(dev, "%s: rctxid=%016llX ctxinfo=%p ctxpid=%u pid=%u " 216 "ctx_ctrl=%u\n", __func__, rctxid, ctxi, ctxpid, pid, 217 ctx_ctrl); 218 219 return ctxi; 220 221denied: 222 mutex_unlock(&ctxi->mutex); 223 ctxi = NULL; 224 goto out; 225} 226 227/** 228 * put_context() - release a context that was retrieved from get_context() 229 * @ctxi: Context to release. 230 * 231 * For now, releasing the context equates to unlocking it's mutex. 232 */ 233void put_context(struct ctx_info *ctxi) 234{ 235 mutex_unlock(&ctxi->mutex); 236} 237 238/** 239 * afu_attach() - attach a context to the AFU 240 * @cfg: Internal structure associated with the host. 241 * @ctxi: Context to attach. 242 * 243 * Upon setting the context capabilities, they must be confirmed with 244 * a read back operation as the context might have been closed since 245 * the mailbox was unlocked. When this occurs, registration is failed. 246 * 247 * Return: 0 on success, -errno on failure 248 */ 249static int afu_attach(struct cxlflash_cfg *cfg, struct ctx_info *ctxi) 250{ 251 struct device *dev = &cfg->dev->dev; 252 struct afu *afu = cfg->afu; 253 struct sisl_ctrl_map __iomem *ctrl_map = ctxi->ctrl_map; 254 int rc = 0; 255 u64 val; 256 257 /* Unlock cap and restrict user to read/write cmds in translated mode */ 258 readq_be(&ctrl_map->mbox_r); 259 val = (SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD); 260 writeq_be(val, &ctrl_map->ctx_cap); 261 val = readq_be(&ctrl_map->ctx_cap); 262 if (val != (SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD)) { 263 dev_err(dev, "%s: ctx may be closed val=%016llX\n", 264 __func__, val); 265 rc = -EAGAIN; 266 goto out; 267 } 268 269 /* Set up MMIO registers pointing to the RHT */ 270 writeq_be((u64)ctxi->rht_start, &ctrl_map->rht_start); 271 val = SISL_RHT_CNT_ID((u64)MAX_RHT_PER_CONTEXT, (u64)(afu->ctx_hndl)); 272 writeq_be(val, &ctrl_map->rht_cnt_id); 273out: 274 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); 275 return rc; 276} 277 278/** 279 * read_cap16() - issues a SCSI READ_CAP16 command 280 * @sdev: SCSI device associated with LUN. 281 * @lli: LUN destined for capacity request. 282 * 283 * The READ_CAP16 can take quite a while to complete. Should an EEH occur while 284 * in scsi_execute(), the EEH handler will attempt to recover. As part of the 285 * recovery, the handler drains all currently running ioctls, waiting until they 286 * have completed before proceeding with a reset. As this routine is used on the 287 * ioctl path, this can create a condition where the EEH handler becomes stuck, 288 * infinitely waiting for this ioctl thread. To avoid this behavior, temporarily 289 * unmark this thread as an ioctl thread by releasing the ioctl read semaphore. 290 * This will allow the EEH handler to proceed with a recovery while this thread 291 * is still running. Once the scsi_execute() returns, reacquire the ioctl read 292 * semaphore and check the adapter state in case it changed while inside of 293 * scsi_execute(). The state check will wait if the adapter is still being 294 * recovered or return a failure if the recovery failed. In the event that the 295 * adapter reset failed, simply return the failure as the ioctl would be unable 296 * to continue. 297 * 298 * Note that the above puts a requirement on this routine to only be called on 299 * an ioctl thread. 300 * 301 * Return: 0 on success, -errno on failure 302 */ 303static int read_cap16(struct scsi_device *sdev, struct llun_info *lli) 304{ 305 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata; 306 struct device *dev = &cfg->dev->dev; 307 struct glun_info *gli = lli->parent; 308 u8 *cmd_buf = NULL; 309 u8 *scsi_cmd = NULL; 310 u8 *sense_buf = NULL; 311 int rc = 0; 312 int result = 0; 313 int retry_cnt = 0; 314 u32 to = CMD_TIMEOUT * HZ; 315 316retry: 317 cmd_buf = kzalloc(CMD_BUFSIZE, GFP_KERNEL); 318 scsi_cmd = kzalloc(MAX_COMMAND_SIZE, GFP_KERNEL); 319 sense_buf = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL); 320 if (unlikely(!cmd_buf || !scsi_cmd || !sense_buf)) { 321 rc = -ENOMEM; 322 goto out; 323 } 324 325 scsi_cmd[0] = SERVICE_ACTION_IN_16; /* read cap(16) */ 326 scsi_cmd[1] = SAI_READ_CAPACITY_16; /* service action */ 327 put_unaligned_be32(CMD_BUFSIZE, &scsi_cmd[10]); 328 329 dev_dbg(dev, "%s: %ssending cmd(0x%x)\n", __func__, 330 retry_cnt ? "re" : "", scsi_cmd[0]); 331 332 /* Drop the ioctl read semahpore across lengthy call */ 333 up_read(&cfg->ioctl_rwsem); 334 result = scsi_execute(sdev, scsi_cmd, DMA_FROM_DEVICE, cmd_buf, 335 CMD_BUFSIZE, sense_buf, to, CMD_RETRIES, 0, NULL); 336 down_read(&cfg->ioctl_rwsem); 337 rc = check_state(cfg); 338 if (rc) { 339 dev_err(dev, "%s: Failed state! result=0x08%X\n", 340 __func__, result); 341 rc = -ENODEV; 342 goto out; 343 } 344 345 if (driver_byte(result) == DRIVER_SENSE) { 346 result &= ~(0xFF<<24); /* DRIVER_SENSE is not an error */ 347 if (result & SAM_STAT_CHECK_CONDITION) { 348 struct scsi_sense_hdr sshdr; 349 350 scsi_normalize_sense(sense_buf, SCSI_SENSE_BUFFERSIZE, 351 &sshdr); 352 switch (sshdr.sense_key) { 353 case NO_SENSE: 354 case RECOVERED_ERROR: 355 /* fall through */ 356 case NOT_READY: 357 result &= ~SAM_STAT_CHECK_CONDITION; 358 break; 359 case UNIT_ATTENTION: 360 switch (sshdr.asc) { 361 case 0x29: /* Power on Reset or Device Reset */ 362 /* fall through */ 363 case 0x2A: /* Device capacity changed */ 364 case 0x3F: /* Report LUNs changed */ 365 /* Retry the command once more */ 366 if (retry_cnt++ < 1) { 367 kfree(cmd_buf); 368 kfree(scsi_cmd); 369 kfree(sense_buf); 370 goto retry; 371 } 372 } 373 break; 374 default: 375 break; 376 } 377 } 378 } 379 380 if (result) { 381 dev_err(dev, "%s: command failed, result=0x%x\n", 382 __func__, result); 383 rc = -EIO; 384 goto out; 385 } 386 387 /* 388 * Read cap was successful, grab values from the buffer; 389 * note that we don't need to worry about unaligned access 390 * as the buffer is allocated on an aligned boundary. 391 */ 392 mutex_lock(&gli->mutex); 393 gli->max_lba = be64_to_cpu(*((__be64 *)&cmd_buf[0])); 394 gli->blk_len = be32_to_cpu(*((__be32 *)&cmd_buf[8])); 395 mutex_unlock(&gli->mutex); 396 397out: 398 kfree(cmd_buf); 399 kfree(scsi_cmd); 400 kfree(sense_buf); 401 402 dev_dbg(dev, "%s: maxlba=%lld blklen=%d rc=%d\n", 403 __func__, gli->max_lba, gli->blk_len, rc); 404 return rc; 405} 406 407/** 408 * get_rhte() - obtains validated resource handle table entry reference 409 * @ctxi: Context owning the resource handle. 410 * @rhndl: Resource handle associated with entry. 411 * @lli: LUN associated with request. 412 * 413 * Return: Validated RHTE on success, NULL on failure 414 */ 415struct sisl_rht_entry *get_rhte(struct ctx_info *ctxi, res_hndl_t rhndl, 416 struct llun_info *lli) 417{ 418 struct sisl_rht_entry *rhte = NULL; 419 420 if (unlikely(!ctxi->rht_start)) { 421 pr_debug("%s: Context does not have allocated RHT!\n", 422 __func__); 423 goto out; 424 } 425 426 if (unlikely(rhndl >= MAX_RHT_PER_CONTEXT)) { 427 pr_debug("%s: Bad resource handle! (%d)\n", __func__, rhndl); 428 goto out; 429 } 430 431 if (unlikely(ctxi->rht_lun[rhndl] != lli)) { 432 pr_debug("%s: Bad resource handle LUN! (%d)\n", 433 __func__, rhndl); 434 goto out; 435 } 436 437 rhte = &ctxi->rht_start[rhndl]; 438 if (unlikely(rhte->nmask == 0)) { 439 pr_debug("%s: Unopened resource handle! (%d)\n", 440 __func__, rhndl); 441 rhte = NULL; 442 goto out; 443 } 444 445out: 446 return rhte; 447} 448 449/** 450 * rhte_checkout() - obtains free/empty resource handle table entry 451 * @ctxi: Context owning the resource handle. 452 * @lli: LUN associated with request. 453 * 454 * Return: Free RHTE on success, NULL on failure 455 */ 456struct sisl_rht_entry *rhte_checkout(struct ctx_info *ctxi, 457 struct llun_info *lli) 458{ 459 struct sisl_rht_entry *rhte = NULL; 460 int i; 461 462 /* Find a free RHT entry */ 463 for (i = 0; i < MAX_RHT_PER_CONTEXT; i++) 464 if (ctxi->rht_start[i].nmask == 0) { 465 rhte = &ctxi->rht_start[i]; 466 ctxi->rht_out++; 467 break; 468 } 469 470 if (likely(rhte)) 471 ctxi->rht_lun[i] = lli; 472 473 pr_debug("%s: returning rhte=%p (%d)\n", __func__, rhte, i); 474 return rhte; 475} 476 477/** 478 * rhte_checkin() - releases a resource handle table entry 479 * @ctxi: Context owning the resource handle. 480 * @rhte: RHTE to release. 481 */ 482void rhte_checkin(struct ctx_info *ctxi, 483 struct sisl_rht_entry *rhte) 484{ 485 u32 rsrc_handle = rhte - ctxi->rht_start; 486 487 rhte->nmask = 0; 488 rhte->fp = 0; 489 ctxi->rht_out--; 490 ctxi->rht_lun[rsrc_handle] = NULL; 491 ctxi->rht_needs_ws[rsrc_handle] = false; 492} 493 494/** 495 * rhte_format1() - populates a RHTE for format 1 496 * @rhte: RHTE to populate. 497 * @lun_id: LUN ID of LUN associated with RHTE. 498 * @perm: Desired permissions for RHTE. 499 * @port_sel: Port selection mask 500 */ 501static void rht_format1(struct sisl_rht_entry *rhte, u64 lun_id, u32 perm, 502 u32 port_sel) 503{ 504 /* 505 * Populate the Format 1 RHT entry for direct access (physical 506 * LUN) using the synchronization sequence defined in the 507 * SISLite specification. 508 */ 509 struct sisl_rht_entry_f1 dummy = { 0 }; 510 struct sisl_rht_entry_f1 *rhte_f1 = (struct sisl_rht_entry_f1 *)rhte; 511 512 memset(rhte_f1, 0, sizeof(*rhte_f1)); 513 rhte_f1->fp = SISL_RHT_FP(1U, 0); 514 dma_wmb(); /* Make setting of format bit visible */ 515 516 rhte_f1->lun_id = lun_id; 517 dma_wmb(); /* Make setting of LUN id visible */ 518 519 /* 520 * Use a dummy RHT Format 1 entry to build the second dword 521 * of the entry that must be populated in a single write when 522 * enabled (valid bit set to TRUE). 523 */ 524 dummy.valid = 0x80; 525 dummy.fp = SISL_RHT_FP(1U, perm); 526 dummy.port_sel = port_sel; 527 rhte_f1->dw = dummy.dw; 528 529 dma_wmb(); /* Make remaining RHT entry fields visible */ 530} 531 532/** 533 * cxlflash_lun_attach() - attaches a user to a LUN and manages the LUN's mode 534 * @gli: LUN to attach. 535 * @mode: Desired mode of the LUN. 536 * @locked: Mutex status on current thread. 537 * 538 * Return: 0 on success, -errno on failure 539 */ 540int cxlflash_lun_attach(struct glun_info *gli, enum lun_mode mode, bool locked) 541{ 542 int rc = 0; 543 544 if (!locked) 545 mutex_lock(&gli->mutex); 546 547 if (gli->mode == MODE_NONE) 548 gli->mode = mode; 549 else if (gli->mode != mode) { 550 pr_debug("%s: LUN operating in mode %d, requested mode %d\n", 551 __func__, gli->mode, mode); 552 rc = -EINVAL; 553 goto out; 554 } 555 556 gli->users++; 557 WARN_ON(gli->users <= 0); 558out: 559 pr_debug("%s: Returning rc=%d gli->mode=%u gli->users=%u\n", 560 __func__, rc, gli->mode, gli->users); 561 if (!locked) 562 mutex_unlock(&gli->mutex); 563 return rc; 564} 565 566/** 567 * cxlflash_lun_detach() - detaches a user from a LUN and resets the LUN's mode 568 * @gli: LUN to detach. 569 * 570 * When resetting the mode, terminate block allocation resources as they 571 * are no longer required (service is safe to call even when block allocation 572 * resources were not present - such as when transitioning from physical mode). 573 * These resources will be reallocated when needed (subsequent transition to 574 * virtual mode). 575 */ 576void cxlflash_lun_detach(struct glun_info *gli) 577{ 578 mutex_lock(&gli->mutex); 579 WARN_ON(gli->mode == MODE_NONE); 580 if (--gli->users == 0) { 581 gli->mode = MODE_NONE; 582 cxlflash_ba_terminate(&gli->blka.ba_lun); 583 } 584 pr_debug("%s: gli->users=%u\n", __func__, gli->users); 585 WARN_ON(gli->users < 0); 586 mutex_unlock(&gli->mutex); 587} 588 589/** 590 * _cxlflash_disk_release() - releases the specified resource entry 591 * @sdev: SCSI device associated with LUN. 592 * @ctxi: Context owning resources. 593 * @release: Release ioctl data structure. 594 * 595 * For LUNs in virtual mode, the virtual LUN associated with the specified 596 * resource handle is resized to 0 prior to releasing the RHTE. Note that the 597 * AFU sync should _not_ be performed when the context is sitting on the error 598 * recovery list. A context on the error recovery list is not known to the AFU 599 * due to reset. When the context is recovered, it will be reattached and made 600 * known again to the AFU. 601 * 602 * Return: 0 on success, -errno on failure 603 */ 604int _cxlflash_disk_release(struct scsi_device *sdev, 605 struct ctx_info *ctxi, 606 struct dk_cxlflash_release *release) 607{ 608 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata; 609 struct device *dev = &cfg->dev->dev; 610 struct llun_info *lli = sdev->hostdata; 611 struct glun_info *gli = lli->parent; 612 struct afu *afu = cfg->afu; 613 bool put_ctx = false; 614 615 struct dk_cxlflash_resize size; 616 res_hndl_t rhndl = release->rsrc_handle; 617 618 int rc = 0; 619 u64 ctxid = DECODE_CTXID(release->context_id), 620 rctxid = release->context_id; 621 622 struct sisl_rht_entry *rhte; 623 struct sisl_rht_entry_f1 *rhte_f1; 624 625 dev_dbg(dev, "%s: ctxid=%llu rhndl=0x%llx gli->mode=%u gli->users=%u\n", 626 __func__, ctxid, release->rsrc_handle, gli->mode, gli->users); 627 628 if (!ctxi) { 629 ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK); 630 if (unlikely(!ctxi)) { 631 dev_dbg(dev, "%s: Bad context! (%llu)\n", 632 __func__, ctxid); 633 rc = -EINVAL; 634 goto out; 635 } 636 637 put_ctx = true; 638 } 639 640 rhte = get_rhte(ctxi, rhndl, lli); 641 if (unlikely(!rhte)) { 642 dev_dbg(dev, "%s: Bad resource handle! (%d)\n", 643 __func__, rhndl); 644 rc = -EINVAL; 645 goto out; 646 } 647 648 /* 649 * Resize to 0 for virtual LUNS by setting the size 650 * to 0. This will clear LXT_START and LXT_CNT fields 651 * in the RHT entry and properly sync with the AFU. 652 * 653 * Afterwards we clear the remaining fields. 654 */ 655 switch (gli->mode) { 656 case MODE_VIRTUAL: 657 marshal_rele_to_resize(release, &size); 658 size.req_size = 0; 659 rc = _cxlflash_vlun_resize(sdev, ctxi, &size); 660 if (rc) { 661 dev_dbg(dev, "%s: resize failed rc %d\n", __func__, rc); 662 goto out; 663 } 664 665 break; 666 case MODE_PHYSICAL: 667 /* 668 * Clear the Format 1 RHT entry for direct access 669 * (physical LUN) using the synchronization sequence 670 * defined in the SISLite specification. 671 */ 672 rhte_f1 = (struct sisl_rht_entry_f1 *)rhte; 673 674 rhte_f1->valid = 0; 675 dma_wmb(); /* Make revocation of RHT entry visible */ 676 677 rhte_f1->lun_id = 0; 678 dma_wmb(); /* Make clearing of LUN id visible */ 679 680 rhte_f1->dw = 0; 681 dma_wmb(); /* Make RHT entry bottom-half clearing visible */ 682 683 if (!ctxi->err_recovery_active) 684 cxlflash_afu_sync(afu, ctxid, rhndl, AFU_HW_SYNC); 685 break; 686 default: 687 WARN(1, "Unsupported LUN mode!"); 688 goto out; 689 } 690 691 rhte_checkin(ctxi, rhte); 692 cxlflash_lun_detach(gli); 693 694out: 695 if (put_ctx) 696 put_context(ctxi); 697 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); 698 return rc; 699} 700 701int cxlflash_disk_release(struct scsi_device *sdev, 702 struct dk_cxlflash_release *release) 703{ 704 return _cxlflash_disk_release(sdev, NULL, release); 705} 706 707/** 708 * destroy_context() - releases a context 709 * @cfg: Internal structure associated with the host. 710 * @ctxi: Context to release. 711 * 712 * Note that the rht_lun member of the context was cut from a single 713 * allocation when the context was created and therefore does not need 714 * to be explicitly freed. Also note that we conditionally check for the 715 * existence of the context control map before clearing the RHT registers 716 * and context capabilities because it is possible to destroy a context 717 * while the context is in the error state (previous mapping was removed 718 * [so we don't have to worry about clearing] and context is waiting for 719 * a new mapping). 720 */ 721static void destroy_context(struct cxlflash_cfg *cfg, 722 struct ctx_info *ctxi) 723{ 724 struct afu *afu = cfg->afu; 725 726 WARN_ON(!list_empty(&ctxi->luns)); 727 728 /* Clear RHT registers and drop all capabilities for this context */ 729 if (afu->afu_map && ctxi->ctrl_map) { 730 writeq_be(0, &ctxi->ctrl_map->rht_start); 731 writeq_be(0, &ctxi->ctrl_map->rht_cnt_id); 732 writeq_be(0, &ctxi->ctrl_map->ctx_cap); 733 } 734 735 /* Free memory associated with context */ 736 free_page((ulong)ctxi->rht_start); 737 kfree(ctxi->rht_needs_ws); 738 kfree(ctxi->rht_lun); 739 kfree(ctxi); 740} 741 742/** 743 * create_context() - allocates and initializes a context 744 * @cfg: Internal structure associated with the host. 745 * @ctx: Previously obtained CXL context reference. 746 * @ctxid: Previously obtained process element associated with CXL context. 747 * @adap_fd: Previously obtained adapter fd associated with CXL context. 748 * @file: Previously obtained file associated with CXL context. 749 * @perms: User-specified permissions. 750 * 751 * The context's mutex is locked when an allocated context is returned. 752 * 753 * Return: Allocated context on success, NULL on failure 754 */ 755static struct ctx_info *create_context(struct cxlflash_cfg *cfg, 756 struct cxl_context *ctx, int ctxid, 757 int adap_fd, struct file *file, 758 u32 perms) 759{ 760 struct device *dev = &cfg->dev->dev; 761 struct afu *afu = cfg->afu; 762 struct ctx_info *ctxi = NULL; 763 struct llun_info **lli = NULL; 764 u8 *ws = NULL; 765 struct sisl_rht_entry *rhte; 766 767 ctxi = kzalloc(sizeof(*ctxi), GFP_KERNEL); 768 lli = kzalloc((MAX_RHT_PER_CONTEXT * sizeof(*lli)), GFP_KERNEL); 769 ws = kzalloc((MAX_RHT_PER_CONTEXT * sizeof(*ws)), GFP_KERNEL); 770 if (unlikely(!ctxi || !lli || !ws)) { 771 dev_err(dev, "%s: Unable to allocate context!\n", __func__); 772 goto err; 773 } 774 775 rhte = (struct sisl_rht_entry *)get_zeroed_page(GFP_KERNEL); 776 if (unlikely(!rhte)) { 777 dev_err(dev, "%s: Unable to allocate RHT!\n", __func__); 778 goto err; 779 } 780 781 ctxi->rht_lun = lli; 782 ctxi->rht_needs_ws = ws; 783 ctxi->rht_start = rhte; 784 ctxi->rht_perms = perms; 785 786 ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl; 787 ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid); 788 ctxi->lfd = adap_fd; 789 ctxi->pid = current->tgid; /* tgid = pid */ 790 ctxi->ctx = ctx; 791 ctxi->file = file; 792 mutex_init(&ctxi->mutex); 793 INIT_LIST_HEAD(&ctxi->luns); 794 INIT_LIST_HEAD(&ctxi->list); /* initialize for list_empty() */ 795 796 mutex_lock(&ctxi->mutex); 797out: 798 return ctxi; 799 800err: 801 kfree(ws); 802 kfree(lli); 803 kfree(ctxi); 804 ctxi = NULL; 805 goto out; 806} 807 808/** 809 * _cxlflash_disk_detach() - detaches a LUN from a context 810 * @sdev: SCSI device associated with LUN. 811 * @ctxi: Context owning resources. 812 * @detach: Detach ioctl data structure. 813 * 814 * As part of the detach, all per-context resources associated with the LUN 815 * are cleaned up. When detaching the last LUN for a context, the context 816 * itself is cleaned up and released. 817 * 818 * Return: 0 on success, -errno on failure 819 */ 820static int _cxlflash_disk_detach(struct scsi_device *sdev, 821 struct ctx_info *ctxi, 822 struct dk_cxlflash_detach *detach) 823{ 824 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata; 825 struct device *dev = &cfg->dev->dev; 826 struct llun_info *lli = sdev->hostdata; 827 struct lun_access *lun_access, *t; 828 struct dk_cxlflash_release rel; 829 bool put_ctx = false; 830 831 int i; 832 int rc = 0; 833 int lfd; 834 u64 ctxid = DECODE_CTXID(detach->context_id), 835 rctxid = detach->context_id; 836 837 dev_dbg(dev, "%s: ctxid=%llu\n", __func__, ctxid); 838 839 if (!ctxi) { 840 ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK); 841 if (unlikely(!ctxi)) { 842 dev_dbg(dev, "%s: Bad context! (%llu)\n", 843 __func__, ctxid); 844 rc = -EINVAL; 845 goto out; 846 } 847 848 put_ctx = true; 849 } 850 851 /* Cleanup outstanding resources tied to this LUN */ 852 if (ctxi->rht_out) { 853 marshal_det_to_rele(detach, &rel); 854 for (i = 0; i < MAX_RHT_PER_CONTEXT; i++) { 855 if (ctxi->rht_lun[i] == lli) { 856 rel.rsrc_handle = i; 857 _cxlflash_disk_release(sdev, ctxi, &rel); 858 } 859 860 /* No need to loop further if we're done */ 861 if (ctxi->rht_out == 0) 862 break; 863 } 864 } 865 866 /* Take our LUN out of context, free the node */ 867 list_for_each_entry_safe(lun_access, t, &ctxi->luns, list) 868 if (lun_access->lli == lli) { 869 list_del(&lun_access->list); 870 kfree(lun_access); 871 lun_access = NULL; 872 break; 873 } 874 875 /* Tear down context following last LUN cleanup */ 876 if (list_empty(&ctxi->luns)) { 877 ctxi->unavail = true; 878 mutex_unlock(&ctxi->mutex); 879 mutex_lock(&cfg->ctx_tbl_list_mutex); 880 mutex_lock(&ctxi->mutex); 881 882 /* Might not have been in error list so conditionally remove */ 883 if (!list_empty(&ctxi->list)) 884 list_del(&ctxi->list); 885 cfg->ctx_tbl[ctxid] = NULL; 886 mutex_unlock(&cfg->ctx_tbl_list_mutex); 887 mutex_unlock(&ctxi->mutex); 888 889 lfd = ctxi->lfd; 890 destroy_context(cfg, ctxi); 891 ctxi = NULL; 892 put_ctx = false; 893 894 /* 895 * As a last step, clean up external resources when not 896 * already on an external cleanup thread, i.e.: close(adap_fd). 897 * 898 * NOTE: this will free up the context from the CXL services, 899 * allowing it to dole out the same context_id on a future 900 * (or even currently in-flight) disk_attach operation. 901 */ 902 if (lfd != -1) 903 sys_close(lfd); 904 } 905 906 /* Release the sdev reference that bound this LUN to the context */ 907 scsi_device_put(sdev); 908 909out: 910 if (put_ctx) 911 put_context(ctxi); 912 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); 913 return rc; 914} 915 916static int cxlflash_disk_detach(struct scsi_device *sdev, 917 struct dk_cxlflash_detach *detach) 918{ 919 return _cxlflash_disk_detach(sdev, NULL, detach); 920} 921 922/** 923 * cxlflash_cxl_release() - release handler for adapter file descriptor 924 * @inode: File-system inode associated with fd. 925 * @file: File installed with adapter file descriptor. 926 * 927 * This routine is the release handler for the fops registered with 928 * the CXL services on an initial attach for a context. It is called 929 * when a close is performed on the adapter file descriptor returned 930 * to the user. Programmatically, the user is not required to perform 931 * the close, as it is handled internally via the detach ioctl when 932 * a context is being removed. Note that nothing prevents the user 933 * from performing a close, but the user should be aware that doing 934 * so is considered catastrophic and subsequent usage of the superpipe 935 * API with previously saved off tokens will fail. 936 * 937 * When initiated from an external close (either by the user or via 938 * a process tear down), the routine derives the context reference 939 * and calls detach for each LUN associated with the context. The 940 * final detach operation will cause the context itself to be freed. 941 * Note that the saved off lfd is reset prior to calling detach to 942 * signify that the final detach should not perform a close. 943 * 944 * When initiated from a detach operation as part of the tear down 945 * of a context, the context is first completely freed and then the 946 * close is performed. This routine will fail to derive the context 947 * reference (due to the context having already been freed) and then 948 * call into the CXL release entry point. 949 * 950 * Thus, with exception to when the CXL process element (context id) 951 * lookup fails (a case that should theoretically never occur), every 952 * call into this routine results in a complete freeing of a context. 953 * 954 * As part of the detach, all per-context resources associated with the LUN 955 * are cleaned up. When detaching the last LUN for a context, the context 956 * itself is cleaned up and released. 957 * 958 * Return: 0 on success 959 */ 960static int cxlflash_cxl_release(struct inode *inode, struct file *file) 961{ 962 struct cxl_context *ctx = cxl_fops_get_context(file); 963 struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg, 964 cxl_fops); 965 struct device *dev = &cfg->dev->dev; 966 struct ctx_info *ctxi = NULL; 967 struct dk_cxlflash_detach detach = { { 0 }, 0 }; 968 struct lun_access *lun_access, *t; 969 enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE; 970 int ctxid; 971 972 ctxid = cxl_process_element(ctx); 973 if (unlikely(ctxid < 0)) { 974 dev_err(dev, "%s: Context %p was closed! (%d)\n", 975 __func__, ctx, ctxid); 976 goto out; 977 } 978 979 ctxi = get_context(cfg, ctxid, file, ctrl); 980 if (unlikely(!ctxi)) { 981 ctxi = get_context(cfg, ctxid, file, ctrl | CTX_CTRL_CLONE); 982 if (!ctxi) { 983 dev_dbg(dev, "%s: Context %d already free!\n", 984 __func__, ctxid); 985 goto out_release; 986 } 987 988 dev_dbg(dev, "%s: Another process owns context %d!\n", 989 __func__, ctxid); 990 put_context(ctxi); 991 goto out; 992 } 993 994 dev_dbg(dev, "%s: close(%d) for context %d\n", 995 __func__, ctxi->lfd, ctxid); 996 997 /* Reset the file descriptor to indicate we're on a close() thread */ 998 ctxi->lfd = -1; 999 detach.context_id = ctxi->ctxid; 1000 list_for_each_entry_safe(lun_access, t, &ctxi->luns, list) 1001 _cxlflash_disk_detach(lun_access->sdev, ctxi, &detach); 1002out_release: 1003 cxl_fd_release(inode, file); 1004out: 1005 dev_dbg(dev, "%s: returning\n", __func__); 1006 return 0; 1007} 1008 1009/** 1010 * unmap_context() - clears a previously established mapping 1011 * @ctxi: Context owning the mapping. 1012 * 1013 * This routine is used to switch between the error notification page 1014 * (dummy page of all 1's) and the real mapping (established by the CXL 1015 * fault handler). 1016 */ 1017static void unmap_context(struct ctx_info *ctxi) 1018{ 1019 unmap_mapping_range(ctxi->file->f_mapping, 0, 0, 1); 1020} 1021 1022/** 1023 * get_err_page() - obtains and allocates the error notification page 1024 * 1025 * Return: error notification page on success, NULL on failure 1026 */ 1027static struct page *get_err_page(void) 1028{ 1029 struct page *err_page = global.err_page; 1030 1031 if (unlikely(!err_page)) { 1032 err_page = alloc_page(GFP_KERNEL); 1033 if (unlikely(!err_page)) { 1034 pr_err("%s: Unable to allocate err_page!\n", __func__); 1035 goto out; 1036 } 1037 1038 memset(page_address(err_page), -1, PAGE_SIZE); 1039 1040 /* Serialize update w/ other threads to avoid a leak */ 1041 mutex_lock(&global.mutex); 1042 if (likely(!global.err_page)) 1043 global.err_page = err_page; 1044 else { 1045 __free_page(err_page); 1046 err_page = global.err_page; 1047 } 1048 mutex_unlock(&global.mutex); 1049 } 1050 1051out: 1052 pr_debug("%s: returning err_page=%p\n", __func__, err_page); 1053 return err_page; 1054} 1055 1056/** 1057 * cxlflash_mmap_fault() - mmap fault handler for adapter file descriptor 1058 * @vma: VM area associated with mapping. 1059 * @vmf: VM fault associated with current fault. 1060 * 1061 * To support error notification via MMIO, faults are 'caught' by this routine 1062 * that was inserted before passing back the adapter file descriptor on attach. 1063 * When a fault occurs, this routine evaluates if error recovery is active and 1064 * if so, installs the error page to 'notify' the user about the error state. 1065 * During normal operation, the fault is simply handled by the original fault 1066 * handler that was installed by CXL services as part of initializing the 1067 * adapter file descriptor. The VMA's page protection bits are toggled to 1068 * indicate cached/not-cached depending on the memory backing the fault. 1069 * 1070 * Return: 0 on success, VM_FAULT_SIGBUS on failure 1071 */ 1072static int cxlflash_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 1073{ 1074 struct file *file = vma->vm_file; 1075 struct cxl_context *ctx = cxl_fops_get_context(file); 1076 struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg, 1077 cxl_fops); 1078 struct device *dev = &cfg->dev->dev; 1079 struct ctx_info *ctxi = NULL; 1080 struct page *err_page = NULL; 1081 enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE; 1082 int rc = 0; 1083 int ctxid; 1084 1085 ctxid = cxl_process_element(ctx); 1086 if (unlikely(ctxid < 0)) { 1087 dev_err(dev, "%s: Context %p was closed! (%d)\n", 1088 __func__, ctx, ctxid); 1089 goto err; 1090 } 1091 1092 ctxi = get_context(cfg, ctxid, file, ctrl); 1093 if (unlikely(!ctxi)) { 1094 dev_dbg(dev, "%s: Bad context! (%d)\n", __func__, ctxid); 1095 goto err; 1096 } 1097 1098 dev_dbg(dev, "%s: fault(%d) for context %d\n", 1099 __func__, ctxi->lfd, ctxid); 1100 1101 if (likely(!ctxi->err_recovery_active)) { 1102 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 1103 rc = ctxi->cxl_mmap_vmops->fault(vma, vmf); 1104 } else { 1105 dev_dbg(dev, "%s: err recovery active, use err_page!\n", 1106 __func__); 1107 1108 err_page = get_err_page(); 1109 if (unlikely(!err_page)) { 1110 dev_err(dev, "%s: Could not obtain error page!\n", 1111 __func__); 1112 rc = VM_FAULT_RETRY; 1113 goto out; 1114 } 1115 1116 get_page(err_page); 1117 vmf->page = err_page; 1118 vma->vm_page_prot = pgprot_cached(vma->vm_page_prot); 1119 } 1120 1121out: 1122 if (likely(ctxi)) 1123 put_context(ctxi); 1124 dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc); 1125 return rc; 1126 1127err: 1128 rc = VM_FAULT_SIGBUS; 1129 goto out; 1130} 1131 1132/* 1133 * Local MMAP vmops to 'catch' faults 1134 */ 1135static const struct vm_operations_struct cxlflash_mmap_vmops = { 1136 .fault = cxlflash_mmap_fault, 1137}; 1138 1139/** 1140 * cxlflash_cxl_mmap() - mmap handler for adapter file descriptor 1141 * @file: File installed with adapter file descriptor. 1142 * @vma: VM area associated with mapping. 1143 * 1144 * Installs local mmap vmops to 'catch' faults for error notification support. 1145 * 1146 * Return: 0 on success, -errno on failure 1147 */ 1148static int cxlflash_cxl_mmap(struct file *file, struct vm_area_struct *vma) 1149{ 1150 struct cxl_context *ctx = cxl_fops_get_context(file); 1151 struct cxlflash_cfg *cfg = container_of(file->f_op, struct cxlflash_cfg, 1152 cxl_fops); 1153 struct device *dev = &cfg->dev->dev; 1154 struct ctx_info *ctxi = NULL; 1155 enum ctx_ctrl ctrl = CTX_CTRL_ERR_FALLBACK | CTX_CTRL_FILE; 1156 int ctxid; 1157 int rc = 0; 1158 1159 ctxid = cxl_process_element(ctx); 1160 if (unlikely(ctxid < 0)) { 1161 dev_err(dev, "%s: Context %p was closed! (%d)\n", 1162 __func__, ctx, ctxid); 1163 rc = -EIO; 1164 goto out; 1165 } 1166 1167 ctxi = get_context(cfg, ctxid, file, ctrl); 1168 if (unlikely(!ctxi)) { 1169 dev_dbg(dev, "%s: Bad context! (%d)\n", __func__, ctxid); 1170 rc = -EIO; 1171 goto out; 1172 } 1173 1174 dev_dbg(dev, "%s: mmap(%d) for context %d\n", 1175 __func__, ctxi->lfd, ctxid); 1176 1177 rc = cxl_fd_mmap(file, vma); 1178 if (likely(!rc)) { 1179 /* Insert ourself in the mmap fault handler path */ 1180 ctxi->cxl_mmap_vmops = vma->vm_ops; 1181 vma->vm_ops = &cxlflash_mmap_vmops; 1182 } 1183 1184out: 1185 if (likely(ctxi)) 1186 put_context(ctxi); 1187 return rc; 1188} 1189 1190const struct file_operations cxlflash_cxl_fops = { 1191 .owner = THIS_MODULE, 1192 .mmap = cxlflash_cxl_mmap, 1193 .release = cxlflash_cxl_release, 1194}; 1195 1196/** 1197 * cxlflash_mark_contexts_error() - move contexts to error state and list 1198 * @cfg: Internal structure associated with the host. 1199 * 1200 * A context is only moved over to the error list when there are no outstanding 1201 * references to it. This ensures that a running operation has completed. 1202 * 1203 * Return: 0 on success, -errno on failure 1204 */ 1205int cxlflash_mark_contexts_error(struct cxlflash_cfg *cfg) 1206{ 1207 int i, rc = 0; 1208 struct ctx_info *ctxi = NULL; 1209 1210 mutex_lock(&cfg->ctx_tbl_list_mutex); 1211 1212 for (i = 0; i < MAX_CONTEXT; i++) { 1213 ctxi = cfg->ctx_tbl[i]; 1214 if (ctxi) { 1215 mutex_lock(&ctxi->mutex); 1216 cfg->ctx_tbl[i] = NULL; 1217 list_add(&ctxi->list, &cfg->ctx_err_recovery); 1218 ctxi->err_recovery_active = true; 1219 ctxi->ctrl_map = NULL; 1220 unmap_context(ctxi); 1221 mutex_unlock(&ctxi->mutex); 1222 } 1223 } 1224 1225 mutex_unlock(&cfg->ctx_tbl_list_mutex); 1226 return rc; 1227} 1228 1229/* 1230 * Dummy NULL fops 1231 */ 1232static const struct file_operations null_fops = { 1233 .owner = THIS_MODULE, 1234}; 1235 1236/** 1237 * check_state() - checks and responds to the current adapter state 1238 * @cfg: Internal structure associated with the host. 1239 * 1240 * This routine can block and should only be used on process context. 1241 * It assumes that the caller is an ioctl thread and holding the ioctl 1242 * read semaphore. This is temporarily let up across the wait to allow 1243 * for draining actively running ioctls. Also note that when waking up 1244 * from waiting in reset, the state is unknown and must be checked again 1245 * before proceeding. 1246 * 1247 * Return: 0 on success, -errno on failure 1248 */ 1249int check_state(struct cxlflash_cfg *cfg) 1250{ 1251 struct device *dev = &cfg->dev->dev; 1252 int rc = 0; 1253 1254retry: 1255 switch (cfg->state) { 1256 case STATE_RESET: 1257 dev_dbg(dev, "%s: Reset state, going to wait...\n", __func__); 1258 up_read(&cfg->ioctl_rwsem); 1259 rc = wait_event_interruptible(cfg->reset_waitq, 1260 cfg->state != STATE_RESET); 1261 down_read(&cfg->ioctl_rwsem); 1262 if (unlikely(rc)) 1263 break; 1264 goto retry; 1265 case STATE_FAILTERM: 1266 dev_dbg(dev, "%s: Failed/Terminating!\n", __func__); 1267 rc = -ENODEV; 1268 break; 1269 default: 1270 break; 1271 } 1272 1273 return rc; 1274} 1275 1276/** 1277 * cxlflash_disk_attach() - attach a LUN to a context 1278 * @sdev: SCSI device associated with LUN. 1279 * @attach: Attach ioctl data structure. 1280 * 1281 * Creates a context and attaches LUN to it. A LUN can only be attached 1282 * one time to a context (subsequent attaches for the same context/LUN pair 1283 * are not supported). Additional LUNs can be attached to a context by 1284 * specifying the 'reuse' flag defined in the cxlflash_ioctl.h header. 1285 * 1286 * Return: 0 on success, -errno on failure 1287 */ 1288static int cxlflash_disk_attach(struct scsi_device *sdev, 1289 struct dk_cxlflash_attach *attach) 1290{ 1291 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata; 1292 struct device *dev = &cfg->dev->dev; 1293 struct afu *afu = cfg->afu; 1294 struct llun_info *lli = sdev->hostdata; 1295 struct glun_info *gli = lli->parent; 1296 struct cxl_ioctl_start_work *work; 1297 struct ctx_info *ctxi = NULL; 1298 struct lun_access *lun_access = NULL; 1299 int rc = 0; 1300 u32 perms; 1301 int ctxid = -1; 1302 u64 rctxid = 0UL; 1303 struct file *file; 1304 1305 struct cxl_context *ctx; 1306 1307 int fd = -1; 1308 1309 if (attach->num_interrupts > 4) { 1310 dev_dbg(dev, "%s: Cannot support this many interrupts %llu\n", 1311 __func__, attach->num_interrupts); 1312 rc = -EINVAL; 1313 goto out; 1314 } 1315 1316 if (gli->max_lba == 0) { 1317 dev_dbg(dev, "%s: No capacity info for this LUN (%016llX)\n", 1318 __func__, lli->lun_id[sdev->channel]); 1319 rc = read_cap16(sdev, lli); 1320 if (rc) { 1321 dev_err(dev, "%s: Invalid device! (%d)\n", 1322 __func__, rc); 1323 rc = -ENODEV; 1324 goto out; 1325 } 1326 dev_dbg(dev, "%s: LBA = %016llX\n", __func__, gli->max_lba); 1327 dev_dbg(dev, "%s: BLK_LEN = %08X\n", __func__, gli->blk_len); 1328 } 1329 1330 if (attach->hdr.flags & DK_CXLFLASH_ATTACH_REUSE_CONTEXT) { 1331 rctxid = attach->context_id; 1332 ctxi = get_context(cfg, rctxid, NULL, 0); 1333 if (!ctxi) { 1334 dev_dbg(dev, "%s: Bad context! (%016llX)\n", 1335 __func__, rctxid); 1336 rc = -EINVAL; 1337 goto out; 1338 } 1339 1340 list_for_each_entry(lun_access, &ctxi->luns, list) 1341 if (lun_access->lli == lli) { 1342 dev_dbg(dev, "%s: Already attached!\n", 1343 __func__); 1344 rc = -EINVAL; 1345 goto out; 1346 } 1347 } 1348 1349 rc = scsi_device_get(sdev); 1350 if (unlikely(rc)) { 1351 dev_err(dev, "%s: Unable to get sdev reference!\n", __func__); 1352 goto out; 1353 } 1354 1355 lun_access = kzalloc(sizeof(*lun_access), GFP_KERNEL); 1356 if (unlikely(!lun_access)) { 1357 dev_err(dev, "%s: Unable to allocate lun_access!\n", __func__); 1358 rc = -ENOMEM; 1359 goto err0; 1360 } 1361 1362 lun_access->lli = lli; 1363 lun_access->sdev = sdev; 1364 1365 /* Non-NULL context indicates reuse */ 1366 if (ctxi) { 1367 dev_dbg(dev, "%s: Reusing context for LUN! (%016llX)\n", 1368 __func__, rctxid); 1369 list_add(&lun_access->list, &ctxi->luns); 1370 fd = ctxi->lfd; 1371 goto out_attach; 1372 } 1373 1374 ctx = cxl_dev_context_init(cfg->dev); 1375 if (unlikely(IS_ERR_OR_NULL(ctx))) { 1376 dev_err(dev, "%s: Could not initialize context %p\n", 1377 __func__, ctx); 1378 rc = -ENODEV; 1379 goto err1; 1380 } 1381 1382 ctxid = cxl_process_element(ctx); 1383 if (unlikely((ctxid > MAX_CONTEXT) || (ctxid < 0))) { 1384 dev_err(dev, "%s: ctxid (%d) invalid!\n", __func__, ctxid); 1385 rc = -EPERM; 1386 goto err2; 1387 } 1388 1389 file = cxl_get_fd(ctx, &cfg->cxl_fops, &fd); 1390 if (unlikely(fd < 0)) { 1391 rc = -ENODEV; 1392 dev_err(dev, "%s: Could not get file descriptor\n", __func__); 1393 goto err2; 1394 } 1395 1396 /* Translate read/write O_* flags from fcntl.h to AFU permission bits */ 1397 perms = SISL_RHT_PERM(attach->hdr.flags + 1); 1398 1399 ctxi = create_context(cfg, ctx, ctxid, fd, file, perms); 1400 if (unlikely(!ctxi)) { 1401 dev_err(dev, "%s: Failed to create context! (%d)\n", 1402 __func__, ctxid); 1403 goto err3; 1404 } 1405 1406 work = &ctxi->work; 1407 work->num_interrupts = attach->num_interrupts; 1408 work->flags = CXL_START_WORK_NUM_IRQS; 1409 1410 rc = cxl_start_work(ctx, work); 1411 if (unlikely(rc)) { 1412 dev_dbg(dev, "%s: Could not start context rc=%d\n", 1413 __func__, rc); 1414 goto err4; 1415 } 1416 1417 rc = afu_attach(cfg, ctxi); 1418 if (unlikely(rc)) { 1419 dev_err(dev, "%s: Could not attach AFU rc %d\n", __func__, rc); 1420 goto err5; 1421 } 1422 1423 /* 1424 * No error paths after this point. Once the fd is installed it's 1425 * visible to user space and can't be undone safely on this thread. 1426 * There is no need to worry about a deadlock here because no one 1427 * knows about us yet; we can be the only one holding our mutex. 1428 */ 1429 list_add(&lun_access->list, &ctxi->luns); 1430 mutex_unlock(&ctxi->mutex); 1431 mutex_lock(&cfg->ctx_tbl_list_mutex); 1432 mutex_lock(&ctxi->mutex); 1433 cfg->ctx_tbl[ctxid] = ctxi; 1434 mutex_unlock(&cfg->ctx_tbl_list_mutex); 1435 fd_install(fd, file); 1436 1437out_attach: 1438 attach->hdr.return_flags = 0; 1439 attach->context_id = ctxi->ctxid; 1440 attach->block_size = gli->blk_len; 1441 attach->mmio_size = sizeof(afu->afu_map->hosts[0].harea); 1442 attach->last_lba = gli->max_lba; 1443 attach->max_xfer = sdev->host->max_sectors * MAX_SECTOR_UNIT; 1444 attach->max_xfer /= gli->blk_len; 1445 1446out: 1447 attach->adap_fd = fd; 1448 1449 if (ctxi) 1450 put_context(ctxi); 1451 1452 dev_dbg(dev, "%s: returning ctxid=%d fd=%d bs=%lld rc=%d llba=%lld\n", 1453 __func__, ctxid, fd, attach->block_size, rc, attach->last_lba); 1454 return rc; 1455 1456err5: 1457 cxl_stop_context(ctx); 1458err4: 1459 put_context(ctxi); 1460 destroy_context(cfg, ctxi); 1461 ctxi = NULL; 1462err3: 1463 /* 1464 * Here, we're overriding the fops with a dummy all-NULL fops because 1465 * fput() calls the release fop, which will cause us to mistakenly 1466 * call into the CXL code. Rather than try to add yet more complexity 1467 * to that routine (cxlflash_cxl_release) we should try to fix the 1468 * issue here. 1469 */ 1470 file->f_op = &null_fops; 1471 fput(file); 1472 put_unused_fd(fd); 1473 fd = -1; 1474err2: 1475 cxl_release_context(ctx); 1476err1: 1477 kfree(lun_access); 1478err0: 1479 scsi_device_put(sdev); 1480 goto out; 1481} 1482 1483/** 1484 * recover_context() - recovers a context in error 1485 * @cfg: Internal structure associated with the host. 1486 * @ctxi: Context to release. 1487 * 1488 * Restablishes the state for a context-in-error. 1489 * 1490 * Return: 0 on success, -errno on failure 1491 */ 1492static int recover_context(struct cxlflash_cfg *cfg, struct ctx_info *ctxi) 1493{ 1494 struct device *dev = &cfg->dev->dev; 1495 int rc = 0; 1496 int old_fd, fd = -1; 1497 int ctxid = -1; 1498 struct file *file; 1499 struct cxl_context *ctx; 1500 struct afu *afu = cfg->afu; 1501 1502 ctx = cxl_dev_context_init(cfg->dev); 1503 if (unlikely(IS_ERR_OR_NULL(ctx))) { 1504 dev_err(dev, "%s: Could not initialize context %p\n", 1505 __func__, ctx); 1506 rc = -ENODEV; 1507 goto out; 1508 } 1509 1510 ctxid = cxl_process_element(ctx); 1511 if (unlikely((ctxid > MAX_CONTEXT) || (ctxid < 0))) { 1512 dev_err(dev, "%s: ctxid (%d) invalid!\n", __func__, ctxid); 1513 rc = -EPERM; 1514 goto err1; 1515 } 1516 1517 file = cxl_get_fd(ctx, &cfg->cxl_fops, &fd); 1518 if (unlikely(fd < 0)) { 1519 rc = -ENODEV; 1520 dev_err(dev, "%s: Could not get file descriptor\n", __func__); 1521 goto err1; 1522 } 1523 1524 rc = cxl_start_work(ctx, &ctxi->work); 1525 if (unlikely(rc)) { 1526 dev_dbg(dev, "%s: Could not start context rc=%d\n", 1527 __func__, rc); 1528 goto err2; 1529 } 1530 1531 /* Update with new MMIO area based on updated context id */ 1532 ctxi->ctrl_map = &afu->afu_map->ctrls[ctxid].ctrl; 1533 1534 rc = afu_attach(cfg, ctxi); 1535 if (rc) { 1536 dev_err(dev, "%s: Could not attach AFU rc %d\n", __func__, rc); 1537 goto err3; 1538 } 1539 1540 /* 1541 * No error paths after this point. Once the fd is installed it's 1542 * visible to user space and can't be undone safely on this thread. 1543 */ 1544 old_fd = ctxi->lfd; 1545 ctxi->ctxid = ENCODE_CTXID(ctxi, ctxid); 1546 ctxi->lfd = fd; 1547 ctxi->ctx = ctx; 1548 ctxi->file = file; 1549 1550 /* 1551 * Put context back in table (note the reinit of the context list); 1552 * we must first drop the context's mutex and then acquire it in 1553 * order with the table/list mutex to avoid a deadlock - safe to do 1554 * here because no one can find us at this moment in time. 1555 */ 1556 mutex_unlock(&ctxi->mutex); 1557 mutex_lock(&cfg->ctx_tbl_list_mutex); 1558 mutex_lock(&ctxi->mutex); 1559 list_del_init(&ctxi->list); 1560 cfg->ctx_tbl[ctxid] = ctxi; 1561 mutex_unlock(&cfg->ctx_tbl_list_mutex); 1562 fd_install(fd, file); 1563 1564 /* Release the original adapter fd and associated CXL resources */ 1565 sys_close(old_fd); 1566out: 1567 dev_dbg(dev, "%s: returning ctxid=%d fd=%d rc=%d\n", 1568 __func__, ctxid, fd, rc); 1569 return rc; 1570 1571err3: 1572 cxl_stop_context(ctx); 1573err2: 1574 fput(file); 1575 put_unused_fd(fd); 1576err1: 1577 cxl_release_context(ctx); 1578 goto out; 1579} 1580 1581/** 1582 * cxlflash_afu_recover() - initiates AFU recovery 1583 * @sdev: SCSI device associated with LUN. 1584 * @recover: Recover ioctl data structure. 1585 * 1586 * Only a single recovery is allowed at a time to avoid exhausting CXL 1587 * resources (leading to recovery failure) in the event that we're up 1588 * against the maximum number of contexts limit. For similar reasons, 1589 * a context recovery is retried if there are multiple recoveries taking 1590 * place at the same time and the failure was due to CXL services being 1591 * unable to keep up. 1592 * 1593 * Because a user can detect an error condition before the kernel, it is 1594 * quite possible for this routine to act as the kernel's EEH detection 1595 * source (MMIO read of mbox_r). Because of this, there is a window of 1596 * time where an EEH might have been detected but not yet 'serviced' 1597 * (callback invoked, causing the device to enter reset state). To avoid 1598 * looping in this routine during that window, a 1 second sleep is in place 1599 * between the time the MMIO failure is detected and the time a wait on the 1600 * reset wait queue is attempted via check_state(). 1601 * 1602 * Return: 0 on success, -errno on failure 1603 */ 1604static int cxlflash_afu_recover(struct scsi_device *sdev, 1605 struct dk_cxlflash_recover_afu *recover) 1606{ 1607 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata; 1608 struct device *dev = &cfg->dev->dev; 1609 struct llun_info *lli = sdev->hostdata; 1610 struct afu *afu = cfg->afu; 1611 struct ctx_info *ctxi = NULL; 1612 struct mutex *mutex = &cfg->ctx_recovery_mutex; 1613 u64 ctxid = DECODE_CTXID(recover->context_id), 1614 rctxid = recover->context_id; 1615 long reg; 1616 int lretry = 20; /* up to 2 seconds */ 1617 int rc = 0; 1618 1619 atomic_inc(&cfg->recovery_threads); 1620 rc = mutex_lock_interruptible(mutex); 1621 if (rc) 1622 goto out; 1623 1624 dev_dbg(dev, "%s: reason 0x%016llX rctxid=%016llX\n", 1625 __func__, recover->reason, rctxid); 1626 1627retry: 1628 /* Ensure that this process is attached to the context */ 1629 ctxi = get_context(cfg, rctxid, lli, CTX_CTRL_ERR_FALLBACK); 1630 if (unlikely(!ctxi)) { 1631 dev_dbg(dev, "%s: Bad context! (%llu)\n", __func__, ctxid); 1632 rc = -EINVAL; 1633 goto out; 1634 } 1635 1636 if (ctxi->err_recovery_active) { 1637retry_recover: 1638 rc = recover_context(cfg, ctxi); 1639 if (unlikely(rc)) { 1640 dev_err(dev, "%s: Recovery failed for context %llu (rc=%d)\n", 1641 __func__, ctxid, rc); 1642 if ((rc == -ENODEV) && 1643 ((atomic_read(&cfg->recovery_threads) > 1) || 1644 (lretry--))) { 1645 dev_dbg(dev, "%s: Going to try again!\n", 1646 __func__); 1647 mutex_unlock(mutex); 1648 msleep(100); 1649 rc = mutex_lock_interruptible(mutex); 1650 if (rc) 1651 goto out; 1652 goto retry_recover; 1653 } 1654 1655 goto out; 1656 } 1657 1658 ctxi->err_recovery_active = false; 1659 recover->context_id = ctxi->ctxid; 1660 recover->adap_fd = ctxi->lfd; 1661 recover->mmio_size = sizeof(afu->afu_map->hosts[0].harea); 1662 recover->hdr.return_flags |= 1663 DK_CXLFLASH_RECOVER_AFU_CONTEXT_RESET; 1664 goto out; 1665 } 1666 1667 /* Test if in error state */ 1668 reg = readq_be(&afu->ctrl_map->mbox_r); 1669 if (reg == -1) { 1670 dev_dbg(dev, "%s: MMIO fail, wait for recovery.\n", __func__); 1671 1672 /* 1673 * Before checking the state, put back the context obtained with 1674 * get_context() as it is no longer needed and sleep for a short 1675 * period of time (see prolog notes). 1676 */ 1677 put_context(ctxi); 1678 ctxi = NULL; 1679 ssleep(1); 1680 rc = check_state(cfg); 1681 if (unlikely(rc)) 1682 goto out; 1683 goto retry; 1684 } 1685 1686 dev_dbg(dev, "%s: MMIO working, no recovery required!\n", __func__); 1687out: 1688 if (likely(ctxi)) 1689 put_context(ctxi); 1690 mutex_unlock(mutex); 1691 atomic_dec_if_positive(&cfg->recovery_threads); 1692 return rc; 1693} 1694 1695/** 1696 * process_sense() - evaluates and processes sense data 1697 * @sdev: SCSI device associated with LUN. 1698 * @verify: Verify ioctl data structure. 1699 * 1700 * Return: 0 on success, -errno on failure 1701 */ 1702static int process_sense(struct scsi_device *sdev, 1703 struct dk_cxlflash_verify *verify) 1704{ 1705 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata; 1706 struct device *dev = &cfg->dev->dev; 1707 struct llun_info *lli = sdev->hostdata; 1708 struct glun_info *gli = lli->parent; 1709 u64 prev_lba = gli->max_lba; 1710 struct scsi_sense_hdr sshdr = { 0 }; 1711 int rc = 0; 1712 1713 rc = scsi_normalize_sense((const u8 *)&verify->sense_data, 1714 DK_CXLFLASH_VERIFY_SENSE_LEN, &sshdr); 1715 if (!rc) { 1716 dev_err(dev, "%s: Failed to normalize sense data!\n", __func__); 1717 rc = -EINVAL; 1718 goto out; 1719 } 1720 1721 switch (sshdr.sense_key) { 1722 case NO_SENSE: 1723 case RECOVERED_ERROR: 1724 /* fall through */ 1725 case NOT_READY: 1726 break; 1727 case UNIT_ATTENTION: 1728 switch (sshdr.asc) { 1729 case 0x29: /* Power on Reset or Device Reset */ 1730 /* fall through */ 1731 case 0x2A: /* Device settings/capacity changed */ 1732 rc = read_cap16(sdev, lli); 1733 if (rc) { 1734 rc = -ENODEV; 1735 break; 1736 } 1737 if (prev_lba != gli->max_lba) 1738 dev_dbg(dev, "%s: Capacity changed old=%lld " 1739 "new=%lld\n", __func__, prev_lba, 1740 gli->max_lba); 1741 break; 1742 case 0x3F: /* Report LUNs changed, Rescan. */ 1743 scsi_scan_host(cfg->host); 1744 break; 1745 default: 1746 rc = -EIO; 1747 break; 1748 } 1749 break; 1750 default: 1751 rc = -EIO; 1752 break; 1753 } 1754out: 1755 dev_dbg(dev, "%s: sense_key %x asc %x ascq %x rc %d\n", __func__, 1756 sshdr.sense_key, sshdr.asc, sshdr.ascq, rc); 1757 return rc; 1758} 1759 1760/** 1761 * cxlflash_disk_verify() - verifies a LUN is the same and handle size changes 1762 * @sdev: SCSI device associated with LUN. 1763 * @verify: Verify ioctl data structure. 1764 * 1765 * Return: 0 on success, -errno on failure 1766 */ 1767static int cxlflash_disk_verify(struct scsi_device *sdev, 1768 struct dk_cxlflash_verify *verify) 1769{ 1770 int rc = 0; 1771 struct ctx_info *ctxi = NULL; 1772 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata; 1773 struct device *dev = &cfg->dev->dev; 1774 struct llun_info *lli = sdev->hostdata; 1775 struct glun_info *gli = lli->parent; 1776 struct sisl_rht_entry *rhte = NULL; 1777 res_hndl_t rhndl = verify->rsrc_handle; 1778 u64 ctxid = DECODE_CTXID(verify->context_id), 1779 rctxid = verify->context_id; 1780 u64 last_lba = 0; 1781 1782 dev_dbg(dev, "%s: ctxid=%llu rhndl=%016llX, hint=%016llX, " 1783 "flags=%016llX\n", __func__, ctxid, verify->rsrc_handle, 1784 verify->hint, verify->hdr.flags); 1785 1786 ctxi = get_context(cfg, rctxid, lli, 0); 1787 if (unlikely(!ctxi)) { 1788 dev_dbg(dev, "%s: Bad context! (%llu)\n", __func__, ctxid); 1789 rc = -EINVAL; 1790 goto out; 1791 } 1792 1793 rhte = get_rhte(ctxi, rhndl, lli); 1794 if (unlikely(!rhte)) { 1795 dev_dbg(dev, "%s: Bad resource handle! (%d)\n", 1796 __func__, rhndl); 1797 rc = -EINVAL; 1798 goto out; 1799 } 1800 1801 /* 1802 * Look at the hint/sense to see if it requires us to redrive 1803 * inquiry (i.e. the Unit attention is due to the WWN changing). 1804 */ 1805 if (verify->hint & DK_CXLFLASH_VERIFY_HINT_SENSE) { 1806 /* Can't hold mutex across process_sense/read_cap16, 1807 * since we could have an intervening EEH event. 1808 */ 1809 ctxi->unavail = true; 1810 mutex_unlock(&ctxi->mutex); 1811 rc = process_sense(sdev, verify); 1812 if (unlikely(rc)) { 1813 dev_err(dev, "%s: Failed to validate sense data (%d)\n", 1814 __func__, rc); 1815 mutex_lock(&ctxi->mutex); 1816 ctxi->unavail = false; 1817 goto out; 1818 } 1819 mutex_lock(&ctxi->mutex); 1820 ctxi->unavail = false; 1821 } 1822 1823 switch (gli->mode) { 1824 case MODE_PHYSICAL: 1825 last_lba = gli->max_lba; 1826 break; 1827 case MODE_VIRTUAL: 1828 /* Cast lxt_cnt to u64 for multiply to be treated as 64bit op */ 1829 last_lba = ((u64)rhte->lxt_cnt * MC_CHUNK_SIZE * gli->blk_len); 1830 last_lba /= CXLFLASH_BLOCK_SIZE; 1831 last_lba--; 1832 break; 1833 default: 1834 WARN(1, "Unsupported LUN mode!"); 1835 } 1836 1837 verify->last_lba = last_lba; 1838 1839out: 1840 if (likely(ctxi)) 1841 put_context(ctxi); 1842 dev_dbg(dev, "%s: returning rc=%d llba=%llX\n", 1843 __func__, rc, verify->last_lba); 1844 return rc; 1845} 1846 1847/** 1848 * decode_ioctl() - translates an encoded ioctl to an easily identifiable string 1849 * @cmd: The ioctl command to decode. 1850 * 1851 * Return: A string identifying the decoded ioctl. 1852 */ 1853static char *decode_ioctl(int cmd) 1854{ 1855 switch (cmd) { 1856 case DK_CXLFLASH_ATTACH: 1857 return __stringify_1(DK_CXLFLASH_ATTACH); 1858 case DK_CXLFLASH_USER_DIRECT: 1859 return __stringify_1(DK_CXLFLASH_USER_DIRECT); 1860 case DK_CXLFLASH_USER_VIRTUAL: 1861 return __stringify_1(DK_CXLFLASH_USER_VIRTUAL); 1862 case DK_CXLFLASH_VLUN_RESIZE: 1863 return __stringify_1(DK_CXLFLASH_VLUN_RESIZE); 1864 case DK_CXLFLASH_RELEASE: 1865 return __stringify_1(DK_CXLFLASH_RELEASE); 1866 case DK_CXLFLASH_DETACH: 1867 return __stringify_1(DK_CXLFLASH_DETACH); 1868 case DK_CXLFLASH_VERIFY: 1869 return __stringify_1(DK_CXLFLASH_VERIFY); 1870 case DK_CXLFLASH_VLUN_CLONE: 1871 return __stringify_1(DK_CXLFLASH_VLUN_CLONE); 1872 case DK_CXLFLASH_RECOVER_AFU: 1873 return __stringify_1(DK_CXLFLASH_RECOVER_AFU); 1874 case DK_CXLFLASH_MANAGE_LUN: 1875 return __stringify_1(DK_CXLFLASH_MANAGE_LUN); 1876 } 1877 1878 return "UNKNOWN"; 1879} 1880 1881/** 1882 * cxlflash_disk_direct_open() - opens a direct (physical) disk 1883 * @sdev: SCSI device associated with LUN. 1884 * @arg: UDirect ioctl data structure. 1885 * 1886 * On successful return, the user is informed of the resource handle 1887 * to be used to identify the direct lun and the size (in blocks) of 1888 * the direct lun in last LBA format. 1889 * 1890 * Return: 0 on success, -errno on failure 1891 */ 1892static int cxlflash_disk_direct_open(struct scsi_device *sdev, void *arg) 1893{ 1894 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata; 1895 struct device *dev = &cfg->dev->dev; 1896 struct afu *afu = cfg->afu; 1897 struct llun_info *lli = sdev->hostdata; 1898 struct glun_info *gli = lli->parent; 1899 1900 struct dk_cxlflash_udirect *pphys = (struct dk_cxlflash_udirect *)arg; 1901 1902 u64 ctxid = DECODE_CTXID(pphys->context_id), 1903 rctxid = pphys->context_id; 1904 u64 lun_size = 0; 1905 u64 last_lba = 0; 1906 u64 rsrc_handle = -1; 1907 u32 port = CHAN2PORT(sdev->channel); 1908 1909 int rc = 0; 1910 1911 struct ctx_info *ctxi = NULL; 1912 struct sisl_rht_entry *rhte = NULL; 1913 1914 pr_debug("%s: ctxid=%llu ls=0x%llx\n", __func__, ctxid, lun_size); 1915 1916 rc = cxlflash_lun_attach(gli, MODE_PHYSICAL, false); 1917 if (unlikely(rc)) { 1918 dev_dbg(dev, "%s: Failed to attach to LUN! (PHYSICAL)\n", 1919 __func__); 1920 goto out; 1921 } 1922 1923 ctxi = get_context(cfg, rctxid, lli, 0); 1924 if (unlikely(!ctxi)) { 1925 dev_dbg(dev, "%s: Bad context! (%llu)\n", __func__, ctxid); 1926 rc = -EINVAL; 1927 goto err1; 1928 } 1929 1930 rhte = rhte_checkout(ctxi, lli); 1931 if (unlikely(!rhte)) { 1932 dev_dbg(dev, "%s: too many opens for this context\n", __func__); 1933 rc = -EMFILE; /* too many opens */ 1934 goto err1; 1935 } 1936 1937 rsrc_handle = (rhte - ctxi->rht_start); 1938 1939 rht_format1(rhte, lli->lun_id[sdev->channel], ctxi->rht_perms, port); 1940 cxlflash_afu_sync(afu, ctxid, rsrc_handle, AFU_LW_SYNC); 1941 1942 last_lba = gli->max_lba; 1943 pphys->hdr.return_flags = 0; 1944 pphys->last_lba = last_lba; 1945 pphys->rsrc_handle = rsrc_handle; 1946 1947out: 1948 if (likely(ctxi)) 1949 put_context(ctxi); 1950 dev_dbg(dev, "%s: returning handle 0x%llx rc=%d llba %lld\n", 1951 __func__, rsrc_handle, rc, last_lba); 1952 return rc; 1953 1954err1: 1955 cxlflash_lun_detach(gli); 1956 goto out; 1957} 1958 1959/** 1960 * ioctl_common() - common IOCTL handler for driver 1961 * @sdev: SCSI device associated with LUN. 1962 * @cmd: IOCTL command. 1963 * 1964 * Handles common fencing operations that are valid for multiple ioctls. Always 1965 * allow through ioctls that are cleanup oriented in nature, even when operating 1966 * in a failed/terminating state. 1967 * 1968 * Return: 0 on success, -errno on failure 1969 */ 1970static int ioctl_common(struct scsi_device *sdev, int cmd) 1971{ 1972 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata; 1973 struct device *dev = &cfg->dev->dev; 1974 struct llun_info *lli = sdev->hostdata; 1975 int rc = 0; 1976 1977 if (unlikely(!lli)) { 1978 dev_dbg(dev, "%s: Unknown LUN\n", __func__); 1979 rc = -EINVAL; 1980 goto out; 1981 } 1982 1983 rc = check_state(cfg); 1984 if (unlikely(rc) && (cfg->state == STATE_FAILTERM)) { 1985 switch (cmd) { 1986 case DK_CXLFLASH_VLUN_RESIZE: 1987 case DK_CXLFLASH_RELEASE: 1988 case DK_CXLFLASH_DETACH: 1989 dev_dbg(dev, "%s: Command override! (%d)\n", 1990 __func__, rc); 1991 rc = 0; 1992 break; 1993 } 1994 } 1995out: 1996 return rc; 1997} 1998 1999/** 2000 * cxlflash_ioctl() - IOCTL handler for driver 2001 * @sdev: SCSI device associated with LUN. 2002 * @cmd: IOCTL command. 2003 * @arg: Userspace ioctl data structure. 2004 * 2005 * A read/write semaphore is used to implement a 'drain' of currently 2006 * running ioctls. The read semaphore is taken at the beginning of each 2007 * ioctl thread and released upon concluding execution. Additionally the 2008 * semaphore should be released and then reacquired in any ioctl execution 2009 * path which will wait for an event to occur that is outside the scope of 2010 * the ioctl (i.e. an adapter reset). To drain the ioctls currently running, 2011 * a thread simply needs to acquire the write semaphore. 2012 * 2013 * Return: 0 on success, -errno on failure 2014 */ 2015int cxlflash_ioctl(struct scsi_device *sdev, int cmd, void __user *arg) 2016{ 2017 typedef int (*sioctl) (struct scsi_device *, void *); 2018 2019 struct cxlflash_cfg *cfg = (struct cxlflash_cfg *)sdev->host->hostdata; 2020 struct device *dev = &cfg->dev->dev; 2021 struct afu *afu = cfg->afu; 2022 struct dk_cxlflash_hdr *hdr; 2023 char buf[sizeof(union cxlflash_ioctls)]; 2024 size_t size = 0; 2025 bool known_ioctl = false; 2026 int idx; 2027 int rc = 0; 2028 struct Scsi_Host *shost = sdev->host; 2029 sioctl do_ioctl = NULL; 2030 2031 static const struct { 2032 size_t size; 2033 sioctl ioctl; 2034 } ioctl_tbl[] = { /* NOTE: order matters here */ 2035 {sizeof(struct dk_cxlflash_attach), (sioctl)cxlflash_disk_attach}, 2036 {sizeof(struct dk_cxlflash_udirect), cxlflash_disk_direct_open}, 2037 {sizeof(struct dk_cxlflash_release), (sioctl)cxlflash_disk_release}, 2038 {sizeof(struct dk_cxlflash_detach), (sioctl)cxlflash_disk_detach}, 2039 {sizeof(struct dk_cxlflash_verify), (sioctl)cxlflash_disk_verify}, 2040 {sizeof(struct dk_cxlflash_recover_afu), (sioctl)cxlflash_afu_recover}, 2041 {sizeof(struct dk_cxlflash_manage_lun), (sioctl)cxlflash_manage_lun}, 2042 {sizeof(struct dk_cxlflash_uvirtual), cxlflash_disk_virtual_open}, 2043 {sizeof(struct dk_cxlflash_resize), (sioctl)cxlflash_vlun_resize}, 2044 {sizeof(struct dk_cxlflash_clone), (sioctl)cxlflash_disk_clone}, 2045 }; 2046 2047 /* Hold read semaphore so we can drain if needed */ 2048 down_read(&cfg->ioctl_rwsem); 2049 2050 /* Restrict command set to physical support only for internal LUN */ 2051 if (afu->internal_lun) 2052 switch (cmd) { 2053 case DK_CXLFLASH_RELEASE: 2054 case DK_CXLFLASH_USER_VIRTUAL: 2055 case DK_CXLFLASH_VLUN_RESIZE: 2056 case DK_CXLFLASH_VLUN_CLONE: 2057 dev_dbg(dev, "%s: %s not supported for lun_mode=%d\n", 2058 __func__, decode_ioctl(cmd), afu->internal_lun); 2059 rc = -EINVAL; 2060 goto cxlflash_ioctl_exit; 2061 } 2062 2063 switch (cmd) { 2064 case DK_CXLFLASH_ATTACH: 2065 case DK_CXLFLASH_USER_DIRECT: 2066 case DK_CXLFLASH_RELEASE: 2067 case DK_CXLFLASH_DETACH: 2068 case DK_CXLFLASH_VERIFY: 2069 case DK_CXLFLASH_RECOVER_AFU: 2070 case DK_CXLFLASH_USER_VIRTUAL: 2071 case DK_CXLFLASH_VLUN_RESIZE: 2072 case DK_CXLFLASH_VLUN_CLONE: 2073 dev_dbg(dev, "%s: %s (%08X) on dev(%d/%d/%d/%llu)\n", 2074 __func__, decode_ioctl(cmd), cmd, shost->host_no, 2075 sdev->channel, sdev->id, sdev->lun); 2076 rc = ioctl_common(sdev, cmd); 2077 if (unlikely(rc)) 2078 goto cxlflash_ioctl_exit; 2079 2080 /* fall through */ 2081 2082 case DK_CXLFLASH_MANAGE_LUN: 2083 known_ioctl = true; 2084 idx = _IOC_NR(cmd) - _IOC_NR(DK_CXLFLASH_ATTACH); 2085 size = ioctl_tbl[idx].size; 2086 do_ioctl = ioctl_tbl[idx].ioctl; 2087 2088 if (likely(do_ioctl)) 2089 break; 2090 2091 /* fall through */ 2092 default: 2093 rc = -EINVAL; 2094 goto cxlflash_ioctl_exit; 2095 } 2096 2097 if (unlikely(copy_from_user(&buf, arg, size))) { 2098 dev_err(dev, "%s: copy_from_user() fail! " 2099 "size=%lu cmd=%d (%s) arg=%p\n", 2100 __func__, size, cmd, decode_ioctl(cmd), arg); 2101 rc = -EFAULT; 2102 goto cxlflash_ioctl_exit; 2103 } 2104 2105 hdr = (struct dk_cxlflash_hdr *)&buf; 2106 if (hdr->version != DK_CXLFLASH_VERSION_0) { 2107 dev_dbg(dev, "%s: Version %u not supported for %s\n", 2108 __func__, hdr->version, decode_ioctl(cmd)); 2109 rc = -EINVAL; 2110 goto cxlflash_ioctl_exit; 2111 } 2112 2113 if (hdr->rsvd[0] || hdr->rsvd[1] || hdr->rsvd[2] || hdr->return_flags) { 2114 dev_dbg(dev, "%s: Reserved/rflags populated!\n", __func__); 2115 rc = -EINVAL; 2116 goto cxlflash_ioctl_exit; 2117 } 2118 2119 rc = do_ioctl(sdev, (void *)&buf); 2120 if (likely(!rc)) 2121 if (unlikely(copy_to_user(arg, &buf, size))) { 2122 dev_err(dev, "%s: copy_to_user() fail! " 2123 "size=%lu cmd=%d (%s) arg=%p\n", 2124 __func__, size, cmd, decode_ioctl(cmd), arg); 2125 rc = -EFAULT; 2126 } 2127 2128 /* fall through to exit */ 2129 2130cxlflash_ioctl_exit: 2131 up_read(&cfg->ioctl_rwsem); 2132 if (unlikely(rc && known_ioctl)) 2133 dev_err(dev, "%s: ioctl %s (%08X) on dev(%d/%d/%d/%llu) " 2134 "returned rc %d\n", __func__, 2135 decode_ioctl(cmd), cmd, shost->host_no, 2136 sdev->channel, sdev->id, sdev->lun, rc); 2137 else 2138 dev_dbg(dev, "%s: ioctl %s (%08X) on dev(%d/%d/%d/%llu) " 2139 "returned rc %d\n", __func__, decode_ioctl(cmd), 2140 cmd, shost->host_no, sdev->channel, sdev->id, 2141 sdev->lun, rc); 2142 return rc; 2143} 2144