drivers/target/target_core_file.c


DEFINITIONS

This source file includes the following definitions:
  1. FD_DEV
  2. fd_attach_hba
  3. fd_detach_hba
  4. fd_alloc_device
  5. fd_configure_device
  6. fd_dev_call_rcu
  7. fd_free_device
  8. fd_destroy_device
  9. cmd_rw_aio_complete
  10. fd_execute_rw_aio
  11. fd_do_rw
  12. fd_execute_sync_cache
  13. fd_execute_write_same
  14. fd_do_prot_fill
  15. fd_do_prot_unmap
  16. fd_execute_unmap
  17. fd_execute_rw_buffered
  18. fd_execute_rw
  19. fd_set_configfs_dev_params
  20. fd_show_configfs_dev_params
  21. fd_get_blocks
  22. fd_init_prot
  23. fd_format_prot
  24. fd_free_prot
  25. fd_parse_cdb
  26. fileio_module_init
  27. fileio_module_exit

// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 * Filename:  target_core_file.c
 *
 * This file contains the Storage Engine <-> FILEIO transport specific functions
 *
 * (c) Copyright 2005-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/falloc.h>
#include <linux/uio.h>
#include <scsi/scsi_proto.h>
#include <asm/unaligned.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

#include "target_core_file.h"

static inline struct fd_dev *FD_DEV(struct se_device *dev)
{
	return container_of(dev, struct fd_dev, dev);
}
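
/*
 * A minimal sketch (not part of the driver) of the embedded-struct pattern
 * used by FD_DEV(), assuming a hypothetical local fd_dev for illustration:
 * the target core only ever sees the embedded &fd_dev->dev, and
 * container_of() recovers the wrapper by subtracting the member offset,
 * so no lookup table is needed:
 *
 *	struct fd_dev my_fd_dev;
 *	struct se_device *se = &my_fd_dev.dev;
 *	struct fd_dev *back = FD_DEV(se);	// back == &my_fd_dev
 */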

static int fd_attach_hba(struct se_hba *hba, u32 host_id)
{
	struct fd_host *fd_host;

	fd_host = kzalloc(sizeof(struct fd_host), GFP_KERNEL);
	if (!fd_host) {
		pr_err("Unable to allocate memory for struct fd_host\n");
		return -ENOMEM;
	}

	fd_host->fd_host_id = host_id;

	hba->hba_ptr = fd_host;

	pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
		" Target Core Stack %s\n", hba->hba_id, FD_VERSION,
		TARGET_CORE_VERSION);
	pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic\n",
		hba->hba_id, fd_host->fd_host_id);

	return 0;
}

static void fd_detach_hba(struct se_hba *hba)
{
	struct fd_host *fd_host = hba->hba_ptr;

	pr_debug("CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic"
		" Target Core\n", hba->hba_id, fd_host->fd_host_id);

	kfree(fd_host);
	hba->hba_ptr = NULL;
}

static struct se_device *fd_alloc_device(struct se_hba *hba, const char *name)
{
	struct fd_dev *fd_dev;
	struct fd_host *fd_host = hba->hba_ptr;

	fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL);
	if (!fd_dev) {
		pr_err("Unable to allocate memory for struct fd_dev\n");
		return NULL;
	}

	fd_dev->fd_host = fd_host;

	pr_debug("FILEIO: Allocated fd_dev for %p\n", name);

	return &fd_dev->dev;
}

static int fd_configure_device(struct se_device *dev)
{
	struct fd_dev *fd_dev = FD_DEV(dev);
	struct fd_host *fd_host = dev->se_hba->hba_ptr;
	struct file *file;
	struct inode *inode = NULL;
	int flags, ret = -EINVAL;

	if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
		pr_err("Missing fd_dev_name=\n");
		return -EINVAL;
	}

	/*
	 * Use O_DSYNC by default instead of O_SYNC to forgo syncing
	 * of pure timestamp updates.
	 */
	flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;

	/*
	 * Optionally allow fd_buffered_io=1 to be enabled for people
	 * who want to use the fs buffer cache as a WriteCache mechanism.
	 *
	 * This means that in the event of a hard failure, there is a risk
	 * of silent data-loss if the SCSI client has *not* performed a
	 * forced unit access (FUA) write, or issued SYNCHRONIZE_CACHE
	 * to write-out the entire device cache.
	 */
	if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
		pr_debug("FILEIO: Disabling O_DSYNC, using buffered FILEIO\n");
		flags &= ~O_DSYNC;
	}

	file = filp_open(fd_dev->fd_dev_name, flags, 0600);
	if (IS_ERR(file)) {
		pr_err("filp_open(%s) failed\n", fd_dev->fd_dev_name);
		ret = PTR_ERR(file);
		goto fail;
	}
	fd_dev->fd_file = file;
	/*
	 * If using a block backend with this struct file, we extract
	 * fd_dev->fd_[block,dev]_size from struct block_device.
	 *
	 * Otherwise, we use the passed fd_dev_size= from configfs
	 */
	inode = file->f_mapping->host;
	if (S_ISBLK(inode->i_mode)) {
		struct request_queue *q = bdev_get_queue(inode->i_bdev);
		unsigned long long dev_size;

		fd_dev->fd_block_size = bdev_logical_block_size(inode->i_bdev);
		/*
		 * Determine the number of bytes from i_size_read() minus
		 * one (1) logical sector from underlying struct block_device
		 */
		dev_size = (i_size_read(file->f_mapping->host) -
				       fd_dev->fd_block_size);

		pr_debug("FILEIO: Using size: %llu bytes from struct"
			" block_device blocks: %llu logical_block_size: %d\n",
			dev_size, div_u64(dev_size, fd_dev->fd_block_size),
			fd_dev->fd_block_size);

		if (target_configure_unmap_from_queue(&dev->dev_attrib, q))
			pr_debug("FILEIO: BLOCK Discard support available,"
				 " disabled by default\n");
		/*
		 * Enable WRITE_SAME emulation when backed by a struct
		 * block_device, and use 0xFFFF since the smaller
		 * WRITE_SAME(10) only has a two-byte block count.
		 */
		dev->dev_attrib.max_write_same_len = 0xFFFF;

		if (blk_queue_nonrot(q))
			dev->dev_attrib.is_nonrot = 1;
	} else {
		if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) {
			pr_err("FILEIO: Missing fd_dev_size="
				" parameter, and no backing struct"
				" block_device\n");
			goto fail;
		}

		fd_dev->fd_block_size = FD_BLOCKSIZE;
		/*
		 * Limit UNMAP emulation to 8k Number of LBAs (NoLB)
		 */
		dev->dev_attrib.max_unmap_lba_count = 0x2000;
		/*
		 * Currently hardcoded to 1 in Linux/SCSI code.
		 */
		dev->dev_attrib.max_unmap_block_desc_count = 1;
		dev->dev_attrib.unmap_granularity = 1;
		dev->dev_attrib.unmap_granularity_alignment = 0;

		/*
		 * Limit WRITE_SAME w/ UNMAP=0 emulation to 8k Number of LBAs (NoLB)
		 * based upon struct iovec limit for vfs_writev()
		 */
		dev->dev_attrib.max_write_same_len = 0x1000;
	}

	dev->dev_attrib.hw_block_size = fd_dev->fd_block_size;
	dev->dev_attrib.max_bytes_per_io = FD_MAX_BYTES;
	dev->dev_attrib.hw_max_sectors = FD_MAX_BYTES / fd_dev->fd_block_size;
	dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;

	if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
		pr_debug("FILEIO: Forcing setting of emulate_write_cache=1"
			" with FDBD_HAS_BUFFERED_IO_WCE\n");
		dev->dev_attrib.emulate_write_cache = 1;
	}

	fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
	fd_dev->fd_queue_depth = dev->queue_depth;

	pr_debug("CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s,"
		" %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id,
			fd_dev->fd_dev_name, fd_dev->fd_dev_size);

	return 0;
fail:
	if (fd_dev->fd_file) {
		filp_close(fd_dev->fd_file, NULL);
		fd_dev->fd_file = NULL;
	}
	return ret;
}
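
/*
 * A minimal usage sketch (not part of the driver): fd_configure_device()
 * runs when a configfs-created fileio device is enabled. Assuming the
 * standard LIO configfs mount point and a hypothetical backing file
 * /srv/disk0.img, a 4 GiB device could be set up roughly as:
 *
 *	mkdir -p /sys/kernel/config/target/core/fileio_0/disk0
 *	echo "fd_dev_name=/srv/disk0.img,fd_dev_size=4294967296" > \
 *		/sys/kernel/config/target/core/fileio_0/disk0/control
 *	echo 1 > /sys/kernel/config/target/core/fileio_0/disk0/enable
 *
 * The control string is parsed by fd_set_configfs_dev_params() below.
 */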

static void fd_dev_call_rcu(struct rcu_head *p)
{
	struct se_device *dev = container_of(p, struct se_device, rcu_head);
	struct fd_dev *fd_dev = FD_DEV(dev);

	kfree(fd_dev);
}

static void fd_free_device(struct se_device *dev)
{
	call_rcu(&dev->rcu_head, fd_dev_call_rcu);
}

static void fd_destroy_device(struct se_device *dev)
{
	struct fd_dev *fd_dev = FD_DEV(dev);

	if (fd_dev->fd_file) {
		filp_close(fd_dev->fd_file, NULL);
		fd_dev->fd_file = NULL;
	}
}

struct target_core_file_cmd {
	unsigned long	len;
	struct se_cmd	*cmd;
	struct kiocb	iocb;
};

static void cmd_rw_aio_complete(struct kiocb *iocb, long ret, long ret2)
{
	struct target_core_file_cmd *cmd;

	cmd = container_of(iocb, struct target_core_file_cmd, iocb);

	if (ret != cmd->len)
		target_complete_cmd(cmd->cmd, SAM_STAT_CHECK_CONDITION);
	else
		target_complete_cmd(cmd->cmd, SAM_STAT_GOOD);

	kfree(cmd);
}

static sense_reason_t
fd_execute_rw_aio(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
	      enum dma_data_direction data_direction)
{
	int is_write = !(data_direction == DMA_FROM_DEVICE);
	struct se_device *dev = cmd->se_dev;
	struct fd_dev *fd_dev = FD_DEV(dev);
	struct file *file = fd_dev->fd_file;
	struct target_core_file_cmd *aio_cmd;
	struct iov_iter iter = {};
	struct scatterlist *sg;
	struct bio_vec *bvec;
	ssize_t len = 0;
	int ret = 0, i;

	aio_cmd = kmalloc(sizeof(struct target_core_file_cmd), GFP_KERNEL);
	if (!aio_cmd)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	bvec = kcalloc(sgl_nents, sizeof(struct bio_vec), GFP_KERNEL);
	if (!bvec) {
		kfree(aio_cmd);
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	for_each_sg(sgl, sg, sgl_nents, i) {
		bvec[i].bv_page = sg_page(sg);
		bvec[i].bv_len = sg->length;
		bvec[i].bv_offset = sg->offset;

		len += sg->length;
	}

	iov_iter_bvec(&iter, is_write, bvec, sgl_nents, len);

	aio_cmd->cmd = cmd;
	aio_cmd->len = len;
	aio_cmd->iocb.ki_pos = cmd->t_task_lba * dev->dev_attrib.block_size;
	aio_cmd->iocb.ki_filp = file;
	aio_cmd->iocb.ki_complete = cmd_rw_aio_complete;
	aio_cmd->iocb.ki_flags = IOCB_DIRECT;

	if (is_write && (cmd->se_cmd_flags & SCF_FUA))
		aio_cmd->iocb.ki_flags |= IOCB_DSYNC;

	if (is_write)
		ret = call_write_iter(file, &aio_cmd->iocb, &iter);
	else
		ret = call_read_iter(file, &aio_cmd->iocb, &iter);

	kfree(bvec);

	if (ret != -EIOCBQUEUED)
		cmd_rw_aio_complete(&aio_cmd->iocb, ret, 0);

	return 0;
}
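
/*
 * Submission contract for the async path above: call_read_iter() /
 * call_write_iter() return -EIOCBQUEUED when the request was queued, and
 * cmd_rw_aio_complete() will then be invoked later through
 * iocb.ki_complete. Any other return value means the call completed (or
 * failed) synchronously and ki_complete is never invoked, which is why
 * the submitter calls the completion handler itself with the synchronous
 * result in that case.
 */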

static int fd_do_rw(struct se_cmd *cmd, struct file *fd,
		    u32 block_size, struct scatterlist *sgl,
		    u32 sgl_nents, u32 data_length, int is_write)
{
	struct scatterlist *sg;
	struct iov_iter iter;
	struct bio_vec *bvec;
	ssize_t len = 0;
	loff_t pos = (cmd->t_task_lba * block_size);
	int ret = 0, i;

	bvec = kcalloc(sgl_nents, sizeof(struct bio_vec), GFP_KERNEL);
	if (!bvec) {
		pr_err("Unable to allocate fd_do_rw bvec[]\n");
		return -ENOMEM;
	}

	for_each_sg(sgl, sg, sgl_nents, i) {
		bvec[i].bv_page = sg_page(sg);
		bvec[i].bv_len = sg->length;
		bvec[i].bv_offset = sg->offset;

		len += sg->length;
	}

	iov_iter_bvec(&iter, READ, bvec, sgl_nents, len);
	if (is_write)
		ret = vfs_iter_write(fd, &iter, &pos, 0);
	else
		ret = vfs_iter_read(fd, &iter, &pos, 0);

	if (is_write) {
		if (ret < 0 || ret != data_length) {
			pr_err("%s() write returned %d\n", __func__, ret);
			if (ret >= 0)
				ret = -EINVAL;
		}
	} else {
		/*
		 * Return zeros and GOOD status even if the READ did not return
		 * the expected virt_size for struct file w/o a backing struct
		 * block_device.
		 */
		if (S_ISBLK(file_inode(fd)->i_mode)) {
			if (ret < 0 || ret != data_length) {
				pr_err("%s() returned %d, expecting %u for "
						"S_ISBLK\n", __func__, ret,
						data_length);
				if (ret >= 0)
					ret = -EINVAL;
			}
		} else {
			if (ret < 0) {
				pr_err("%s() returned %d for non S_ISBLK\n",
						__func__, ret);
			} else if (ret != data_length) {
				/*
				 * Short read case:
				 * Probably someone truncated the file under
				 * us. We must explicitly zero sg-pages to
				 * avoid exposing uninitialized pages to
				 * userspace.
				 */
				if (ret < data_length)
					ret += iov_iter_zero(data_length - ret, &iter);
				else
					ret = -EINVAL;
			}
		}
	}
	kfree(bvec);
	return ret;
}
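
/*
 * A worked example of the offset math in fd_do_rw(): the byte position is
 * simply LBA times logical block size. With block_size = 512 and
 * t_task_lba = 2048, pos = 2048 * 512 = 1048576, i.e. the I/O starts
 * 1 MiB into the backing file.
 */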

static sense_reason_t
fd_execute_sync_cache(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct fd_dev *fd_dev = FD_DEV(dev);
	int immed = (cmd->t_task_cdb[1] & 0x2);
	loff_t start, end;
	int ret;

	/*
	 * If the Immediate bit is set, queue up the GOOD response
	 * for this SYNCHRONIZE_CACHE op
	 */
	if (immed)
		target_complete_cmd(cmd, SAM_STAT_GOOD);

	/*
	 * Determine if we will be flushing the entire device.
	 */
	if (cmd->t_task_lba == 0 && cmd->data_length == 0) {
		start = 0;
		end = LLONG_MAX;
	} else {
		start = cmd->t_task_lba * dev->dev_attrib.block_size;
		if (cmd->data_length)
			end = start + cmd->data_length - 1;
		else
			end = LLONG_MAX;
	}

	ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
	if (ret != 0)
		pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);

	if (immed)
		return 0;

	if (ret)
		target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
	else
		target_complete_cmd(cmd, SAM_STAT_GOOD);

	return 0;
}
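
/*
 * On the IMMED bit checked above: in SYNCHRONIZE_CACHE (10) it lives in
 * byte 1, bit 1 of the CDB, hence the (t_task_cdb[1] & 0x2) test. With
 * IMMED set the initiator receives GOOD status before the flush runs, so
 * a later flush failure can no longer be reported for that command; with
 * IMMED clear, status is held back until vfs_fsync_range() returns.
 */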

static sense_reason_t
fd_execute_write_same(struct se_cmd *cmd)
{
	struct se_device *se_dev = cmd->se_dev;
	struct fd_dev *fd_dev = FD_DEV(se_dev);
	loff_t pos = cmd->t_task_lba * se_dev->dev_attrib.block_size;
	sector_t nolb = sbc_get_write_same_sectors(cmd);
	struct iov_iter iter;
	struct bio_vec *bvec;
	unsigned int len = 0, i;
	ssize_t ret;

	if (!nolb) {
		target_complete_cmd(cmd, SAM_STAT_GOOD);
		return 0;
	}
	if (cmd->prot_op) {
		pr_err("WRITE_SAME: Protection information with FILEIO"
		       " backends not supported\n");
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	if (cmd->t_data_nents > 1 ||
	    cmd->t_data_sg[0].length != cmd->se_dev->dev_attrib.block_size) {
		pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
			" block_size: %u\n",
			cmd->t_data_nents,
			cmd->t_data_sg[0].length,
			cmd->se_dev->dev_attrib.block_size);
		return TCM_INVALID_CDB_FIELD;
	}

	bvec = kcalloc(nolb, sizeof(struct bio_vec), GFP_KERNEL);
	if (!bvec)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	for (i = 0; i < nolb; i++) {
		bvec[i].bv_page = sg_page(&cmd->t_data_sg[0]);
		bvec[i].bv_len = cmd->t_data_sg[0].length;
		bvec[i].bv_offset = cmd->t_data_sg[0].offset;

		len += se_dev->dev_attrib.block_size;
	}

	iov_iter_bvec(&iter, READ, bvec, nolb, len);
	ret = vfs_iter_write(fd_dev->fd_file, &iter, &pos, 0);

	kfree(bvec);
	if (ret < 0 || ret != len) {
		pr_err("vfs_iter_write() returned %zd for write same\n", ret);
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	target_complete_cmd(cmd, SAM_STAT_GOOD);
	return 0;
}
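
/*
 * On the bvec loop above: WRITE_SAME carries exactly one block of payload,
 * so every bvec entry points at the same page/offset/length, and a single
 * vfs_iter_write() then replicates that block nolb times. For example,
 * with a 512-byte block and nolb = 8, the 8-entry bvec writes 4096 bytes
 * of repeated payload in one call.
 */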

static int
fd_do_prot_fill(struct se_device *se_dev, sector_t lba, sector_t nolb,
		void *buf, size_t bufsize)
{
	struct fd_dev *fd_dev = FD_DEV(se_dev);
	struct file *prot_fd = fd_dev->fd_prot_file;
	sector_t prot_length, prot;
	loff_t pos = lba * se_dev->prot_length;

	if (!prot_fd) {
		pr_err("Unable to locate fd_dev->fd_prot_file\n");
		return -ENODEV;
	}

	prot_length = nolb * se_dev->prot_length;

	for (prot = 0; prot < prot_length;) {
		sector_t len = min_t(sector_t, bufsize, prot_length - prot);
		ssize_t ret = kernel_write(prot_fd, buf, len, &pos);

		if (ret != len) {
			pr_err("kernel_write() to prot file failed: %zd\n", ret);
			return ret < 0 ? ret : -ENODEV;
		}
		prot += ret;
	}

	return 0;
}
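
/*
 * On the sizing above: se_dev->prot_length is the per-block protection
 * tuple size (8 bytes for the T10 DIF tuple of guard, application and
 * reference tags), so the file offset is lba * prot_length and the span
 * to fill is nolb * prot_length. Filling 1024 LBAs therefore writes
 * 1024 * 8 = 8192 bytes into the protection file, chunked by the
 * caller's bufsize.
 */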

static int
fd_do_prot_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
{
	void *buf;
	int rc;

	buf = (void *)__get_free_page(GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate FILEIO prot buf\n");
		return -ENOMEM;
	}
	memset(buf, 0xff, PAGE_SIZE);

	rc = fd_do_prot_fill(cmd->se_dev, lba, nolb, buf, PAGE_SIZE);

	free_page((unsigned long)buf);

	return rc;
}

static sense_reason_t
fd_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
{
	struct file *file = FD_DEV(cmd->se_dev)->fd_file;
	struct inode *inode = file->f_mapping->host;
	int ret;

	if (!nolb) {
		return 0;
	}

	if (cmd->se_dev->dev_attrib.pi_prot_type) {
		ret = fd_do_prot_unmap(cmd, lba, nolb);
		if (ret)
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	if (S_ISBLK(inode->i_mode)) {
		/* The backend is a block device, use discard */
		struct block_device *bdev = inode->i_bdev;
		struct se_device *dev = cmd->se_dev;

		ret = blkdev_issue_discard(bdev,
					   target_to_linux_sector(dev, lba),
					   target_to_linux_sector(dev, nolb),
					   GFP_KERNEL, 0);
		if (ret < 0) {
			pr_warn("FILEIO: blkdev_issue_discard() failed: %d\n",
				ret);
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		}
	} else {
		/* The backend is a normal file, use fallocate */
		struct se_device *se_dev = cmd->se_dev;
		loff_t pos = lba * se_dev->dev_attrib.block_size;
		unsigned int len = nolb * se_dev->dev_attrib.block_size;
		int mode = FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE;

		if (!file->f_op->fallocate)
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

		ret = file->f_op->fallocate(file, mode, pos, len);
		if (ret < 0) {
			pr_warn("FILEIO: fallocate() failed: %d\n", ret);
			return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		}
	}

	return 0;
}
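
/*
 * The ->fallocate() call above is the in-kernel counterpart of the
 * userspace hole-punching idiom; a rough equivalent against a plain fd
 * would be:
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, pos, len);
 *
 * PUNCH_HOLE deallocates the byte range (it reads back as zeros), and
 * KEEP_SIZE, which PUNCH_HOLE requires, leaves i_size untouched.
 */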

static sense_reason_t
fd_execute_rw_buffered(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
	      enum dma_data_direction data_direction)
{
	struct se_device *dev = cmd->se_dev;
	struct fd_dev *fd_dev = FD_DEV(dev);
	struct file *file = fd_dev->fd_file;
	struct file *pfile = fd_dev->fd_prot_file;
	sense_reason_t rc;
	int ret = 0;
	/*
	 * Call vectorized fileio functions to map struct scatterlist
	 * physical memory addresses to struct iovec virtual memory.
	 */
	if (data_direction == DMA_FROM_DEVICE) {
		if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
			ret = fd_do_rw(cmd, pfile, dev->prot_length,
				       cmd->t_prot_sg, cmd->t_prot_nents,
				       cmd->prot_length, 0);
			if (ret < 0)
				return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		}

		ret = fd_do_rw(cmd, file, dev->dev_attrib.block_size,
			       sgl, sgl_nents, cmd->data_length, 0);

		if (ret > 0 && cmd->prot_type && dev->dev_attrib.pi_prot_type &&
		    dev->dev_attrib.pi_prot_verify) {
			u32 sectors = cmd->data_length >>
					ilog2(dev->dev_attrib.block_size);

			rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors,
					    0, cmd->t_prot_sg, 0);
			if (rc)
				return rc;
		}
	} else {
		if (cmd->prot_type && dev->dev_attrib.pi_prot_type &&
		    dev->dev_attrib.pi_prot_verify) {
			u32 sectors = cmd->data_length >>
					ilog2(dev->dev_attrib.block_size);

			rc = sbc_dif_verify(cmd, cmd->t_task_lba, sectors,
					    0, cmd->t_prot_sg, 0);
			if (rc)
				return rc;
		}

		ret = fd_do_rw(cmd, file, dev->dev_attrib.block_size,
			       sgl, sgl_nents, cmd->data_length, 1);
		/*
		 * Perform an implicit vfs_fsync_range() for fd_do_rw() write
		 * ops for SCSI WRITEs with Forced Unit Access (FUA) set.
		 * Allow this to happen independent of WCE=0 setting.
		 */
		if (ret > 0 && (cmd->se_cmd_flags & SCF_FUA)) {
			loff_t start = cmd->t_task_lba *
				dev->dev_attrib.block_size;
			loff_t end;

			if (cmd->data_length)
				end = start + cmd->data_length - 1;
			else
				end = LLONG_MAX;

			vfs_fsync_range(fd_dev->fd_file, start, end, 1);
		}

		if (ret > 0 && cmd->prot_type && dev->dev_attrib.pi_prot_type) {
			ret = fd_do_rw(cmd, pfile, dev->prot_length,
				       cmd->t_prot_sg, cmd->t_prot_nents,
				       cmd->prot_length, 1);
			if (ret < 0)
				return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		}
	}

	if (ret < 0)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	target_complete_cmd(cmd, SAM_STAT_GOOD);
	return 0;
}

static sense_reason_t
fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
	      enum dma_data_direction data_direction)
{
	struct se_device *dev = cmd->se_dev;
	struct fd_dev *fd_dev = FD_DEV(dev);

	/*
	 * We are currently limited by the number of iovecs (2048) per
	 * single vfs_[writev,readv] call.
	 */
	if (cmd->data_length > FD_MAX_BYTES) {
		pr_err("FILEIO: Not able to process I/O of %u bytes due to"
		       " FD_MAX_BYTES: %u iovec count limitation\n",
			cmd->data_length, FD_MAX_BYTES);
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	if (fd_dev->fbd_flags & FDBD_HAS_ASYNC_IO)
		return fd_execute_rw_aio(cmd, sgl, sgl_nents, data_direction);
	return fd_execute_rw_buffered(cmd, sgl, sgl_nents, data_direction);
}

enum {
	Opt_fd_dev_name, Opt_fd_dev_size, Opt_fd_buffered_io,
	Opt_fd_async_io, Opt_err
};

static match_table_t tokens = {
	{Opt_fd_dev_name, "fd_dev_name=%s"},
	{Opt_fd_dev_size, "fd_dev_size=%s"},
	{Opt_fd_buffered_io, "fd_buffered_io=%d"},
	{Opt_fd_async_io, "fd_async_io=%d"},
	{Opt_err, NULL}
};
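
/*
 * A sketch of how a control string flows through the parser below,
 * reusing the illustrative path and size from the earlier example:
 *
 *	"fd_dev_name=/srv/disk0.img,fd_dev_size=4294967296,fd_buffered_io=1"
 *
 * strsep() splits on ',' and '\n', then match_token() matches each piece
 * against the tokens table, yielding Opt_fd_dev_name, Opt_fd_dev_size and
 * Opt_fd_buffered_io with their arguments in args[0].
 */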

static ssize_t fd_set_configfs_dev_params(struct se_device *dev,
		const char *page, ssize_t count)
{
	struct fd_dev *fd_dev = FD_DEV(dev);
	char *orig, *ptr, *arg_p, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, arg, token;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_fd_dev_name:
			if (match_strlcpy(fd_dev->fd_dev_name, &args[0],
				FD_MAX_DEV_NAME) == 0) {
				ret = -EINVAL;
				break;
			}
			pr_debug("FILEIO: Referencing Path: %s\n",
					fd_dev->fd_dev_name);
			fd_dev->fbd_flags |= FBDF_HAS_PATH;
			break;
		case Opt_fd_dev_size:
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			ret = kstrtoull(arg_p, 0, &fd_dev->fd_dev_size);
			kfree(arg_p);
			if (ret < 0) {
				pr_err("kstrtoull() failed for"
						" fd_dev_size=\n");
				goto out;
			}
			pr_debug("FILEIO: Referencing Size: %llu"
					" bytes\n", fd_dev->fd_dev_size);
			fd_dev->fbd_flags |= FBDF_HAS_SIZE;
			break;
		case Opt_fd_buffered_io:
			ret = match_int(args, &arg);
			if (ret)
				goto out;
			if (arg != 1) {
				pr_err("bogus fd_buffered_io=%d value\n", arg);
				ret = -EINVAL;
				goto out;
			}

			pr_debug("FILEIO: Using buffered I/O"
				" operations for struct fd_dev\n");

			fd_dev->fbd_flags |= FDBD_HAS_BUFFERED_IO_WCE;
			break;
		case Opt_fd_async_io:
			ret = match_int(args, &arg);
			if (ret)
				goto out;
			if (arg != 1) {
				pr_err("bogus fd_async_io=%d value\n", arg);
				ret = -EINVAL;
				goto out;
			}

			pr_debug("FILEIO: Using async I/O"
				" operations for struct fd_dev\n");

			fd_dev->fbd_flags |= FDBD_HAS_ASYNC_IO;
			break;
		default:
			break;
		}
	}

out:
	kfree(orig);
	return (!ret) ? count : ret;
}

static ssize_t fd_show_configfs_dev_params(struct se_device *dev, char *b)
{
	struct fd_dev *fd_dev = FD_DEV(dev);
	ssize_t bl = 0;

	bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
	bl += sprintf(b + bl, "        File: %s  Size: %llu  Mode: %s Async: %d\n",
		fd_dev->fd_dev_name, fd_dev->fd_dev_size,
		(fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) ?
		"Buffered-WCE" : "O_DSYNC",
		!!(fd_dev->fbd_flags & FDBD_HAS_ASYNC_IO));
	return bl;
}

static sector_t fd_get_blocks(struct se_device *dev)
{
	struct fd_dev *fd_dev = FD_DEV(dev);
	struct file *f = fd_dev->fd_file;
	struct inode *i = f->f_mapping->host;
	unsigned long long dev_size;
	/*
	 * When using a file that references an underlying struct block_device,
	 * ensure dev_size is always based on the current inode size in order
	 * to handle underlying block_device resize operations.
	 */
	if (S_ISBLK(i->i_mode))
		dev_size = i_size_read(i);
	else
		dev_size = fd_dev->fd_dev_size;

	return div_u64(dev_size - dev->dev_attrib.block_size,
		       dev->dev_attrib.block_size);
}
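
/*
 * A worked example for fd_get_blocks(): the return value is the last
 * addressable LBA, not the block count, which is why one block is
 * subtracted before dividing. With dev_size = 4294967296 (4 GiB) and
 * block_size = 512, the device has 8388608 blocks and this returns
 * (4294967296 - 512) / 512 = 8388607, the highest valid LBA.
 */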

static int fd_init_prot(struct se_device *dev)
{
	struct fd_dev *fd_dev = FD_DEV(dev);
	struct file *prot_file, *file = fd_dev->fd_file;
	struct inode *inode;
	int ret, flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;
	char buf[FD_MAX_DEV_PROT_NAME];

	if (!file) {
		pr_err("Unable to locate fd_dev->fd_file\n");
		return -ENODEV;
	}

	inode = file->f_mapping->host;
	if (S_ISBLK(inode->i_mode)) {
		pr_err("FILEIO Protection emulation only supported on"
		       " !S_ISBLK\n");
		return -ENOSYS;
	}

	if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE)
		flags &= ~O_DSYNC;

	snprintf(buf, FD_MAX_DEV_PROT_NAME, "%s.protection",
		 fd_dev->fd_dev_name);

	prot_file = filp_open(buf, flags, 0600);
	if (IS_ERR(prot_file)) {
		pr_err("filp_open(%s) failed\n", buf);
		ret = PTR_ERR(prot_file);
		return ret;
	}
	fd_dev->fd_prot_file = prot_file;

	return 0;
}
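
/*
 * Protection data lives in a sidecar file named after the backing file:
 * with the illustrative fd_dev_name=/srv/disk0.img from above, the PI
 * tuples would be kept in /srv/disk0.img.protection. The sidecar is
 * opened with the same O_DSYNC policy as the data file, so both honor
 * the same write-cache setting.
 */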

static int fd_format_prot(struct se_device *dev)
{
	unsigned char *buf;
	int unit_size = FDBD_FORMAT_UNIT_SIZE * dev->dev_attrib.block_size;
	int ret;

	if (!dev->dev_attrib.pi_prot_type) {
		pr_err("Unable to format_prot while pi_prot_type == 0\n");
		return -ENODEV;
	}

	buf = vzalloc(unit_size);
	if (!buf) {
		pr_err("Unable to allocate FILEIO prot buf\n");
		return -ENOMEM;
	}

	pr_debug("Using FILEIO prot_length: %llu\n",
		 (unsigned long long)(dev->transport->get_blocks(dev) + 1) *
					dev->prot_length);

	memset(buf, 0xff, unit_size);
	ret = fd_do_prot_fill(dev, 0, dev->transport->get_blocks(dev) + 1,
			      buf, unit_size);
	vfree(buf);
	return ret;
}
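
/*
 * On the "+ 1" above: get_blocks() returns the last LBA, so the total
 * block count is get_blocks() + 1. Formatting fills every protection
 * tuple with 0xff, the conventional escape pattern that tells DIF
 * verification to skip blocks that have never been written. For the
 * earlier 4 GiB example that comes to 8388608 * 8 = 67108864 bytes
 * of PI.
 */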

static void fd_free_prot(struct se_device *dev)
{
	struct fd_dev *fd_dev = FD_DEV(dev);

	if (!fd_dev->fd_prot_file)
		return;

	filp_close(fd_dev->fd_prot_file, NULL);
	fd_dev->fd_prot_file = NULL;
}

static struct sbc_ops fd_sbc_ops = {
	.execute_rw		= fd_execute_rw,
	.execute_sync_cache	= fd_execute_sync_cache,
	.execute_write_same	= fd_execute_write_same,
	.execute_unmap		= fd_execute_unmap,
};

static sense_reason_t
fd_parse_cdb(struct se_cmd *cmd)
{
	return sbc_parse_cdb(cmd, &fd_sbc_ops);
}

static const struct target_backend_ops fileio_ops = {
	.name			= "fileio",
	.inquiry_prod		= "FILEIO",
	.inquiry_rev		= FD_VERSION,
	.owner			= THIS_MODULE,
	.attach_hba		= fd_attach_hba,
	.detach_hba		= fd_detach_hba,
	.alloc_device		= fd_alloc_device,
	.configure_device	= fd_configure_device,
	.destroy_device		= fd_destroy_device,
	.free_device		= fd_free_device,
	.parse_cdb		= fd_parse_cdb,
	.set_configfs_dev_params = fd_set_configfs_dev_params,
	.show_configfs_dev_params = fd_show_configfs_dev_params,
	.get_device_type	= sbc_get_device_type,
	.get_blocks		= fd_get_blocks,
	.init_prot		= fd_init_prot,
	.format_prot		= fd_format_prot,
	.free_prot		= fd_free_prot,
	.tb_dev_attrib_attrs	= sbc_attrib_attrs,
};

static int __init fileio_module_init(void)
{
	return transport_backend_register(&fileio_ops);
}

static void __exit fileio_module_exit(void)
{
	target_backend_unregister(&fileio_ops);
}

MODULE_DESCRIPTION("TCM FILEIO subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(fileio_module_init);
module_exit(fileio_module_exit);
