fs/ioctl.c


DEFINITIONS

This source file includes the following definitions.
  1. vfs_ioctl
  2. ioctl_fibmap
  3. fiemap_fill_next_extent
  4. fiemap_check_flags
  5. fiemap_check_ranges
  6. ioctl_fiemap
  7. ioctl_file_clone
  8. ioctl_file_clone_range
  9. logical_to_blk
  10. blk_to_logical
  11. __generic_block_fiemap
  12. generic_block_fiemap
  13. ioctl_preallocate
  14. file_ioctl
  15. ioctl_fionbio
  16. ioctl_fioasync
  17. ioctl_fsfreeze
  18. ioctl_fsthaw
  19. ioctl_file_dedupe_range
  20. do_vfs_ioctl
  21. ksys_ioctl
  22. SYSCALL_DEFINE3
  23. compat_ptr_ioctl

   1 // SPDX-License-Identifier: GPL-2.0
   2 /*
   3  *  linux/fs/ioctl.c
   4  *
   5  *  Copyright (C) 1991, 1992  Linus Torvalds
   6  */
   7 
   8 #include <linux/syscalls.h>
   9 #include <linux/mm.h>
  10 #include <linux/capability.h>
  11 #include <linux/compat.h>
  12 #include <linux/file.h>
  13 #include <linux/fs.h>
  14 #include <linux/security.h>
  15 #include <linux/export.h>
  16 #include <linux/uaccess.h>
  17 #include <linux/writeback.h>
  18 #include <linux/buffer_head.h>
  19 #include <linux/falloc.h>
  20 #include <linux/sched/signal.h>
  21 
  22 #include "internal.h"
  23 
  24 #include <asm/ioctls.h>
  25 
  26 /* So that the fiemap access checks can't overflow on 32 bit machines. */
  27 #define FIEMAP_MAX_EXTENTS      (UINT_MAX / sizeof(struct fiemap_extent))
  28 
  29 /**
  30  * vfs_ioctl - call filesystem specific ioctl methods
  31  * @filp:       open file to invoke ioctl method on
  32  * @cmd:        ioctl command to execute
  33  * @arg:        command-specific argument for ioctl
  34  *
  35  * Invokes filesystem specific ->unlocked_ioctl, if one exists; otherwise
  36  * returns -ENOTTY.
  37  *
  38  * Returns 0 on success, -errno on error.
  39  */
  40 long vfs_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
  41 {
  42         int error = -ENOTTY;
  43 
  44         if (!filp->f_op->unlocked_ioctl)
  45                 goto out;
  46 
  47         error = filp->f_op->unlocked_ioctl(filp, cmd, arg);
  48         if (error == -ENOIOCTLCMD)
  49                 error = -ENOTTY;
  50  out:
  51         return error;
  52 }
  53 EXPORT_SYMBOL(vfs_ioctl);
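
/*
 * Illustrative sketch (not part of this file): a minimal driver-side
 * ->unlocked_ioctl handler of the kind vfs_ioctl() dispatches to. The
 * command FOO_IOC_RESET, struct foo_dev and foo_reset() are hypothetical.
 * Returning -ENOIOCTLCMD for unknown commands lets vfs_ioctl() convert
 * the result to -ENOTTY for userspace, as documented above.
 */
static long foo_unlocked_ioctl(struct file *filp, unsigned int cmd,
                               unsigned long arg)
{
        struct foo_dev *dev = filp->private_data;

        switch (cmd) {
        case FOO_IOC_RESET:
                return foo_reset(dev);
        default:
                return -ENOIOCTLCMD;    /* becomes -ENOTTY in vfs_ioctl() */
        }
}

static const struct file_operations foo_fops = {
        .owner          = THIS_MODULE,
        .unlocked_ioctl = foo_unlocked_ioctl,
};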
  54 
  55 static int ioctl_fibmap(struct file *filp, int __user *p)
  56 {
  57         struct address_space *mapping = filp->f_mapping;
  58         int res, block;
  59 
  60         /* do we support this mess? */
  61         if (!mapping->a_ops->bmap)
  62                 return -EINVAL;
  63         if (!capable(CAP_SYS_RAWIO))
  64                 return -EPERM;
  65         res = get_user(block, p);
  66         if (res)
  67                 return res;
  68         res = mapping->a_ops->bmap(mapping, block);
  69         return put_user(res, p);
  70 }
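
/*
 * Illustrative userspace sketch (not part of this file): FIBMAP maps a
 * logical block number of an open file to a physical block number on the
 * underlying device. CAP_SYS_RAWIO is required, as enforced above; the
 * path is hypothetical.
 */
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <fcntl.h>
#include <stdio.h>

int main(void)
{
        int fd = open("/some/file", O_RDONLY);
        int block = 0;          /* in: logical block, out: physical block */

        if (fd < 0 || ioctl(fd, FIBMAP, &block) < 0) {
                perror("FIBMAP");
                return 1;
        }
        printf("logical block 0 -> physical block %d\n", block);
        return 0;
}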
  71 
  72 /**
  73  * fiemap_fill_next_extent - Fiemap helper function
  74  * @fieinfo:    Fiemap context passed into ->fiemap
  75  * @logical:    Extent logical start offset, in bytes
  76  * @phys:       Extent physical start offset, in bytes
  77  * @len:        Extent length, in bytes
  78  * @flags:      FIEMAP_EXTENT flags that describe this extent
  79  *
  80  * Called from file system ->fiemap callback. Will populate extent
  81  * info as passed in via arguments and copy to user memory. On
  82  * success, extent count on fieinfo is incremented.
  83  *
  84  * Returns 0 on success, -errno on error, 1 if this was the last
  85  * extent that will fit in user array.
  86  */
  87 #define SET_UNKNOWN_FLAGS       (FIEMAP_EXTENT_DELALLOC)
  88 #define SET_NO_UNMOUNTED_IO_FLAGS       (FIEMAP_EXTENT_DATA_ENCRYPTED)
  89 #define SET_NOT_ALIGNED_FLAGS   (FIEMAP_EXTENT_DATA_TAIL|FIEMAP_EXTENT_DATA_INLINE)
  90 int fiemap_fill_next_extent(struct fiemap_extent_info *fieinfo, u64 logical,
  91                             u64 phys, u64 len, u32 flags)
  92 {
  93         struct fiemap_extent extent;
  94         struct fiemap_extent __user *dest = fieinfo->fi_extents_start;
  95 
  96         /* only count the extents */
  97         if (fieinfo->fi_extents_max == 0) {
  98                 fieinfo->fi_extents_mapped++;
  99                 return (flags & FIEMAP_EXTENT_LAST) ? 1 : 0;
 100         }
 101 
 102         if (fieinfo->fi_extents_mapped >= fieinfo->fi_extents_max)
 103                 return 1;
 104 
 105         if (flags & SET_UNKNOWN_FLAGS)
 106                 flags |= FIEMAP_EXTENT_UNKNOWN;
 107         if (flags & SET_NO_UNMOUNTED_IO_FLAGS)
 108                 flags |= FIEMAP_EXTENT_ENCODED;
 109         if (flags & SET_NOT_ALIGNED_FLAGS)
 110                 flags |= FIEMAP_EXTENT_NOT_ALIGNED;
 111 
 112         memset(&extent, 0, sizeof(extent));
 113         extent.fe_logical = logical;
 114         extent.fe_physical = phys;
 115         extent.fe_length = len;
 116         extent.fe_flags = flags;
 117 
 118         dest += fieinfo->fi_extents_mapped;
 119         if (copy_to_user(dest, &extent, sizeof(extent)))
 120                 return -EFAULT;
 121 
 122         fieinfo->fi_extents_mapped++;
 123         if (fieinfo->fi_extents_mapped == fieinfo->fi_extents_max)
 124                 return 1;
 125         return (flags & FIEMAP_EXTENT_LAST) ? 1 : 0;
 126 }
 127 EXPORT_SYMBOL(fiemap_fill_next_extent);
 128 
 129 /**
 130  * fiemap_check_flags - check validity of requested flags for fiemap
 131  * @fieinfo:    Fiemap context passed into ->fiemap
 132  * @fs_flags:   Set of fiemap flags that the file system understands
 133  *
 134  * Called from file system ->fiemap callback. This will compute the
 135  * intersection of valid fiemap flags and those that the fs supports. That
 136  * value is then compared against the user supplied flags. In case of bad user
 137  * flags, the invalid values will be written into the fieinfo structure, and
 138  * -EBADR is returned, which tells ioctl_fiemap() to return those values to
 139  * userspace. For this reason, a return code of -EBADR should be preserved.
 140  *
 141  * Returns 0 on success, -EBADR on bad flags.
 142  */
 143 int fiemap_check_flags(struct fiemap_extent_info *fieinfo, u32 fs_flags)
 144 {
 145         u32 incompat_flags;
 146 
 147         incompat_flags = fieinfo->fi_flags & ~(FIEMAP_FLAGS_COMPAT & fs_flags);
 148         if (incompat_flags) {
 149                 fieinfo->fi_flags = incompat_flags;
 150                 return -EBADR;
 151         }
 152         return 0;
 153 }
 154 EXPORT_SYMBOL(fiemap_check_flags);
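
/*
 * Illustrative sketch (hypothetical filesystem code, not from this file):
 * a minimal ->fiemap implementation for a filesystem that stores all file
 * data in a single inline extent. It validates the requested flags with
 * fiemap_check_flags() and reports one extent via fiemap_fill_next_extent().
 * foo_fiemap and the inline-data layout are assumptions.
 */
static int foo_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                      u64 start, u64 len)
{
        int ret;

        ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
        if (ret)
                return ret;

        /* Report the whole file as one inline, not-block-aligned extent. */
        ret = fiemap_fill_next_extent(fieinfo, 0, 0, i_size_read(inode),
                                      FIEMAP_EXTENT_DATA_INLINE |
                                      FIEMAP_EXTENT_LAST);
        return ret < 0 ? ret : 0;
}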
 155 
 156 static int fiemap_check_ranges(struct super_block *sb,
 157                                u64 start, u64 len, u64 *new_len)
 158 {
 159         u64 maxbytes = (u64) sb->s_maxbytes;
 160 
 161         *new_len = len;
 162 
 163         if (len == 0)
 164                 return -EINVAL;
 165 
 166         if (start > maxbytes)
 167                 return -EFBIG;
 168 
 169         /*
 170          * Shrink request scope to what the fs can actually handle.
 171          */
 172         if (len > maxbytes || (maxbytes - len) < start)
 173                 *new_len = maxbytes - start;
 174 
 175         return 0;
 176 }
 177 
 178 static int ioctl_fiemap(struct file *filp, unsigned long arg)
 179 {
 180         struct fiemap fiemap;
 181         struct fiemap __user *ufiemap = (struct fiemap __user *) arg;
 182         struct fiemap_extent_info fieinfo = { 0, };
 183         struct inode *inode = file_inode(filp);
 184         struct super_block *sb = inode->i_sb;
 185         u64 len;
 186         int error;
 187 
 188         if (!inode->i_op->fiemap)
 189                 return -EOPNOTSUPP;
 190 
 191         if (copy_from_user(&fiemap, ufiemap, sizeof(fiemap)))
 192                 return -EFAULT;
 193 
 194         if (fiemap.fm_extent_count > FIEMAP_MAX_EXTENTS)
 195                 return -EINVAL;
 196 
 197         error = fiemap_check_ranges(sb, fiemap.fm_start, fiemap.fm_length,
 198                                     &len);
 199         if (error)
 200                 return error;
 201 
 202         fieinfo.fi_flags = fiemap.fm_flags;
 203         fieinfo.fi_extents_max = fiemap.fm_extent_count;
 204         fieinfo.fi_extents_start = ufiemap->fm_extents;
 205 
 206         if (fiemap.fm_extent_count != 0 &&
 207             !access_ok(fieinfo.fi_extents_start,
 208                        fieinfo.fi_extents_max * sizeof(struct fiemap_extent)))
 209                 return -EFAULT;
 210 
 211         if (fieinfo.fi_flags & FIEMAP_FLAG_SYNC)
 212                 filemap_write_and_wait(inode->i_mapping);
 213 
 214         error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start, len);
 215         fiemap.fm_flags = fieinfo.fi_flags;
 216         fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped;
 217         if (copy_to_user(ufiemap, &fiemap, sizeof(fiemap)))
 218                 error = -EFAULT;
 219 
 220         return error;
 221 }
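
/*
 * Illustrative userspace sketch (not part of this file): retrieving up to
 * 32 extents of a file with FS_IOC_FIEMAP. Setting fm_extent_count to 0
 * would instead only count the extents, mirroring the fi_extents_max == 0
 * path in fiemap_fill_next_extent() above. The path is hypothetical.
 */
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <linux/fiemap.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        int fd = open("/some/file", O_RDONLY);
        unsigned int i, n = 32;
        struct fiemap *fm;

        fm = calloc(1, sizeof(*fm) + n * sizeof(struct fiemap_extent));
        if (fd < 0 || !fm)
                return 1;

        fm->fm_start = 0;
        fm->fm_length = FIEMAP_MAX_OFFSET;      /* map the whole file */
        fm->fm_flags = FIEMAP_FLAG_SYNC;
        fm->fm_extent_count = n;

        if (ioctl(fd, FS_IOC_FIEMAP, fm) < 0) {
                perror("FS_IOC_FIEMAP");
                return 1;
        }
        for (i = 0; i < fm->fm_mapped_extents; i++)
                printf("logical %llu physical %llu length %llu flags 0x%x\n",
                       (unsigned long long)fm->fm_extents[i].fe_logical,
                       (unsigned long long)fm->fm_extents[i].fe_physical,
                       (unsigned long long)fm->fm_extents[i].fe_length,
                       fm->fm_extents[i].fe_flags);
        return 0;
}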
 222 
 223 static long ioctl_file_clone(struct file *dst_file, unsigned long srcfd,
 224                              u64 off, u64 olen, u64 destoff)
 225 {
 226         struct fd src_file = fdget(srcfd);
 227         loff_t cloned;
 228         int ret;
 229 
 230         if (!src_file.file)
 231                 return -EBADF;
 232         ret = -EXDEV;
 233         if (src_file.file->f_path.mnt != dst_file->f_path.mnt)
 234                 goto fdput;
 235         cloned = vfs_clone_file_range(src_file.file, off, dst_file, destoff,
 236                                       olen, 0);
 237         if (cloned < 0)
 238                 ret = cloned;
 239         else if (olen && cloned != olen)
 240                 ret = -EINVAL;
 241         else
 242                 ret = 0;
 243 fdput:
 244         fdput(src_file);
 245         return ret;
 246 }
 247 
 248 static long ioctl_file_clone_range(struct file *file, void __user *argp)
 249 {
 250         struct file_clone_range args;
 251 
 252         if (copy_from_user(&args, argp, sizeof(args)))
 253                 return -EFAULT;
 254         return ioctl_file_clone(file, args.src_fd, args.src_offset,
 255                                 args.src_length, args.dest_offset);
 256 }
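
/*
 * Illustrative userspace sketch (not part of this file): reflinking the
 * first 1 MiB of one file into another with FICLONERANGE. Both files must
 * be on the same mount, as ioctl_file_clone() enforces above; the paths
 * are hypothetical.
 */
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <fcntl.h>
#include <stdio.h>

int main(void)
{
        int src = open("/mnt/data/src", O_RDONLY);
        int dst = open("/mnt/data/dst", O_WRONLY | O_CREAT, 0644);
        struct file_clone_range args = {
                .src_fd      = src,
                .src_offset  = 0,
                .src_length  = 1024 * 1024,
                .dest_offset = 0,
        };

        if (src < 0 || dst < 0 || ioctl(dst, FICLONERANGE, &args) < 0) {
                perror("FICLONERANGE");
                return 1;
        }
        return 0;
}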
 257 
 258 #ifdef CONFIG_BLOCK
 259 
 260 static inline sector_t logical_to_blk(struct inode *inode, loff_t offset)
 261 {
 262         return (offset >> inode->i_blkbits);
 263 }
 264 
 265 static inline loff_t blk_to_logical(struct inode *inode, sector_t blk)
 266 {
 267         return (blk << inode->i_blkbits);
 268 }
 269 
 270 /**
 271  * __generic_block_fiemap - FIEMAP for block based inodes (no locking)
 272  * @inode: the inode to map
 273  * @fieinfo: the fiemap info struct that will be passed back to userspace
 274  * @start: where to start mapping in the inode
 275  * @len: how much space to map
 276  * @get_block: the fs's get_block function
 277  *
 278  * This does FIEMAP for block based inodes.  Basically it will just loop
 279  * through get_block until we hit the number of extents we want to map, or we
 280  * go past the end of the file and hit a hole.
 281  *
 282  * If it is possible to have data blocks beyond a hole past @inode->i_size, then
 283  * please do not use this function, it will stop at the first unmapped block
 284  * beyond i_size.
 285  *
 286  * If you use this function directly, you need to do your own locking. Use
 287  * generic_block_fiemap if you want the locking done for you.
 288  */
 289 
 290 int __generic_block_fiemap(struct inode *inode,
 291                            struct fiemap_extent_info *fieinfo, loff_t start,
 292                            loff_t len, get_block_t *get_block)
 293 {
 294         struct buffer_head map_bh;
 295         sector_t start_blk, last_blk;
 296         loff_t isize = i_size_read(inode);
 297         u64 logical = 0, phys = 0, size = 0;
 298         u32 flags = FIEMAP_EXTENT_MERGED;
 299         bool past_eof = false, whole_file = false;
 300         int ret = 0;
 301 
 302         ret = fiemap_check_flags(fieinfo, FIEMAP_FLAG_SYNC);
 303         if (ret)
 304                 return ret;
 305 
 306         /*
 307          * Either the i_mutex or other appropriate locking needs to be held
 308          * since we expect isize to not change at all through the duration of
 309          * this call.
 310          */
 311         if (len >= isize) {
 312                 whole_file = true;
 313                 len = isize;
 314         }
 315 
 316         /*
 317          * Some filesystems can't deal with being asked to map less than
 318          * blocksize, so make sure our len is at least block length.
 319          */
 320         if (logical_to_blk(inode, len) == 0)
 321                 len = blk_to_logical(inode, 1);
 322 
 323         start_blk = logical_to_blk(inode, start);
 324         last_blk = logical_to_blk(inode, start + len - 1);
 325 
 326         do {
 327                 /*
 328                  * we set b_size to the total size we want so it will map as
 329                  * many contiguous blocks as possible at once
 330                  */
 331                 memset(&map_bh, 0, sizeof(struct buffer_head));
 332                 map_bh.b_size = len;
 333 
 334                 ret = get_block(inode, start_blk, &map_bh, 0);
 335                 if (ret)
 336                         break;
 337 
 338                 /* HOLE */
 339                 if (!buffer_mapped(&map_bh)) {
 340                         start_blk++;
 341 
 342                         /*
  343                          * Handle the case where there is an allocated
  344                          * block at the front of the file followed by
  345                          * nothing but holes up to the end of the file,
  346                          * so that the extent at the front gets properly
  347                          * marked with FIEMAP_EXTENT_LAST.
 348                          */
 349                         if (!past_eof &&
 350                             blk_to_logical(inode, start_blk) >= isize)
  351                                 past_eof = true;
 352 
 353                         /*
 354                          * First hole after going past the EOF, this is our
 355                          * last extent
 356                          */
 357                         if (past_eof && size) {
 358                                 flags = FIEMAP_EXTENT_MERGED|FIEMAP_EXTENT_LAST;
 359                                 ret = fiemap_fill_next_extent(fieinfo, logical,
 360                                                               phys, size,
 361                                                               flags);
 362                         } else if (size) {
 363                                 ret = fiemap_fill_next_extent(fieinfo, logical,
 364                                                               phys, size, flags);
 365                                 size = 0;
 366                         }
 367 
 368                         /* if we have holes up to/past EOF then we're done */
 369                         if (start_blk > last_blk || past_eof || ret)
 370                                 break;
 371                 } else {
 372                         /*
 373                          * We have gone over the length of what we wanted to
 374                          * map, and it wasn't the entire file, so add the extent
 375                          * we got last time and exit.
 376                          *
 377                          * This is for the case where say we want to map all the
 378                          * way up to the second to the last block in a file, but
 379                          * the last block is a hole, making the second to last
 380                          * block FIEMAP_EXTENT_LAST.  In this case we want to
 381                          * see if there is a hole after the second to last block
 382                          * so we can mark it properly.  If we found data after
 383                          * we exceeded the length we were requesting, then we
 384                          * are good to go, just add the extent to the fieinfo
 385                          * and break
 386                          */
 387                         if (start_blk > last_blk && !whole_file) {
 388                                 ret = fiemap_fill_next_extent(fieinfo, logical,
 389                                                               phys, size,
 390                                                               flags);
 391                                 break;
 392                         }
 393 
 394                         /*
 395                          * if size != 0 then we know we already have an extent
 396                          * to add, so add it.
 397                          */
 398                         if (size) {
 399                                 ret = fiemap_fill_next_extent(fieinfo, logical,
 400                                                               phys, size,
 401                                                               flags);
 402                                 if (ret)
 403                                         break;
 404                         }
 405 
 406                         logical = blk_to_logical(inode, start_blk);
 407                         phys = blk_to_logical(inode, map_bh.b_blocknr);
 408                         size = map_bh.b_size;
 409                         flags = FIEMAP_EXTENT_MERGED;
 410 
 411                         start_blk += logical_to_blk(inode, size);
 412 
 413                         /*
 414                          * If we are past the EOF, then we need to make sure as
 415                          * soon as we find a hole that the last extent we found
 416                          * is marked with FIEMAP_EXTENT_LAST
 417                          */
 418                         if (!past_eof && logical + size >= isize)
 419                                 past_eof = true;
 420                 }
 421                 cond_resched();
 422                 if (fatal_signal_pending(current)) {
 423                         ret = -EINTR;
 424                         break;
 425                 }
 426 
 427         } while (1);
 428 
 429         /* If ret is 1 then we just hit the end of the extent array */
 430         if (ret == 1)
 431                 ret = 0;
 432 
 433         return ret;
 434 }
 435 EXPORT_SYMBOL(__generic_block_fiemap);
 436 
 437 /**
 438  * generic_block_fiemap - FIEMAP for block based inodes
 439  * @inode: The inode to map
 440  * @fieinfo: The mapping information
 441  * @start: The initial block to map
  442  * @len: The length of the extent to attempt to map
 443  * @get_block: The block mapping function for the fs
 444  *
 445  * Calls __generic_block_fiemap to map the inode, after taking
 446  * the inode's mutex lock.
 447  */
 448 
 449 int generic_block_fiemap(struct inode *inode,
 450                          struct fiemap_extent_info *fieinfo, u64 start,
 451                          u64 len, get_block_t *get_block)
 452 {
 453         int ret;
 454         inode_lock(inode);
 455         ret = __generic_block_fiemap(inode, fieinfo, start, len, get_block);
 456         inode_unlock(inode);
 457         return ret;
 458 }
 459 EXPORT_SYMBOL(generic_block_fiemap);
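
/*
 * Illustrative sketch (hypothetical filesystem code, not from this file):
 * a block-based filesystem can implement ->fiemap by forwarding to
 * generic_block_fiemap() with its get_block routine, much as ext2 does.
 * bar_get_block and bar_file_inode_operations are assumptions.
 */
static int bar_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                      u64 start, u64 len)
{
        return generic_block_fiemap(inode, fieinfo, start, len,
                                    bar_get_block);
}

const struct inode_operations bar_file_inode_operations = {
        .fiemap = bar_fiemap,
};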
 460 
 461 #endif  /*  CONFIG_BLOCK  */
 462 
 463 /*
 464  * This provides compatibility with legacy XFS pre-allocation ioctls
 465  * which predate the fallocate syscall.
 466  *
 467  * Only the l_start, l_len and l_whence fields of the 'struct space_resv'
  468  * are used here; the rest are ignored.
 469  */
 470 int ioctl_preallocate(struct file *filp, void __user *argp)
 471 {
 472         struct inode *inode = file_inode(filp);
 473         struct space_resv sr;
 474 
 475         if (copy_from_user(&sr, argp, sizeof(sr)))
 476                 return -EFAULT;
 477 
 478         switch (sr.l_whence) {
 479         case SEEK_SET:
 480                 break;
 481         case SEEK_CUR:
 482                 sr.l_start += filp->f_pos;
 483                 break;
 484         case SEEK_END:
 485                 sr.l_start += i_size_read(inode);
 486                 break;
 487         default:
 488                 return -EINVAL;
 489         }
 490 
 491         return vfs_fallocate(filp, FALLOC_FL_KEEP_SIZE, sr.l_start, sr.l_len);
 492 }
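
/*
 * Illustrative userspace sketch (not part of this file): the modern
 * equivalent of the legacy XFS reservation ioctls handled above is a
 * plain fallocate() with FALLOC_FL_KEEP_SIZE, which is exactly what
 * ioctl_preallocate() issues internally. Path and size are hypothetical.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <stdio.h>

int main(void)
{
        int fd = open("/some/file", O_WRONLY);

        /* Reserve 16 MiB at offset 0 without changing the file size. */
        if (fd < 0 || fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 16 << 20) < 0) {
                perror("fallocate");
                return 1;
        }
        return 0;
}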
 493 
 494 static int file_ioctl(struct file *filp, unsigned int cmd,
 495                 unsigned long arg)
 496 {
 497         struct inode *inode = file_inode(filp);
 498         int __user *p = (int __user *)arg;
 499 
 500         switch (cmd) {
 501         case FIBMAP:
 502                 return ioctl_fibmap(filp, p);
 503         case FIONREAD:
 504                 return put_user(i_size_read(inode) - filp->f_pos, p);
 505         case FS_IOC_RESVSP:
 506         case FS_IOC_RESVSP64:
 507                 return ioctl_preallocate(filp, p);
 508         }
 509 
 510         return vfs_ioctl(filp, cmd, arg);
 511 }
 512 
 513 static int ioctl_fionbio(struct file *filp, int __user *argp)
 514 {
 515         unsigned int flag;
 516         int on, error;
 517 
 518         error = get_user(on, argp);
 519         if (error)
 520                 return error;
 521         flag = O_NONBLOCK;
 522 #ifdef __sparc__
 523         /* SunOS compatibility item. */
 524         if (O_NONBLOCK != O_NDELAY)
 525                 flag |= O_NDELAY;
 526 #endif
 527         spin_lock(&filp->f_lock);
 528         if (on)
 529                 filp->f_flags |= flag;
 530         else
 531                 filp->f_flags &= ~flag;
 532         spin_unlock(&filp->f_lock);
 533         return error;
 534 }
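
/*
 * Illustrative userspace sketch (not part of this file): FIONBIO toggles
 * O_NONBLOCK on an already-open descriptor in a single call, equivalent
 * to the usual fcntl(F_GETFL)/fcntl(F_SETFL) sequence.
 */
#include <sys/ioctl.h>
#include <stdio.h>

static int set_nonblocking(int fd, int on)
{
        /* on != 0 sets O_NONBLOCK, on == 0 clears it */
        if (ioctl(fd, FIONBIO, &on) < 0) {
                perror("FIONBIO");
                return -1;
        }
        return 0;
}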
 535 
 536 static int ioctl_fioasync(unsigned int fd, struct file *filp,
 537                           int __user *argp)
 538 {
 539         unsigned int flag;
 540         int on, error;
 541 
 542         error = get_user(on, argp);
 543         if (error)
 544                 return error;
 545         flag = on ? FASYNC : 0;
 546 
 547         /* Did FASYNC state change ? */
 548         if ((flag ^ filp->f_flags) & FASYNC) {
 549                 if (filp->f_op->fasync)
 550                         /* fasync() adjusts filp->f_flags */
 551                         error = filp->f_op->fasync(fd, filp, on);
 552                 else
 553                         error = -ENOTTY;
 554         }
 555         return error < 0 ? error : 0;
 556 }
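
/*
 * Illustrative userspace sketch (not part of this file): FIOASYNC toggles
 * FASYNC, enabling SIGIO delivery, much like fcntl(F_SETFL, ... | O_ASYNC).
 * F_SETOWN selects which process receives the signal.
 */
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>

static int enable_sigio(int fd)
{
        int on = 1;

        if (fcntl(fd, F_SETOWN, getpid()) < 0)
                return -1;
        return ioctl(fd, FIOASYNC, &on);
}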
 557 
 558 static int ioctl_fsfreeze(struct file *filp)
 559 {
 560         struct super_block *sb = file_inode(filp)->i_sb;
 561 
 562         if (!ns_capable(sb->s_user_ns, CAP_SYS_ADMIN))
 563                 return -EPERM;
 564 
 565         /* If filesystem doesn't support freeze feature, return. */
 566         if (sb->s_op->freeze_fs == NULL && sb->s_op->freeze_super == NULL)
 567                 return -EOPNOTSUPP;
 568 
 569         /* Freeze */
 570         if (sb->s_op->freeze_super)
 571                 return sb->s_op->freeze_super(sb);
 572         return freeze_super(sb);
 573 }
 574 
 575 static int ioctl_fsthaw(struct file *filp)
 576 {
 577         struct super_block *sb = file_inode(filp)->i_sb;
 578 
 579         if (!ns_capable(sb->s_user_ns, CAP_SYS_ADMIN))
 580                 return -EPERM;
 581 
 582         /* Thaw */
 583         if (sb->s_op->thaw_super)
 584                 return sb->s_op->thaw_super(sb);
 585         return thaw_super(sb);
 586 }
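
/*
 * Illustrative userspace sketch (not part of this file): freezing and
 * thawing a filesystem around a snapshot, as fsfreeze(8) does. Requires
 * CAP_SYS_ADMIN in the superblock's user namespace, as checked above;
 * the mount point is hypothetical.
 */
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <fcntl.h>
#include <stdio.h>

int main(void)
{
        int fd = open("/mnt/data", O_RDONLY);
        int arg = 0;                    /* the argument is ignored */

        if (fd < 0 || ioctl(fd, FIFREEZE, &arg) < 0) {
                perror("FIFREEZE");
                return 1;
        }
        /* ... take the snapshot while writes are blocked ... */
        if (ioctl(fd, FITHAW, &arg) < 0) {
                perror("FITHAW");
                return 1;
        }
        return 0;
}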
 587 
 588 static int ioctl_file_dedupe_range(struct file *file, void __user *arg)
 589 {
 590         struct file_dedupe_range __user *argp = arg;
 591         struct file_dedupe_range *same = NULL;
 592         int ret;
 593         unsigned long size;
 594         u16 count;
 595 
 596         if (get_user(count, &argp->dest_count)) {
 597                 ret = -EFAULT;
 598                 goto out;
 599         }
 600 
 601         size = offsetof(struct file_dedupe_range __user, info[count]);
 602         if (size > PAGE_SIZE) {
 603                 ret = -ENOMEM;
 604                 goto out;
 605         }
 606 
 607         same = memdup_user(argp, size);
 608         if (IS_ERR(same)) {
 609                 ret = PTR_ERR(same);
 610                 same = NULL;
 611                 goto out;
 612         }
 613 
 614         same->dest_count = count;
 615         ret = vfs_dedupe_file_range(file, same);
 616         if (ret)
 617                 goto out;
 618 
 619         ret = copy_to_user(argp, same, size);
 620         if (ret)
 621                 ret = -EFAULT;
 622 
 623 out:
 624         kfree(same);
 625         return ret;
 626 }
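
/*
 * Illustrative userspace sketch (not part of this file): asking the kernel
 * to deduplicate 64 KiB of one file against another with FIDEDUPERANGE.
 * The variable-length info[] array follows the fixed header, matching the
 * dest_count handling above; the paths are hypothetical.
 */
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
        int src = open("/mnt/data/a", O_RDONLY);
        int dst = open("/mnt/data/b", O_RDWR);
        struct file_dedupe_range *range;

        range = calloc(1, sizeof(*range) +
                          sizeof(struct file_dedupe_range_info));
        if (src < 0 || dst < 0 || !range)
                return 1;

        range->src_offset = 0;
        range->src_length = 64 * 1024;
        range->dest_count = 1;
        range->info[0].dest_fd = dst;
        range->info[0].dest_offset = 0;

        if (ioctl(src, FIDEDUPERANGE, range) < 0) {
                perror("FIDEDUPERANGE");
                return 1;
        }
        printf("status %d, %llu bytes deduped\n", range->info[0].status,
               (unsigned long long)range->info[0].bytes_deduped);
        return 0;
}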
 627 
 628 /*
 629  * When you add any new common ioctls to the switches above and below
 630  * please update compat_sys_ioctl() too.
 631  *
 632  * do_vfs_ioctl() is not for drivers and not intended to be EXPORT_SYMBOL()'d.
 633  * It's just a simple helper for sys_ioctl and compat_sys_ioctl.
 634  */
 635 int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd,
 636              unsigned long arg)
 637 {
 638         int error = 0;
 639         int __user *argp = (int __user *)arg;
 640         struct inode *inode = file_inode(filp);
 641 
 642         switch (cmd) {
 643         case FIOCLEX:
 644                 set_close_on_exec(fd, 1);
 645                 break;
 646 
 647         case FIONCLEX:
 648                 set_close_on_exec(fd, 0);
 649                 break;
 650 
 651         case FIONBIO:
 652                 error = ioctl_fionbio(filp, argp);
 653                 break;
 654 
 655         case FIOASYNC:
 656                 error = ioctl_fioasync(fd, filp, argp);
 657                 break;
 658 
 659         case FIOQSIZE:
 660                 if (S_ISDIR(inode->i_mode) || S_ISREG(inode->i_mode) ||
 661                     S_ISLNK(inode->i_mode)) {
 662                         loff_t res = inode_get_bytes(inode);
 663                         error = copy_to_user(argp, &res, sizeof(res)) ?
 664                                         -EFAULT : 0;
 665                 } else
 666                         error = -ENOTTY;
 667                 break;
 668 
 669         case FIFREEZE:
 670                 error = ioctl_fsfreeze(filp);
 671                 break;
 672 
 673         case FITHAW:
 674                 error = ioctl_fsthaw(filp);
 675                 break;
 676 
 677         case FS_IOC_FIEMAP:
 678                 return ioctl_fiemap(filp, arg);
 679 
 680         case FIGETBSZ:
 681                 /* anon_bdev filesystems may not have a block size */
 682                 if (!inode->i_sb->s_blocksize)
 683                         return -EINVAL;
 684                 return put_user(inode->i_sb->s_blocksize, argp);
 685 
 686         case FICLONE:
 687                 return ioctl_file_clone(filp, arg, 0, 0, 0);
 688 
 689         case FICLONERANGE:
 690                 return ioctl_file_clone_range(filp, argp);
 691 
 692         case FIDEDUPERANGE:
 693                 return ioctl_file_dedupe_range(filp, argp);
 694 
 695         default:
 696                 if (S_ISREG(inode->i_mode))
 697                         error = file_ioctl(filp, cmd, arg);
 698                 else
 699                         error = vfs_ioctl(filp, cmd, arg);
 700                 break;
 701         }
 702         return error;
 703 }
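
/*
 * Illustrative userspace sketch (not part of this file): two of the
 * generic commands dispatched above, FIGETBSZ and FIONREAD, queried on
 * an open regular file; the path is hypothetical.
 */
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <fcntl.h>
#include <stdio.h>

int main(void)
{
        int fd = open("/some/file", O_RDONLY);
        int blocksize = 0, unread = 0;

        if (fd < 0 ||
            ioctl(fd, FIGETBSZ, &blocksize) < 0 ||
            ioctl(fd, FIONREAD, &unread) < 0) {
                perror("ioctl");
                return 1;
        }
        printf("block size %d, bytes past f_pos %d\n", blocksize, unread);
        return 0;
}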
 704 
 705 int ksys_ioctl(unsigned int fd, unsigned int cmd, unsigned long arg)
 706 {
 707         int error;
 708         struct fd f = fdget(fd);
 709 
 710         if (!f.file)
 711                 return -EBADF;
 712         error = security_file_ioctl(f.file, cmd, arg);
 713         if (!error)
 714                 error = do_vfs_ioctl(f.file, fd, cmd, arg);
 715         fdput(f);
 716         return error;
 717 }
 718 
 719 SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd, unsigned long, arg)
 720 {
 721         return ksys_ioctl(fd, cmd, arg);
 722 }
 723 
 724 #ifdef CONFIG_COMPAT
 725 /**
 726  * compat_ptr_ioctl - generic implementation of .compat_ioctl file operation
 727  *
 728  * This is not normally called as a function, but instead set in struct
 729  * file_operations as
 730  *
 731  *     .compat_ioctl = compat_ptr_ioctl,
 732  *
 733  * On most architectures, the compat_ptr_ioctl() just passes all arguments
 734  * to the corresponding ->ioctl handler. The exception is arch/s390, where
 735  * compat_ptr() clears the top bit of a 32-bit pointer value, so user space
 736  * pointers to the second 2GB alias the first 2GB, as is the case for
 737  * native 32-bit s390 user space.
 738  *
 739  * The compat_ptr_ioctl() function must therefore be used only with ioctl
 740  * functions that either ignore the argument or pass a pointer to a
 741  * compatible data type.
 742  *
 743  * If any ioctl command handled by fops->unlocked_ioctl passes a plain
 744  * integer instead of a pointer, or any of the passed data types
 745  * is incompatible between 32-bit and 64-bit architectures, a proper
 746  * handler is required instead of compat_ptr_ioctl.
 747  */
 748 long compat_ptr_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 749 {
 750         if (!file->f_op->unlocked_ioctl)
 751                 return -ENOIOCTLCMD;
 752 
 753         return file->f_op->unlocked_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
 754 }
 755 EXPORT_SYMBOL(compat_ptr_ioctl);
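
/*
 * Illustrative sketch (hypothetical driver code, not from this file):
 * a driver whose ioctl arguments are all pointers to layout-compatible
 * structures can use compat_ptr_ioctl() directly, as the kernel-doc above
 * describes. This extends the hypothetical foo_fops sketched after
 * vfs_ioctl() earlier in this listing with a .compat_ioctl entry.
 */
static const struct file_operations foo_fops = {
        .owner          = THIS_MODULE,
        .unlocked_ioctl = foo_unlocked_ioctl,
        .compat_ioctl   = compat_ptr_ioctl,
};
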
 756 #endif
