fs/udf/super.c


DEFINITIONS

This source file includes the following definitions.
  1. udf_sb_lvidiu
  2. udf_mount
  3. udf_alloc_inode
  4. udf_free_in_core_inode
  5. init_once
  6. init_inodecache
  7. destroy_inodecache
  8. init_udf_fs
  9. exit_udf_fs
  10. udf_sb_alloc_partition_maps
  11. udf_sb_free_bitmap
  12. udf_free_partition
  13. udf_sb_free_partitions
  14. udf_show_options
  15. udf_parse_options
  16. udf_remount_fs
  17. identify_vsd
  18. udf_check_vsd
  19. udf_verify_domain_identifier
  20. udf_load_fileset
  21. udf_find_fileset
  22. udf_load_pvoldesc
  23. udf_find_metadata_inode_efe
  24. udf_load_metadata_files
  25. udf_compute_nr_groups
  26. udf_sb_alloc_bitmap
  27. check_partition_desc
  28. udf_fill_partdesc_info
  29. udf_find_vat_block
  30. udf_load_vat
  31. udf_load_partdesc
  32. udf_load_sparable_map
  33. udf_load_logicalvol
  34. udf_load_logicalvolint
  35. handle_partition_descriptor
  36. get_volume_descriptor_record
  37. udf_process_sequence
  38. udf_load_sequence
  39. udf_check_anchor_block
  40. udf_scan_anchors
  41. udf_find_anchor
  42. udf_load_vrs
  43. udf_finalize_lvid
  44. udf_open_lvid
  45. udf_close_lvid
  46. lvid_get_unique_id
  47. udf_fill_super
  48. _udf_err
  49. _udf_warn
  50. udf_put_super
  51. udf_sync_fs
  52. udf_statfs
  53. udf_count_free_bitmap
  54. udf_count_free_table
  55. udf_count_free

   1 /*
   2  * super.c
   3  *
   4  * PURPOSE
   5  *  Super block routines for the OSTA-UDF(tm) filesystem.
   6  *
   7  * DESCRIPTION
   8  *  OSTA-UDF(tm) = Optical Storage Technology Association
   9  *  Universal Disk Format.
  10  *
  11  *  This code is based on version 2.00 of the UDF specification,
  12  *  and revision 3 of the ECMA 167 standard [equivalent to ISO 13346].
  13  *    http://www.osta.org/
  14  *    http://www.ecma.ch/
  15  *    http://www.iso.org/
  16  *
  17  * COPYRIGHT
  18  *  This file is distributed under the terms of the GNU General Public
  19  *  License (GPL). Copies of the GPL can be obtained from:
  20  *    ftp://prep.ai.mit.edu/pub/gnu/GPL
  21  *  Each contributing author retains all rights to their own work.
  22  *
  23  *  (C) 1998 Dave Boynton
  24  *  (C) 1998-2004 Ben Fennema
  25  *  (C) 2000 Stelias Computing Inc
  26  *
  27  * HISTORY
  28  *
  29  *  09/24/98 dgb  changed to allow compiling outside of kernel, and
  30  *                added some debugging.
  31  *  10/01/98 dgb  updated to allow (some) possibility of compiling w/2.0.34
  32  *  10/16/98      attempting some multi-session support
  33  *  10/17/98      added freespace count for "df"
  34  *  11/11/98 gr   added novrs option
  35  *  11/26/98 dgb  added fileset,anchor mount options
  36  *  12/06/98 blf  really hosed things royally. vat/sparing support. sequenced
  37  *                vol descs. rewrote option handling based on isofs
  38  *  12/20/98      find the free space bitmap (if it exists)
  39  */
  40 
  41 #include "udfdecl.h"
  42 
  43 #include <linux/blkdev.h>
  44 #include <linux/slab.h>
  45 #include <linux/kernel.h>
  46 #include <linux/module.h>
  47 #include <linux/parser.h>
  48 #include <linux/stat.h>
  49 #include <linux/cdrom.h>
  50 #include <linux/nls.h>
  51 #include <linux/vfs.h>
  52 #include <linux/vmalloc.h>
  53 #include <linux/errno.h>
  54 #include <linux/mount.h>
  55 #include <linux/seq_file.h>
  56 #include <linux/bitmap.h>
  57 #include <linux/crc-itu-t.h>
  58 #include <linux/log2.h>
  59 #include <asm/byteorder.h>
  60 
  61 #include "udf_sb.h"
  62 #include "udf_i.h"
  63 
  64 #include <linux/init.h>
  65 #include <linux/uaccess.h>
  66 
  67 enum {
  68         VDS_POS_PRIMARY_VOL_DESC,
  69         VDS_POS_UNALLOC_SPACE_DESC,
  70         VDS_POS_LOGICAL_VOL_DESC,
  71         VDS_POS_IMP_USE_VOL_DESC,
  72         VDS_POS_LENGTH
  73 };
  74 
  75 #define VSD_FIRST_SECTOR_OFFSET         32768
  76 #define VSD_MAX_SECTOR_OFFSET           0x800000
  77 
  78 /*
  79  * Maximum number of Terminating Descriptor / Logical Volume Integrity
  80  * Descriptor redirections. The chosen numbers are arbitrary: hopefully large
  81  * enough not to limit any real use of rewritten inodes on write-once media,
  82  * while still avoiding looping for too long on corrupted media.
  83  */
  84 #define UDF_MAX_TD_NESTING 64
  85 #define UDF_MAX_LVID_NESTING 1000
  86 
  87 enum { UDF_MAX_LINKS = 0xffff };
  88 
  89 /* These are the "meat" - everything else is stuffing */
  90 static int udf_fill_super(struct super_block *, void *, int);
  91 static void udf_put_super(struct super_block *);
  92 static int udf_sync_fs(struct super_block *, int);
  93 static int udf_remount_fs(struct super_block *, int *, char *);
  94 static void udf_load_logicalvolint(struct super_block *, struct kernel_extent_ad);
  95 static void udf_open_lvid(struct super_block *);
  96 static void udf_close_lvid(struct super_block *);
  97 static unsigned int udf_count_free(struct super_block *);
  98 static int udf_statfs(struct dentry *, struct kstatfs *);
  99 static int udf_show_options(struct seq_file *, struct dentry *);
 100 
 101 struct logicalVolIntegrityDescImpUse *udf_sb_lvidiu(struct super_block *sb)
 102 {
 103         struct logicalVolIntegrityDesc *lvid;
 104         unsigned int partnum;
 105         unsigned int offset;
 106 
 107         if (!UDF_SB(sb)->s_lvid_bh)
 108                 return NULL;
 109         lvid = (struct logicalVolIntegrityDesc *)UDF_SB(sb)->s_lvid_bh->b_data;
 110         partnum = le32_to_cpu(lvid->numOfPartitions);
 111         if ((sb->s_blocksize - sizeof(struct logicalVolIntegrityDescImpUse) -
 112              offsetof(struct logicalVolIntegrityDesc, impUse)) /
 113              (2 * sizeof(uint32_t)) < partnum) {
 114                 udf_err(sb, "Logical volume integrity descriptor corrupted "
 115                         "(numOfPartitions = %u)!\n", partnum);
 116                 return NULL;
 117         }
 118         /* The offset skips the freeSpaceTable and sizeTable arrays */
 119         offset = partnum * 2 * sizeof(uint32_t);
 120         return (struct logicalVolIntegrityDescImpUse *)&(lvid->impUse[offset]);
 121 }
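
/*
 * Illustrative sketch only (not part of the driver): why the offset above is
 * partnum * 2 * sizeof(uint32_t). In an LVID, impUse[] begins with two
 * numOfPartitions-sized arrays of 32-bit values (the free space table and the
 * size table); the implementation use block follows them. The helper below is
 * a hypothetical standalone restatement of that address arithmetic, not code
 * used anywhere in this file.
 */
static inline void *example_lvidiu_from_impuse(uint8_t *impUse,
                                               unsigned int partnum)
{
        /* skip freeSpaceTable[partnum] and sizeTable[partnum] */
        return impUse + partnum * 2 * sizeof(uint32_t);
}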
 122 
 123 /* UDF filesystem type */
 124 static struct dentry *udf_mount(struct file_system_type *fs_type,
 125                       int flags, const char *dev_name, void *data)
 126 {
 127         return mount_bdev(fs_type, flags, dev_name, data, udf_fill_super);
 128 }
 129 
 130 static struct file_system_type udf_fstype = {
 131         .owner          = THIS_MODULE,
 132         .name           = "udf",
 133         .mount          = udf_mount,
 134         .kill_sb        = kill_block_super,
 135         .fs_flags       = FS_REQUIRES_DEV,
 136 };
 137 MODULE_ALIAS_FS("udf");
 138 
 139 static struct kmem_cache *udf_inode_cachep;
 140 
 141 static struct inode *udf_alloc_inode(struct super_block *sb)
 142 {
 143         struct udf_inode_info *ei;
 144         ei = kmem_cache_alloc(udf_inode_cachep, GFP_KERNEL);
 145         if (!ei)
 146                 return NULL;
 147 
 148         ei->i_unique = 0;
 149         ei->i_lenExtents = 0;
 150         ei->i_lenStreams = 0;
 151         ei->i_next_alloc_block = 0;
 152         ei->i_next_alloc_goal = 0;
 153         ei->i_strat4096 = 0;
 154         ei->i_streamdir = 0;
 155         init_rwsem(&ei->i_data_sem);
 156         ei->cached_extent.lstart = -1;
 157         spin_lock_init(&ei->i_extent_cache_lock);
 158 
 159         return &ei->vfs_inode;
 160 }
 161 
 162 static void udf_free_in_core_inode(struct inode *inode)
 163 {
 164         kmem_cache_free(udf_inode_cachep, UDF_I(inode));
 165 }
 166 
 167 static void init_once(void *foo)
 168 {
 169         struct udf_inode_info *ei = (struct udf_inode_info *)foo;
 170 
 171         ei->i_ext.i_data = NULL;
 172         inode_init_once(&ei->vfs_inode);
 173 }
 174 
 175 static int __init init_inodecache(void)
 176 {
 177         udf_inode_cachep = kmem_cache_create("udf_inode_cache",
 178                                              sizeof(struct udf_inode_info),
 179                                              0, (SLAB_RECLAIM_ACCOUNT |
 180                                                  SLAB_MEM_SPREAD |
 181                                                  SLAB_ACCOUNT),
 182                                              init_once);
 183         if (!udf_inode_cachep)
 184                 return -ENOMEM;
 185         return 0;
 186 }
 187 
 188 static void destroy_inodecache(void)
 189 {
 190         /*
 191          * Make sure all delayed rcu free inodes are flushed before we
 192          * destroy cache.
 193          */
 194         rcu_barrier();
 195         kmem_cache_destroy(udf_inode_cachep);
 196 }
 197 
 198 /* Superblock operations */
 199 static const struct super_operations udf_sb_ops = {
 200         .alloc_inode    = udf_alloc_inode,
 201         .free_inode     = udf_free_in_core_inode,
 202         .write_inode    = udf_write_inode,
 203         .evict_inode    = udf_evict_inode,
 204         .put_super      = udf_put_super,
 205         .sync_fs        = udf_sync_fs,
 206         .statfs         = udf_statfs,
 207         .remount_fs     = udf_remount_fs,
 208         .show_options   = udf_show_options,
 209 };
 210 
 211 struct udf_options {
 212         unsigned char novrs;
 213         unsigned int blocksize;
 214         unsigned int session;
 215         unsigned int lastblock;
 216         unsigned int anchor;
 217         unsigned int flags;
 218         umode_t umask;
 219         kgid_t gid;
 220         kuid_t uid;
 221         umode_t fmode;
 222         umode_t dmode;
 223         struct nls_table *nls_map;
 224 };
 225 
 226 static int __init init_udf_fs(void)
 227 {
 228         int err;
 229 
 230         err = init_inodecache();
 231         if (err)
 232                 goto out1;
 233         err = register_filesystem(&udf_fstype);
 234         if (err)
 235                 goto out;
 236 
 237         return 0;
 238 
 239 out:
 240         destroy_inodecache();
 241 
 242 out1:
 243         return err;
 244 }
 245 
 246 static void __exit exit_udf_fs(void)
 247 {
 248         unregister_filesystem(&udf_fstype);
 249         destroy_inodecache();
 250 }
 251 
 252 static int udf_sb_alloc_partition_maps(struct super_block *sb, u32 count)
 253 {
 254         struct udf_sb_info *sbi = UDF_SB(sb);
 255 
 256         sbi->s_partmaps = kcalloc(count, sizeof(*sbi->s_partmaps), GFP_KERNEL);
 257         if (!sbi->s_partmaps) {
 258                 sbi->s_partitions = 0;
 259                 return -ENOMEM;
 260         }
 261 
 262         sbi->s_partitions = count;
 263         return 0;
 264 }
 265 
 266 static void udf_sb_free_bitmap(struct udf_bitmap *bitmap)
 267 {
 268         int i;
 269         int nr_groups = bitmap->s_nr_groups;
 270 
 271         for (i = 0; i < nr_groups; i++)
 272                 brelse(bitmap->s_block_bitmap[i]);
 273 
 274         kvfree(bitmap);
 275 }
 276 
 277 static void udf_free_partition(struct udf_part_map *map)
 278 {
 279         int i;
 280         struct udf_meta_data *mdata;
 281 
 282         if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
 283                 iput(map->s_uspace.s_table);
 284         if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
 285                 udf_sb_free_bitmap(map->s_uspace.s_bitmap);
 286         if (map->s_partition_type == UDF_SPARABLE_MAP15)
 287                 for (i = 0; i < 4; i++)
 288                         brelse(map->s_type_specific.s_sparing.s_spar_map[i]);
 289         else if (map->s_partition_type == UDF_METADATA_MAP25) {
 290                 mdata = &map->s_type_specific.s_metadata;
 291                 iput(mdata->s_metadata_fe);
 292                 mdata->s_metadata_fe = NULL;
 293 
 294                 iput(mdata->s_mirror_fe);
 295                 mdata->s_mirror_fe = NULL;
 296 
 297                 iput(mdata->s_bitmap_fe);
 298                 mdata->s_bitmap_fe = NULL;
 299         }
 300 }
 301 
 302 static void udf_sb_free_partitions(struct super_block *sb)
 303 {
 304         struct udf_sb_info *sbi = UDF_SB(sb);
 305         int i;
 306 
 307         if (!sbi->s_partmaps)
 308                 return;
 309         for (i = 0; i < sbi->s_partitions; i++)
 310                 udf_free_partition(&sbi->s_partmaps[i]);
 311         kfree(sbi->s_partmaps);
 312         sbi->s_partmaps = NULL;
 313 }
 314 
 315 static int udf_show_options(struct seq_file *seq, struct dentry *root)
 316 {
 317         struct super_block *sb = root->d_sb;
 318         struct udf_sb_info *sbi = UDF_SB(sb);
 319 
 320         if (!UDF_QUERY_FLAG(sb, UDF_FLAG_STRICT))
 321                 seq_puts(seq, ",nostrict");
 322         if (UDF_QUERY_FLAG(sb, UDF_FLAG_BLOCKSIZE_SET))
 323                 seq_printf(seq, ",bs=%lu", sb->s_blocksize);
 324         if (UDF_QUERY_FLAG(sb, UDF_FLAG_UNHIDE))
 325                 seq_puts(seq, ",unhide");
 326         if (UDF_QUERY_FLAG(sb, UDF_FLAG_UNDELETE))
 327                 seq_puts(seq, ",undelete");
 328         if (!UDF_QUERY_FLAG(sb, UDF_FLAG_USE_AD_IN_ICB))
 329                 seq_puts(seq, ",noadinicb");
 330         if (UDF_QUERY_FLAG(sb, UDF_FLAG_USE_SHORT_AD))
 331                 seq_puts(seq, ",shortad");
 332         if (UDF_QUERY_FLAG(sb, UDF_FLAG_UID_FORGET))
 333                 seq_puts(seq, ",uid=forget");
 334         if (UDF_QUERY_FLAG(sb, UDF_FLAG_GID_FORGET))
 335                 seq_puts(seq, ",gid=forget");
 336         if (UDF_QUERY_FLAG(sb, UDF_FLAG_UID_SET))
 337                 seq_printf(seq, ",uid=%u", from_kuid(&init_user_ns, sbi->s_uid));
 338         if (UDF_QUERY_FLAG(sb, UDF_FLAG_GID_SET))
 339                 seq_printf(seq, ",gid=%u", from_kgid(&init_user_ns, sbi->s_gid));
 340         if (sbi->s_umask != 0)
 341                 seq_printf(seq, ",umask=%ho", sbi->s_umask);
 342         if (sbi->s_fmode != UDF_INVALID_MODE)
 343                 seq_printf(seq, ",mode=%ho", sbi->s_fmode);
 344         if (sbi->s_dmode != UDF_INVALID_MODE)
 345                 seq_printf(seq, ",dmode=%ho", sbi->s_dmode);
 346         if (UDF_QUERY_FLAG(sb, UDF_FLAG_SESSION_SET))
 347                 seq_printf(seq, ",session=%d", sbi->s_session);
 348         if (UDF_QUERY_FLAG(sb, UDF_FLAG_LASTBLOCK_SET))
 349                 seq_printf(seq, ",lastblock=%u", sbi->s_last_block);
 350         if (sbi->s_anchor != 0)
 351                 seq_printf(seq, ",anchor=%u", sbi->s_anchor);
 352         if (UDF_QUERY_FLAG(sb, UDF_FLAG_UTF8))
 353                 seq_puts(seq, ",utf8");
 354         if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP) && sbi->s_nls_map)
 355                 seq_printf(seq, ",iocharset=%s", sbi->s_nls_map->charset);
 356 
 357         return 0;
 358 }
 359 
 360 /*
 361  * udf_parse_options
 362  *
 363  * PURPOSE
 364  *      Parse mount options.
 365  *
 366  * DESCRIPTION
 367  *      The following mount options are supported:
 368  *
 369  *      gid=            Set the default group.
 370  *      umask=          Set the default umask.
 371  *      mode=           Set the default file permissions.
 372  *      dmode=          Set the default directory permissions.
 373  *      uid=            Set the default user.
 374  *      bs=             Set the block size.
 375  *      unhide          Show otherwise hidden files.
 376  *      undelete        Show deleted files in lists.
 377  *      adinicb         Embed data in the inode (default)
 378  *      noadinicb       Don't embed data in the inode
 379  *      shortad         Use short allocation descriptors
 380  *      longad          Use long allocation descriptors (default)
 381  *      nostrict        Unset strict conformance
 382  *      iocharset=      Set the NLS character set
 383  *
 384  *      The remaining options are for debugging and disaster recovery:
 385  *
 386  *      novrs           Skip the Volume Recognition Sequence check
 387  *
 388  *      The following expect an offset from 0.
 389  *
 390  *      session=        Set the CDROM session (default= last session)
 391  *      anchor=         Override standard anchor location. (default= 256)
 392  *      volume=         Override the VolumeDesc location. (unused)
 393  *      partition=      Override the PartitionDesc location. (unused)
 394  *      lastblock=      Set the last block of the filesystem.
 395  *
 396  *      The following expect an offset from the partition root.
 397  *
 398  *      fileset=        Override the fileset block location. (unused)
 399  *      rootdir=        Override the root directory location. (unused)
 400  *              WARNING: overriding the rootdir to a non-directory may
 401  *              yield highly unpredictable results.
 402  *
 403  * PRE-CONDITIONS
 404  *      options         Pointer to mount options string.
 405  *      uopt            Pointer to mount options variable.
 406  *
 407  * POST-CONDITIONS
 408  *      <return>        1       Mount options parsed okay.
 409  *      <return>        0       Error parsing mount options.
 410  *
 411  * HISTORY
 412  *      July 1, 1997 - Andrew E. Mileski
 413  *      Written, tested, and released.
 414  */
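
/*
 * Illustrative only: how the options documented above typically reach
 * udf_parse_options() - as the comma-separated "data" string of mount(2).
 * This is a hypothetical userspace sketch (device, mount point and option
 * values are made up), not code belonging to this driver:
 *
 *      #include <sys/mount.h>
 *
 *      int mount_udf_example(void)
 *      {
 *              return mount("/dev/sr0", "/mnt/udf", "udf", MS_RDONLY,
 *                           "iocharset=utf8,umask=0077,undelete");
 *      }
 *
 * The equivalent command line would be
 * "mount -t udf -o iocharset=utf8,umask=0077,undelete /dev/sr0 /mnt/udf".
 */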
 415 
 416 enum {
 417         Opt_novrs, Opt_nostrict, Opt_bs, Opt_unhide, Opt_undelete,
 418         Opt_noadinicb, Opt_adinicb, Opt_shortad, Opt_longad,
 419         Opt_gid, Opt_uid, Opt_umask, Opt_session, Opt_lastblock,
 420         Opt_anchor, Opt_volume, Opt_partition, Opt_fileset,
 421         Opt_rootdir, Opt_utf8, Opt_iocharset,
 422         Opt_err, Opt_uforget, Opt_uignore, Opt_gforget, Opt_gignore,
 423         Opt_fmode, Opt_dmode
 424 };
 425 
 426 static const match_table_t tokens = {
 427         {Opt_novrs,     "novrs"},
 428         {Opt_nostrict,  "nostrict"},
 429         {Opt_bs,        "bs=%u"},
 430         {Opt_unhide,    "unhide"},
 431         {Opt_undelete,  "undelete"},
 432         {Opt_noadinicb, "noadinicb"},
 433         {Opt_adinicb,   "adinicb"},
 434         {Opt_shortad,   "shortad"},
 435         {Opt_longad,    "longad"},
 436         {Opt_uforget,   "uid=forget"},
 437         {Opt_uignore,   "uid=ignore"},
 438         {Opt_gforget,   "gid=forget"},
 439         {Opt_gignore,   "gid=ignore"},
 440         {Opt_gid,       "gid=%u"},
 441         {Opt_uid,       "uid=%u"},
 442         {Opt_umask,     "umask=%o"},
 443         {Opt_session,   "session=%u"},
 444         {Opt_lastblock, "lastblock=%u"},
 445         {Opt_anchor,    "anchor=%u"},
 446         {Opt_volume,    "volume=%u"},
 447         {Opt_partition, "partition=%u"},
 448         {Opt_fileset,   "fileset=%u"},
 449         {Opt_rootdir,   "rootdir=%u"},
 450         {Opt_utf8,      "utf8"},
 451         {Opt_iocharset, "iocharset=%s"},
 452         {Opt_fmode,     "mode=%o"},
 453         {Opt_dmode,     "dmode=%o"},
 454         {Opt_err,       NULL}
 455 };
 456 
 457 static int udf_parse_options(char *options, struct udf_options *uopt,
 458                              bool remount)
 459 {
 460         char *p;
 461         int option;
 462 
 463         uopt->novrs = 0;
 464         uopt->session = 0xFFFFFFFF;
 465         uopt->lastblock = 0;
 466         uopt->anchor = 0;
 467 
 468         if (!options)
 469                 return 1;
 470 
 471         while ((p = strsep(&options, ",")) != NULL) {
 472                 substring_t args[MAX_OPT_ARGS];
 473                 int token;
 474                 unsigned n;
 475                 if (!*p)
 476                         continue;
 477 
 478                 token = match_token(p, tokens, args);
 479                 switch (token) {
 480                 case Opt_novrs:
 481                         uopt->novrs = 1;
 482                         break;
 483                 case Opt_bs:
 484                         if (match_int(&args[0], &option))
 485                                 return 0;
 486                         n = option;
 487                         if (n != 512 && n != 1024 && n != 2048 && n != 4096)
 488                                 return 0;
 489                         uopt->blocksize = n;
 490                         uopt->flags |= (1 << UDF_FLAG_BLOCKSIZE_SET);
 491                         break;
 492                 case Opt_unhide:
 493                         uopt->flags |= (1 << UDF_FLAG_UNHIDE);
 494                         break;
 495                 case Opt_undelete:
 496                         uopt->flags |= (1 << UDF_FLAG_UNDELETE);
 497                         break;
 498                 case Opt_noadinicb:
 499                         uopt->flags &= ~(1 << UDF_FLAG_USE_AD_IN_ICB);
 500                         break;
 501                 case Opt_adinicb:
 502                         uopt->flags |= (1 << UDF_FLAG_USE_AD_IN_ICB);
 503                         break;
 504                 case Opt_shortad:
 505                         uopt->flags |= (1 << UDF_FLAG_USE_SHORT_AD);
 506                         break;
 507                 case Opt_longad:
 508                         uopt->flags &= ~(1 << UDF_FLAG_USE_SHORT_AD);
 509                         break;
 510                 case Opt_gid:
 511                         if (match_int(args, &option))
 512                                 return 0;
 513                         uopt->gid = make_kgid(current_user_ns(), option);
 514                         if (!gid_valid(uopt->gid))
 515                                 return 0;
 516                         uopt->flags |= (1 << UDF_FLAG_GID_SET);
 517                         break;
 518                 case Opt_uid:
 519                         if (match_int(args, &option))
 520                                 return 0;
 521                         uopt->uid = make_kuid(current_user_ns(), option);
 522                         if (!uid_valid(uopt->uid))
 523                                 return 0;
 524                         uopt->flags |= (1 << UDF_FLAG_UID_SET);
 525                         break;
 526                 case Opt_umask:
 527                         if (match_octal(args, &option))
 528                                 return 0;
 529                         uopt->umask = option;
 530                         break;
 531                 case Opt_nostrict:
 532                         uopt->flags &= ~(1 << UDF_FLAG_STRICT);
 533                         break;
 534                 case Opt_session:
 535                         if (match_int(args, &option))
 536                                 return 0;
 537                         uopt->session = option;
 538                         if (!remount)
 539                                 uopt->flags |= (1 << UDF_FLAG_SESSION_SET);
 540                         break;
 541                 case Opt_lastblock:
 542                         if (match_int(args, &option))
 543                                 return 0;
 544                         uopt->lastblock = option;
 545                         if (!remount)
 546                                 uopt->flags |= (1 << UDF_FLAG_LASTBLOCK_SET);
 547                         break;
 548                 case Opt_anchor:
 549                         if (match_int(args, &option))
 550                                 return 0;
 551                         uopt->anchor = option;
 552                         break;
 553                 case Opt_volume:
 554                 case Opt_partition:
 555                 case Opt_fileset:
 556                 case Opt_rootdir:
 557                         /* Ignored (never implemented properly) */
 558                         break;
 559                 case Opt_utf8:
 560                         uopt->flags |= (1 << UDF_FLAG_UTF8);
 561                         break;
 562                 case Opt_iocharset:
 563                         if (!remount) {
 564                                 if (uopt->nls_map)
 565                                         unload_nls(uopt->nls_map);
 566                                 /*
 567                                  * load_nls() failure is handled later in
 568                                  * udf_fill_super() after all options are
 569                                  * parsed.
 570                                  */
 571                                 uopt->nls_map = load_nls(args[0].from);
 572                                 uopt->flags |= (1 << UDF_FLAG_NLS_MAP);
 573                         }
 574                         break;
 575                 case Opt_uforget:
 576                         uopt->flags |= (1 << UDF_FLAG_UID_FORGET);
 577                         break;
 578                 case Opt_uignore:
 579                 case Opt_gignore:
 580                         /* These options are superseded by uid=<number>/gid=<number> */
 581                         break;
 582                 case Opt_gforget:
 583                         uopt->flags |= (1 << UDF_FLAG_GID_FORGET);
 584                         break;
 585                 case Opt_fmode:
 586                         if (match_octal(args, &option))
 587                                 return 0;
 588                         uopt->fmode = option & 0777;
 589                         break;
 590                 case Opt_dmode:
 591                         if (match_octal(args, &option))
 592                                 return 0;
 593                         uopt->dmode = option & 0777;
 594                         break;
 595                 default:
 596                         pr_err("bad mount option \"%s\" or missing value\n", p);
 597                         return 0;
 598                 }
 599         }
 600         return 1;
 601 }
 602 
 603 static int udf_remount_fs(struct super_block *sb, int *flags, char *options)
 604 {
 605         struct udf_options uopt;
 606         struct udf_sb_info *sbi = UDF_SB(sb);
 607         int error = 0;
 608 
 609         if (!(*flags & SB_RDONLY) && UDF_QUERY_FLAG(sb, UDF_FLAG_RW_INCOMPAT))
 610                 return -EACCES;
 611 
 612         sync_filesystem(sb);
 613 
 614         uopt.flags = sbi->s_flags;
 615         uopt.uid   = sbi->s_uid;
 616         uopt.gid   = sbi->s_gid;
 617         uopt.umask = sbi->s_umask;
 618         uopt.fmode = sbi->s_fmode;
 619         uopt.dmode = sbi->s_dmode;
 620         uopt.nls_map = NULL;
 621 
 622         if (!udf_parse_options(options, &uopt, true))
 623                 return -EINVAL;
 624 
 625         write_lock(&sbi->s_cred_lock);
 626         sbi->s_flags = uopt.flags;
 627         sbi->s_uid   = uopt.uid;
 628         sbi->s_gid   = uopt.gid;
 629         sbi->s_umask = uopt.umask;
 630         sbi->s_fmode = uopt.fmode;
 631         sbi->s_dmode = uopt.dmode;
 632         write_unlock(&sbi->s_cred_lock);
 633 
 634         if ((bool)(*flags & SB_RDONLY) == sb_rdonly(sb))
 635                 goto out_unlock;
 636 
 637         if (*flags & SB_RDONLY)
 638                 udf_close_lvid(sb);
 639         else
 640                 udf_open_lvid(sb);
 641 
 642 out_unlock:
 643         return error;
 644 }
 645 
 646 /*
 647  * Check a Volume Structure Descriptor. Returns -1 if we are at the end of the
 648  * volume recognition area, 0 if the descriptor is valid but not interesting,
 649  * and 1 if we found one of the NSR descriptors we are looking for.
 650  */
 651 static int identify_vsd(const struct volStructDesc *vsd)
 652 {
 653         int ret = 0;
 654 
 655         if (!memcmp(vsd->stdIdent, VSD_STD_ID_CD001, VSD_STD_ID_LEN)) {
 656                 switch (vsd->structType) {
 657                 case 0:
 658                         udf_debug("ISO9660 Boot Record found\n");
 659                         break;
 660                 case 1:
 661                         udf_debug("ISO9660 Primary Volume Descriptor found\n");
 662                         break;
 663                 case 2:
 664                         udf_debug("ISO9660 Supplementary Volume Descriptor found\n");
 665                         break;
 666                 case 3:
 667                         udf_debug("ISO9660 Volume Partition Descriptor found\n");
 668                         break;
 669                 case 255:
 670                         udf_debug("ISO9660 Volume Descriptor Set Terminator found\n");
 671                         break;
 672                 default:
 673                         udf_debug("ISO9660 VRS (%u) found\n", vsd->structType);
 674                         break;
 675                 }
 676         } else if (!memcmp(vsd->stdIdent, VSD_STD_ID_BEA01, VSD_STD_ID_LEN))
 677                 ; /* ret = 0 */
 678         else if (!memcmp(vsd->stdIdent, VSD_STD_ID_NSR02, VSD_STD_ID_LEN))
 679                 ret = 1;
 680         else if (!memcmp(vsd->stdIdent, VSD_STD_ID_NSR03, VSD_STD_ID_LEN))
 681                 ret = 1;
 682         else if (!memcmp(vsd->stdIdent, VSD_STD_ID_BOOT2, VSD_STD_ID_LEN))
 683                 ; /* ret = 0 */
 684         else if (!memcmp(vsd->stdIdent, VSD_STD_ID_CDW02, VSD_STD_ID_LEN))
 685                 ; /* ret = 0 */
 686         else {
 687                 /* TEA01 or invalid id: end of volume recognition area */
 688                 ret = -1;
 689         }
 690 
 691         return ret;
 692 }
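
/*
 * Illustrative only: a typical ECMA-167 Volume Recognition Sequence, one
 * Volume Structure Descriptor per slot starting at byte offset 32768
 * (VSD_FIRST_SECTOR_OFFSET). The slot contents below are a hypothetical
 * example, not data used by the driver:
 *
 *      slot 0: "BEA01" - begin extended area         -> identify_vsd() == 0
 *      slot 1: "NSR03" - UDF (ECMA-167 3rd edition)  -> identify_vsd() == 1
 *      slot 2: "TEA01" - terminate extended area     -> identify_vsd() == -1
 *
 * udf_check_vsd() below walks these slots until it sees 1 (NSR found) or -1
 * (end of the recognition area).
 */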
 693 
 694 /*
 695  * Check Volume Structure Descriptors (ECMA 167 2/9.1)
 696  * We also check any "CD-ROM Volume Descriptor Set" (ECMA 167 2/8.3.1)
 697  * @return   1 if NSR02 or NSR03 found,
 698  *          -1 on a read error of the first sector, 0 otherwise
 699  */
 700 static int udf_check_vsd(struct super_block *sb)
 701 {
 702         struct volStructDesc *vsd = NULL;
 703         loff_t sector = VSD_FIRST_SECTOR_OFFSET;
 704         int sectorsize;
 705         struct buffer_head *bh = NULL;
 706         int nsr = 0;
 707         struct udf_sb_info *sbi;
 708 
 709         sbi = UDF_SB(sb);
 710         if (sb->s_blocksize < sizeof(struct volStructDesc))
 711                 sectorsize = sizeof(struct volStructDesc);
 712         else
 713                 sectorsize = sb->s_blocksize;
 714 
 715         sector += (((loff_t)sbi->s_session) << sb->s_blocksize_bits);
 716 
 717         udf_debug("Starting at sector %u (%lu byte sectors)\n",
 718                   (unsigned int)(sector >> sb->s_blocksize_bits),
 719                   sb->s_blocksize);
 720         /* Process the sequence (if applicable). The hard limit on the sector
 721          * offset is arbitrary, hopefully large enough so that all valid UDF
 722          * filesystems will be recognised. There is no mention of an upper
 723          * bound to the size of the volume recognition area in the standard.
 724  * The limit prevents the code from reading all the sectors of a
 725  * specially crafted image (like a Blu-ray disc full of CD001 sectors),
 726  * potentially causing minutes or even hours of uninterruptible I/O
 727  * activity. This actually happened with uninitialised SSD partitions
 728  * (all 0xFF) before the limit check and the full list of valid IDs
 729  * were added. */
 730         for (; !nsr && sector < VSD_MAX_SECTOR_OFFSET; sector += sectorsize) {
 731                 /* Read a block */
 732                 bh = udf_tread(sb, sector >> sb->s_blocksize_bits);
 733                 if (!bh)
 734                         break;
 735 
 736                 vsd = (struct volStructDesc *)(bh->b_data +
 737                                               (sector & (sb->s_blocksize - 1)));
 738                 nsr = identify_vsd(vsd);
 739                 /* Found NSR or end? */
 740                 if (nsr) {
 741                         brelse(bh);
 742                         break;
 743                 }
 744                 /*
 745                  * Special handling for improperly formatted VRS (e.g., Win10)
 746                  * where components are separated by 2048 bytes even though
 747                  * sectors are 4K
 748                  */
 749                 if (sb->s_blocksize == 4096) {
 750                         nsr = identify_vsd(vsd + 1);
 751                         /* Ignore unknown IDs... */
 752                         if (nsr < 0)
 753                                 nsr = 0;
 754                 }
 755                 brelse(bh);
 756         }
 757 
 758         if (nsr > 0)
 759                 return 1;
 760         else if (!bh && sector - (sbi->s_session << sb->s_blocksize_bits) ==
 761                         VSD_FIRST_SECTOR_OFFSET)
 762                 return -1;
 763         else
 764                 return 0;
 765 }
 766 
 767 static int udf_verify_domain_identifier(struct super_block *sb,
 768                                         struct regid *ident, char *dname)
 769 {
 770         struct domainEntityIDSuffix *suffix;
 771 
 772         if (memcmp(ident->ident, UDF_ID_COMPLIANT, strlen(UDF_ID_COMPLIANT))) {
 773                 udf_warn(sb, "Not OSTA UDF compliant %s descriptor.\n", dname);
 774                 goto force_ro;
 775         }
 776         if (ident->flags & (1 << ENTITYID_FLAGS_DIRTY)) {
 777                 udf_warn(sb, "Possibly not OSTA UDF compliant %s descriptor.\n",
 778                          dname);
 779                 goto force_ro;
 780         }
 781         suffix = (struct domainEntityIDSuffix *)ident->identSuffix;
 782         if (suffix->flags & (1 << ENTITYIDSUFFIX_FLAGS_HARDWRITEPROTECT) ||
 783             suffix->flags & (1 << ENTITYIDSUFFIX_FLAGS_SOFTWRITEPROTECT)) {
 784                 if (!sb_rdonly(sb)) {
 785                         udf_warn(sb, "Descriptor for %s marked write protected."
 786                                  " Forcing read only mount.\n", dname);
 787                 }
 788                 goto force_ro;
 789         }
 790         return 0;
 791 
 792 force_ro:
 793         if (!sb_rdonly(sb))
 794                 return -EACCES;
 795         UDF_SET_FLAG(sb, UDF_FLAG_RW_INCOMPAT);
 796         return 0;
 797 }
 798 
 799 static int udf_load_fileset(struct super_block *sb, struct fileSetDesc *fset,
 800                             struct kernel_lb_addr *root)
 801 {
 802         int ret;
 803 
 804         ret = udf_verify_domain_identifier(sb, &fset->domainIdent, "file set");
 805         if (ret < 0)
 806                 return ret;
 807 
 808         *root = lelb_to_cpu(fset->rootDirectoryICB.extLocation);
 809         UDF_SB(sb)->s_serial_number = le16_to_cpu(fset->descTag.tagSerialNum);
 810 
 811         udf_debug("Rootdir at block=%u, partition=%u\n",
 812                   root->logicalBlockNum, root->partitionReferenceNum);
 813         return 0;
 814 }
 815 
 816 static int udf_find_fileset(struct super_block *sb,
 817                             struct kernel_lb_addr *fileset,
 818                             struct kernel_lb_addr *root)
 819 {
 820         struct buffer_head *bh = NULL;
 821         uint16_t ident;
 822         int ret;
 823 
 824         if (fileset->logicalBlockNum == 0xFFFFFFFF &&
 825             fileset->partitionReferenceNum == 0xFFFF)
 826                 return -EINVAL;
 827 
 828         bh = udf_read_ptagged(sb, fileset, 0, &ident);
 829         if (!bh)
 830                 return -EIO;
 831         if (ident != TAG_IDENT_FSD) {
 832                 brelse(bh);
 833                 return -EINVAL;
 834         }
 835 
 836         udf_debug("Fileset at block=%u, partition=%u\n",
 837                   fileset->logicalBlockNum, fileset->partitionReferenceNum);
 838 
 839         UDF_SB(sb)->s_partition = fileset->partitionReferenceNum;
 840         ret = udf_load_fileset(sb, (struct fileSetDesc *)bh->b_data, root);
 841         brelse(bh);
 842         return ret;
 843 }
 844 
 845 /*
 846  * Load the Primary Volume Descriptor.
 847  *
 848  * Return <0 on error, 0 on success. -EAGAIN has the special meaning that the
 849  * next sequence should be tried.
 850  */
 851 static int udf_load_pvoldesc(struct super_block *sb, sector_t block)
 852 {
 853         struct primaryVolDesc *pvoldesc;
 854         uint8_t *outstr;
 855         struct buffer_head *bh;
 856         uint16_t ident;
 857         int ret = -ENOMEM;
 858         struct timestamp *ts;
 859 
 860         outstr = kmalloc(128, GFP_NOFS);
 861         if (!outstr)
 862                 return -ENOMEM;
 863 
 864         bh = udf_read_tagged(sb, block, block, &ident);
 865         if (!bh) {
 866                 ret = -EAGAIN;
 867                 goto out2;
 868         }
 869 
 870         if (ident != TAG_IDENT_PVD) {
 871                 ret = -EIO;
 872                 goto out_bh;
 873         }
 874 
 875         pvoldesc = (struct primaryVolDesc *)bh->b_data;
 876 
 877         udf_disk_stamp_to_time(&UDF_SB(sb)->s_record_time,
 878                               pvoldesc->recordingDateAndTime);
 879         ts = &pvoldesc->recordingDateAndTime;
 880         udf_debug("recording time %04u/%02u/%02u %02u:%02u (%x)\n",
 881                   le16_to_cpu(ts->year), ts->month, ts->day, ts->hour,
 882                   ts->minute, le16_to_cpu(ts->typeAndTimezone));
 883 
 884         ret = udf_dstrCS0toChar(sb, outstr, 31, pvoldesc->volIdent, 32);
 885         if (ret < 0) {
 886                 strcpy(UDF_SB(sb)->s_volume_ident, "InvalidName");
 887                 pr_warn("incorrect volume identification, setting to "
 888                         "'InvalidName'\n");
 889         } else {
 890                 strncpy(UDF_SB(sb)->s_volume_ident, outstr, ret);
 891         }
 892         udf_debug("volIdent[] = '%s'\n", UDF_SB(sb)->s_volume_ident);
 893 
 894         ret = udf_dstrCS0toChar(sb, outstr, 127, pvoldesc->volSetIdent, 128);
 895         if (ret < 0) {
 896                 ret = 0;
 897                 goto out_bh;
 898         }
 899         outstr[ret] = 0;
 900         udf_debug("volSetIdent[] = '%s'\n", outstr);
 901 
 902         ret = 0;
 903 out_bh:
 904         brelse(bh);
 905 out2:
 906         kfree(outstr);
 907         return ret;
 908 }
 909 
 910 struct inode *udf_find_metadata_inode_efe(struct super_block *sb,
 911                                         u32 meta_file_loc, u32 partition_ref)
 912 {
 913         struct kernel_lb_addr addr;
 914         struct inode *metadata_fe;
 915 
 916         addr.logicalBlockNum = meta_file_loc;
 917         addr.partitionReferenceNum = partition_ref;
 918 
 919         metadata_fe = udf_iget_special(sb, &addr);
 920 
 921         if (IS_ERR(metadata_fe)) {
 922                 udf_warn(sb, "metadata inode efe not found\n");
 923                 return metadata_fe;
 924         }
 925         if (UDF_I(metadata_fe)->i_alloc_type != ICBTAG_FLAG_AD_SHORT) {
 926                 udf_warn(sb, "metadata inode efe does not have short allocation descriptors!\n");
 927                 iput(metadata_fe);
 928                 return ERR_PTR(-EIO);
 929         }
 930 
 931         return metadata_fe;
 932 }
 933 
 934 static int udf_load_metadata_files(struct super_block *sb, int partition,
 935                                    int type1_index)
 936 {
 937         struct udf_sb_info *sbi = UDF_SB(sb);
 938         struct udf_part_map *map;
 939         struct udf_meta_data *mdata;
 940         struct kernel_lb_addr addr;
 941         struct inode *fe;
 942 
 943         map = &sbi->s_partmaps[partition];
 944         mdata = &map->s_type_specific.s_metadata;
 945         mdata->s_phys_partition_ref = type1_index;
 946 
 947         /* metadata address */
 948         udf_debug("Metadata file location: block = %u part = %u\n",
 949                   mdata->s_meta_file_loc, mdata->s_phys_partition_ref);
 950 
 951         fe = udf_find_metadata_inode_efe(sb, mdata->s_meta_file_loc,
 952                                          mdata->s_phys_partition_ref);
 953         if (IS_ERR(fe)) {
 954                 /* mirror file entry */
 955                 udf_debug("Mirror metadata file location: block = %u part = %u\n",
 956                           mdata->s_mirror_file_loc, mdata->s_phys_partition_ref);
 957 
 958                 fe = udf_find_metadata_inode_efe(sb, mdata->s_mirror_file_loc,
 959                                                  mdata->s_phys_partition_ref);
 960 
 961                 if (IS_ERR(fe)) {
 962                         udf_err(sb, "Both metadata and mirror metadata inode efe cannot be found\n");
 963                         return PTR_ERR(fe);
 964                 }
 965                 mdata->s_mirror_fe = fe;
 966         } else
 967                 mdata->s_metadata_fe = fe;
 968 
 969 
 970         /*
 971          * bitmap file entry
 972          * Note:
 973          * Load only if bitmap file location differs from 0xFFFFFFFF (DCN-5102)
 974          */
 975         if (mdata->s_bitmap_file_loc != 0xFFFFFFFF) {
 976                 addr.logicalBlockNum = mdata->s_bitmap_file_loc;
 977                 addr.partitionReferenceNum = mdata->s_phys_partition_ref;
 978 
 979                 udf_debug("Bitmap file location: block = %u part = %u\n",
 980                           addr.logicalBlockNum, addr.partitionReferenceNum);
 981 
 982                 fe = udf_iget_special(sb, &addr);
 983                 if (IS_ERR(fe)) {
 984                         if (sb_rdonly(sb))
 985                                 udf_warn(sb, "bitmap inode efe not found but it's ok since the disc is mounted read-only\n");
 986                         else {
 987                                 udf_err(sb, "bitmap inode efe not found and attempted read-write mount\n");
 988                                 return PTR_ERR(fe);
 989                         }
 990                 } else
 991                         mdata->s_bitmap_fe = fe;
 992         }
 993 
 994         udf_debug("udf_load_metadata_files Ok\n");
 995         return 0;
 996 }
 997 
 998 int udf_compute_nr_groups(struct super_block *sb, u32 partition)
 999 {
1000         struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
1001         return DIV_ROUND_UP(map->s_partition_len +
1002                             (sizeof(struct spaceBitmapDesc) << 3),
1003                             sb->s_blocksize * 8);
1004 }
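
/*
 * Worked example (illustrative only): each block of the space bitmap covers
 * sb->s_blocksize * 8 partition blocks, and the spaceBitmapDesc header itself
 * consumes sizeof(struct spaceBitmapDesc) << 3 bits of the first block.
 * Assuming a 2048-byte block size and a 24-byte spaceBitmapDesc, a partition
 * of 1,000,000 blocks needs
 *
 *      DIV_ROUND_UP(1000000 + 24 * 8, 2048 * 8)
 *              = DIV_ROUND_UP(1000192, 16384) = 62
 *
 * bitmap blocks, so udf_compute_nr_groups() would return 62.
 */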
1005 
1006 static struct udf_bitmap *udf_sb_alloc_bitmap(struct super_block *sb, u32 index)
1007 {
1008         struct udf_bitmap *bitmap;
1009         int nr_groups;
1010         int size;
1011 
1012         nr_groups = udf_compute_nr_groups(sb, index);
1013         size = sizeof(struct udf_bitmap) +
1014                 (sizeof(struct buffer_head *) * nr_groups);
1015 
1016         if (size <= PAGE_SIZE)
1017                 bitmap = kzalloc(size, GFP_KERNEL);
1018         else
1019                 bitmap = vzalloc(size); /* TODO: get rid of vzalloc */
1020 
1021         if (!bitmap)
1022                 return NULL;
1023 
1024         bitmap->s_nr_groups = nr_groups;
1025         return bitmap;
1026 }
1027 
1028 static int check_partition_desc(struct super_block *sb,
1029                                 struct partitionDesc *p,
1030                                 struct udf_part_map *map)
1031 {
1032         bool umap, utable, fmap, ftable;
1033         struct partitionHeaderDesc *phd;
1034 
1035         switch (le32_to_cpu(p->accessType)) {
1036         case PD_ACCESS_TYPE_READ_ONLY:
1037         case PD_ACCESS_TYPE_WRITE_ONCE:
1038         case PD_ACCESS_TYPE_NONE:
1039                 goto force_ro;
1040         }
1041 
1042         /* No Partition Header Descriptor? */
1043         if (strcmp(p->partitionContents.ident, PD_PARTITION_CONTENTS_NSR02) &&
1044             strcmp(p->partitionContents.ident, PD_PARTITION_CONTENTS_NSR03))
1045                 goto force_ro;
1046 
1047         phd = (struct partitionHeaderDesc *)p->partitionContentsUse;
1048         utable = phd->unallocSpaceTable.extLength;
1049         umap = phd->unallocSpaceBitmap.extLength;
1050         ftable = phd->freedSpaceTable.extLength;
1051         fmap = phd->freedSpaceBitmap.extLength;
1052 
1053         /* No allocation info? */
1054         if (!utable && !umap && !ftable && !fmap)
1055                 goto force_ro;
1056 
1057         /* We don't support blocks that require erasing before overwrite */
1058         if (ftable || fmap)
1059                 goto force_ro;
1060         /* UDF 2.60: 2.3.3 - no mixing of tables & bitmaps, no VAT. */
1061         if (utable && umap)
1062                 goto force_ro;
1063 
1064         if (map->s_partition_type == UDF_VIRTUAL_MAP15 ||
1065             map->s_partition_type == UDF_VIRTUAL_MAP20)
1066                 goto force_ro;
1067 
1068         return 0;
1069 force_ro:
1070         if (!sb_rdonly(sb))
1071                 return -EACCES;
1072         UDF_SET_FLAG(sb, UDF_FLAG_RW_INCOMPAT);
1073         return 0;
1074 }
1075 
1076 static int udf_fill_partdesc_info(struct super_block *sb,
1077                 struct partitionDesc *p, int p_index)
1078 {
1079         struct udf_part_map *map;
1080         struct udf_sb_info *sbi = UDF_SB(sb);
1081         struct partitionHeaderDesc *phd;
1082         int err;
1083 
1084         map = &sbi->s_partmaps[p_index];
1085 
1086         map->s_partition_len = le32_to_cpu(p->partitionLength); /* blocks */
1087         map->s_partition_root = le32_to_cpu(p->partitionStartingLocation);
1088 
1089         if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_READ_ONLY))
1090                 map->s_partition_flags |= UDF_PART_FLAG_READ_ONLY;
1091         if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_WRITE_ONCE))
1092                 map->s_partition_flags |= UDF_PART_FLAG_WRITE_ONCE;
1093         if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_REWRITABLE))
1094                 map->s_partition_flags |= UDF_PART_FLAG_REWRITABLE;
1095         if (p->accessType == cpu_to_le32(PD_ACCESS_TYPE_OVERWRITABLE))
1096                 map->s_partition_flags |= UDF_PART_FLAG_OVERWRITABLE;
1097 
1098         udf_debug("Partition (%d type %x) starts at physical %u, block length %u\n",
1099                   p_index, map->s_partition_type,
1100                   map->s_partition_root, map->s_partition_len);
1101 
1102         err = check_partition_desc(sb, p, map);
1103         if (err)
1104                 return err;
1105 
1106         /*
1107          * Skip loading allocation info if we cannot ever write to the fs.
1108          * This is a correctness thing as we may have decided to force ro mount
1109          * to avoid allocation info we don't support.
1110          */
1111         if (UDF_QUERY_FLAG(sb, UDF_FLAG_RW_INCOMPAT))
1112                 return 0;
1113 
1114         phd = (struct partitionHeaderDesc *)p->partitionContentsUse;
1115         if (phd->unallocSpaceTable.extLength) {
1116                 struct kernel_lb_addr loc = {
1117                         .logicalBlockNum = le32_to_cpu(
1118                                 phd->unallocSpaceTable.extPosition),
1119                         .partitionReferenceNum = p_index,
1120                 };
1121                 struct inode *inode;
1122 
1123                 inode = udf_iget_special(sb, &loc);
1124                 if (IS_ERR(inode)) {
1125                         udf_debug("cannot load unallocSpaceTable (part %d)\n",
1126                                   p_index);
1127                         return PTR_ERR(inode);
1128                 }
1129                 map->s_uspace.s_table = inode;
1130                 map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_TABLE;
1131                 udf_debug("unallocSpaceTable (part %d) @ %lu\n",
1132                           p_index, map->s_uspace.s_table->i_ino);
1133         }
1134 
1135         if (phd->unallocSpaceBitmap.extLength) {
1136                 struct udf_bitmap *bitmap = udf_sb_alloc_bitmap(sb, p_index);
1137                 if (!bitmap)
1138                         return -ENOMEM;
1139                 map->s_uspace.s_bitmap = bitmap;
1140                 bitmap->s_extPosition = le32_to_cpu(
1141                                 phd->unallocSpaceBitmap.extPosition);
1142                 map->s_partition_flags |= UDF_PART_FLAG_UNALLOC_BITMAP;
1143                 udf_debug("unallocSpaceBitmap (part %d) @ %u\n",
1144                           p_index, bitmap->s_extPosition);
1145         }
1146 
1147         return 0;
1148 }
1149 
1150 static void udf_find_vat_block(struct super_block *sb, int p_index,
1151                                int type1_index, sector_t start_block)
1152 {
1153         struct udf_sb_info *sbi = UDF_SB(sb);
1154         struct udf_part_map *map = &sbi->s_partmaps[p_index];
1155         sector_t vat_block;
1156         struct kernel_lb_addr ino;
1157         struct inode *inode;
1158 
1159         /*
1160          * The VAT file entry is in the last recorded block. Some broken disks
1161          * have it a few blocks earlier, so try a bit harder...
1162          */
1163         ino.partitionReferenceNum = type1_index;
1164         for (vat_block = start_block;
1165              vat_block >= map->s_partition_root &&
1166              vat_block >= start_block - 3; vat_block--) {
1167                 ino.logicalBlockNum = vat_block - map->s_partition_root;
1168                 inode = udf_iget_special(sb, &ino);
1169                 if (!IS_ERR(inode)) {
1170                         sbi->s_vat_inode = inode;
1171                         break;
1172                 }
1173         }
1174 }
1175 
1176 static int udf_load_vat(struct super_block *sb, int p_index, int type1_index)
1177 {
1178         struct udf_sb_info *sbi = UDF_SB(sb);
1179         struct udf_part_map *map = &sbi->s_partmaps[p_index];
1180         struct buffer_head *bh = NULL;
1181         struct udf_inode_info *vati;
1182         uint32_t pos;
1183         struct virtualAllocationTable20 *vat20;
1184         sector_t blocks = i_size_read(sb->s_bdev->bd_inode) >>
1185                           sb->s_blocksize_bits;
1186 
1187         udf_find_vat_block(sb, p_index, type1_index, sbi->s_last_block);
1188         if (!sbi->s_vat_inode &&
1189             sbi->s_last_block != blocks - 1) {
1190                 pr_notice("Failed to read VAT inode from the last recorded block (%lu), retrying with the last block of the device (%lu).\n",
1191                           (unsigned long)sbi->s_last_block,
1192                           (unsigned long)blocks - 1);
1193                 udf_find_vat_block(sb, p_index, type1_index, blocks - 1);
1194         }
1195         if (!sbi->s_vat_inode)
1196                 return -EIO;
1197 
1198         if (map->s_partition_type == UDF_VIRTUAL_MAP15) {
1199                 map->s_type_specific.s_virtual.s_start_offset = 0;
1200                 map->s_type_specific.s_virtual.s_num_entries =
1201                         (sbi->s_vat_inode->i_size - 36) >> 2;
1202         } else if (map->s_partition_type == UDF_VIRTUAL_MAP20) {
1203                 vati = UDF_I(sbi->s_vat_inode);
1204                 if (vati->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
1205                         pos = udf_block_map(sbi->s_vat_inode, 0);
1206                         bh = sb_bread(sb, pos);
1207                         if (!bh)
1208                                 return -EIO;
1209                         vat20 = (struct virtualAllocationTable20 *)bh->b_data;
1210                 } else {
1211                         vat20 = (struct virtualAllocationTable20 *)
1212                                                         vati->i_ext.i_data;
1213                 }
1214 
1215                 map->s_type_specific.s_virtual.s_start_offset =
1216                         le16_to_cpu(vat20->lengthHeader);
1217                 map->s_type_specific.s_virtual.s_num_entries =
1218                         (sbi->s_vat_inode->i_size -
1219                                 map->s_type_specific.s_virtual.
1220                                         s_start_offset) >> 2;
1221                 brelse(bh);
1222         }
1223         return 0;
1224 }
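
/*
 * Illustrative sketch only: how the fields filled in above are meant to be
 * used. A virtual block number 'v' is translated by reading the 32-bit
 * little-endian entry at byte offset s_start_offset + 4 * v inside the VAT
 * file; the result is the block number in the underlying type 1 partition.
 * The helper below is a hypothetical standalone illustration, not the
 * driver's actual translation path (that lives in the partition mapping
 * code).
 */
static inline u32 example_vat_lookup(const u8 *vat_data, u32 start_offset,
                                     u32 v)
{
        __le32 entry;

        /* each VAT entry is a 32-bit little-endian logical block number */
        memcpy(&entry, vat_data + start_offset + 4 * v, sizeof(entry));
        return le32_to_cpu(entry);
}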
1225 
1226 /*
1227  * Load partition descriptor block
1228  *
1229  * Returns <0 on error, 0 on success. -EAGAIN is special - try the next
1230  * descriptor sequence.
1231  */
1232 static int udf_load_partdesc(struct super_block *sb, sector_t block)
1233 {
1234         struct buffer_head *bh;
1235         struct partitionDesc *p;
1236         struct udf_part_map *map;
1237         struct udf_sb_info *sbi = UDF_SB(sb);
1238         int i, type1_idx;
1239         uint16_t partitionNumber;
1240         uint16_t ident;
1241         int ret;
1242 
1243         bh = udf_read_tagged(sb, block, block, &ident);
1244         if (!bh)
1245                 return -EAGAIN;
1246         if (ident != TAG_IDENT_PD) {
1247                 ret = 0;
1248                 goto out_bh;
1249         }
1250 
1251         p = (struct partitionDesc *)bh->b_data;
1252         partitionNumber = le16_to_cpu(p->partitionNumber);
1253 
1254         /* First scan for TYPE1 and SPARABLE partitions */
1255         for (i = 0; i < sbi->s_partitions; i++) {
1256                 map = &sbi->s_partmaps[i];
1257                 udf_debug("Searching map: (%u == %u)\n",
1258                           map->s_partition_num, partitionNumber);
1259                 if (map->s_partition_num == partitionNumber &&
1260                     (map->s_partition_type == UDF_TYPE1_MAP15 ||
1261                      map->s_partition_type == UDF_SPARABLE_MAP15))
1262                         break;
1263         }
1264 
1265         if (i >= sbi->s_partitions) {
1266                 udf_debug("Partition (%u) not found in partition map\n",
1267                           partitionNumber);
1268                 ret = 0;
1269                 goto out_bh;
1270         }
1271 
1272         ret = udf_fill_partdesc_info(sb, p, i);
1273         if (ret < 0)
1274                 goto out_bh;
1275 
1276         /*
1277          * Now rescan for VIRTUAL or METADATA partitions, since the SPARABLE and
1278          * PHYSICAL partitions are already set up.
1279          */
1280         type1_idx = i;
1281         map = NULL; /* suppress 'maybe used uninitialized' warning */
1282         for (i = 0; i < sbi->s_partitions; i++) {
1283                 map = &sbi->s_partmaps[i];
1284 
1285                 if (map->s_partition_num == partitionNumber &&
1286                     (map->s_partition_type == UDF_VIRTUAL_MAP15 ||
1287                      map->s_partition_type == UDF_VIRTUAL_MAP20 ||
1288                      map->s_partition_type == UDF_METADATA_MAP25))
1289                         break;
1290         }
1291 
1292         if (i >= sbi->s_partitions) {
1293                 ret = 0;
1294                 goto out_bh;
1295         }
1296 
1297         ret = udf_fill_partdesc_info(sb, p, i);
1298         if (ret < 0)
1299                 goto out_bh;
1300 
1301         if (map->s_partition_type == UDF_METADATA_MAP25) {
1302                 ret = udf_load_metadata_files(sb, i, type1_idx);
1303                 if (ret < 0) {
1304                         udf_err(sb, "error loading MetaData partition map %d\n",
1305                                 i);
1306                         goto out_bh;
1307                 }
1308         } else {
1309                 /*
1310                  * If we have a partition with virtual map, we don't handle
1311                  * writing to it (we overwrite blocks instead of relocating
1312                  * them).
1313                  */
1314                 if (!sb_rdonly(sb)) {
1315                         ret = -EACCES;
1316                         goto out_bh;
1317                 }
1318                 UDF_SET_FLAG(sb, UDF_FLAG_RW_INCOMPAT);
1319                 ret = udf_load_vat(sb, i, type1_idx);
1320                 if (ret < 0)
1321                         goto out_bh;
1322         }
1323         ret = 0;
1324 out_bh:
1325         /* In case loading failed, we handle cleanup in udf_fill_super */
1326         brelse(bh);
1327         return ret;
1328 }
1329 
1330 static int udf_load_sparable_map(struct super_block *sb,
1331                                  struct udf_part_map *map,
1332                                  struct sparablePartitionMap *spm)
1333 {
1334         uint32_t loc;
1335         uint16_t ident;
1336         struct sparingTable *st;
1337         struct udf_sparing_data *sdata = &map->s_type_specific.s_sparing;
1338         int i;
1339         struct buffer_head *bh;
1340 
1341         map->s_partition_type = UDF_SPARABLE_MAP15;
1342         sdata->s_packet_len = le16_to_cpu(spm->packetLength);
1343         if (!is_power_of_2(sdata->s_packet_len)) {
1344                 udf_err(sb, "error loading logical volume descriptor: "
1345                         "Invalid packet length %u\n",
1346                         (unsigned)sdata->s_packet_len);
1347                 return -EIO;
1348         }
1349         if (spm->numSparingTables > 4) {
1350                 udf_err(sb, "error loading logical volume descriptor: "
1351                         "Too many sparing tables (%d)\n",
1352                         (int)spm->numSparingTables);
1353                 return -EIO;
1354         }
1355 
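             /*
              * The map may reference up to four copies of the sparing table
              * (enforced above).  Read each candidate and keep only those
              * that pass the tag, ident and length sanity checks, so one
              * damaged copy does not prevent the mount.
              */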
1356         for (i = 0; i < spm->numSparingTables; i++) {
1357                 loc = le32_to_cpu(spm->locSparingTable[i]);
1358                 bh = udf_read_tagged(sb, loc, loc, &ident);
1359                 if (!bh)
1360                         continue;
1361 
1362                 st = (struct sparingTable *)bh->b_data;
1363                 if (ident != 0 ||
1364                     strncmp(st->sparingIdent.ident, UDF_ID_SPARING,
1365                             strlen(UDF_ID_SPARING)) ||
1366                     sizeof(*st) + le16_to_cpu(st->reallocationTableLen) >
1367                                                         sb->s_blocksize) {
1368                         brelse(bh);
1369                         continue;
1370                 }
1371 
1372                 sdata->s_spar_map[i] = bh;
1373         }
1374         map->s_partition_func = udf_get_pblock_spar15;
1375         return 0;
1376 }
1377 
1378 static int udf_load_logicalvol(struct super_block *sb, sector_t block,
1379                                struct kernel_lb_addr *fileset)
1380 {
1381         struct logicalVolDesc *lvd;
1382         int i, offset;
1383         uint8_t type;
1384         struct udf_sb_info *sbi = UDF_SB(sb);
1385         struct genericPartitionMap *gpm;
1386         uint16_t ident;
1387         struct buffer_head *bh;
1388         unsigned int table_len;
1389         int ret;
1390 
1391         bh = udf_read_tagged(sb, block, block, &ident);
1392         if (!bh)
1393                 return -EAGAIN;
1394         BUG_ON(ident != TAG_IDENT_LVD);
1395         lvd = (struct logicalVolDesc *)bh->b_data;
1396         table_len = le32_to_cpu(lvd->mapTableLength);
1397         if (table_len > sb->s_blocksize - sizeof(*lvd)) {
1398                 udf_err(sb, "error loading logical volume descriptor: "
1399                         "Partition table too long (%u > %lu)\n", table_len,
1400                         sb->s_blocksize - sizeof(*lvd));
1401                 ret = -EIO;
1402                 goto out_bh;
1403         }
1404 
1405         ret = udf_verify_domain_identifier(sb, &lvd->domainIdent,
1406                                            "logical volume");
1407         if (ret)
1408                 goto out_bh;
1409         ret = udf_sb_alloc_partition_maps(sb, le32_to_cpu(lvd->numPartitionMaps));
1410         if (ret)
1411                 goto out_bh;
1412 
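             /*
              * Walk the packed array of partition maps.  Type 1 maps describe
              * plain physical partitions; type 2 maps are told apart by their
              * identifier string (VIRTUAL, SPARABLE or METADATA) and get a
              * matching s_partition_func used later for block translation.
              */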
1413         for (i = 0, offset = 0;
1414              i < sbi->s_partitions && offset < table_len;
1415              i++, offset += gpm->partitionMapLength) {
1416                 struct udf_part_map *map = &sbi->s_partmaps[i];
1417                 gpm = (struct genericPartitionMap *)
1418                                 &(lvd->partitionMaps[offset]);
1419                 type = gpm->partitionMapType;
1420                 if (type == 1) {
1421                         struct genericPartitionMap1 *gpm1 =
1422                                 (struct genericPartitionMap1 *)gpm;
1423                         map->s_partition_type = UDF_TYPE1_MAP15;
1424                         map->s_volumeseqnum = le16_to_cpu(gpm1->volSeqNum);
1425                         map->s_partition_num = le16_to_cpu(gpm1->partitionNum);
1426                         map->s_partition_func = NULL;
1427                 } else if (type == 2) {
1428                         struct udfPartitionMap2 *upm2 =
1429                                                 (struct udfPartitionMap2 *)gpm;
1430                         if (!strncmp(upm2->partIdent.ident, UDF_ID_VIRTUAL,
1431                                                 strlen(UDF_ID_VIRTUAL))) {
1432                                 u16 suf =
1433                                         le16_to_cpu(((__le16 *)upm2->partIdent.
1434                                                         identSuffix)[0]);
1435                                 if (suf < 0x0200) {
1436                                         map->s_partition_type =
1437                                                         UDF_VIRTUAL_MAP15;
1438                                         map->s_partition_func =
1439                                                         udf_get_pblock_virt15;
1440                                 } else {
1441                                         map->s_partition_type =
1442                                                         UDF_VIRTUAL_MAP20;
1443                                         map->s_partition_func =
1444                                                         udf_get_pblock_virt20;
1445                                 }
1446                         } else if (!strncmp(upm2->partIdent.ident,
1447                                                 UDF_ID_SPARABLE,
1448                                                 strlen(UDF_ID_SPARABLE))) {
1449                                 ret = udf_load_sparable_map(sb, map,
1450                                         (struct sparablePartitionMap *)gpm);
1451                                 if (ret < 0)
1452                                         goto out_bh;
1453                         } else if (!strncmp(upm2->partIdent.ident,
1454                                                 UDF_ID_METADATA,
1455                                                 strlen(UDF_ID_METADATA))) {
1456                                 struct udf_meta_data *mdata =
1457                                         &map->s_type_specific.s_metadata;
1458                                 struct metadataPartitionMap *mdm =
1459                                                 (struct metadataPartitionMap *)
1460                                                 &(lvd->partitionMaps[offset]);
1461                                 udf_debug("Parsing Logical vol part %d type %u  id=%s\n",
1462                                           i, type, UDF_ID_METADATA);
1463 
1464                                 map->s_partition_type = UDF_METADATA_MAP25;
1465                                 map->s_partition_func = udf_get_pblock_meta25;
1466 
1467                                 mdata->s_meta_file_loc   =
1468                                         le32_to_cpu(mdm->metadataFileLoc);
1469                                 mdata->s_mirror_file_loc =
1470                                         le32_to_cpu(mdm->metadataMirrorFileLoc);
1471                                 mdata->s_bitmap_file_loc =
1472                                         le32_to_cpu(mdm->metadataBitmapFileLoc);
1473                                 mdata->s_alloc_unit_size =
1474                                         le32_to_cpu(mdm->allocUnitSize);
1475                                 mdata->s_align_unit_size =
1476                                         le16_to_cpu(mdm->alignUnitSize);
1477                                 if (mdm->flags & 0x01)
1478                                         mdata->s_flags |= MF_DUPLICATE_MD;
1479 
1480                                 udf_debug("Metadata Ident suffix=0x%x\n",
1481                                           le16_to_cpu(*(__le16 *)
1482                                                       mdm->partIdent.identSuffix));
1483                                 udf_debug("Metadata part num=%u\n",
1484                                           le16_to_cpu(mdm->partitionNum));
1485                                 udf_debug("Metadata part alloc unit size=%u\n",
1486                                           le32_to_cpu(mdm->allocUnitSize));
1487                                 udf_debug("Metadata file loc=%u\n",
1488                                           le32_to_cpu(mdm->metadataFileLoc));
1489                                 udf_debug("Mirror file loc=%u\n",
1490                                           le32_to_cpu(mdm->metadataMirrorFileLoc));
1491                                 udf_debug("Bitmap file loc=%u\n",
1492                                           le32_to_cpu(mdm->metadataBitmapFileLoc));
1493                                 udf_debug("Flags: %d %u\n",
1494                                           mdata->s_flags, mdm->flags);
1495                         } else {
1496                                 udf_debug("Unknown ident: %s\n",
1497                                           upm2->partIdent.ident);
1498                                 continue;
1499                         }
1500                         map->s_volumeseqnum = le16_to_cpu(upm2->volSeqNum);
1501                         map->s_partition_num = le16_to_cpu(upm2->partitionNum);
1502                 }
1503                 udf_debug("Partition (%d:%u) type %u on volume %u\n",
1504                           i, map->s_partition_num, type, map->s_volumeseqnum);
1505         }
1506 
1507         if (fileset) {
1508                 struct long_ad *la = (struct long_ad *)&(lvd->logicalVolContentsUse[0]);
1509 
1510                 *fileset = lelb_to_cpu(la->extLocation);
1511                 udf_debug("FileSet found in LogicalVolDesc at block=%u, partition=%u\n",
1512                           fileset->logicalBlockNum,
1513                           fileset->partitionReferenceNum);
1514         }
1515         if (lvd->integritySeqExt.extLength)
1516                 udf_load_logicalvolint(sb, leea_to_cpu(lvd->integritySeqExt));
1517         ret = 0;
1518 
1519         if (!sbi->s_lvid_bh) {
1520                 /* We can't generate unique IDs without a valid LVID */
1521                 if (sb_rdonly(sb)) {
1522                         UDF_SET_FLAG(sb, UDF_FLAG_RW_INCOMPAT);
1523                 } else {
1524                         udf_warn(sb, "Damaged or missing LVID, forcing "
1525                                      "readonly mount\n");
1526                         ret = -EACCES;
1527                 }
1528         }
1529 out_bh:
1530         brelse(bh);
1531         return ret;
1532 }
1533 
1534 /*
1535  * Find the prevailing Logical Volume Integrity Descriptor.
1536  */
1537 static void udf_load_logicalvolint(struct super_block *sb, struct kernel_extent_ad loc)
1538 {
1539         struct buffer_head *bh, *final_bh;
1540         uint16_t ident;
1541         struct udf_sb_info *sbi = UDF_SB(sb);
1542         struct logicalVolIntegrityDesc *lvid;
1543         int indirections = 0;
1544 
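             /*
              * Walk the integrity sequence: within one extent the last valid
              * LVID read prevails, and nextIntegrityExt may chain to another
              * extent.  The chain length is bounded by UDF_MAX_LVID_NESTING
              * so that corrupted media cannot make us loop forever.
              */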
1545         while (++indirections <= UDF_MAX_LVID_NESTING) {
1546                 final_bh = NULL;
1547                 while (loc.extLength > 0 &&
1548                         (bh = udf_read_tagged(sb, loc.extLocation,
1549                                         loc.extLocation, &ident))) {
1550                         if (ident != TAG_IDENT_LVID) {
1551                                 brelse(bh);
1552                                 break;
1553                         }
1554 
1555                         brelse(final_bh);
1556                         final_bh = bh;
1557 
1558                         loc.extLength -= sb->s_blocksize;
1559                         loc.extLocation++;
1560                 }
1561 
1562                 if (!final_bh)
1563                         return;
1564 
1565                 brelse(sbi->s_lvid_bh);
1566                 sbi->s_lvid_bh = final_bh;
1567 
1568                 lvid = (struct logicalVolIntegrityDesc *)final_bh->b_data;
1569                 if (lvid->nextIntegrityExt.extLength == 0)
1570                         return;
1571 
1572                 loc = leea_to_cpu(lvid->nextIntegrityExt);
1573         }
1574 
1575         udf_warn(sb, "Too many LVID indirections (max %u), ignoring.\n",
1576                 UDF_MAX_LVID_NESTING);
1577         brelse(sbi->s_lvid_bh);
1578         sbi->s_lvid_bh = NULL;
1579 }
1580 
1581 /*
1582  * Step for reallocation of the table of partition descriptor sequence
1583  * numbers. Must be a power of 2.
1584  */
1585 #define PART_DESC_ALLOC_STEP 32
1586 
1587 struct part_desc_seq_scan_data {
1588         struct udf_vds_record rec;
1589         u32 partnum;
1590 };
1591 
1592 struct desc_seq_scan_data {
1593         struct udf_vds_record vds[VDS_POS_LENGTH];
1594         unsigned int size_part_descs;
1595         unsigned int num_part_descs;
1596         struct part_desc_seq_scan_data *part_descs_loc;
1597 };
1598 
1599 static struct udf_vds_record *handle_partition_descriptor(
1600                                 struct buffer_head *bh,
1601                                 struct desc_seq_scan_data *data)
1602 {
1603         struct partitionDesc *desc = (struct partitionDesc *)bh->b_data;
1604         int partnum;
1605         int i;
1606 
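             /*
              * Partition Descriptors are keyed by partition number: if this
              * number was seen before, return the existing record so that the
              * caller can let the copy with the highest sequence number
              * prevail; otherwise grow the array (rounded up to a multiple of
              * PART_DESC_ALLOC_STEP) and hand out a fresh slot.
              */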
1607         partnum = le16_to_cpu(desc->partitionNumber);
1608         for (i = 0; i < data->num_part_descs; i++)
1609                 if (partnum == data->part_descs_loc[i].partnum)
1610                         return &(data->part_descs_loc[i].rec);
1611         if (data->num_part_descs >= data->size_part_descs) {
1612                 struct part_desc_seq_scan_data *new_loc;
1613                 unsigned int new_size = ALIGN(partnum, PART_DESC_ALLOC_STEP);
1614 
1615                 new_loc = kcalloc(new_size, sizeof(*new_loc), GFP_KERNEL);
1616                 if (!new_loc)
1617                         return ERR_PTR(-ENOMEM);
1618                 memcpy(new_loc, data->part_descs_loc,
1619                        data->size_part_descs * sizeof(*new_loc));
1620                 kfree(data->part_descs_loc);
1621                 data->part_descs_loc = new_loc;
1622                 data->size_part_descs = new_size;
1623         }
1624         return &(data->part_descs_loc[data->num_part_descs++].rec);
1625 }
1626 
1627 
1628 static struct udf_vds_record *get_volume_descriptor_record(uint16_t ident,
1629                 struct buffer_head *bh, struct desc_seq_scan_data *data)
1630 {
1631         switch (ident) {
1632         case TAG_IDENT_PVD: /* ISO 13346 3/10.1 */
1633                 return &(data->vds[VDS_POS_PRIMARY_VOL_DESC]);
1634         case TAG_IDENT_IUVD: /* ISO 13346 3/10.4 */
1635                 return &(data->vds[VDS_POS_IMP_USE_VOL_DESC]);
1636         case TAG_IDENT_LVD: /* ISO 13346 3/10.6 */
1637                 return &(data->vds[VDS_POS_LOGICAL_VOL_DESC]);
1638         case TAG_IDENT_USD: /* ISO 13346 3/10.8 */
1639                 return &(data->vds[VDS_POS_UNALLOC_SPACE_DESC]);
1640         case TAG_IDENT_PD: /* ISO 13346 3/10.5 */
1641                 return handle_partition_descriptor(bh, data);
1642         }
1643         return NULL;
1644 }
1645 
1646 /*
1647  * Process a main/reserve volume descriptor sequence.
1648  *   @block             First block of the first extent of the sequence.
1649  *   @lastblock         Last block of the first extent of the sequence.
1650  *   @fileset           Where we store the extent containing the root fileset.
1651  *
1652  * Returns <0 on error, 0 on success. -EAGAIN is special - try next descriptor
1653  * sequence
1654  */
1655 static noinline int udf_process_sequence(
1656                 struct super_block *sb,
1657                 sector_t block, sector_t lastblock,
1658                 struct kernel_lb_addr *fileset)
1659 {
1660         struct buffer_head *bh = NULL;
1661         struct udf_vds_record *curr;
1662         struct generic_desc *gd;
1663         struct volDescPtr *vdp;
1664         bool done = false;
1665         uint32_t vdsn;
1666         uint16_t ident;
1667         int ret;
1668         unsigned int indirections = 0;
1669         struct desc_seq_scan_data data;
1670         unsigned int i;
1671 
1672         memset(data.vds, 0, sizeof(struct udf_vds_record) * VDS_POS_LENGTH);
1673         data.size_part_descs = PART_DESC_ALLOC_STEP;
1674         data.num_part_descs = 0;
1675         data.part_descs_loc = kcalloc(data.size_part_descs,
1676                                       sizeof(*data.part_descs_loc),
1677                                       GFP_KERNEL);
1678         if (!data.part_descs_loc)
1679                 return -ENOMEM;
1680 
1681         /*
1682          * Read the main descriptor sequence and find which descriptors
1683          * are in it.
1684          */
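             /*
              * Each descriptor carries a Volume Descriptor Sequence Number;
              * for a given descriptor type the copy with the highest number
              * prevails, so only the block of the newest copy is remembered.
              * A Volume Descriptor Pointer redirects the scan to a
              * continuation extent, and a Terminating Descriptor ends it.
              */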
1685         for (; (!done && block <= lastblock); block++) {
1686                 bh = udf_read_tagged(sb, block, block, &ident);
1687                 if (!bh)
1688                         break;
1689 
1690                 /* Process each descriptor (ISO 13346 3/8.3-8.4) */
1691                 gd = (struct generic_desc *)bh->b_data;
1692                 vdsn = le32_to_cpu(gd->volDescSeqNum);
1693                 switch (ident) {
1694                 case TAG_IDENT_VDP: /* ISO 13346 3/10.3 */
1695                         if (++indirections > UDF_MAX_TD_NESTING) {
1696                                 udf_err(sb, "too many Volume Descriptor "
1697                                         "Pointers (max %u supported)\n",
1698                                         UDF_MAX_TD_NESTING);
1699                                 brelse(bh);
1700                                 ret = -EIO;
                                     goto out;
1701                         }
1702 
1703                         vdp = (struct volDescPtr *)bh->b_data;
1704                         block = le32_to_cpu(vdp->nextVolDescSeqExt.extLocation);
1705                         lastblock = le32_to_cpu(
1706                                 vdp->nextVolDescSeqExt.extLength) >>
1707                                 sb->s_blocksize_bits;
1708                         lastblock += block - 1;
1709                         /* For loop is going to increment 'block' again */
1710                         block--;
1711                         break;
1712                 case TAG_IDENT_PVD: /* ISO 13346 3/10.1 */
1713                 case TAG_IDENT_IUVD: /* ISO 13346 3/10.4 */
1714                 case TAG_IDENT_LVD: /* ISO 13346 3/10.6 */
1715                 case TAG_IDENT_USD: /* ISO 13346 3/10.8 */
1716                 case TAG_IDENT_PD: /* ISO 13346 3/10.5 */
1717                         curr = get_volume_descriptor_record(ident, bh, &data);
1718                         if (IS_ERR(curr)) {
1719                                 brelse(bh);
1720                                 ret = PTR_ERR(curr);
                                     goto out;
1721                         }
1722                         /* Descriptor we don't care about? */
1723                         if (!curr)
1724                                 break;
1725                         if (vdsn >= curr->volDescSeqNum) {
1726                                 curr->volDescSeqNum = vdsn;
1727                                 curr->block = block;
1728                         }
1729                         break;
1730                 case TAG_IDENT_TD: /* ISO 13346 3/10.9 */
1731                         done = true;
1732                         break;
1733                 }
1734                 brelse(bh);
1735         }
1736         /*
1737          * Now read interesting descriptors again and process them
1738          * in a suitable order
1739          */
1740         if (!data.vds[VDS_POS_PRIMARY_VOL_DESC].block) {
1741                 udf_err(sb, "Primary Volume Descriptor not found!\n");
1742                 ret = -EAGAIN;
                     goto out;
1743         }
1744         ret = udf_load_pvoldesc(sb, data.vds[VDS_POS_PRIMARY_VOL_DESC].block);
1745         if (ret < 0)
1746                 goto out;
1747 
1748         if (data.vds[VDS_POS_LOGICAL_VOL_DESC].block) {
1749                 ret = udf_load_logicalvol(sb,
1750                                 data.vds[VDS_POS_LOGICAL_VOL_DESC].block,
1751                                 fileset);
1752                 if (ret < 0)
1753                         goto out;
1754         }
1755 
1756         /* Now handle prevailing Partition Descriptors */
1757         for (i = 0; i < data.num_part_descs; i++) {
1758                 ret = udf_load_partdesc(sb, data.part_descs_loc[i].rec.block);
1759                 if (ret < 0)
1760                         goto out;
1761         }
1762 
1763         ret = 0;
     out:
             kfree(data.part_descs_loc);
             return ret;
1764 }
1765 
1766 /*
1767  * Load Volume Descriptor Sequence described by anchor in bh
1768  *
1769  * Returns <0 on error, 0 on success
1770  */
1771 static int udf_load_sequence(struct super_block *sb, struct buffer_head *bh,
1772                              struct kernel_lb_addr *fileset)
1773 {
1774         struct anchorVolDescPtr *anchor;
1775         sector_t main_s, main_e, reserve_s, reserve_e;
1776         int ret;
1777 
1778         anchor = (struct anchorVolDescPtr *)bh->b_data;
1779 
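             /*
              * The anchor references a main and a reserve copy of the Volume
              * Descriptor Sequence.  extLength is in bytes, so shifting by
              * the block size bits gives the length in blocks (e.g. a
              * 32768-byte extent with 2048-byte blocks spans 16 blocks).
              */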
1780         /* Locate the main sequence */
1781         main_s = le32_to_cpu(anchor->mainVolDescSeqExt.extLocation);
1782         main_e = le32_to_cpu(anchor->mainVolDescSeqExt.extLength);
1783         main_e = main_e >> sb->s_blocksize_bits;
1784         main_e += main_s - 1;
1785 
1786         /* Locate the reserve sequence */
1787         reserve_s = le32_to_cpu(anchor->reserveVolDescSeqExt.extLocation);
1788         reserve_e = le32_to_cpu(anchor->reserveVolDescSeqExt.extLength);
1789         reserve_e = reserve_e >> sb->s_blocksize_bits;
1790         reserve_e += reserve_s - 1;
1791 
1792         /* Process the main & reserve sequences */
1793         /* responsible for finding the PartitionDesc(s) */
1794         ret = udf_process_sequence(sb, main_s, main_e, fileset);
1795         if (ret != -EAGAIN)
1796                 return ret;
1797         udf_sb_free_partitions(sb);
1798         ret = udf_process_sequence(sb, reserve_s, reserve_e, fileset);
1799         if (ret < 0) {
1800                 udf_sb_free_partitions(sb);
1801                 /* No sequence was OK, return -EIO */
1802                 if (ret == -EAGAIN)
1803                         ret = -EIO;
1804         }
1805         return ret;
1806 }
1807 
1808 /*
1809  * Check whether there is an anchor block in the given block and
1810  * load Volume Descriptor Sequence if so.
1811  *
1812  * Returns <0 on error, 0 on success, -EAGAIN is special - try next anchor
1813  * block
1814  */
1815 static int udf_check_anchor_block(struct super_block *sb, sector_t block,
1816                                   struct kernel_lb_addr *fileset)
1817 {
1818         struct buffer_head *bh;
1819         uint16_t ident;
1820         int ret;
1821 
1822         if (UDF_QUERY_FLAG(sb, UDF_FLAG_VARCONV) &&
1823             udf_fixed_to_variable(block) >=
1824             i_size_read(sb->s_bdev->bd_inode) >> sb->s_blocksize_bits)
1825                 return -EAGAIN;
1826 
1827         bh = udf_read_tagged(sb, block, block, &ident);
1828         if (!bh)
1829                 return -EAGAIN;
1830         if (ident != TAG_IDENT_AVDP) {
1831                 brelse(bh);
1832                 return -EAGAIN;
1833         }
1834         ret = udf_load_sequence(sb, bh, fileset);
1835         brelse(bh);
1836         return ret;
1837 }
1838 
1839 /*
1840  * Search for an anchor volume descriptor pointer.
1841  *
1842  * Returns < 0 on error, 0 on success. -EAGAIN is special - try next set
1843  * of anchors.
1844  */
1845 static int udf_scan_anchors(struct super_block *sb, sector_t *lastblock,
1846                             struct kernel_lb_addr *fileset)
1847 {
1848         sector_t last[6];
1849         int i;
1850         struct udf_sb_info *sbi = UDF_SB(sb);
1851         int last_count = 0;
1852         int ret;
1853 
1854         /* First try user provided anchor */
1855         if (sbi->s_anchor) {
1856                 ret = udf_check_anchor_block(sb, sbi->s_anchor, fileset);
1857                 if (ret != -EAGAIN)
1858                         return ret;
1859         }
1860         /*
1861          * according to spec, anchor is in either:
1862          *     block 256
1863          *     lastblock-256
1864          *     lastblock
1865          *  however, if the disc isn't closed, it could be 512.
1866          */
1867         ret = udf_check_anchor_block(sb, sbi->s_session + 256, fileset);
1868         if (ret != -EAGAIN)
1869                 return ret;
1870         /*
1871          * The trouble is which block is the last one. Drives often misreport
1872          * this so we try various possibilities.
1873          */
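             /*
              * The candidates below cover the reported last block, off-by-one
              * and off-by-two variants, and the same values shifted back by
              * 150 and 152 blocks (presumably for CD media, where the drive
              * may include the 2-second lead-in in its reported size).
              */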
1874         last[last_count++] = *lastblock;
1875         if (*lastblock >= 1)
1876                 last[last_count++] = *lastblock - 1;
1877         last[last_count++] = *lastblock + 1;
1878         if (*lastblock >= 2)
1879                 last[last_count++] = *lastblock - 2;
1880         if (*lastblock >= 150)
1881                 last[last_count++] = *lastblock - 150;
1882         if (*lastblock >= 152)
1883                 last[last_count++] = *lastblock - 152;
1884 
1885         for (i = 0; i < last_count; i++) {
1886                 if (last[i] >= i_size_read(sb->s_bdev->bd_inode) >>
1887                                 sb->s_blocksize_bits)
1888                         continue;
1889                 ret = udf_check_anchor_block(sb, last[i], fileset);
1890                 if (ret != -EAGAIN) {
1891                         if (!ret)
1892                                 *lastblock = last[i];
1893                         return ret;
1894                 }
1895                 if (last[i] < 256)
1896                         continue;
1897                 ret = udf_check_anchor_block(sb, last[i] - 256, fileset);
1898                 if (ret != -EAGAIN) {
1899                         if (!ret)
1900                                 *lastblock = last[i];
1901                         return ret;
1902                 }
1903         }
1904 
1905         /* Finally try block 512 in case media is open */
1906         return udf_check_anchor_block(sb, sbi->s_session + 512, fileset);
1907 }
1908 
1909 /*
1910  * Find an anchor volume descriptor and load Volume Descriptor Sequence from
1911  * area specified by it. The function expects sbi->s_lastblock to be the last
1912  * block on the media.
1913  *
1914  * Return <0 on error, 0 if anchor found. -EAGAIN is special meaning anchor
1915  * was not found.
1916  */
1917 static int udf_find_anchor(struct super_block *sb,
1918                            struct kernel_lb_addr *fileset)
1919 {
1920         struct udf_sb_info *sbi = UDF_SB(sb);
1921         sector_t lastblock = sbi->s_last_block;
1922         int ret;
1923 
1924         ret = udf_scan_anchors(sb, &lastblock, fileset);
1925         if (ret != -EAGAIN)
1926                 goto out;
1927 
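             /*
              * Media recorded in variable-length packet mode interleave link
              * blocks with data, so block numbers stored in descriptors do
              * not match the addresses the drive reports.  With
              * UDF_FLAG_VARCONV set, the low-level read helpers translate
              * addresses through udf_fixed_to_variable() before reading.
              */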
1928         /* No anchor found? Try VARCONV conversion of block numbers */
1929         UDF_SET_FLAG(sb, UDF_FLAG_VARCONV);
1930         lastblock = udf_variable_to_fixed(sbi->s_last_block);
1931         /* First, try without converting the number of the last block */
1932         ret = udf_scan_anchors(sb, &lastblock, fileset);
1933         if (ret != -EAGAIN)
1934                 goto out;
1935 
1936         lastblock = sbi->s_last_block;
1937         /* Second, try with the converted number of the last block */
1938         ret = udf_scan_anchors(sb, &lastblock, fileset);
1939         if (ret < 0) {
1940                 /* VARCONV didn't help. Clear it. */
1941                 UDF_CLEAR_FLAG(sb, UDF_FLAG_VARCONV);
1942         }
1943 out:
1944         if (ret == 0)
1945                 sbi->s_last_block = lastblock;
1946         return ret;
1947 }
1948 
1949 /*
1950  * Check Volume Structure Descriptor, find Anchor block and load Volume
1951  * Descriptor Sequence.
1952  *
1953  * Returns < 0 on error, 0 on success. -EAGAIN is special meaning anchor
1954  * block was not found.
1955  */
1956 static int udf_load_vrs(struct super_block *sb, struct udf_options *uopt,
1957                         int silent, struct kernel_lb_addr *fileset)
1958 {
1959         struct udf_sb_info *sbi = UDF_SB(sb);
1960         int nsr = 0;
1961         int ret;
1962 
1963         if (!sb_set_blocksize(sb, uopt->blocksize)) {
1964                 if (!silent)
1965                         udf_warn(sb, "Bad block size\n");
1966                 return -EINVAL;
1967         }
1968         sbi->s_last_block = uopt->lastblock;
1969         if (!uopt->novrs) {
1970                 /* Check that it is NSR02 compliant */
1971                 nsr = udf_check_vsd(sb);
1972                 if (!nsr) {
1973                         if (!silent)
1974                                 udf_warn(sb, "No VRS found\n");
1975                         return -EINVAL;
1976                 }
1977                 if (nsr == -1)
1978                         udf_debug("Failed to read sector at offset %d. "
1979                                   "Assuming open disc. Skipping validity "
1980                                   "check\n", VSD_FIRST_SECTOR_OFFSET);
1981                 if (!sbi->s_last_block)
1982                         sbi->s_last_block = udf_get_last_block(sb);
1983         } else {
1984                 udf_debug("Validity check skipped because of novrs option\n");
1985         }
1986 
1987         /* Look for anchor block and load Volume Descriptor Sequence */
1988         sbi->s_anchor = uopt->anchor;
1989         ret = udf_find_anchor(sb, fileset);
1990         if (ret < 0) {
1991                 if (!silent && ret == -EAGAIN)
1992                         udf_warn(sb, "No anchor found\n");
1993                 return ret;
1994         }
1995         return 0;
1996 }
1997 
1998 static void udf_finalize_lvid(struct logicalVolIntegrityDesc *lvid)
1999 {
2000         struct timespec64 ts;
2001 
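             /*
              * Refresh the timestamp and re-seal the descriptor: descCRC
              * covers the descriptor body after the tag, and tagChecksum
              * covers the tag itself, so both must be recomputed after any
              * modification.
              */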
2002         ktime_get_real_ts64(&ts);
2003         udf_time_to_disk_stamp(&lvid->recordingDateAndTime, ts);
2004         lvid->descTag.descCRC = cpu_to_le16(
2005                 crc_itu_t(0, (char *)lvid + sizeof(struct tag),
2006                         le16_to_cpu(lvid->descTag.descCRCLength)));
2007         lvid->descTag.tagChecksum = udf_tag_checksum(&lvid->descTag);
2008 }
2009 
2010 static void udf_open_lvid(struct super_block *sb)
2011 {
2012         struct udf_sb_info *sbi = UDF_SB(sb);
2013         struct buffer_head *bh = sbi->s_lvid_bh;
2014         struct logicalVolIntegrityDesc *lvid;
2015         struct logicalVolIntegrityDescImpUse *lvidiu;
2016 
2017         if (!bh)
2018                 return;
2019         lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
2020         lvidiu = udf_sb_lvidiu(sb);
2021         if (!lvidiu)
2022                 return;
2023 
2024         mutex_lock(&sbi->s_alloc_mutex);
2025         lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
2026         lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
2027         if (le32_to_cpu(lvid->integrityType) == LVID_INTEGRITY_TYPE_CLOSE)
2028                 lvid->integrityType = cpu_to_le32(LVID_INTEGRITY_TYPE_OPEN);
2029         else
2030                 UDF_SET_FLAG(sb, UDF_FLAG_INCONSISTENT);
2031 
2032         udf_finalize_lvid(lvid);
2033         mark_buffer_dirty(bh);
2034         sbi->s_lvid_dirty = 0;
2035         mutex_unlock(&sbi->s_alloc_mutex);
2036         /* Make opening of filesystem visible on the media immediately */
2037         sync_dirty_buffer(bh);
2038 }
2039 
2040 static void udf_close_lvid(struct super_block *sb)
2041 {
2042         struct udf_sb_info *sbi = UDF_SB(sb);
2043         struct buffer_head *bh = sbi->s_lvid_bh;
2044         struct logicalVolIntegrityDesc *lvid;
2045         struct logicalVolIntegrityDescImpUse *lvidiu;
2046 
2047         if (!bh)
2048                 return;
2049         lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
2050         lvidiu = udf_sb_lvidiu(sb);
2051         if (!lvidiu)
2052                 return;
2053 
2054         mutex_lock(&sbi->s_alloc_mutex);
2055         lvidiu->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
2056         lvidiu->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
2057         if (UDF_MAX_WRITE_VERSION > le16_to_cpu(lvidiu->maxUDFWriteRev))
2058                 lvidiu->maxUDFWriteRev = cpu_to_le16(UDF_MAX_WRITE_VERSION);
2059         if (sbi->s_udfrev > le16_to_cpu(lvidiu->minUDFReadRev))
2060                 lvidiu->minUDFReadRev = cpu_to_le16(sbi->s_udfrev);
2061         if (sbi->s_udfrev > le16_to_cpu(lvidiu->minUDFWriteRev))
2062                 lvidiu->minUDFWriteRev = cpu_to_le16(sbi->s_udfrev);
2063         if (!UDF_QUERY_FLAG(sb, UDF_FLAG_INCONSISTENT))
2064                 lvid->integrityType = cpu_to_le32(LVID_INTEGRITY_TYPE_CLOSE);
2065 
2066         /*
2067          * We set the buffer uptodate unconditionally here to avoid
2068          * spurious warnings from mark_buffer_dirty() when a previous EIO
2069          * has marked the buffer as !uptodate.
2070          */
2071         set_buffer_uptodate(bh);
2072         udf_finalize_lvid(lvid);
2073         mark_buffer_dirty(bh);
2074         sbi->s_lvid_dirty = 0;
2075         mutex_unlock(&sbi->s_alloc_mutex);
2076         /* Make closing of filesystem visible on the media immediately */
2077         sync_dirty_buffer(bh);
2078 }
2079 
2080 u64 lvid_get_unique_id(struct super_block *sb)
2081 {
2082         struct buffer_head *bh;
2083         struct udf_sb_info *sbi = UDF_SB(sb);
2084         struct logicalVolIntegrityDesc *lvid;
2085         struct logicalVolHeaderDesc *lvhd;
2086         u64 uniqueID;
2087         u64 ret;
2088 
2089         bh = sbi->s_lvid_bh;
2090         if (!bh)
2091                 return 0;
2092 
2093         lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
2094         lvhd = (struct logicalVolHeaderDesc *)lvid->logicalVolContentsUse;
2095 
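             /*
              * The value read from disk is handed to the caller while the
              * stored counter is advanced.  Per the UDF spec the lower 32
              * bits of a UniqueID skip the reserved values 0-15 when they
              * wrap, hence the jump by 16 below.
              */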
2096         mutex_lock(&sbi->s_alloc_mutex);
2097         ret = uniqueID = le64_to_cpu(lvhd->uniqueID);
2098         if (!(++uniqueID & 0xFFFFFFFF))
2099                 uniqueID += 16;
2100         lvhd->uniqueID = cpu_to_le64(uniqueID);
2101         udf_updated_lvid(sb);
2102         mutex_unlock(&sbi->s_alloc_mutex);
2103 
2104         return ret;
2105 }
2106 
2107 static int udf_fill_super(struct super_block *sb, void *options, int silent)
2108 {
2109         int ret = -EINVAL;
2110         struct inode *inode = NULL;
2111         struct udf_options uopt;
2112         struct kernel_lb_addr rootdir, fileset;
2113         struct udf_sb_info *sbi;
2114         bool lvid_open = false;
2115 
2116         uopt.flags = (1 << UDF_FLAG_USE_AD_IN_ICB) | (1 << UDF_FLAG_STRICT);
2117         /* By default we'll use overflow[ug]id when UDF inode [ug]id == -1 */
2118         uopt.uid = make_kuid(current_user_ns(), overflowuid);
2119         uopt.gid = make_kgid(current_user_ns(), overflowgid);
2120         uopt.umask = 0;
2121         uopt.fmode = UDF_INVALID_MODE;
2122         uopt.dmode = UDF_INVALID_MODE;
2123         uopt.nls_map = NULL;
2124 
2125         sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
2126         if (!sbi)
2127                 return -ENOMEM;
2128 
2129         sb->s_fs_info = sbi;
2130 
2131         mutex_init(&sbi->s_alloc_mutex);
2132 
2133         if (!udf_parse_options((char *)options, &uopt, false))
2134                 goto parse_options_failure;
2135 
2136         if (uopt.flags & (1 << UDF_FLAG_UTF8) &&
2137             uopt.flags & (1 << UDF_FLAG_NLS_MAP)) {
2138                 udf_err(sb, "utf8 cannot be combined with iocharset\n");
2139                 goto parse_options_failure;
2140         }
2141         if ((uopt.flags & (1 << UDF_FLAG_NLS_MAP)) && !uopt.nls_map) {
2142                 uopt.nls_map = load_nls_default();
2143                 if (!uopt.nls_map)
2144                         uopt.flags &= ~(1 << UDF_FLAG_NLS_MAP);
2145                 else
2146                         udf_debug("Using default NLS map\n");
2147         }
2148         if (!(uopt.flags & (1 << UDF_FLAG_NLS_MAP)))
2149                 uopt.flags |= (1 << UDF_FLAG_UTF8);
2150 
2151         fileset.logicalBlockNum = 0xFFFFFFFF;
2152         fileset.partitionReferenceNum = 0xFFFF;
2153 
2154         sbi->s_flags = uopt.flags;
2155         sbi->s_uid = uopt.uid;
2156         sbi->s_gid = uopt.gid;
2157         sbi->s_umask = uopt.umask;
2158         sbi->s_fmode = uopt.fmode;
2159         sbi->s_dmode = uopt.dmode;
2160         sbi->s_nls_map = uopt.nls_map;
2161         rwlock_init(&sbi->s_cred_lock);
2162 
2163         if (uopt.session == 0xFFFFFFFF)
2164                 sbi->s_session = udf_get_last_session(sb);
2165         else
2166                 sbi->s_session = uopt.session;
2167 
2168         udf_debug("Multi-session=%d\n", sbi->s_session);
2169 
2170         /* Fill in the rest of the superblock */
2171         sb->s_op = &udf_sb_ops;
2172         sb->s_export_op = &udf_export_ops;
2173 
2174         sb->s_magic = UDF_SUPER_MAGIC;
2175         sb->s_time_gran = 1000;
2176 
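             /*
              * Unless a block size was forced via the mount options, probe
              * from the device's logical block size upwards, doubling up to
              * 4096 bytes, until a Volume Recognition Sequence and an anchor
              * are found.
              */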
2177         if (uopt.flags & (1 << UDF_FLAG_BLOCKSIZE_SET)) {
2178                 ret = udf_load_vrs(sb, &uopt, silent, &fileset);
2179         } else {
2180                 uopt.blocksize = bdev_logical_block_size(sb->s_bdev);
2181                 while (uopt.blocksize <= 4096) {
2182                         ret = udf_load_vrs(sb, &uopt, silent, &fileset);
2183                         if (ret < 0) {
2184                                 if (!silent && ret != -EACCES) {
2185                                         pr_notice("Scanning with blocksize %u failed\n",
2186                                                   uopt.blocksize);
2187                                 }
2188                                 brelse(sbi->s_lvid_bh);
2189                                 sbi->s_lvid_bh = NULL;
2190                                 /*
2191                                  * EACCES is special - we want to propagate to
2192                                  * upper layers that we cannot handle RW mount.
2193                                  */
2194                                 if (ret == -EACCES)
2195                                         break;
2196                         } else
2197                                 break;
2198 
2199                         uopt.blocksize <<= 1;
2200                 }
2201         }
2202         if (ret < 0) {
2203                 if (ret == -EAGAIN) {
2204                         udf_warn(sb, "No partition found (1)\n");
2205                         ret = -EINVAL;
2206                 }
2207                 goto error_out;
2208         }
2209 
2210         udf_debug("Lastblock=%u\n", sbi->s_last_block);
2211 
2212         if (sbi->s_lvid_bh) {
2213                 struct logicalVolIntegrityDescImpUse *lvidiu =
2214                                                         udf_sb_lvidiu(sb);
2215                 uint16_t minUDFReadRev;
2216                 uint16_t minUDFWriteRev;
2217 
2218                 if (!lvidiu) {
2219                         ret = -EINVAL;
2220                         goto error_out;
2221                 }
2222                 minUDFReadRev = le16_to_cpu(lvidiu->minUDFReadRev);
2223                 minUDFWriteRev = le16_to_cpu(lvidiu->minUDFWriteRev);
2224                 if (minUDFReadRev > UDF_MAX_READ_VERSION) {
2225                         udf_err(sb, "minUDFReadRev=%x (max is %x)\n",
2226                                 minUDFReadRev,
2227                                 UDF_MAX_READ_VERSION);
2228                         ret = -EINVAL;
2229                         goto error_out;
2230                 } else if (minUDFWriteRev > UDF_MAX_WRITE_VERSION) {
2231                         if (!sb_rdonly(sb)) {
2232                                 ret = -EACCES;
2233                                 goto error_out;
2234                         }
2235                         UDF_SET_FLAG(sb, UDF_FLAG_RW_INCOMPAT);
2236                 }
2237 
2238                 sbi->s_udfrev = minUDFWriteRev;
2239 
2240                 if (minUDFReadRev >= UDF_VERS_USE_EXTENDED_FE)
2241                         UDF_SET_FLAG(sb, UDF_FLAG_USE_EXTENDED_FE);
2242                 if (minUDFReadRev >= UDF_VERS_USE_STREAMS)
2243                         UDF_SET_FLAG(sb, UDF_FLAG_USE_STREAMS);
2244         }
2245 
2246         if (!sbi->s_partitions) {
2247                 udf_warn(sb, "No partition found (2)\n");
2248                 ret = -EINVAL;
2249                 goto error_out;
2250         }
2251 
2252         if (sbi->s_partmaps[sbi->s_partition].s_partition_flags &
2253                         UDF_PART_FLAG_READ_ONLY) {
2254                 if (!sb_rdonly(sb)) {
2255                         ret = -EACCES;
2256                         goto error_out;
2257                 }
2258                 UDF_SET_FLAG(sb, UDF_FLAG_RW_INCOMPAT);
2259         }
2260 
2261         ret = udf_find_fileset(sb, &fileset, &rootdir);
2262         if (ret < 0) {
2263                 udf_warn(sb, "No fileset found\n");
2264                 goto error_out;
2265         }
2266 
2267         if (!silent) {
2268                 struct timestamp ts;
2269                 udf_time_to_disk_stamp(&ts, sbi->s_record_time);
2270                 udf_info("Mounting volume '%s', timestamp %04u/%02u/%02u %02u:%02u (%x)\n",
2271                          sbi->s_volume_ident,
2272                          le16_to_cpu(ts.year), ts.month, ts.day,
2273                          ts.hour, ts.minute, le16_to_cpu(ts.typeAndTimezone));
2274         }
2275         if (!sb_rdonly(sb)) {
2276                 udf_open_lvid(sb);
2277                 lvid_open = true;
2278         }
2279 
2280         /* Assign the root inode */
2281         /* assign inodes by physical block number */
2282         /* perhaps it's not extensible enough, but for now ... */
2283         inode = udf_iget(sb, &rootdir);
2284         if (IS_ERR(inode)) {
2285                 udf_err(sb, "Error in udf_iget, block=%u, partition=%u\n",
2286                        rootdir.logicalBlockNum, rootdir.partitionReferenceNum);
2287                 ret = PTR_ERR(inode);
2288                 goto error_out;
2289         }
2290 
2291         /* Allocate a dentry for the root inode */
2292         sb->s_root = d_make_root(inode);
2293         if (!sb->s_root) {
2294                 udf_err(sb, "Couldn't allocate root dentry\n");
2295                 ret = -ENOMEM;
2296                 goto error_out;
2297         }
2298         sb->s_maxbytes = MAX_LFS_FILESIZE;
2299         sb->s_max_links = UDF_MAX_LINKS;
2300         return 0;
2301 
2302 error_out:
2303         iput(sbi->s_vat_inode);
2304 parse_options_failure:
2305         if (uopt.nls_map)
2306                 unload_nls(uopt.nls_map);
2307         if (lvid_open)
2308                 udf_close_lvid(sb);
2309         brelse(sbi->s_lvid_bh);
2310         udf_sb_free_partitions(sb);
2311         kfree(sbi);
2312         sb->s_fs_info = NULL;
2313 
2314         return ret;
2315 }
2316 
2317 void _udf_err(struct super_block *sb, const char *function,
2318               const char *fmt, ...)
2319 {
2320         struct va_format vaf;
2321         va_list args;
2322 
2323         va_start(args, fmt);
2324 
2325         vaf.fmt = fmt;
2326         vaf.va = &args;
2327 
2328         pr_err("error (device %s): %s: %pV", sb->s_id, function, &vaf);
2329 
2330         va_end(args);
2331 }
2332 
2333 void _udf_warn(struct super_block *sb, const char *function,
2334                const char *fmt, ...)
2335 {
2336         struct va_format vaf;
2337         va_list args;
2338 
2339         va_start(args, fmt);
2340 
2341         vaf.fmt = fmt;
2342         vaf.va = &args;
2343 
2344         pr_warn("warning (device %s): %s: %pV", sb->s_id, function, &vaf);
2345 
2346         va_end(args);
2347 }
2348 
2349 static void udf_put_super(struct super_block *sb)
2350 {
2351         struct udf_sb_info *sbi;
2352 
2353         sbi = UDF_SB(sb);
2354 
2355         iput(sbi->s_vat_inode);
2356         if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP))
2357                 unload_nls(sbi->s_nls_map);
2358         if (!sb_rdonly(sb))
2359                 udf_close_lvid(sb);
2360         brelse(sbi->s_lvid_bh);
2361         udf_sb_free_partitions(sb);
2362         mutex_destroy(&sbi->s_alloc_mutex);
2363         kfree(sb->s_fs_info);
2364         sb->s_fs_info = NULL;
2365 }
2366 
2367 static int udf_sync_fs(struct super_block *sb, int wait)
2368 {
2369         struct udf_sb_info *sbi = UDF_SB(sb);
2370 
2371         mutex_lock(&sbi->s_alloc_mutex);
2372         if (sbi->s_lvid_dirty) {
2373                 struct buffer_head *bh = sbi->s_lvid_bh;
2374                 struct logicalVolIntegrityDesc *lvid;
2375 
2376                 lvid = (struct logicalVolIntegrityDesc *)bh->b_data;
2377                 udf_finalize_lvid(lvid);
2378 
2379                 /*
2380                  * The block device will be synced later, so we don't have
2381                  * to submit the buffer for I/O here.
2382                  */
2383                 mark_buffer_dirty(bh);
2384                 sbi->s_lvid_dirty = 0;
2385         }
2386         mutex_unlock(&sbi->s_alloc_mutex);
2387 
2388         return 0;
2389 }
2390 
2391 static int udf_statfs(struct dentry *dentry, struct kstatfs *buf)
2392 {
2393         struct super_block *sb = dentry->d_sb;
2394         struct udf_sb_info *sbi = UDF_SB(sb);
2395         struct logicalVolIntegrityDescImpUse *lvidiu;
2396         u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
2397 
2398         lvidiu = udf_sb_lvidiu(sb);
2399         buf->f_type = UDF_SUPER_MAGIC;
2400         buf->f_bsize = sb->s_blocksize;
2401         buf->f_blocks = sbi->s_partmaps[sbi->s_partition].s_partition_len;
2402         buf->f_bfree = udf_count_free(sb);
2403         buf->f_bavail = buf->f_bfree;
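             /*
              * UDF has no fixed inode table; numFiles and numDirs from the
              * LVID implementation use area give the count of objects in use,
              * and free blocks stand in for free "inodes".
              */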
2404         buf->f_files = (lvidiu != NULL ? (le32_to_cpu(lvidiu->numFiles) +
2405                                           le32_to_cpu(lvidiu->numDirs)) : 0)
2406                         + buf->f_bfree;
2407         buf->f_ffree = buf->f_bfree;
2408         buf->f_namelen = UDF_NAME_LEN;
2409         buf->f_fsid.val[0] = (u32)id;
2410         buf->f_fsid.val[1] = (u32)(id >> 32);
2411 
2412         return 0;
2413 }
2414 
2415 static unsigned int udf_count_free_bitmap(struct super_block *sb,
2416                                           struct udf_bitmap *bitmap)
2417 {
2418         struct buffer_head *bh = NULL;
2419         unsigned int accum = 0;
2420         int index;
2421         udf_pblk_t block = 0, newblock;
2422         struct kernel_lb_addr loc;
2423         uint32_t bytes;
2424         uint8_t *ptr;
2425         uint16_t ident;
2426         struct spaceBitmapDesc *bm;
2427 
2428         loc.logicalBlockNum = bitmap->s_extPosition;
2429         loc.partitionReferenceNum = UDF_SB(sb)->s_partition;
2430         bh = udf_read_ptagged(sb, &loc, 0, &ident);
2431 
2432         if (!bh) {
2433                 udf_err(sb, "udf_count_free failed\n");
2434                 goto out;
2435         } else if (ident != TAG_IDENT_SBD) {
2436                 brelse(bh);
2437                 udf_err(sb, "udf_count_free failed\n");
2438                 goto out;
2439         }
2440 
2441         bm = (struct spaceBitmapDesc *)bh->b_data;
2442         bytes = le32_to_cpu(bm->numOfBytes);
2443         index = sizeof(struct spaceBitmapDesc); /* offset in first block only */
2444         ptr = (uint8_t *)bh->b_data;
2445 
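             /*
              * The bitmap may span several blocks; count the set bits (free
              * blocks) block by block.  Only the first block carries the
              * spaceBitmapDesc header, hence the initial byte offset above.
              */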
2446         while (bytes > 0) {
2447                 u32 cur_bytes = min_t(u32, bytes, sb->s_blocksize - index);
2448                 accum += bitmap_weight((const unsigned long *)(ptr + index),
2449                                         cur_bytes * 8);
2450                 bytes -= cur_bytes;
2451                 if (bytes) {
2452                         brelse(bh);
2453                         newblock = udf_get_lb_pblock(sb, &loc, ++block);
2454                         bh = udf_tread(sb, newblock);
2455                         if (!bh) {
2456                                 udf_debug("read failed\n");
2457                                 goto out;
2458                         }
2459                         index = 0;
2460                         ptr = (uint8_t *)bh->b_data;
2461                 }
2462         }
2463         brelse(bh);
2464 out:
2465         return accum;
2466 }
2467 
2468 static unsigned int udf_count_free_table(struct super_block *sb,
2469                                          struct inode *table)
2470 {
2471         unsigned int accum = 0;
2472         uint32_t elen;
2473         struct kernel_lb_addr eloc;
2474         int8_t etype;
2475         struct extent_position epos;
2476 
2477         mutex_lock(&UDF_SB(sb)->s_alloc_mutex);
2478         epos.block = UDF_I(table)->i_location;
2479         epos.offset = sizeof(struct unallocSpaceEntry);
2480         epos.bh = NULL;
2481 
2482         while ((etype = udf_next_aext(table, &epos, &eloc, &elen, 1)) != -1)
2483                 accum += (elen >> table->i_sb->s_blocksize_bits);
2484 
2485         brelse(epos.bh);
2486         mutex_unlock(&UDF_SB(sb)->s_alloc_mutex);
2487 
2488         return accum;
2489 }
2490 
2491 static unsigned int udf_count_free(struct super_block *sb)
2492 {
2493         unsigned int accum = 0;
2494         struct udf_sb_info *sbi = UDF_SB(sb);
2495         struct udf_part_map *map;
2496         unsigned int part = sbi->s_partition;
2497         int ptype = sbi->s_partmaps[part].s_partition_type;
2498 
2499         if (ptype == UDF_METADATA_MAP25) {
2500                 part = sbi->s_partmaps[part].s_type_specific.s_metadata.
2501                                                         s_phys_partition_ref;
2502         } else if (ptype == UDF_VIRTUAL_MAP15 || ptype == UDF_VIRTUAL_MAP20) {
2503                 /*
2504                  * Filesystems with VAT are append-only and we cannot write to
2505                  * them. Let's just report 0 here.
2506                  */
2507                 return 0;
2508         }
2509 
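             /*
              * Prefer the figure cached in the LVID Free Space Table, where
              * 0xFFFFFFFF means "unknown".  If that yields nothing, fall back
              * to counting the partition's unallocated space bitmap, and
              * finally its unallocated space table.
              */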
2510         if (sbi->s_lvid_bh) {
2511                 struct logicalVolIntegrityDesc *lvid =
2512                         (struct logicalVolIntegrityDesc *)
2513                         sbi->s_lvid_bh->b_data;
2514                 if (le32_to_cpu(lvid->numOfPartitions) > part) {
2515                         accum = le32_to_cpu(
2516                                         lvid->freeSpaceTable[part]);
2517                         if (accum == 0xFFFFFFFF)
2518                                 accum = 0;
2519                 }
2520         }
2521 
2522         if (accum)
2523                 return accum;
2524 
2525         map = &sbi->s_partmaps[part];
2526         if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
2527                 accum += udf_count_free_bitmap(sb,
2528                                                map->s_uspace.s_bitmap);
2529         }
2530         if (accum)
2531                 return accum;
2532 
2533         if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) {
2534                 accum += udf_count_free_table(sb,
2535                                               map->s_uspace.s_table);
2536         }
2537         return accum;
2538 }
2539 
2540 MODULE_AUTHOR("Ben Fennema");
2541 MODULE_DESCRIPTION("Universal Disk Format Filesystem");
2542 MODULE_LICENSE("GPL");
2543 module_init(init_udf_fs)
2544 module_exit(exit_udf_fs)
