fs/ceph/locks.c


DEFINITIONS

This source file includes the following definitions.
  1. secure_addr
  2. ceph_flock_init
  3. ceph_fl_copy_lock
  4. ceph_fl_release_lock
  5. ceph_lock_message
  6. ceph_lock_wait_for_completion
  7. ceph_lock
  8. ceph_flock
  9. ceph_count_locks
  10. lock_to_ceph_filelock
  11. ceph_encode_locks_to_buffer
  12. ceph_locks_to_pagelist
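
These helpers implement POSIX (fcntl) and BSD (flock) locking for CephFS by
forwarding each lock request to the MDS and then mirroring the result into the
local VFS lock state. The entry points ceph_lock() and ceph_flock() are hooked
up through the filesystem's file_operations; a minimal sketch of that wiring
(the real initializer lives in fs/ceph/file.c, and the name used here is
illustrative only) looks like:

    /* sketch only -- see ceph_file_fops in fs/ceph/file.c for the real table */
    static const struct file_operations example_ceph_file_fops = {
            /* ... read/write/mmap and friends ... */
            .lock  = ceph_lock,   /* fcntl(F_GETLK/F_SETLK/F_SETLKW) */
            .flock = ceph_flock,  /* flock(LOCK_SH/LOCK_EX/LOCK_UN) */
    };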

// SPDX-License-Identifier: GPL-2.0
#include <linux/ceph/ceph_debug.h>

#include <linux/file.h>
#include <linux/namei.h>
#include <linux/random.h>

#include "super.h"
#include "mds_client.h"
#include <linux/ceph/pagelist.h>

static u64 lock_secret;
static int ceph_lock_wait_for_completion(struct ceph_mds_client *mdsc,
                                         struct ceph_mds_request *req);

static inline u64 secure_addr(void *addr)
{
        u64 v = lock_secret ^ (u64)(unsigned long)addr;
        /*
         * Set the most significant bit, so that the MDS knows the 'owner'
         * field is sufficient to identify the owner of the lock (the old
         * code used both 'owner' and 'pid').
         */
        v |= (1ULL << 63);
        return v;
}
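
/*
 * Illustrative note (not from the original source): the token produced above
 * is what ceph_lock_message() and lock_to_ceph_filelock() send to the MDS as
 * the lock 'owner'.  Because it is derived only from fl->fl_owner, every lock
 * taken by the same owner (e.g. all POSIX locks of one process) maps to the
 * same value, so the MDS can match conflicting locks by 'owner' alone; the
 * XOR with the boot-time lock_secret keeps the raw kernel pointer from being
 * exposed on the wire.
 */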

void __init ceph_flock_init(void)
{
        get_random_bytes(&lock_secret, sizeof(lock_secret));
}

static void ceph_fl_copy_lock(struct file_lock *dst, struct file_lock *src)
{
        struct ceph_file_info *fi = dst->fl_file->private_data;
        struct inode *inode = file_inode(dst->fl_file);
        atomic_inc(&ceph_inode(inode)->i_filelock_ref);
        atomic_inc(&fi->num_locks);
}

static void ceph_fl_release_lock(struct file_lock *fl)
{
        struct ceph_file_info *fi = fl->fl_file->private_data;
        struct inode *inode = file_inode(fl->fl_file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        atomic_dec(&fi->num_locks);
        if (atomic_dec_and_test(&ci->i_filelock_ref)) {
                /* clear error when all locks are released */
                spin_lock(&ci->i_ceph_lock);
                ci->i_ceph_flags &= ~CEPH_I_ERROR_FILELOCK;
                spin_unlock(&ci->i_ceph_lock);
        }
}

static const struct file_lock_operations ceph_fl_lock_ops = {
        .fl_copy_lock = ceph_fl_copy_lock,
        .fl_release_private = ceph_fl_release_lock,
};
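
/*
 * Illustrative note (not from the original source): these callbacks keep
 * ceph_inode(inode)->i_filelock_ref and fi->num_locks in step with the number
 * of file_lock structs attached to the inode.  ceph_lock_message() installs
 * the ops and takes the first reference via fl_copy_lock() *before* talking
 * to the MDS, so the auth caps needed for locking cannot be trimmed between
 * the MDS reply and the lock being added to the inode; the reference is
 * dropped from fl_release_private() when the lock finally goes away.
 */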

/*
 * Implement fcntl and flock locking functions.
 */
static int ceph_lock_message(u8 lock_type, u16 operation, struct inode *inode,
                             int cmd, u8 wait, struct file_lock *fl)
{
        struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
        struct ceph_mds_request *req;
        int err;
        u64 length = 0;
        u64 owner;

        if (operation == CEPH_MDS_OP_SETFILELOCK) {
                /*
                 * increasing i_filelock_ref closes race window between
                 * handling request reply and adding file_lock struct to
                 * inode. Otherwise, auth caps may get trimmed in the
                 * window. Caller function will decrease the counter.
                 */
                fl->fl_ops = &ceph_fl_lock_ops;
                fl->fl_ops->fl_copy_lock(fl, NULL);
        }

        if (operation != CEPH_MDS_OP_SETFILELOCK || cmd == CEPH_LOCK_UNLOCK)
                wait = 0;

        req = ceph_mdsc_create_request(mdsc, operation, USE_AUTH_MDS);
        if (IS_ERR(req))
                return PTR_ERR(req);
        req->r_inode = inode;
        ihold(inode);
        req->r_num_caps = 1;

        /* mds requires start and length rather than start and end */
        if (LLONG_MAX == fl->fl_end)
                length = 0;
        else
                length = fl->fl_end - fl->fl_start + 1;

        owner = secure_addr(fl->fl_owner);

        dout("ceph_lock_message: rule: %d, op: %d, owner: %llx, pid: %llu, "
             "start: %llu, length: %llu, wait: %d, type: %d\n", (int)lock_type,
             (int)operation, owner, (u64)fl->fl_pid, fl->fl_start, length,
             wait, fl->fl_type);

        req->r_args.filelock_change.rule = lock_type;
        req->r_args.filelock_change.type = cmd;
        req->r_args.filelock_change.owner = cpu_to_le64(owner);
        req->r_args.filelock_change.pid = cpu_to_le64((u64)fl->fl_pid);
        req->r_args.filelock_change.start = cpu_to_le64(fl->fl_start);
        req->r_args.filelock_change.length = cpu_to_le64(length);
        req->r_args.filelock_change.wait = wait;

        if (wait)
                req->r_wait_for_completion = ceph_lock_wait_for_completion;

        err = ceph_mdsc_do_request(mdsc, inode, req);
        if (!err && operation == CEPH_MDS_OP_GETFILELOCK) {
                fl->fl_pid = -le64_to_cpu(req->r_reply_info.filelock_reply->pid);
                if (CEPH_LOCK_SHARED == req->r_reply_info.filelock_reply->type)
                        fl->fl_type = F_RDLCK;
                else if (CEPH_LOCK_EXCL == req->r_reply_info.filelock_reply->type)
                        fl->fl_type = F_WRLCK;
                else
                        fl->fl_type = F_UNLCK;

                fl->fl_start = le64_to_cpu(req->r_reply_info.filelock_reply->start);
                length = le64_to_cpu(req->r_reply_info.filelock_reply->start) +
                         le64_to_cpu(req->r_reply_info.filelock_reply->length);
                if (length >= 1)
                        fl->fl_end = length - 1;
                else
                        fl->fl_end = 0;
        }
        ceph_mdsc_put_request(req);
        dout("ceph_lock_message: rule: %d, op: %d, pid: %llu, start: %llu, "
             "length: %llu, wait: %d, type: %d, err code %d\n", (int)lock_type,
             (int)operation, (u64)fl->fl_pid, fl->fl_start,
             length, wait, fl->fl_type, err);
        return err;
}
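
/*
 * Worked example for the start/length conversion above (illustrative, not
 * from the original source): a whole-file POSIX lock arrives with
 * fl_start == 0 and fl_end == LLONG_MAX and is sent to the MDS as
 * start == 0, length == 0 (meaning "to EOF"); a lock on bytes 100-199
 * (fl_start == 100, fl_end == 199) is sent as start == 100, length == 100.
 * On a GETFILELOCK reply the inverse mapping is applied:
 * fl_end = start + length - 1, clamped to 0 when start + length == 0.
 */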

static int ceph_lock_wait_for_completion(struct ceph_mds_client *mdsc,
                                         struct ceph_mds_request *req)
{
        struct ceph_mds_request *intr_req;
        struct inode *inode = req->r_inode;
        int err, lock_type;

        BUG_ON(req->r_op != CEPH_MDS_OP_SETFILELOCK);
        if (req->r_args.filelock_change.rule == CEPH_LOCK_FCNTL)
                lock_type = CEPH_LOCK_FCNTL_INTR;
        else if (req->r_args.filelock_change.rule == CEPH_LOCK_FLOCK)
                lock_type = CEPH_LOCK_FLOCK_INTR;
        else
                BUG_ON(1);
        BUG_ON(req->r_args.filelock_change.type == CEPH_LOCK_UNLOCK);

        err = wait_for_completion_interruptible(&req->r_completion);
        if (!err)
                return 0;

        dout("ceph_lock_wait_for_completion: request %llu was interrupted\n",
             req->r_tid);

        mutex_lock(&mdsc->mutex);
        if (test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) {
                err = 0;
        } else {
                /*
                 * ensure we aren't running concurrently with
                 * ceph_fill_trace or ceph_readdir_prepopulate, which
                 * rely on locks (dir mutex) held by our caller.
                 */
                mutex_lock(&req->r_fill_mutex);
                req->r_err = err;
                set_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags);
                mutex_unlock(&req->r_fill_mutex);

                if (!req->r_session) {
                        /* haven't sent the request */
                        err = 0;
                }
        }
        mutex_unlock(&mdsc->mutex);
        if (!err)
                return 0;

        intr_req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_SETFILELOCK,
                                            USE_AUTH_MDS);
        if (IS_ERR(intr_req))
                return PTR_ERR(intr_req);

        intr_req->r_inode = inode;
        ihold(inode);
        intr_req->r_num_caps = 1;

        intr_req->r_args.filelock_change = req->r_args.filelock_change;
        intr_req->r_args.filelock_change.rule = lock_type;
        intr_req->r_args.filelock_change.type = CEPH_LOCK_UNLOCK;

        err = ceph_mdsc_do_request(mdsc, inode, intr_req);
        ceph_mdsc_put_request(intr_req);

        if (err && err != -ERESTARTSYS)
                return err;

        wait_for_completion_killable(&req->r_safe_completion);
        return 0;
}
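
/*
 * Summary of the flow above (illustrative, not from the original source): a
 * blocking SETFILELOCK cannot simply be abandoned when a signal arrives,
 * because the MDS may still grant it later.  If no reply has been received
 * yet, the request is marked aborted under r_fill_mutex and a follow-up
 * SETFILELOCK with rule CEPH_LOCK_FCNTL_INTR/CEPH_LOCK_FLOCK_INTR and type
 * CEPH_LOCK_UNLOCK asks the MDS to drop the pending lock; we then wait for
 * the original request to become "safe" before returning to the caller.
 */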

/*
 * Attempt to set an fcntl lock.
 * For now, this just goes away to the server. Later it may be more awesome.
 */
int ceph_lock(struct file *file, int cmd, struct file_lock *fl)
{
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        int err = 0;
        u16 op = CEPH_MDS_OP_SETFILELOCK;
        u8 wait = 0;
        u8 lock_cmd;

        if (!(fl->fl_flags & FL_POSIX))
                return -ENOLCK;
        /* No mandatory locks */
        if (__mandatory_lock(file->f_mapping->host) && fl->fl_type != F_UNLCK)
                return -ENOLCK;

        dout("ceph_lock, fl_owner: %p\n", fl->fl_owner);

        /* set wait bit as appropriate, then make command as Ceph expects it */
        if (IS_GETLK(cmd))
                op = CEPH_MDS_OP_GETFILELOCK;
        else if (IS_SETLKW(cmd))
                wait = 1;

        spin_lock(&ci->i_ceph_lock);
        if (ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK)
                err = -EIO;
        spin_unlock(&ci->i_ceph_lock);
        if (err < 0) {
                if (op == CEPH_MDS_OP_SETFILELOCK && F_UNLCK == fl->fl_type)
                        posix_lock_file(file, fl, NULL);
                return err;
        }

        if (F_RDLCK == fl->fl_type)
                lock_cmd = CEPH_LOCK_SHARED;
        else if (F_WRLCK == fl->fl_type)
                lock_cmd = CEPH_LOCK_EXCL;
        else
                lock_cmd = CEPH_LOCK_UNLOCK;

        err = ceph_lock_message(CEPH_LOCK_FCNTL, op, inode, lock_cmd, wait, fl);
        if (!err) {
                if (op == CEPH_MDS_OP_SETFILELOCK) {
                        dout("mds locked, locking locally\n");
                        err = posix_lock_file(file, fl, NULL);
                        if (err) {
                                /* undo! This should only happen if
                                 * the kernel detects local
                                 * deadlock. */
                                ceph_lock_message(CEPH_LOCK_FCNTL, op, inode,
                                                  CEPH_LOCK_UNLOCK, 0, fl);
                                dout("got %d on posix_lock_file, undid lock\n",
                                     err);
                        }
                }
        }
        return err;
}
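
/*
 * Hypothetical userspace caller of the fcntl path above (not part of this
 * file): F_GETLK ends up as CEPH_MDS_OP_GETFILELOCK, F_SETLKW sets wait = 1,
 * and an l_len of 0 becomes the "lock to EOF" case handled in
 * ceph_lock_message().
 *
 *      struct flock fl = {
 *              .l_type   = F_WRLCK,
 *              .l_whence = SEEK_SET,
 *              .l_start  = 0,
 *              .l_len    = 0,          (0 means "lock to end of file")
 *      };
 *      if (fcntl(fd, F_SETLKW, &fl) == -1)
 *              perror("fcntl");
 */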

int ceph_flock(struct file *file, int cmd, struct file_lock *fl)
{
        struct inode *inode = file_inode(file);
        struct ceph_inode_info *ci = ceph_inode(inode);
        int err = 0;
        u8 wait = 0;
        u8 lock_cmd;

        if (!(fl->fl_flags & FL_FLOCK))
                return -ENOLCK;
        /* No mandatory locks */
        if (fl->fl_type & LOCK_MAND)
                return -EOPNOTSUPP;

        dout("ceph_flock, fl_file: %p\n", fl->fl_file);

        spin_lock(&ci->i_ceph_lock);
        if (ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK)
                err = -EIO;
        spin_unlock(&ci->i_ceph_lock);
        if (err < 0) {
                if (F_UNLCK == fl->fl_type)
                        locks_lock_file_wait(file, fl);
                return err;
        }

        if (IS_SETLKW(cmd))
                wait = 1;

        if (F_RDLCK == fl->fl_type)
                lock_cmd = CEPH_LOCK_SHARED;
        else if (F_WRLCK == fl->fl_type)
                lock_cmd = CEPH_LOCK_EXCL;
        else
                lock_cmd = CEPH_LOCK_UNLOCK;

        err = ceph_lock_message(CEPH_LOCK_FLOCK, CEPH_MDS_OP_SETFILELOCK,
                                inode, lock_cmd, wait, fl);
        if (!err) {
                err = locks_lock_file_wait(file, fl);
                if (err) {
                        ceph_lock_message(CEPH_LOCK_FLOCK,
                                          CEPH_MDS_OP_SETFILELOCK,
                                          inode, CEPH_LOCK_UNLOCK, 0, fl);
                        dout("got %d on locks_lock_file_wait, undid lock\n", err);
                }
        }
        return err;
}
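
/*
 * Hypothetical userspace caller of the flock path above (not part of this
 * file): LOCK_SH maps to CEPH_LOCK_SHARED, LOCK_EX to CEPH_LOCK_EXCL and
 * LOCK_UN to CEPH_LOCK_UNLOCK; adding LOCK_NB makes the VFS pass a
 * non-waiting command, so wait stays 0.
 *
 *      if (flock(fd, LOCK_EX | LOCK_NB) == -1 && errno == EWOULDBLOCK)
 *              fprintf(stderr, "lock held by someone else\n");
 */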

/*
 * Fills in the passed counter variables, so you can prepare pagelist metadata
 * before calling ceph_encode_locks_to_buffer().
 */
void ceph_count_locks(struct inode *inode, int *fcntl_count, int *flock_count)
{
        struct file_lock *lock;
        struct file_lock_context *ctx;

        *fcntl_count = 0;
        *flock_count = 0;

        ctx = inode->i_flctx;
        if (ctx) {
                spin_lock(&ctx->flc_lock);
                list_for_each_entry(lock, &ctx->flc_posix, fl_list)
                        ++(*fcntl_count);
                list_for_each_entry(lock, &ctx->flc_flock, fl_list)
                        ++(*flock_count);
                spin_unlock(&ctx->flc_lock);
        }
        dout("counted %d flock locks and %d fcntl locks\n",
             *flock_count, *fcntl_count);
}

/*
 * Given a pointer to a lock, convert it to a ceph filelock
 */
static int lock_to_ceph_filelock(struct file_lock *lock,
                                 struct ceph_filelock *cephlock)
{
        int err = 0;
        cephlock->start = cpu_to_le64(lock->fl_start);
        cephlock->length = cpu_to_le64(lock->fl_end - lock->fl_start + 1);
        cephlock->client = cpu_to_le64(0);
        cephlock->pid = cpu_to_le64((u64)lock->fl_pid);
        cephlock->owner = cpu_to_le64(secure_addr(lock->fl_owner));

        switch (lock->fl_type) {
        case F_RDLCK:
                cephlock->type = CEPH_LOCK_SHARED;
                break;
        case F_WRLCK:
                cephlock->type = CEPH_LOCK_EXCL;
                break;
        case F_UNLCK:
                cephlock->type = CEPH_LOCK_UNLOCK;
                break;
        default:
                dout("Have unknown lock type %d\n", lock->fl_type);
                err = -EINVAL;
        }

        return err;
}

/*
 * Encode the flock and fcntl locks for the given inode into the ceph_filelock
 * array. Must be called with inode->i_lock already held.
 * If we encounter more of a specific lock type than expected, return -ENOSPC.
 */
int ceph_encode_locks_to_buffer(struct inode *inode,
                                struct ceph_filelock *flocks,
                                int num_fcntl_locks, int num_flock_locks)
{
        struct file_lock *lock;
        struct file_lock_context *ctx = inode->i_flctx;
        int err = 0;
        int seen_fcntl = 0;
        int seen_flock = 0;
        int l = 0;

        dout("encoding %d flock and %d fcntl locks\n", num_flock_locks,
             num_fcntl_locks);

        if (!ctx)
                return 0;

        spin_lock(&ctx->flc_lock);
        list_for_each_entry(lock, &ctx->flc_posix, fl_list) {
                ++seen_fcntl;
                if (seen_fcntl > num_fcntl_locks) {
                        err = -ENOSPC;
                        goto fail;
                }
                err = lock_to_ceph_filelock(lock, &flocks[l]);
                if (err)
                        goto fail;
                ++l;
        }
        list_for_each_entry(lock, &ctx->flc_flock, fl_list) {
                ++seen_flock;
                if (seen_flock > num_flock_locks) {
                        err = -ENOSPC;
                        goto fail;
                }
                err = lock_to_ceph_filelock(lock, &flocks[l]);
                if (err)
                        goto fail;
                ++l;
        }
fail:
        spin_unlock(&ctx->flc_lock);
        return err;
}

/*
 * Copy the encoded flock and fcntl locks into the pagelist.
 * Format is: #fcntl locks, sequential fcntl locks, #flock locks,
 * sequential flock locks.
 * Returns zero on success.
 */
int ceph_locks_to_pagelist(struct ceph_filelock *flocks,
                           struct ceph_pagelist *pagelist,
                           int num_fcntl_locks, int num_flock_locks)
{
        int err = 0;
        __le32 nlocks;

        nlocks = cpu_to_le32(num_fcntl_locks);
        err = ceph_pagelist_append(pagelist, &nlocks, sizeof(nlocks));
        if (err)
                goto out_fail;

        if (num_fcntl_locks > 0) {
                err = ceph_pagelist_append(pagelist, flocks,
                                           num_fcntl_locks * sizeof(*flocks));
                if (err)
                        goto out_fail;
        }

        nlocks = cpu_to_le32(num_flock_locks);
        err = ceph_pagelist_append(pagelist, &nlocks, sizeof(nlocks));
        if (err)
                goto out_fail;

        if (num_flock_locks > 0) {
                err = ceph_pagelist_append(pagelist, &flocks[num_fcntl_locks],
                                           num_flock_locks * sizeof(*flocks));
        }
out_fail:
        return err;
}
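
/*
 * Illustrative sketch of how the three helpers above fit together when lock
 * state is serialized (e.g. for MDS reconnect).  This function is not part of
 * locks.c; the name and the simplified locking are assumptions, and the real
 * caller in mds_client.c additionally copes with the lock count changing
 * between the count and encode steps.
 */
static int example_encode_inode_locks(struct inode *inode,
                                      struct ceph_pagelist *pagelist)
{
        struct ceph_filelock *flocks;
        int num_fcntl_locks, num_flock_locks;
        int err;

        /* 1) count the locks so a big-enough flat array can be allocated */
        ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks);

        flocks = kmalloc_array(num_fcntl_locks + num_flock_locks,
                               sizeof(*flocks), GFP_NOFS);
        if (!flocks)
                return -ENOMEM;

        /* 2) encode fcntl locks first, then flock locks, into the array */
        err = ceph_encode_locks_to_buffer(inode, flocks,
                                          num_fcntl_locks, num_flock_locks);
        if (!err)
                /* 3) append "#fcntl, fcntl..., #flock, flock..." to pagelist */
                err = ceph_locks_to_pagelist(flocks, pagelist,
                                             num_fcntl_locks, num_flock_locks);
        kfree(flocks);
        return err;
}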
