This source file includes the following definitions:
- atomic_inc_return_safe
- atomic_dec_return_safe
- rbd_dev_id_to_minor
- minor_to_rbd_dev_id
- __rbd_is_lock_owner
- rbd_is_lock_owner
- supported_features_show
- rbd_bus_is_visible
- rbd_root_dev_release
- __printf
- pending_result_dec
- rbd_open
- rbd_release
- rbd_ioctl_set_ro
- rbd_ioctl
- rbd_compat_ioctl
- rbd_client_create
- __rbd_get_client
- rbd_client_find
- parse_rbd_opts_token
- obj_op_name
- rbd_client_release
- rbd_put_client
- rbd_get_client
- rbd_image_format_valid
- rbd_dev_ondisk_valid
- rbd_obj_bytes
- rbd_init_layout
- rbd_header_from_disk
- _rbd_dev_v1_snap_name
- snapid_compare_reverse
- rbd_dev_snap_index
- rbd_dev_v1_snap_name
- rbd_snap_name
- rbd_snap_size
- rbd_snap_features
- rbd_dev_mapping_set
- rbd_dev_mapping_clear
- zero_bvec
- zero_bios
- zero_bvecs
- rbd_obj_zero_range
- rbd_obj_request_put
- rbd_img_request_put
- rbd_img_obj_request_add
- rbd_img_obj_request_del
- rbd_osd_submit
- img_request_layered_set
- img_request_layered_clear
- img_request_layered_test
- rbd_obj_is_entire
- rbd_obj_is_tail
- rbd_obj_copyup_enabled
- rbd_obj_img_extents_bytes
- rbd_img_is_write
- rbd_osd_req_callback
- rbd_osd_format_read
- rbd_osd_format_write
- __rbd_obj_add_osd_request
- rbd_obj_add_osd_request
- rbd_obj_request_create
- rbd_obj_request_destroy
- rbd_dev_unparent
- rbd_dev_parent_put
- rbd_dev_parent_get
- rbd_img_request_create
- rbd_img_request_destroy
- __rbd_object_map_index
- __rbd_object_map_get
- __rbd_object_map_set
- rbd_object_map_get
- use_object_map
- rbd_object_map_may_exist
- rbd_object_map_name
- rbd_object_map_lock
- rbd_object_map_unlock
- decode_object_map_header
- __rbd_object_map_load
- rbd_object_map_free
- rbd_object_map_load
- rbd_object_map_open
- rbd_object_map_close
- rbd_object_map_update_finish
- rbd_object_map_callback
- update_needed
- rbd_cls_object_map_update
- rbd_object_map_update
- prune_extents
- rbd_obj_calc_img_extents
- rbd_osd_setup_data
- rbd_osd_setup_stat
- rbd_osd_setup_copyup
- rbd_obj_init_read
- __rbd_osd_setup_write_ops
- rbd_obj_init_write
- truncate_or_zero_opcode
- __rbd_osd_setup_discard_ops
- rbd_obj_init_discard
- __rbd_osd_setup_zeroout_ops
- rbd_obj_init_zeroout
- count_write_ops
- rbd_osd_setup_write_ops
- __rbd_img_fill_request
- alloc_object_extent
- rbd_layout_is_fancy
- rbd_img_fill_request_nocopy
- rbd_img_fill_request
- rbd_img_fill_nodata
- set_bio_pos
- count_bio_bvecs
- copy_bio_bvecs
- __rbd_img_fill_from_bio
- rbd_img_fill_from_bio
- set_bvec_pos
- count_bvecs
- copy_bvecs
- __rbd_img_fill_from_bvecs
- rbd_img_fill_from_bvecs
- rbd_img_handle_request_work
- rbd_img_schedule
- rbd_obj_may_exist
- rbd_obj_read_object
- rbd_obj_read_from_parent
- rbd_obj_advance_read
- rbd_obj_write_is_noop
- rbd_obj_write_pre_object_map
- rbd_obj_write_object
- is_zero_bvecs
- rbd_obj_copyup_empty_snapc
- rbd_obj_copyup_current_snapc
- setup_copyup_bvecs
- rbd_obj_copyup_read_parent
- rbd_obj_copyup_object_maps
- rbd_obj_copyup_write_object
- rbd_obj_advance_copyup
- rbd_obj_write_post_object_map
- rbd_obj_advance_write
- __rbd_obj_handle_request
- rbd_obj_handle_request
- need_exclusive_lock
- rbd_lock_add_request
- rbd_lock_del_request
- rbd_img_exclusive_lock
- rbd_img_object_requests
- rbd_img_advance
- __rbd_img_handle_request
- rbd_img_handle_request
- rbd_cid_equal
- rbd_get_cid
- rbd_set_owner_cid
- format_lock_cookie
- __rbd_lock
- rbd_lock
- rbd_unlock
- __rbd_notify_op_lock
- rbd_notify_op_lock
- rbd_notify_acquired_lock
- rbd_notify_released_lock
- rbd_request_lock
- wake_lock_waiters
- get_lock_owner_info
- find_watcher
- rbd_try_lock
- rbd_post_acquire_action
- rbd_try_acquire_lock
- rbd_acquire_lock
- rbd_quiesce_lock
- rbd_pre_release_action
- __rbd_release_lock
- rbd_release_lock
- rbd_release_lock_work
- maybe_kick_acquire
- rbd_handle_acquired_lock
- rbd_handle_released_lock
- rbd_handle_request_lock
- __rbd_acknowledge_notify
- rbd_acknowledge_notify
- rbd_acknowledge_notify_result
- rbd_watch_cb
- rbd_watch_errcb
- __rbd_register_watch
- __rbd_unregister_watch
- rbd_register_watch
- cancel_tasks_sync
- rbd_unregister_watch
- rbd_reacquire_lock
- rbd_reregister_watch
- rbd_obj_method_sync
- rbd_queue_workfn
- rbd_queue_rq
- rbd_free_disk
- rbd_obj_read_sync
- rbd_dev_v1_header_info
- rbd_exists_validate
- rbd_dev_update_size
- rbd_dev_refresh
- rbd_init_request
- rbd_init_disk
- dev_to_rbd_dev
- rbd_size_show
- rbd_features_show
- rbd_major_show
- rbd_minor_show
- rbd_client_addr_show
- rbd_client_id_show
- rbd_cluster_fsid_show
- rbd_config_info_show
- rbd_pool_show
- rbd_pool_id_show
- rbd_pool_ns_show
- rbd_name_show
- rbd_image_id_show
- rbd_snap_show
- rbd_snap_id_show
- rbd_parent_show
- rbd_image_refresh
- rbd_spec_get
- rbd_spec_put
- rbd_spec_alloc
- rbd_spec_free
- rbd_dev_free
- rbd_dev_release
- __rbd_dev_create
- rbd_dev_create
- rbd_dev_destroy
- _rbd_dev_v2_snap_size
- rbd_dev_v2_image_size
- rbd_dev_v2_object_prefix
- _rbd_dev_v2_snap_features
- rbd_dev_v2_features
- rbd_dev_v2_get_flags
- decode_parent_image_spec
- __get_parent_info
- __get_parent_info_legacy
- get_parent_info
- rbd_dev_v2_parent_info
- rbd_dev_v2_striping_info
- rbd_dev_v2_data_pool
- rbd_dev_image_name
- rbd_v1_snap_id_by_name
- rbd_v2_snap_id_by_name
- rbd_snap_id_by_name
- rbd_spec_fill_snap_id
- rbd_spec_fill_names
- rbd_dev_v2_snap_context
- rbd_dev_v2_snap_name
- rbd_dev_v2_header_info
- rbd_dev_header_info
- next_token
- dup_token
- rbd_add_parse_args
- rbd_dev_image_unlock
- rbd_add_acquire_lock
- rbd_dev_image_id
- rbd_dev_unprobe
- rbd_dev_v2_header_onetime
- rbd_dev_probe_parent
- rbd_dev_device_release
- rbd_dev_device_setup
- rbd_dev_header_name
- rbd_dev_image_release
- rbd_dev_image_probe
- do_rbd_add
- add_store
- add_single_major_store
- rbd_dev_remove_parent
- do_rbd_remove
- remove_store
- remove_single_major_store
- rbd_sysfs_init
- rbd_sysfs_cleanup
- rbd_slab_init
- rbd_slab_exit
- rbd_init
- rbd_exit
31 #include <linux/ceph/libceph.h>
32 #include <linux/ceph/osd_client.h>
33 #include <linux/ceph/mon_client.h>
34 #include <linux/ceph/cls_lock_client.h>
35 #include <linux/ceph/striper.h>
36 #include <linux/ceph/decode.h>
37 #include <linux/parser.h>
38 #include <linux/bsearch.h>
39
40 #include <linux/kernel.h>
41 #include <linux/device.h>
42 #include <linux/module.h>
43 #include <linux/blk-mq.h>
44 #include <linux/fs.h>
45 #include <linux/blkdev.h>
46 #include <linux/slab.h>
47 #include <linux/idr.h>
48 #include <linux/workqueue.h>
49
50 #include "rbd_types.h"
51
52 #define RBD_DEBUG /* Activate rbd_assert() calls */
53
54
55 /*
56  * Increment the given counter and return its updated value.
57  * If the counter is already 0 it will not be incremented.  If it is
58  * already at its maximum value, return -EINVAL without updating it.
59  */
60 static int atomic_inc_return_safe(atomic_t *v)
61 {
62 unsigned int counter;
63
64 counter = (unsigned int)atomic_fetch_add_unless(v, 1, 0);
65 if (counter <= (unsigned int)INT_MAX)
66 return (int)counter;
67
68 atomic_dec(v);
69
70 return -EINVAL;
71 }
72
73 /* Decrement the given counter; if it would go negative, restore it and return -EINVAL. */
74 static int atomic_dec_return_safe(atomic_t *v)
75 {
76 int counter;
77
78 counter = atomic_dec_return(v);
79 if (counter >= 0)
80 return counter;
81
82 atomic_inc(v);
83
84 return -EINVAL;
85 }
86
87 #define RBD_DRV_NAME "rbd"
88
89 #define RBD_MINORS_PER_MAJOR 256
90 #define RBD_SINGLE_MAJOR_PART_SHIFT 4
91
92 #define RBD_MAX_PARENT_CHAIN_LEN 16
93
94 #define RBD_SNAP_DEV_NAME_PREFIX "snap_"
95 #define RBD_MAX_SNAP_NAME_LEN \
96 (NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))
97
98 #define RBD_MAX_SNAP_COUNT 510
99
100 #define RBD_SNAP_HEAD_NAME "-"
101
102 #define BAD_SNAP_INDEX U32_MAX
103
104
105 #define RBD_IMAGE_NAME_LEN_MAX (PAGE_SIZE - sizeof (__le32) - 1)
106 #define RBD_IMAGE_ID_LEN_MAX 64
107
108 #define RBD_OBJ_PREFIX_LEN_MAX 64
109
110 #define RBD_NOTIFY_TIMEOUT 5
111 #define RBD_RETRY_DELAY msecs_to_jiffies(1000)
112
113
114 /* rbd image feature bits; the values must match those used by librbd and the OSDs */
115 #define RBD_FEATURE_LAYERING (1ULL<<0)
116 #define RBD_FEATURE_STRIPINGV2 (1ULL<<1)
117 #define RBD_FEATURE_EXCLUSIVE_LOCK (1ULL<<2)
118 #define RBD_FEATURE_OBJECT_MAP (1ULL<<3)
119 #define RBD_FEATURE_FAST_DIFF (1ULL<<4)
120 #define RBD_FEATURE_DEEP_FLATTEN (1ULL<<5)
121 #define RBD_FEATURE_DATA_POOL (1ULL<<7)
122 #define RBD_FEATURE_OPERATIONS (1ULL<<8)
123
124 #define RBD_FEATURES_ALL (RBD_FEATURE_LAYERING | \
125 RBD_FEATURE_STRIPINGV2 | \
126 RBD_FEATURE_EXCLUSIVE_LOCK | \
127 RBD_FEATURE_OBJECT_MAP | \
128 RBD_FEATURE_FAST_DIFF | \
129 RBD_FEATURE_DEEP_FLATTEN | \
130 RBD_FEATURE_DATA_POOL | \
131 RBD_FEATURE_OPERATIONS)
132
133
134 /* Features supported by this (client software) implementation. */
135 #define RBD_FEATURES_SUPPORTED (RBD_FEATURES_ALL)
136
137
138
139
140 /* An rbd device name will be "rbd#", where # is the unique dev_id. */
141 #define DEV_NAME_LEN 32
142
143 /*
144  * block device image metadata (in-memory version)
145  */
146 struct rbd_image_header {
147
148 char *object_prefix;
149 __u8 obj_order;
150 u64 stripe_unit;
151 u64 stripe_count;
152 s64 data_pool_id;
153 u64 features;
154
155
156 u64 image_size;
157 struct ceph_snap_context *snapc;
158 char *snap_names;
159 u64 *snap_sizes;
160 };
161
162 /*
163  * An rbd image specification.
164  *
165  * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
166  * identify an image.  Each rbd_device structure includes a pointer to
167  * an rbd_spec structure that encapsulates this identity.
168  *
169  * Each of the id's in an rbd_spec has an associated name.  For a
170  * user-mapped image, the names are supplied and the id's associated
171  * with them are looked up.  For a layered image, a parent image is
172  * defined by the tuple, and the names are looked up.
173  *
174  * An rbd_device structure contains a parent_spec pointer which is
175  * non-null if the image it represents is a child in a layered image.
176  * This pointer refers to the rbd_spec structure used by the parent
177  * rbd_device for its own identity (i.e., the structure is shared
178  * between the parent and child).
179  *
180  * Since these structures are populated once, during the discovery
181  * phase of image construction, they are effectively immutable, so
182  * no effort is made to synchronize access to them.
183  */
184
185
186
187 struct rbd_spec {
188 u64 pool_id;
189 const char *pool_name;
190 const char *pool_ns;
191
192 const char *image_id;
193 const char *image_name;
194
195 u64 snap_id;
196 const char *snap_name;
197
198 struct kref kref;
199 };
200
201 /*
202  * an instance of the client; multiple rbd devices may share an rbd client
203  */
204 struct rbd_client {
205 struct ceph_client *client;
206 struct kref kref;
207 struct list_head node;
208 };
209
210 struct pending_result {
211 int result;
212 int num_pending;
213 };
214
215 struct rbd_img_request;
216
217 enum obj_request_type {
218 OBJ_REQUEST_NODATA = 1,
219 OBJ_REQUEST_BIO,
220 OBJ_REQUEST_BVECS,
221 OBJ_REQUEST_OWN_BVECS,
222 };
223
224 enum obj_operation_type {
225 OBJ_OP_READ = 1,
226 OBJ_OP_WRITE,
227 OBJ_OP_DISCARD,
228 OBJ_OP_ZEROOUT,
229 };
230
231 #define RBD_OBJ_FLAG_DELETION (1U << 0)
232 #define RBD_OBJ_FLAG_COPYUP_ENABLED (1U << 1)
233 #define RBD_OBJ_FLAG_COPYUP_ZEROS (1U << 2)
234 #define RBD_OBJ_FLAG_MAY_EXIST (1U << 3)
235 #define RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT (1U << 4)
236
237 enum rbd_obj_read_state {
238 RBD_OBJ_READ_START = 1,
239 RBD_OBJ_READ_OBJECT,
240 RBD_OBJ_READ_PARENT,
241 };
242
243 /*
244  * Writes go through the following state machine to deal with object
245  * maps and copyup (populating a child's object from the parent image
246  * before its first modification):
247  *
248  * - RBD_OBJ_WRITE_START: decide whether the write can be a no-op for
249  *   a nonexistent object or must actually be issued.
250  * - RBD_OBJ_WRITE_PRE_OBJECT_MAP: mark the object OBJECT_EXISTS (or
251  *   OBJECT_PENDING for a deletion) in the object map first.
252  * - RBD_OBJ_WRITE_OBJECT: issue the (possibly stat-guarded) write;
253  *   -ENOENT from the OSD means the object doesn't exist yet and
254  *   copyup is needed.
255  * - RBD_OBJ_WRITE_COPYUP: read the overlapping extents from the
256  *   parent and write them, together with the original request, to
257  *   the child object (see enum rbd_obj_copyup_state below).
258  * - RBD_OBJ_WRITE_POST_OBJECT_MAP: finalize the object map entry
259  *   (e.g. OBJECT_NONEXISTENT after a whole-object discard).
260  */
261
262
263
264
265
266
267
268 enum rbd_obj_write_state {
269 RBD_OBJ_WRITE_START = 1,
270 RBD_OBJ_WRITE_PRE_OBJECT_MAP,
271 RBD_OBJ_WRITE_OBJECT,
272 __RBD_OBJ_WRITE_COPYUP,
273 RBD_OBJ_WRITE_COPYUP,
274 RBD_OBJ_WRITE_POST_OBJECT_MAP,
275 };
276
277 enum rbd_obj_copyup_state {
278 RBD_OBJ_COPYUP_START = 1,
279 RBD_OBJ_COPYUP_READ_PARENT,
280 __RBD_OBJ_COPYUP_OBJECT_MAPS,
281 RBD_OBJ_COPYUP_OBJECT_MAPS,
282 __RBD_OBJ_COPYUP_WRITE_OBJECT,
283 RBD_OBJ_COPYUP_WRITE_OBJECT,
284 };
285
286 struct rbd_obj_request {
287 struct ceph_object_extent ex;
288 unsigned int flags;
289 union {
290 enum rbd_obj_read_state read_state;
291 enum rbd_obj_write_state write_state;
292 };
293
294 struct rbd_img_request *img_request;
295 struct ceph_file_extent *img_extents;
296 u32 num_img_extents;
297
298 union {
299 struct ceph_bio_iter bio_pos;
300 struct {
301 struct ceph_bvec_iter bvec_pos;
302 u32 bvec_count;
303 u32 bvec_idx;
304 };
305 };
306
307 enum rbd_obj_copyup_state copyup_state;
308 struct bio_vec *copyup_bvecs;
309 u32 copyup_bvec_count;
310
311 struct list_head osd_reqs;
312
313 struct mutex state_mutex;
314 struct pending_result pending;
315 struct kref kref;
316 };
317
318 enum img_req_flags {
319 IMG_REQ_CHILD,
320 IMG_REQ_LAYERED,
321 };
322
323 enum rbd_img_state {
324 RBD_IMG_START = 1,
325 RBD_IMG_EXCLUSIVE_LOCK,
326 __RBD_IMG_OBJECT_REQUESTS,
327 RBD_IMG_OBJECT_REQUESTS,
328 };
329
330 struct rbd_img_request {
331 struct rbd_device *rbd_dev;
332 enum obj_operation_type op_type;
333 enum obj_request_type data_type;
334 unsigned long flags;
335 enum rbd_img_state state;
336 union {
337 u64 snap_id;
338 struct ceph_snap_context *snapc;
339 };
340 union {
341 struct request *rq;
342 struct rbd_obj_request *obj_request;
343 };
344
345 struct list_head lock_item;
346 struct list_head object_extents;
347
348 struct mutex state_mutex;
349 struct pending_result pending;
350 struct work_struct work;
351 int work_result;
352 struct kref kref;
353 };
354
355 #define for_each_obj_request(ireq, oreq) \
356 list_for_each_entry(oreq, &(ireq)->object_extents, ex.oe_item)
357 #define for_each_obj_request_safe(ireq, oreq, n) \
358 list_for_each_entry_safe(oreq, n, &(ireq)->object_extents, ex.oe_item)
359
360 enum rbd_watch_state {
361 RBD_WATCH_STATE_UNREGISTERED,
362 RBD_WATCH_STATE_REGISTERED,
363 RBD_WATCH_STATE_ERROR,
364 };
365
366 enum rbd_lock_state {
367 RBD_LOCK_STATE_UNLOCKED,
368 RBD_LOCK_STATE_LOCKED,
369 RBD_LOCK_STATE_RELEASING,
370 };
371
372
373 struct rbd_client_id {
374 u64 gid;
375 u64 handle;
376 };
377
378 struct rbd_mapping {
379 u64 size;
380 u64 features;
381 };
382
383 /*
384  * a single device
385  */
386 struct rbd_device {
387 int dev_id;
388
389 int major;
390 int minor;
391 struct gendisk *disk;
392
393 u32 image_format;
394 struct rbd_client *rbd_client;
395
396 char name[DEV_NAME_LEN];
397
398 spinlock_t lock;
399
400 struct rbd_image_header header;
401 unsigned long flags;
402 struct rbd_spec *spec;
403 struct rbd_options *opts;
404 char *config_info;
405
406 struct ceph_object_id header_oid;
407 struct ceph_object_locator header_oloc;
408
409 struct ceph_file_layout layout;
410
411 struct mutex watch_mutex;
412 enum rbd_watch_state watch_state;
413 struct ceph_osd_linger_request *watch_handle;
414 u64 watch_cookie;
415 struct delayed_work watch_dwork;
416
417 struct rw_semaphore lock_rwsem;
418 enum rbd_lock_state lock_state;
419 char lock_cookie[32];
420 struct rbd_client_id owner_cid;
421 struct work_struct acquired_lock_work;
422 struct work_struct released_lock_work;
423 struct delayed_work lock_dwork;
424 struct work_struct unlock_work;
425 spinlock_t lock_lists_lock;
426 struct list_head acquiring_list;
427 struct list_head running_list;
428 struct completion acquire_wait;
429 int acquire_err;
430 struct completion releasing_wait;
431
432 spinlock_t object_map_lock;
433 u8 *object_map;
434 u64 object_map_size;
435 u64 object_map_flags;
436
437 struct workqueue_struct *task_wq;
438
439 struct rbd_spec *parent_spec;
440 u64 parent_overlap;
441 atomic_t parent_ref;
442 struct rbd_device *parent;
443
444
445 struct blk_mq_tag_set tag_set;
446
447
448 struct rw_semaphore header_rwsem;
449
450 struct rbd_mapping mapping;
451
452 struct list_head node;
453
454
455 struct device dev;
456 unsigned long open_count;
457 };
458
459 /*
460  * Flag bits for rbd_dev->flags:
461  * - REMOVING (which is coupled with rbd_dev->open_count) is
462  *   protected by rbd_dev->lock
463  */
464 enum rbd_dev_flags {
465 RBD_DEV_FLAG_EXISTS, /* mapped snapshot has not been deleted */
466 RBD_DEV_FLAG_REMOVING, /* this mapping is being removed */
467 };
468
469 static DEFINE_MUTEX(client_mutex);
470
471 static LIST_HEAD(rbd_dev_list);
472 static DEFINE_SPINLOCK(rbd_dev_list_lock);
473
474 static LIST_HEAD(rbd_client_list);
475 static DEFINE_SPINLOCK(rbd_client_list_lock);
476
477
478
479 static struct kmem_cache *rbd_img_request_cache;
480 static struct kmem_cache *rbd_obj_request_cache;
481
482 static int rbd_major;
483 static DEFINE_IDA(rbd_dev_id_ida);
484
485 static struct workqueue_struct *rbd_wq;
486
487 static struct ceph_snap_context rbd_empty_snapc = {
488 .nref = REFCOUNT_INIT(1),
489 };
490
491
492
493
494 static bool single_major = true;
495 module_param(single_major, bool, 0444);
496 MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: true)");
497
498 static ssize_t add_store(struct bus_type *bus, const char *buf, size_t count);
499 static ssize_t remove_store(struct bus_type *bus, const char *buf,
500 size_t count);
501 static ssize_t add_single_major_store(struct bus_type *bus, const char *buf,
502 size_t count);
503 static ssize_t remove_single_major_store(struct bus_type *bus, const char *buf,
504 size_t count);
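/*
 * These bus attributes form the sysfs control interface under
 * /sys/bus/rbd/.  A typical exchange (illustrative only; the exact
 * monitor addresses and option string depend on the cluster) is:
 *
 *   echo "1.2.3.4:6789 name=admin,secret=<key> rbd myimage -" \
 *       > /sys/bus/rbd/add_single_major
 *   echo <dev_id> > /sys/bus/rbd/remove_single_major
 */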
505 static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth);
506
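/*
 * In single-major mode every image shares one major number and the
 * dev_id is carried in the upper bits of the minor; the low
 * RBD_SINGLE_MAJOR_PART_SHIFT (4) bits are reserved for partitions,
 * i.e. 16 minors per mapped device.
 */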
507 static int rbd_dev_id_to_minor(int dev_id)
508 {
509 return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
510 }
511
512 static int minor_to_rbd_dev_id(int minor)
513 {
514 return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
515 }
516
517 static bool __rbd_is_lock_owner(struct rbd_device *rbd_dev)
518 {
519 lockdep_assert_held(&rbd_dev->lock_rwsem);
520
521 return rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED ||
522 rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING;
523 }
524
525 static bool rbd_is_lock_owner(struct rbd_device *rbd_dev)
526 {
527 bool is_lock_owner;
528
529 down_read(&rbd_dev->lock_rwsem);
530 is_lock_owner = __rbd_is_lock_owner(rbd_dev);
531 up_read(&rbd_dev->lock_rwsem);
532 return is_lock_owner;
533 }
534
535 static ssize_t supported_features_show(struct bus_type *bus, char *buf)
536 {
537 return sprintf(buf, "0x%llx\n", RBD_FEATURES_SUPPORTED);
538 }
539
540 static BUS_ATTR_WO(add);
541 static BUS_ATTR_WO(remove);
542 static BUS_ATTR_WO(add_single_major);
543 static BUS_ATTR_WO(remove_single_major);
544 static BUS_ATTR_RO(supported_features);
545
546 static struct attribute *rbd_bus_attrs[] = {
547 &bus_attr_add.attr,
548 &bus_attr_remove.attr,
549 &bus_attr_add_single_major.attr,
550 &bus_attr_remove_single_major.attr,
551 &bus_attr_supported_features.attr,
552 NULL,
553 };
554
555 static umode_t rbd_bus_is_visible(struct kobject *kobj,
556 struct attribute *attr, int index)
557 {
558 if (!single_major &&
559 (attr == &bus_attr_add_single_major.attr ||
560 attr == &bus_attr_remove_single_major.attr))
561 return 0;
562
563 return attr->mode;
564 }
565
566 static const struct attribute_group rbd_bus_group = {
567 .attrs = rbd_bus_attrs,
568 .is_visible = rbd_bus_is_visible,
569 };
570 __ATTRIBUTE_GROUPS(rbd_bus);
571
572 static struct bus_type rbd_bus_type = {
573 .name = "rbd",
574 .bus_groups = rbd_bus_groups,
575 };
576
577 static void rbd_root_dev_release(struct device *dev)
578 {
579 }
580
581 static struct device rbd_root_dev = {
582 .init_name = "rbd",
583 .release = rbd_root_dev_release,
584 };
585
586 static __printf(2, 3)
587 void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
588 {
589 struct va_format vaf;
590 va_list args;
591
592 va_start(args, fmt);
593 vaf.fmt = fmt;
594 vaf.va = &args;
595
596 if (!rbd_dev)
597 printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
598 else if (rbd_dev->disk)
599 printk(KERN_WARNING "%s: %s: %pV\n",
600 RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
601 else if (rbd_dev->spec && rbd_dev->spec->image_name)
602 printk(KERN_WARNING "%s: image %s: %pV\n",
603 RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
604 else if (rbd_dev->spec && rbd_dev->spec->image_id)
605 printk(KERN_WARNING "%s: id %s: %pV\n",
606 RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
607 else
608 printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
609 RBD_DRV_NAME, rbd_dev, &vaf);
610 va_end(args);
611 }
612
613 #ifdef RBD_DEBUG
614 #define rbd_assert(expr) \
615 if (unlikely(!(expr))) { \
616 printk(KERN_ERR "\nAssertion failure in %s() " \
617 "at line %d:\n\n" \
618 "\trbd_assert(%s);\n\n", \
619 __func__, __LINE__, #expr); \
620 BUG(); \
621 }
622 #else
623 # define rbd_assert(expr) ((void) 0)
624 #endif
625
626 static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
627
628 static int rbd_dev_refresh(struct rbd_device *rbd_dev);
629 static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
630 static int rbd_dev_header_info(struct rbd_device *rbd_dev);
631 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev);
632 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
633 u64 snap_id);
634 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
635 u8 *order, u64 *snap_size);
636 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
637 u64 *snap_features);
638 static int rbd_dev_v2_get_flags(struct rbd_device *rbd_dev);
639
640 static void rbd_obj_handle_request(struct rbd_obj_request *obj_req, int result);
641 static void rbd_img_handle_request(struct rbd_img_request *img_req, int result);
642
643
644
645
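/*
 * A pending_result aggregates the completion of several sub-requests:
 * the first nonzero result sticks, and the decrement below returns
 * true only when the last outstanding sub-request has finished, at
 * which point *result holds the overall outcome.
 */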
646 static bool pending_result_dec(struct pending_result *pending, int *result)
647 {
648 rbd_assert(pending->num_pending > 0);
649
650 if (*result && !pending->result)
651 pending->result = *result;
652 if (--pending->num_pending)
653 return false;
654
655 *result = pending->result;
656 return true;
657 }
658
659 static int rbd_open(struct block_device *bdev, fmode_t mode)
660 {
661 struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
662 bool removing = false;
663
664 spin_lock_irq(&rbd_dev->lock);
665 if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
666 removing = true;
667 else
668 rbd_dev->open_count++;
669 spin_unlock_irq(&rbd_dev->lock);
670 if (removing)
671 return -ENOENT;
672
673 (void) get_device(&rbd_dev->dev);
674
675 return 0;
676 }
677
678 static void rbd_release(struct gendisk *disk, fmode_t mode)
679 {
680 struct rbd_device *rbd_dev = disk->private_data;
681 unsigned long open_count_before;
682
683 spin_lock_irq(&rbd_dev->lock);
684 open_count_before = rbd_dev->open_count--;
685 spin_unlock_irq(&rbd_dev->lock);
686 rbd_assert(open_count_before > 0);
687
688 put_device(&rbd_dev->dev);
689 }
690
691 static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
692 {
693 int ro;
694
695 if (get_user(ro, (int __user *)arg))
696 return -EFAULT;
697
698 /* Snapshots can't be marked read-write */
699 if (rbd_dev->spec->snap_id != CEPH_NOSNAP && !ro)
700 return -EROFS;
701
702 /* Let blkdev_roset() handle it */
703 return -ENOTTY;
704 }
705
706 static int rbd_ioctl(struct block_device *bdev, fmode_t mode,
707 unsigned int cmd, unsigned long arg)
708 {
709 struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
710 int ret;
711
712 switch (cmd) {
713 case BLKROSET:
714 ret = rbd_ioctl_set_ro(rbd_dev, arg);
715 break;
716 default:
717 ret = -ENOTTY;
718 }
719
720 return ret;
721 }
722
723 #ifdef CONFIG_COMPAT
724 static int rbd_compat_ioctl(struct block_device *bdev, fmode_t mode,
725 unsigned int cmd, unsigned long arg)
726 {
727 return rbd_ioctl(bdev, mode, cmd, arg);
728 }
729 #endif
730
731 static const struct block_device_operations rbd_bd_ops = {
732 .owner = THIS_MODULE,
733 .open = rbd_open,
734 .release = rbd_release,
735 .ioctl = rbd_ioctl,
736 #ifdef CONFIG_COMPAT
737 .compat_ioctl = rbd_compat_ioctl,
738 #endif
739 };
740
741 /*
742  * Initialize an rbd client instance.  Success or not, this function
743  * consumes ceph_opts.
744  */
745 static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
746 {
747 struct rbd_client *rbdc;
748 int ret = -ENOMEM;
749
750 dout("%s:\n", __func__);
751 rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
752 if (!rbdc)
753 goto out_opt;
754
755 kref_init(&rbdc->kref);
756 INIT_LIST_HEAD(&rbdc->node);
757
758 rbdc->client = ceph_create_client(ceph_opts, rbdc);
759 if (IS_ERR(rbdc->client))
760 goto out_rbdc;
761 ceph_opts = NULL;
762
763 ret = ceph_open_session(rbdc->client);
764 if (ret < 0)
765 goto out_client;
766
767 spin_lock(&rbd_client_list_lock);
768 list_add_tail(&rbdc->node, &rbd_client_list);
769 spin_unlock(&rbd_client_list_lock);
770
771 dout("%s: rbdc %p\n", __func__, rbdc);
772
773 return rbdc;
774 out_client:
775 ceph_destroy_client(rbdc->client);
776 out_rbdc:
777 kfree(rbdc);
778 out_opt:
779 if (ceph_opts)
780 ceph_destroy_options(ceph_opts);
781 dout("%s: error %d\n", __func__, ret);
782
783 return ERR_PTR(ret);
784 }
785
786 static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
787 {
788 kref_get(&rbdc->kref);
789
790 return rbdc;
791 }
792
793 /*
794  * Find a ceph client with specific addr and configuration.  If
795  * found, bump its reference count.
796  */
797 static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
798 {
799 struct rbd_client *client_node;
800 bool found = false;
801
802 if (ceph_opts->flags & CEPH_OPT_NOSHARE)
803 return NULL;
804
805 spin_lock(&rbd_client_list_lock);
806 list_for_each_entry(client_node, &rbd_client_list, node) {
807 if (!ceph_compare_options(ceph_opts, client_node->client)) {
808 __rbd_get_client(client_node);
809
810 found = true;
811 break;
812 }
813 }
814 spin_unlock(&rbd_client_list_lock);
815
816 return found ? client_node : NULL;
817 }
818
819
820
821
822 enum {
823 Opt_queue_depth,
824 Opt_alloc_size,
825 Opt_lock_timeout,
826 Opt_last_int,
827
828 Opt_pool_ns,
829 Opt_last_string,
830
831 Opt_read_only,
832 Opt_read_write,
833 Opt_lock_on_read,
834 Opt_exclusive,
835 Opt_notrim,
836 Opt_err
837 };
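/*
 * Tokens before Opt_last_int take an integer argument and tokens
 * between Opt_last_int and Opt_last_string take a string argument;
 * parse_rbd_opts_token() relies on this ordering.
 */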
838
839 static match_table_t rbd_opts_tokens = {
840 {Opt_queue_depth, "queue_depth=%d"},
841 {Opt_alloc_size, "alloc_size=%d"},
842 {Opt_lock_timeout, "lock_timeout=%d"},
843
844 {Opt_pool_ns, "_pool_ns=%s"},
845
846 {Opt_read_only, "read_only"},
847 {Opt_read_only, "ro"},
848 {Opt_read_write, "read_write"},
849 {Opt_read_write, "rw"},
850 {Opt_lock_on_read, "lock_on_read"},
851 {Opt_exclusive, "exclusive"},
852 {Opt_notrim, "notrim"},
853 {Opt_err, NULL}
854 };
855
856 struct rbd_options {
857 int queue_depth;
858 int alloc_size;
859 unsigned long lock_timeout;
860 bool read_only;
861 bool lock_on_read;
862 bool exclusive;
863 bool trim;
864 };
865
866 #define RBD_QUEUE_DEPTH_DEFAULT BLKDEV_MAX_RQ
867 #define RBD_ALLOC_SIZE_DEFAULT (64 * 1024)
868 #define RBD_LOCK_TIMEOUT_DEFAULT 0
869 #define RBD_READ_ONLY_DEFAULT false
870 #define RBD_LOCK_ON_READ_DEFAULT false
871 #define RBD_EXCLUSIVE_DEFAULT false
872 #define RBD_TRIM_DEFAULT true
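/*
 * These defaults can be overridden per mapping via the options string,
 * e.g. (assuming the userspace rbd CLI, which passes -o through to
 * the kernel): rbd map -o queue_depth=256,lock_on_read pool/image
 */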
873
874 struct parse_rbd_opts_ctx {
875 struct rbd_spec *spec;
876 struct rbd_options *opts;
877 };
878
879 static int parse_rbd_opts_token(char *c, void *private)
880 {
881 struct parse_rbd_opts_ctx *pctx = private;
882 substring_t argstr[MAX_OPT_ARGS];
883 int token, intval, ret;
884
885 token = match_token(c, rbd_opts_tokens, argstr);
886 if (token < Opt_last_int) {
887 ret = match_int(&argstr[0], &intval);
888 if (ret < 0) {
889 pr_err("bad option arg (not int) at '%s'\n", c);
890 return ret;
891 }
892 dout("got int token %d val %d\n", token, intval);
893 } else if (token > Opt_last_int && token < Opt_last_string) {
894 dout("got string token %d val %s\n", token, argstr[0].from);
895 } else {
896 dout("got token %d\n", token);
897 }
898
899 switch (token) {
900 case Opt_queue_depth:
901 if (intval < 1) {
902 pr_err("queue_depth out of range\n");
903 return -EINVAL;
904 }
905 pctx->opts->queue_depth = intval;
906 break;
907 case Opt_alloc_size:
908 if (intval < SECTOR_SIZE) {
909 pr_err("alloc_size out of range\n");
910 return -EINVAL;
911 }
912 if (!is_power_of_2(intval)) {
913 pr_err("alloc_size must be a power of 2\n");
914 return -EINVAL;
915 }
916 pctx->opts->alloc_size = intval;
917 break;
918 case Opt_lock_timeout:
919
920 if (intval < 0 || intval > INT_MAX / 1000) {
921 pr_err("lock_timeout out of range\n");
922 return -EINVAL;
923 }
924 pctx->opts->lock_timeout = msecs_to_jiffies(intval * 1000);
925 break;
926 case Opt_pool_ns:
927 kfree(pctx->spec->pool_ns);
928 pctx->spec->pool_ns = match_strdup(argstr);
929 if (!pctx->spec->pool_ns)
930 return -ENOMEM;
931 break;
932 case Opt_read_only:
933 pctx->opts->read_only = true;
934 break;
935 case Opt_read_write:
936 pctx->opts->read_only = false;
937 break;
938 case Opt_lock_on_read:
939 pctx->opts->lock_on_read = true;
940 break;
941 case Opt_exclusive:
942 pctx->opts->exclusive = true;
943 break;
944 case Opt_notrim:
945 pctx->opts->trim = false;
946 break;
947 default:
948
949 return -EINVAL;
950 }
951
952 return 0;
953 }
954
955 static char* obj_op_name(enum obj_operation_type op_type)
956 {
957 switch (op_type) {
958 case OBJ_OP_READ:
959 return "read";
960 case OBJ_OP_WRITE:
961 return "write";
962 case OBJ_OP_DISCARD:
963 return "discard";
964 case OBJ_OP_ZEROOUT:
965 return "zeroout";
966 default:
967 return "???";
968 }
969 }
970
971
972 /*
973  * Destroy the rbd client: remove it from the client list and tear
974  * down the ceph client.  Called via kref_put().
975  */
976 static void rbd_client_release(struct kref *kref)
977 {
978 struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);
979
980 dout("%s: rbdc %p\n", __func__, rbdc);
981 spin_lock(&rbd_client_list_lock);
982 list_del(&rbdc->node);
983 spin_unlock(&rbd_client_list_lock);
984
985 ceph_destroy_client(rbdc->client);
986 kfree(rbdc);
987 }
988
989
990 /*
991  * Drop a reference to an rbd client node; free it when the last ref is gone.
992  */
993 static void rbd_put_client(struct rbd_client *rbdc)
994 {
995 if (rbdc)
996 kref_put(&rbdc->kref, rbd_client_release);
997 }
998
999
1000 /*
1001  * Get a ceph client with specific addr and configuration, if one does
1002  * not exist create it.  Either way, ceph_opts is consumed.
1003  */
1004 static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
1005 {
1006 struct rbd_client *rbdc;
1007 int ret;
1008
1009 mutex_lock(&client_mutex);
1010 rbdc = rbd_client_find(ceph_opts);
1011 if (rbdc) {
1012 ceph_destroy_options(ceph_opts);
1013
1014 /*
1015  * Using an existing client.  Make sure ->pg_pools is up to
1016  * date before we look up the pool id in do_rbd_add().
1017  */
1018 ret = ceph_wait_for_latest_osdmap(rbdc->client,
1019 rbdc->client->options->mount_timeout);
1020 if (ret) {
1021 rbd_warn(NULL, "failed to get latest osdmap: %d", ret);
1022 rbd_put_client(rbdc);
1023 rbdc = ERR_PTR(ret);
1024 }
1025 } else {
1026 rbdc = rbd_client_create(ceph_opts);
1027 }
1028 mutex_unlock(&client_mutex);
1029
1030 return rbdc;
1031 }
1032
1033 static bool rbd_image_format_valid(u32 image_format)
1034 {
1035 return image_format == 1 || image_format == 2;
1036 }
1037
1038 static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
1039 {
1040 size_t size;
1041 u32 snap_count;
1042
1043
1044 if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
1045 return false;
1046
1047
1048
1049 if (ondisk->options.order < SECTOR_SHIFT)
1050 return false;
1051
1052
1053
1054 if (ondisk->options.order > 8 * sizeof (int) - 1)
1055 return false;
1056
1057
1058
1059
1060
1061 snap_count = le32_to_cpu(ondisk->snap_count);
1062 size = SIZE_MAX - sizeof (struct ceph_snap_context);
1063 if (snap_count > size / sizeof (__le64))
1064 return false;
1065
1066
1067
1068
1069
1070 size -= snap_count * sizeof (__le64);
1071 if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
1072 return false;
1073
1074 return true;
1075 }
1076
1077
1078
1079
1080 static u32 rbd_obj_bytes(struct rbd_image_header *header)
1081 {
1082 return 1U << header->obj_order;
1083 }
1084
1085 static void rbd_init_layout(struct rbd_device *rbd_dev)
1086 {
1087 if (rbd_dev->header.stripe_unit == 0 ||
1088 rbd_dev->header.stripe_count == 0) {
1089 rbd_dev->header.stripe_unit = rbd_obj_bytes(&rbd_dev->header);
1090 rbd_dev->header.stripe_count = 1;
1091 }
1092
1093 rbd_dev->layout.stripe_unit = rbd_dev->header.stripe_unit;
1094 rbd_dev->layout.stripe_count = rbd_dev->header.stripe_count;
1095 rbd_dev->layout.object_size = rbd_obj_bytes(&rbd_dev->header);
1096 rbd_dev->layout.pool_id = rbd_dev->header.data_pool_id == CEPH_NOPOOL ?
1097 rbd_dev->spec->pool_id : rbd_dev->header.data_pool_id;
1098 RCU_INIT_POINTER(rbd_dev->layout.pool_ns, NULL);
1099 }
1100
1101 /*
1102  * Fill an rbd image header with information from the given format 1
1103  * on-disk header.
1104  */
1105 static int rbd_header_from_disk(struct rbd_device *rbd_dev,
1106 struct rbd_image_header_ondisk *ondisk)
1107 {
1108 struct rbd_image_header *header = &rbd_dev->header;
1109 bool first_time = header->object_prefix == NULL;
1110 struct ceph_snap_context *snapc;
1111 char *object_prefix = NULL;
1112 char *snap_names = NULL;
1113 u64 *snap_sizes = NULL;
1114 u32 snap_count;
1115 int ret = -ENOMEM;
1116 u32 i;
1117
1118
1119 /* Allocate this now to avoid having to handle failure below */
1120 if (first_time) {
1121 object_prefix = kstrndup(ondisk->object_prefix,
1122 sizeof(ondisk->object_prefix),
1123 GFP_KERNEL);
1124 if (!object_prefix)
1125 return -ENOMEM;
1126 }
1127
1128
1129 /* Allocate the snapshot context and fill it in */
1130 snap_count = le32_to_cpu(ondisk->snap_count);
1131 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
1132 if (!snapc)
1133 goto out_err;
1134 snapc->seq = le64_to_cpu(ondisk->snap_seq);
1135 if (snap_count) {
1136 struct rbd_image_snap_ondisk *snaps;
1137 u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);
1138
1139
1140
1141 if (snap_names_len > (u64)SIZE_MAX)
1142 goto out_2big;
1143 snap_names = kmalloc(snap_names_len, GFP_KERNEL);
1144 if (!snap_names)
1145 goto out_err;
1146
1147
1148 snap_sizes = kmalloc_array(snap_count,
1149 sizeof(*header->snap_sizes),
1150 GFP_KERNEL);
1151 if (!snap_sizes)
1152 goto out_err;
1153
1154 /*
1155  * Copy the names, and fill in each snapshot's id and size.
1156  * Note that rbd_dev_v1_header_info() guarantees the ondisk buffer
1157  * we're working with has snap_names_len bytes beyond the end of
1158  * the snapshot id array, so this memcpy() is safe.
1159  */
1160
1161
1162
1163 memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
1164 snaps = ondisk->snaps;
1165 for (i = 0; i < snap_count; i++) {
1166 snapc->snaps[i] = le64_to_cpu(snaps[i].id);
1167 snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
1168 }
1169 }
1170
1171
1172 /* We won't fail any more, fill in the header */
1173 if (first_time) {
1174 header->object_prefix = object_prefix;
1175 header->obj_order = ondisk->options.order;
1176 rbd_init_layout(rbd_dev);
1177 } else {
1178 ceph_put_snap_context(header->snapc);
1179 kfree(header->snap_names);
1180 kfree(header->snap_sizes);
1181 }
1182
1183
1184 /* The remaining fields always get updated (when we refresh) */
1185 header->image_size = le64_to_cpu(ondisk->image_size);
1186 header->snapc = snapc;
1187 header->snap_names = snap_names;
1188 header->snap_sizes = snap_sizes;
1189
1190 return 0;
1191 out_2big:
1192 ret = -EIO;
1193 out_err:
1194 kfree(snap_sizes);
1195 kfree(snap_names);
1196 ceph_put_snap_context(snapc);
1197 kfree(object_prefix);
1198
1199 return ret;
1200 }
1201
1202 static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
1203 {
1204 const char *snap_name;
1205
1206 rbd_assert(which < rbd_dev->header.snapc->num_snaps);
1207
1208
1209 /* Skip over names until we find the one we are looking for */
1210 snap_name = rbd_dev->header.snap_names;
1211 while (which--)
1212 snap_name += strlen(snap_name) + 1;
1213
1214 return kstrdup(snap_name, GFP_KERNEL);
1215 }
1216
1217 /*
1218  * Snapshot id comparison function for use with qsort()/bsearch().
1219  * Note that result is for snapshots in *descending* order.
1220  */
1221 static int snapid_compare_reverse(const void *s1, const void *s2)
1222 {
1223 u64 snap_id1 = *(u64 *)s1;
1224 u64 snap_id2 = *(u64 *)s2;
1225
1226 if (snap_id1 < snap_id2)
1227 return 1;
1228 return snap_id1 == snap_id2 ? 0 : -1;
1229 }
1230
1231 /*
1232  * Search a snapshot context to see if the given snapshot id is
1233  * present.
1234  *
1235  * Returns the position of the snapshot id in the array if it's found,
1236  * or BAD_SNAP_INDEX otherwise.
1237  *
1238  * Note: The snapshot array is kept sorted (by the osd) in
1239  * decreasing order of snapshot id.
1240  */
1241 static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
1242 {
1243 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
1244 u64 *found;
1245
1246 found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
1247 sizeof (snap_id), snapid_compare_reverse);
1248
1249 return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
1250 }
1251
1252 static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
1253 u64 snap_id)
1254 {
1255 u32 which;
1256 const char *snap_name;
1257
1258 which = rbd_dev_snap_index(rbd_dev, snap_id);
1259 if (which == BAD_SNAP_INDEX)
1260 return ERR_PTR(-ENOENT);
1261
1262 snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
1263 return snap_name ? snap_name : ERR_PTR(-ENOMEM);
1264 }
1265
1266 static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
1267 {
1268 if (snap_id == CEPH_NOSNAP)
1269 return RBD_SNAP_HEAD_NAME;
1270
1271 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
1272 if (rbd_dev->image_format == 1)
1273 return rbd_dev_v1_snap_name(rbd_dev, snap_id);
1274
1275 return rbd_dev_v2_snap_name(rbd_dev, snap_id);
1276 }
1277
1278 static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
1279 u64 *snap_size)
1280 {
1281 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
1282 if (snap_id == CEPH_NOSNAP) {
1283 *snap_size = rbd_dev->header.image_size;
1284 } else if (rbd_dev->image_format == 1) {
1285 u32 which;
1286
1287 which = rbd_dev_snap_index(rbd_dev, snap_id);
1288 if (which == BAD_SNAP_INDEX)
1289 return -ENOENT;
1290
1291 *snap_size = rbd_dev->header.snap_sizes[which];
1292 } else {
1293 u64 size = 0;
1294 int ret;
1295
1296 ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
1297 if (ret)
1298 return ret;
1299
1300 *snap_size = size;
1301 }
1302 return 0;
1303 }
1304
1305 static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
1306 u64 *snap_features)
1307 {
1308 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
1309 if (snap_id == CEPH_NOSNAP) {
1310 *snap_features = rbd_dev->header.features;
1311 } else if (rbd_dev->image_format == 1) {
1312 *snap_features = 0;
1313 } else {
1314 u64 features = 0;
1315 int ret;
1316
1317 ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
1318 if (ret)
1319 return ret;
1320
1321 *snap_features = features;
1322 }
1323 return 0;
1324 }
1325
1326 static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
1327 {
1328 u64 snap_id = rbd_dev->spec->snap_id;
1329 u64 size = 0;
1330 u64 features = 0;
1331 int ret;
1332
1333 ret = rbd_snap_size(rbd_dev, snap_id, &size);
1334 if (ret)
1335 return ret;
1336 ret = rbd_snap_features(rbd_dev, snap_id, &features);
1337 if (ret)
1338 return ret;
1339
1340 rbd_dev->mapping.size = size;
1341 rbd_dev->mapping.features = features;
1342
1343 return 0;
1344 }
1345
1346 static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
1347 {
1348 rbd_dev->mapping.size = 0;
1349 rbd_dev->mapping.features = 0;
1350 }
1351
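/*
 * Zero a byte range of the request's data buffer, kmapping each page
 * in turn.  Used on the read path to fill in holes, e.g. when an
 * object doesn't exist or a read extends past the parent overlap.
 */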
1352 static void zero_bvec(struct bio_vec *bv)
1353 {
1354 void *buf;
1355 unsigned long flags;
1356
1357 buf = bvec_kmap_irq(bv, &flags);
1358 memset(buf, 0, bv->bv_len);
1359 flush_dcache_page(bv->bv_page);
1360 bvec_kunmap_irq(buf, &flags);
1361 }
1362
1363 static void zero_bios(struct ceph_bio_iter *bio_pos, u32 off, u32 bytes)
1364 {
1365 struct ceph_bio_iter it = *bio_pos;
1366
1367 ceph_bio_iter_advance(&it, off);
1368 ceph_bio_iter_advance_step(&it, bytes, ({
1369 zero_bvec(&bv);
1370 }));
1371 }
1372
1373 static void zero_bvecs(struct ceph_bvec_iter *bvec_pos, u32 off, u32 bytes)
1374 {
1375 struct ceph_bvec_iter it = *bvec_pos;
1376
1377 ceph_bvec_iter_advance(&it, off);
1378 ceph_bvec_iter_advance_step(&it, bytes, ({
1379 zero_bvec(&bv);
1380 }));
1381 }
1382
1383
1384
1385
1386
1387
1388
1389 static void rbd_obj_zero_range(struct rbd_obj_request *obj_req, u32 off,
1390 u32 bytes)
1391 {
1392 dout("%s %p data buf %u~%u\n", __func__, obj_req, off, bytes);
1393
1394 switch (obj_req->img_request->data_type) {
1395 case OBJ_REQUEST_BIO:
1396 zero_bios(&obj_req->bio_pos, off, bytes);
1397 break;
1398 case OBJ_REQUEST_BVECS:
1399 case OBJ_REQUEST_OWN_BVECS:
1400 zero_bvecs(&obj_req->bvec_pos, off, bytes);
1401 break;
1402 default:
1403 BUG();
1404 }
1405 }
1406
1407 static void rbd_obj_request_destroy(struct kref *kref);
1408 static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
1409 {
1410 rbd_assert(obj_request != NULL);
1411 dout("%s: obj %p (was %d)\n", __func__, obj_request,
1412 kref_read(&obj_request->kref));
1413 kref_put(&obj_request->kref, rbd_obj_request_destroy);
1414 }
1415
1416 static void rbd_img_request_destroy(struct kref *kref);
1417 static void rbd_img_request_put(struct rbd_img_request *img_request)
1418 {
1419 rbd_assert(img_request != NULL);
1420 dout("%s: img %p (was %d)\n", __func__, img_request,
1421 kref_read(&img_request->kref));
1422 kref_put(&img_request->kref, rbd_img_request_destroy);
1423 }
1424
1425 static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
1426 struct rbd_obj_request *obj_request)
1427 {
1428 rbd_assert(obj_request->img_request == NULL);
1429
1430
1431 obj_request->img_request = img_request;
1432 dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
1433 }
1434
1435 static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
1436 struct rbd_obj_request *obj_request)
1437 {
1438 dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
1439 list_del(&obj_request->ex.oe_item);
1440 rbd_assert(obj_request->img_request == img_request);
1441 rbd_obj_request_put(obj_request);
1442 }
1443
1444 static void rbd_osd_submit(struct ceph_osd_request *osd_req)
1445 {
1446 struct rbd_obj_request *obj_req = osd_req->r_priv;
1447
1448 dout("%s osd_req %p for obj_req %p objno %llu %llu~%llu\n",
1449 __func__, osd_req, obj_req, obj_req->ex.oe_objno,
1450 obj_req->ex.oe_off, obj_req->ex.oe_len);
1451 ceph_osdc_start_request(osd_req->r_osdc, osd_req, false);
1452 }
1453
1454 /*
1455  * The default/initial value for all image request flags is 0.  Each
1456  * is conditionally set to 1 at image request initialization time
1457  * and currently never changes thereafter.
1458  */
1459 static void img_request_layered_set(struct rbd_img_request *img_request)
1460 {
1461 set_bit(IMG_REQ_LAYERED, &img_request->flags);
1462 smp_mb();
1463 }
1464
1465 static void img_request_layered_clear(struct rbd_img_request *img_request)
1466 {
1467 clear_bit(IMG_REQ_LAYERED, &img_request->flags);
1468 smp_mb();
1469 }
1470
1471 static bool img_request_layered_test(struct rbd_img_request *img_request)
1472 {
1473 smp_mb();
1474 return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
1475 }
1476
1477 static bool rbd_obj_is_entire(struct rbd_obj_request *obj_req)
1478 {
1479 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
1480
1481 return !obj_req->ex.oe_off &&
1482 obj_req->ex.oe_len == rbd_dev->layout.object_size;
1483 }
1484
1485 static bool rbd_obj_is_tail(struct rbd_obj_request *obj_req)
1486 {
1487 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
1488
1489 return obj_req->ex.oe_off + obj_req->ex.oe_len ==
1490 rbd_dev->layout.object_size;
1491 }
1492
1493
1494 /* Must be called after rbd_obj_calc_img_extents(). */
1495
1496 static bool rbd_obj_copyup_enabled(struct rbd_obj_request *obj_req)
1497 {
1498 if (!obj_req->num_img_extents ||
1499 (rbd_obj_is_entire(obj_req) &&
1500 !obj_req->img_request->snapc->num_snaps))
1501 return false;
1502
1503 return true;
1504 }
1505
1506 static u64 rbd_obj_img_extents_bytes(struct rbd_obj_request *obj_req)
1507 {
1508 return ceph_file_extents_bytes(obj_req->img_extents,
1509 obj_req->num_img_extents);
1510 }
1511
1512 static bool rbd_img_is_write(struct rbd_img_request *img_req)
1513 {
1514 switch (img_req->op_type) {
1515 case OBJ_OP_READ:
1516 return false;
1517 case OBJ_OP_WRITE:
1518 case OBJ_OP_DISCARD:
1519 case OBJ_OP_ZEROOUT:
1520 return true;
1521 default:
1522 BUG();
1523 }
1524 }
1525
1526 static void rbd_osd_req_callback(struct ceph_osd_request *osd_req)
1527 {
1528 struct rbd_obj_request *obj_req = osd_req->r_priv;
1529 int result;
1530
1531 dout("%s osd_req %p result %d for obj_req %p\n", __func__, osd_req,
1532 osd_req->r_result, obj_req);
1533
1534 /*
1535  * Writes aren't allowed to return a data payload.  In some guarded
1536  * write cases (e.g. stat + zero on an empty object) a stat
1537  * response makes it through, but we don't care.
1538  */
1539 if (osd_req->r_result > 0 && rbd_img_is_write(obj_req->img_request))
1540 result = 0;
1541 else
1542 result = osd_req->r_result;
1543
1544 rbd_obj_handle_request(obj_req, result);
1545 }
1546
1547 static void rbd_osd_format_read(struct ceph_osd_request *osd_req)
1548 {
1549 struct rbd_obj_request *obj_request = osd_req->r_priv;
1550
1551 osd_req->r_flags = CEPH_OSD_FLAG_READ;
1552 osd_req->r_snapid = obj_request->img_request->snap_id;
1553 }
1554
1555 static void rbd_osd_format_write(struct ceph_osd_request *osd_req)
1556 {
1557 struct rbd_obj_request *obj_request = osd_req->r_priv;
1558
1559 osd_req->r_flags = CEPH_OSD_FLAG_WRITE;
1560 ktime_get_real_ts64(&osd_req->r_mtime);
1561 osd_req->r_data_offset = obj_request->ex.oe_off;
1562 }
1563
1564 static struct ceph_osd_request *
1565 __rbd_obj_add_osd_request(struct rbd_obj_request *obj_req,
1566 struct ceph_snap_context *snapc, int num_ops)
1567 {
1568 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
1569 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
1570 struct ceph_osd_request *req;
1571 const char *name_format = rbd_dev->image_format == 1 ?
1572 RBD_V1_DATA_FORMAT : RBD_V2_DATA_FORMAT;
1573 int ret;
1574
1575 req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false, GFP_NOIO);
1576 if (!req)
1577 return ERR_PTR(-ENOMEM);
1578
1579 list_add_tail(&req->r_private_item, &obj_req->osd_reqs);
1580 req->r_callback = rbd_osd_req_callback;
1581 req->r_priv = obj_req;
1582
1583 /*
1584  * Data objects may be stored in a separate pool, but always in
1585  * the same namespace in that pool as the header in its pool.
1586  */
1587 ceph_oloc_copy(&req->r_base_oloc, &rbd_dev->header_oloc);
1588 req->r_base_oloc.pool = rbd_dev->layout.pool_id;
1589
1590 ret = ceph_oid_aprintf(&req->r_base_oid, GFP_NOIO, name_format,
1591 rbd_dev->header.object_prefix,
1592 obj_req->ex.oe_objno);
1593 if (ret)
1594 return ERR_PTR(ret);
1595
1596 return req;
1597 }
1598
1599 static struct ceph_osd_request *
1600 rbd_obj_add_osd_request(struct rbd_obj_request *obj_req, int num_ops)
1601 {
1602 return __rbd_obj_add_osd_request(obj_req, obj_req->img_request->snapc,
1603 num_ops);
1604 }
1605
1606 static struct rbd_obj_request *rbd_obj_request_create(void)
1607 {
1608 struct rbd_obj_request *obj_request;
1609
1610 obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO);
1611 if (!obj_request)
1612 return NULL;
1613
1614 ceph_object_extent_init(&obj_request->ex);
1615 INIT_LIST_HEAD(&obj_request->osd_reqs);
1616 mutex_init(&obj_request->state_mutex);
1617 kref_init(&obj_request->kref);
1618
1619 dout("%s %p\n", __func__, obj_request);
1620 return obj_request;
1621 }
1622
1623 static void rbd_obj_request_destroy(struct kref *kref)
1624 {
1625 struct rbd_obj_request *obj_request;
1626 struct ceph_osd_request *osd_req;
1627 u32 i;
1628
1629 obj_request = container_of(kref, struct rbd_obj_request, kref);
1630
1631 dout("%s: obj %p\n", __func__, obj_request);
1632
1633 while (!list_empty(&obj_request->osd_reqs)) {
1634 osd_req = list_first_entry(&obj_request->osd_reqs,
1635 struct ceph_osd_request, r_private_item);
1636 list_del_init(&osd_req->r_private_item);
1637 ceph_osdc_put_request(osd_req);
1638 }
1639
1640 switch (obj_request->img_request->data_type) {
1641 case OBJ_REQUEST_NODATA:
1642 case OBJ_REQUEST_BIO:
1643 case OBJ_REQUEST_BVECS:
1644 break;
1645 case OBJ_REQUEST_OWN_BVECS:
1646 kfree(obj_request->bvec_pos.bvecs);
1647 break;
1648 default:
1649 BUG();
1650 }
1651
1652 kfree(obj_request->img_extents);
1653 if (obj_request->copyup_bvecs) {
1654 for (i = 0; i < obj_request->copyup_bvec_count; i++) {
1655 if (obj_request->copyup_bvecs[i].bv_page)
1656 __free_page(obj_request->copyup_bvecs[i].bv_page);
1657 }
1658 kfree(obj_request->copyup_bvecs);
1659 }
1660
1661 kmem_cache_free(rbd_obj_request_cache, obj_request);
1662 }
1663
1664
1665
1666 static void rbd_spec_put(struct rbd_spec *spec);
1667 static void rbd_dev_unparent(struct rbd_device *rbd_dev)
1668 {
1669 rbd_dev_remove_parent(rbd_dev);
1670 rbd_spec_put(rbd_dev->parent_spec);
1671 rbd_dev->parent_spec = NULL;
1672 rbd_dev->parent_overlap = 0;
1673 }
1674
1675 /*
1676  * Parent image reference counting is used to determine when an
1677  * image's parent fields can be safely torn down, i.e. after there
1678  * are no more in-flight requests to the parent image.  When the
1679  * last reference is dropped, cleaning them up is safe.
1680  */
1681 static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
1682 {
1683 int counter;
1684
1685 if (!rbd_dev->parent_spec)
1686 return;
1687
1688 counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
1689 if (counter > 0)
1690 return;
1691
1692
1693 /* last reference; clean up parent data structures */
1694 if (!counter)
1695 rbd_dev_unparent(rbd_dev);
1696 else
1697 rbd_warn(rbd_dev, "parent reference underflow");
1698 }
1699
1700
1701 /*
1702  * If an image has a non-zero parent overlap, get a reference to
1703  * its parent.
1704  *
1705  * Returns true if the rbd device has a parent with a non-zero
1706  * overlap and a reference for it was successfully taken, or false.
1707  */
1708 static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
1709 {
1710 int counter = 0;
1711
1712 if (!rbd_dev->parent_spec)
1713 return false;
1714
1715 down_read(&rbd_dev->header_rwsem);
1716 if (rbd_dev->parent_overlap)
1717 counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
1718 up_read(&rbd_dev->header_rwsem);
1719
1720 if (counter < 0)
1721 rbd_warn(rbd_dev, "parent reference overflow");
1722
1723 return counter > 0;
1724 }
1725
1726 /*
1727  * Caller is responsible for filling in the list of object requests
1728  * that comprises the image request, and the Linux request pointer
1729  * (if there is one).
1730  */
1731 static struct rbd_img_request *rbd_img_request_create(
1732 struct rbd_device *rbd_dev,
1733 enum obj_operation_type op_type,
1734 struct ceph_snap_context *snapc)
1735 {
1736 struct rbd_img_request *img_request;
1737
1738 img_request = kmem_cache_zalloc(rbd_img_request_cache, GFP_NOIO);
1739 if (!img_request)
1740 return NULL;
1741
1742 img_request->rbd_dev = rbd_dev;
1743 img_request->op_type = op_type;
1744 if (!rbd_img_is_write(img_request))
1745 img_request->snap_id = rbd_dev->spec->snap_id;
1746 else
1747 img_request->snapc = snapc;
1748
1749 if (rbd_dev_parent_get(rbd_dev))
1750 img_request_layered_set(img_request);
1751
1752 INIT_LIST_HEAD(&img_request->lock_item);
1753 INIT_LIST_HEAD(&img_request->object_extents);
1754 mutex_init(&img_request->state_mutex);
1755 kref_init(&img_request->kref);
1756
1757 return img_request;
1758 }
1759
1760 static void rbd_img_request_destroy(struct kref *kref)
1761 {
1762 struct rbd_img_request *img_request;
1763 struct rbd_obj_request *obj_request;
1764 struct rbd_obj_request *next_obj_request;
1765
1766 img_request = container_of(kref, struct rbd_img_request, kref);
1767
1768 dout("%s: img %p\n", __func__, img_request);
1769
1770 WARN_ON(!list_empty(&img_request->lock_item));
1771 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
1772 rbd_img_obj_request_del(img_request, obj_request);
1773
1774 if (img_request_layered_test(img_request)) {
1775 img_request_layered_clear(img_request);
1776 rbd_dev_parent_put(img_request->rbd_dev);
1777 }
1778
1779 if (rbd_img_is_write(img_request))
1780 ceph_put_snap_context(img_request->snapc);
1781
1782 kmem_cache_free(rbd_img_request_cache, img_request);
1783 }
1784
1785 #define BITS_PER_OBJ 2
1786 #define OBJS_PER_BYTE (BITS_PER_BYTE / BITS_PER_OBJ)
1787 #define OBJ_MASK ((1 << BITS_PER_OBJ) - 1)
1788
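/*
 * The object map stores 2 bits of state per object, 4 objects per
 * byte, with the first object in the most significant bit pair;
 * hence the (OBJS_PER_BYTE - off - 1) shift when locating an entry.
 */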
1789 static void __rbd_object_map_index(struct rbd_device *rbd_dev, u64 objno,
1790 u64 *index, u8 *shift)
1791 {
1792 u32 off;
1793
1794 rbd_assert(objno < rbd_dev->object_map_size);
1795 *index = div_u64_rem(objno, OBJS_PER_BYTE, &off);
1796 *shift = (OBJS_PER_BYTE - off - 1) * BITS_PER_OBJ;
1797 }
1798
1799 static u8 __rbd_object_map_get(struct rbd_device *rbd_dev, u64 objno)
1800 {
1801 u64 index;
1802 u8 shift;
1803
1804 lockdep_assert_held(&rbd_dev->object_map_lock);
1805 __rbd_object_map_index(rbd_dev, objno, &index, &shift);
1806 return (rbd_dev->object_map[index] >> shift) & OBJ_MASK;
1807 }
1808
1809 static void __rbd_object_map_set(struct rbd_device *rbd_dev, u64 objno, u8 val)
1810 {
1811 u64 index;
1812 u8 shift;
1813 u8 *p;
1814
1815 lockdep_assert_held(&rbd_dev->object_map_lock);
1816 rbd_assert(!(val & ~OBJ_MASK));
1817
1818 __rbd_object_map_index(rbd_dev, objno, &index, &shift);
1819 p = &rbd_dev->object_map[index];
1820 *p = (*p & ~(OBJ_MASK << shift)) | (val << shift);
1821 }
1822
1823 static u8 rbd_object_map_get(struct rbd_device *rbd_dev, u64 objno)
1824 {
1825 u8 state;
1826
1827 spin_lock(&rbd_dev->object_map_lock);
1828 state = __rbd_object_map_get(rbd_dev, objno);
1829 spin_unlock(&rbd_dev->object_map_lock);
1830 return state;
1831 }
1832
1833 static bool use_object_map(struct rbd_device *rbd_dev)
1834 {
1835 return ((rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP) &&
1836 !(rbd_dev->object_map_flags & RBD_FLAG_OBJECT_MAP_INVALID));
1837 }
1838
1839 static bool rbd_object_map_may_exist(struct rbd_device *rbd_dev, u64 objno)
1840 {
1841 u8 state;
1842
1843 /* fall back to default logic if object map is disabled or invalid */
1844 if (!use_object_map(rbd_dev))
1845 return true;
1846
1847 state = rbd_object_map_get(rbd_dev, objno);
1848 return state != OBJECT_NONEXISTENT;
1849 }
1850
1851 static void rbd_object_map_name(struct rbd_device *rbd_dev, u64 snap_id,
1852 struct ceph_object_id *oid)
1853 {
1854 if (snap_id == CEPH_NOSNAP)
1855 ceph_oid_printf(oid, "%s%s", RBD_OBJECT_MAP_PREFIX,
1856 rbd_dev->spec->image_id);
1857 else
1858 ceph_oid_printf(oid, "%s%s.%016llx", RBD_OBJECT_MAP_PREFIX,
1859 rbd_dev->spec->image_id, snap_id);
1860 }
1861
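/*
 * Take the exclusive class lock on the HEAD object map object,
 * breaking a stale lock (e.g. one left behind by a crashed client)
 * at most once before giving up.
 */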
1862 static int rbd_object_map_lock(struct rbd_device *rbd_dev)
1863 {
1864 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
1865 CEPH_DEFINE_OID_ONSTACK(oid);
1866 u8 lock_type;
1867 char *lock_tag;
1868 struct ceph_locker *lockers;
1869 u32 num_lockers;
1870 bool broke_lock = false;
1871 int ret;
1872
1873 rbd_object_map_name(rbd_dev, CEPH_NOSNAP, &oid);
1874
1875 again:
1876 ret = ceph_cls_lock(osdc, &oid, &rbd_dev->header_oloc, RBD_LOCK_NAME,
1877 CEPH_CLS_LOCK_EXCLUSIVE, "", "", "", 0);
1878 if (ret != -EBUSY || broke_lock) {
1879 if (ret == -EEXIST)
1880 ret = 0;
1881 if (ret)
1882 rbd_warn(rbd_dev, "failed to lock object map: %d", ret);
1883 return ret;
1884 }
1885
1886 ret = ceph_cls_lock_info(osdc, &oid, &rbd_dev->header_oloc,
1887 RBD_LOCK_NAME, &lock_type, &lock_tag,
1888 &lockers, &num_lockers);
1889 if (ret) {
1890 if (ret == -ENOENT)
1891 goto again;
1892
1893 rbd_warn(rbd_dev, "failed to get object map lockers: %d", ret);
1894 return ret;
1895 }
1896
1897 kfree(lock_tag);
1898 if (num_lockers == 0)
1899 goto again;
1900
1901 rbd_warn(rbd_dev, "breaking object map lock owned by %s%llu",
1902 ENTITY_NAME(lockers[0].id.name));
1903
1904 ret = ceph_cls_break_lock(osdc, &oid, &rbd_dev->header_oloc,
1905 RBD_LOCK_NAME, lockers[0].id.cookie,
1906 &lockers[0].id.name);
1907 ceph_free_lockers(lockers, num_lockers);
1908 if (ret) {
1909 if (ret == -ENOENT)
1910 goto again;
1911
1912 rbd_warn(rbd_dev, "failed to break object map lock: %d", ret);
1913 return ret;
1914 }
1915
1916 broke_lock = true;
1917 goto again;
1918 }
1919
1920 static void rbd_object_map_unlock(struct rbd_device *rbd_dev)
1921 {
1922 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
1923 CEPH_DEFINE_OID_ONSTACK(oid);
1924 int ret;
1925
1926 rbd_object_map_name(rbd_dev, CEPH_NOSNAP, &oid);
1927
1928 ret = ceph_cls_unlock(osdc, &oid, &rbd_dev->header_oloc, RBD_LOCK_NAME,
1929 "");
1930 if (ret && ret != -ENOENT)
1931 rbd_warn(rbd_dev, "failed to unlock object map: %d", ret);
1932 }
1933
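/*
 * The object map object holds a ceph BitVector: a small header,
 * decoded here only for its element count, followed by the packed
 * 2-bit-per-object data that __rbd_object_map_load() copies out.
 */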
1934 static int decode_object_map_header(void **p, void *end, u64 *object_map_size)
1935 {
1936 u8 struct_v;
1937 u32 struct_len;
1938 u32 header_len;
1939 void *header_end;
1940 int ret;
1941
1942 ceph_decode_32_safe(p, end, header_len, e_inval);
1943 header_end = *p + header_len;
1944
1945 ret = ceph_start_decoding(p, end, 1, "BitVector header", &struct_v,
1946 &struct_len);
1947 if (ret)
1948 return ret;
1949
1950 ceph_decode_64_safe(p, end, *object_map_size, e_inval);
1951
1952 *p = header_end;
1953 return 0;
1954
1955 e_inval:
1956 return -EINVAL;
1957 }
1958
1959 static int __rbd_object_map_load(struct rbd_device *rbd_dev)
1960 {
1961 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
1962 CEPH_DEFINE_OID_ONSTACK(oid);
1963 struct page **pages;
1964 void *p, *end;
1965 size_t reply_len;
1966 u64 num_objects;
1967 u64 object_map_bytes;
1968 u64 object_map_size;
1969 int num_pages;
1970 int ret;
1971
1972 rbd_assert(!rbd_dev->object_map && !rbd_dev->object_map_size);
1973
1974 num_objects = ceph_get_num_objects(&rbd_dev->layout,
1975 rbd_dev->mapping.size);
1976 object_map_bytes = DIV_ROUND_UP_ULL(num_objects * BITS_PER_OBJ,
1977 BITS_PER_BYTE);
1978 num_pages = calc_pages_for(0, object_map_bytes) + 1;
1979 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
1980 if (IS_ERR(pages))
1981 return PTR_ERR(pages);
1982
1983 reply_len = num_pages * PAGE_SIZE;
1984 rbd_object_map_name(rbd_dev, rbd_dev->spec->snap_id, &oid);
1985 ret = ceph_osdc_call(osdc, &oid, &rbd_dev->header_oloc,
1986 "rbd", "object_map_load", CEPH_OSD_FLAG_READ,
1987 NULL, 0, pages, &reply_len);
1988 if (ret)
1989 goto out;
1990
1991 p = page_address(pages[0]);
1992 end = p + min(reply_len, (size_t)PAGE_SIZE);
1993 ret = decode_object_map_header(&p, end, &object_map_size);
1994 if (ret)
1995 goto out;
1996
1997 if (object_map_size != num_objects) {
1998 rbd_warn(rbd_dev, "object map size mismatch: %llu vs %llu",
1999 object_map_size, num_objects);
2000 ret = -EINVAL;
2001 goto out;
2002 }
2003
2004 if (offset_in_page(p) + object_map_bytes > reply_len) {
2005 ret = -EINVAL;
2006 goto out;
2007 }
2008
2009 rbd_dev->object_map = kvmalloc(object_map_bytes, GFP_KERNEL);
2010 if (!rbd_dev->object_map) {
2011 ret = -ENOMEM;
2012 goto out;
2013 }
2014
2015 rbd_dev->object_map_size = object_map_size;
2016 ceph_copy_from_page_vector(pages, rbd_dev->object_map,
2017 offset_in_page(p), object_map_bytes);
2018
2019 out:
2020 ceph_release_page_vector(pages, num_pages);
2021 return ret;
2022 }
2023
2024 static void rbd_object_map_free(struct rbd_device *rbd_dev)
2025 {
2026 kvfree(rbd_dev->object_map);
2027 rbd_dev->object_map = NULL;
2028 rbd_dev->object_map_size = 0;
2029 }
2030
2031 static int rbd_object_map_load(struct rbd_device *rbd_dev)
2032 {
2033 int ret;
2034
2035 ret = __rbd_object_map_load(rbd_dev);
2036 if (ret)
2037 return ret;
2038
2039 ret = rbd_dev_v2_get_flags(rbd_dev);
2040 if (ret) {
2041 rbd_object_map_free(rbd_dev);
2042 return ret;
2043 }
2044
2045 if (rbd_dev->object_map_flags & RBD_FLAG_OBJECT_MAP_INVALID)
2046 rbd_warn(rbd_dev, "object map is invalid");
2047
2048 return 0;
2049 }
2050
2051 static int rbd_object_map_open(struct rbd_device *rbd_dev)
2052 {
2053 int ret;
2054
2055 ret = rbd_object_map_lock(rbd_dev);
2056 if (ret)
2057 return ret;
2058
2059 ret = rbd_object_map_load(rbd_dev);
2060 if (ret) {
2061 rbd_object_map_unlock(rbd_dev);
2062 return ret;
2063 }
2064
2065 return 0;
2066 }
2067
2068 static void rbd_object_map_close(struct rbd_device *rbd_dev)
2069 {
2070 rbd_object_map_free(rbd_dev);
2071 rbd_object_map_unlock(rbd_dev);
2072 }
2073
2074 /*
2075  * Process the result of an object map update call: re-decode the
2076  * (objno, new_state, current_state) tuple that was sent and, if
2077  * the guard still matches, apply the new state to the in-memory
2078  * object map.
2079  */
2080
2081
2082
2083
2084 static int rbd_object_map_update_finish(struct rbd_obj_request *obj_req,
2085 struct ceph_osd_request *osd_req)
2086 {
2087 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2088 struct ceph_osd_data *osd_data;
2089 u64 objno;
2090 u8 state, new_state, uninitialized_var(current_state);
2091 bool has_current_state;
2092 void *p;
2093
2094 if (osd_req->r_result)
2095 return osd_req->r_result;
2096
2097 /*
2098  * Nothing to do for a snapshot object map.
2099  */
2100 if (osd_req->r_num_ops == 1)
2101 return 0;
2102
2103 /*
2104  * Update in-memory HEAD object map.
2105  */
2106 rbd_assert(osd_req->r_num_ops == 2);
2107 osd_data = osd_req_op_data(osd_req, 1, cls, request_data);
2108 rbd_assert(osd_data->type == CEPH_OSD_DATA_TYPE_PAGES);
2109
2110 p = page_address(osd_data->pages[0]);
2111 objno = ceph_decode_64(&p);
2112 rbd_assert(objno == obj_req->ex.oe_objno);
2113 rbd_assert(ceph_decode_64(&p) == objno + 1);
2114 new_state = ceph_decode_8(&p);
2115 has_current_state = ceph_decode_8(&p);
2116 if (has_current_state)
2117 current_state = ceph_decode_8(&p);
2118
2119 spin_lock(&rbd_dev->object_map_lock);
2120 state = __rbd_object_map_get(rbd_dev, objno);
2121 if (!has_current_state || current_state == state ||
2122 (current_state == OBJECT_EXISTS && state == OBJECT_EXISTS_CLEAN))
2123 __rbd_object_map_set(rbd_dev, objno, new_state);
2124 spin_unlock(&rbd_dev->object_map_lock);
2125
2126 return 0;
2127 }
2128
2129 static void rbd_object_map_callback(struct ceph_osd_request *osd_req)
2130 {
2131 struct rbd_obj_request *obj_req = osd_req->r_priv;
2132 int result;
2133
2134 dout("%s osd_req %p result %d for obj_req %p\n", __func__, osd_req,
2135 osd_req->r_result, obj_req);
2136
2137 result = rbd_object_map_update_finish(obj_req, osd_req);
2138 rbd_obj_handle_request(obj_req, result);
2139 }
2140
2141 static bool update_needed(struct rbd_device *rbd_dev, u64 objno, u8 new_state)
2142 {
2143 u8 state = rbd_object_map_get(rbd_dev, objno);
2144
2145 if (state == new_state ||
2146 (new_state == OBJECT_PENDING && state == OBJECT_NONEXISTENT) ||
2147 (new_state == OBJECT_NONEXISTENT && state != OBJECT_PENDING))
2148 return false;
2149
2150 return true;
2151 }
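
/*
 * In other words, a sketch of the conditions above -- the only
 * transitions worth sending are:
 *
 *   current state         new_state           update?
 *   --------------------  ------------------  -------
 *   X                     X (same)            no
 *   OBJECT_NONEXISTENT    OBJECT_PENDING      no
 *   anything but PENDING  OBJECT_NONEXISTENT  no
 *   everything else                           yes
 */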
2152
2153 static int rbd_cls_object_map_update(struct ceph_osd_request *req,
2154 int which, u64 objno, u8 new_state,
2155 const u8 *current_state)
2156 {
2157 struct page **pages;
2158 void *p, *start;
2159 int ret;
2160
2161 ret = osd_req_op_cls_init(req, which, "rbd", "object_map_update");
2162 if (ret)
2163 return ret;
2164
2165 pages = ceph_alloc_page_vector(1, GFP_NOIO);
2166 if (IS_ERR(pages))
2167 return PTR_ERR(pages);
2168
2169 p = start = page_address(pages[0]);
2170 ceph_encode_64(&p, objno);
2171 ceph_encode_64(&p, objno + 1);
2172 ceph_encode_8(&p, new_state);
2173 if (current_state) {
2174 ceph_encode_8(&p, 1);
2175 ceph_encode_8(&p, *current_state);
2176 } else {
2177 ceph_encode_8(&p, 0);
2178 }
2179
2180 osd_req_op_cls_request_data_pages(req, which, pages, p - start, 0,
2181 false, true);
2182 return 0;
2183 }
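
/*
 * Layout of the request data page encoded above, consumed by the
 * "rbd" class method object_map_update on the OSD:
 *
 *   le64 start object number (objno)
 *   le64 end object number (objno + 1, i.e. a single-object range)
 *   u8   new object state
 *   u8   has current state (bool)
 *   u8   current object state (present only if the bool is nonzero)
 */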
2184
2185 /*
2186  * Return:
2187  *   0 - object map update sent
2188  *   1 - object map update isn't needed
2189  *  <0 - error occurred
2190  */
2191 static int rbd_object_map_update(struct rbd_obj_request *obj_req, u64 snap_id,
2192 u8 new_state, const u8 *current_state)
2193 {
2194 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2195 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2196 struct ceph_osd_request *req;
2197 int num_ops = 1;
2198 int which = 0;
2199 int ret;
2200
2201 if (snap_id == CEPH_NOSNAP) {
2202 if (!update_needed(rbd_dev, obj_req->ex.oe_objno, new_state))
2203 return 1;
2204
2205 num_ops++; /* assert_locked */
2206 }
2207
2208 req = ceph_osdc_alloc_request(osdc, NULL, num_ops, false, GFP_NOIO);
2209 if (!req)
2210 return -ENOMEM;
2211
2212 list_add_tail(&req->r_private_item, &obj_req->osd_reqs);
2213 req->r_callback = rbd_object_map_callback;
2214 req->r_priv = obj_req;
2215
2216 rbd_object_map_name(rbd_dev, snap_id, &req->r_base_oid);
2217 ceph_oloc_copy(&req->r_base_oloc, &rbd_dev->header_oloc);
2218 req->r_flags = CEPH_OSD_FLAG_WRITE;
2219 ktime_get_real_ts64(&req->r_mtime);
2220
2221 if (snap_id == CEPH_NOSNAP) {
2222 /*
2223  * Assert that the exclusive lock is still held when updating the
2224  * HEAD object map, protecting against races with lock transitions.
2225  */
2226 ret = ceph_cls_assert_locked(req, which++, RBD_LOCK_NAME,
2227 CEPH_CLS_LOCK_EXCLUSIVE, "", "");
2228 if (ret)
2229 return ret;
2230 }
2231
2232 ret = rbd_cls_object_map_update(req, which, obj_req->ex.oe_objno,
2233 new_state, current_state);
2234 if (ret)
2235 return ret;
2236
2237 ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
2238 if (ret)
2239 return ret;
2240
2241 ceph_osdc_start_request(osdc, req, false);
2242 return 0;
2243 }
2244
2245 static void prune_extents(struct ceph_file_extent *img_extents,
2246 u32 *num_img_extents, u64 overlap)
2247 {
2248 u32 cnt = *num_img_extents;
2249
2250 /* drop extents completely beyond the overlap */
2251 while (cnt && img_extents[cnt - 1].fe_off >= overlap)
2252 cnt--;
2253
2254 if (cnt) {
2255 struct ceph_file_extent *ex = &img_extents[cnt - 1];
2256
2257 /* trim final overlapping extent */
2258 if (ex->fe_off + ex->fe_len > overlap)
2259 ex->fe_len = overlap - ex->fe_off;
2260 }
2261
2262 *num_img_extents = cnt;
2263 }
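
/*
 * A worked example: with overlap 8192 and extents { 0~4096, 6144~4096,
 * 12288~2048 }, the last extent starts at or beyond the overlap and is
 * dropped, the second is trimmed to 6144~2048 and the first is kept
 * intact, leaving *num_img_extents == 2.
 */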
2264
2265 /*
2266  * Determine the byte range(s) covered by either just the object extent
2267  * or the entire object in the parent image.
2268  */
2269 static int rbd_obj_calc_img_extents(struct rbd_obj_request *obj_req,
2270 bool entire)
2271 {
2272 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2273 int ret;
2274
2275 if (!rbd_dev->parent_overlap)
2276 return 0;
2277
2278 ret = ceph_extent_to_file(&rbd_dev->layout, obj_req->ex.oe_objno,
2279 entire ? 0 : obj_req->ex.oe_off,
2280 entire ? rbd_dev->layout.object_size :
2281 obj_req->ex.oe_len,
2282 &obj_req->img_extents,
2283 &obj_req->num_img_extents);
2284 if (ret)
2285 return ret;
2286
2287 prune_extents(obj_req->img_extents, &obj_req->num_img_extents,
2288 rbd_dev->parent_overlap);
2289 return 0;
2290 }
2291
2292 static void rbd_osd_setup_data(struct ceph_osd_request *osd_req, int which)
2293 {
2294 struct rbd_obj_request *obj_req = osd_req->r_priv;
2295
2296 switch (obj_req->img_request->data_type) {
2297 case OBJ_REQUEST_BIO:
2298 osd_req_op_extent_osd_data_bio(osd_req, which,
2299 &obj_req->bio_pos,
2300 obj_req->ex.oe_len);
2301 break;
2302 case OBJ_REQUEST_BVECS:
2303 case OBJ_REQUEST_OWN_BVECS:
2304 rbd_assert(obj_req->bvec_pos.iter.bi_size ==
2305 obj_req->ex.oe_len);
2306 rbd_assert(obj_req->bvec_idx == obj_req->bvec_count);
2307 osd_req_op_extent_osd_data_bvec_pos(osd_req, which,
2308 &obj_req->bvec_pos);
2309 break;
2310 default:
2311 BUG();
2312 }
2313 }
2314
2315 static int rbd_osd_setup_stat(struct ceph_osd_request *osd_req, int which)
2316 {
2317 struct page **pages;
2318
2319 /*
2320  * The response data for a STAT call consists of:
2321  *     le64 length;
2322  *     struct {
2323  *         le32 tv_sec;
2324  *         le32 tv_nsec;
2325  *     } mtime;
2326  */
2327 pages = ceph_alloc_page_vector(1, GFP_NOIO);
2328 if (IS_ERR(pages))
2329 return PTR_ERR(pages);
2330
2331 osd_req_op_init(osd_req, which, CEPH_OSD_OP_STAT, 0);
2332 osd_req_op_raw_data_in_pages(osd_req, which, pages,
2333 8 + sizeof(struct ceph_timespec),
2334 0, false, true);
2335 return 0;
2336 }
2337
2338 static int rbd_osd_setup_copyup(struct ceph_osd_request *osd_req, int which,
2339 u32 bytes)
2340 {
2341 struct rbd_obj_request *obj_req = osd_req->r_priv;
2342 int ret;
2343
2344 ret = osd_req_op_cls_init(osd_req, which, "rbd", "copyup");
2345 if (ret)
2346 return ret;
2347
2348 osd_req_op_cls_request_data_bvecs(osd_req, which, obj_req->copyup_bvecs,
2349 obj_req->copyup_bvec_count, bytes);
2350 return 0;
2351 }
2352
2353 static int rbd_obj_init_read(struct rbd_obj_request *obj_req)
2354 {
2355 obj_req->read_state = RBD_OBJ_READ_START;
2356 return 0;
2357 }
2358
2359 static void __rbd_osd_setup_write_ops(struct ceph_osd_request *osd_req,
2360 int which)
2361 {
2362 struct rbd_obj_request *obj_req = osd_req->r_priv;
2363 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2364 u16 opcode;
2365
2366 if (!use_object_map(rbd_dev) ||
2367 !(obj_req->flags & RBD_OBJ_FLAG_MAY_EXIST)) {
2368 osd_req_op_alloc_hint_init(osd_req, which++,
2369 rbd_dev->layout.object_size,
2370 rbd_dev->layout.object_size);
2371 }
2372
2373 if (rbd_obj_is_entire(obj_req))
2374 opcode = CEPH_OSD_OP_WRITEFULL;
2375 else
2376 opcode = CEPH_OSD_OP_WRITE;
2377
2378 osd_req_op_extent_init(osd_req, which, opcode,
2379 obj_req->ex.oe_off, obj_req->ex.oe_len, 0, 0);
2380 rbd_osd_setup_data(osd_req, which);
2381 }
2382
2383 static int rbd_obj_init_write(struct rbd_obj_request *obj_req)
2384 {
2385 int ret;
2386
2387 /* reverse map the entire object onto the parent */
2388 ret = rbd_obj_calc_img_extents(obj_req, true);
2389 if (ret)
2390 return ret;
2391
2392 if (rbd_obj_copyup_enabled(obj_req))
2393 obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ENABLED;
2394
2395 obj_req->write_state = RBD_OBJ_WRITE_START;
2396 return 0;
2397 }
2398
2399 static u16 truncate_or_zero_opcode(struct rbd_obj_request *obj_req)
2400 {
2401 return rbd_obj_is_tail(obj_req) ? CEPH_OSD_OP_TRUNCATE :
2402 CEPH_OSD_OP_ZERO;
2403 }
2404
2405 static void __rbd_osd_setup_discard_ops(struct ceph_osd_request *osd_req,
2406 int which)
2407 {
2408 struct rbd_obj_request *obj_req = osd_req->r_priv;
2409
2410 if (rbd_obj_is_entire(obj_req) && !obj_req->num_img_extents) {
2411 rbd_assert(obj_req->flags & RBD_OBJ_FLAG_DELETION);
2412 osd_req_op_init(osd_req, which, CEPH_OSD_OP_DELETE, 0);
2413 } else {
2414 osd_req_op_extent_init(osd_req, which,
2415 truncate_or_zero_opcode(obj_req),
2416 obj_req->ex.oe_off, obj_req->ex.oe_len,
2417 0, 0);
2418 }
2419 }
2420
2421 static int rbd_obj_init_discard(struct rbd_obj_request *obj_req)
2422 {
2423 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2424 u64 off, next_off;
2425 int ret;
2426
2427 /*
2428  * Align the range to alloc_size boundary and punt on discards
2429  * that are too small to free up any space.
2430  *
2431  * alloc_size == object_size && is_tail() is a special case for
2432  * filestore with filestore_punch_hole = false, needs to be
2433  * treated differently.
2434  */
2435 if (rbd_dev->opts->alloc_size != rbd_dev->layout.object_size ||
2436 !rbd_obj_is_tail(obj_req)) {
2437 off = round_up(obj_req->ex.oe_off, rbd_dev->opts->alloc_size);
2438 next_off = round_down(obj_req->ex.oe_off + obj_req->ex.oe_len,
2439 rbd_dev->opts->alloc_size);
2440 if (off >= next_off)
2441 return 1;
2442
2443 dout("%s %p %llu~%llu -> %llu~%llu\n", __func__,
2444 obj_req, obj_req->ex.oe_off, obj_req->ex.oe_len,
2445 off, next_off - off);
2446 obj_req->ex.oe_off = off;
2447 obj_req->ex.oe_len = next_off - off;
2448 }
2449
2450 /* reverse map the entire object onto the parent */
2451 ret = rbd_obj_calc_img_extents(obj_req, true);
2452 if (ret)
2453 return ret;
2454
2455 obj_req->flags |= RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT;
2456 if (rbd_obj_is_entire(obj_req) && !obj_req->num_img_extents)
2457 obj_req->flags |= RBD_OBJ_FLAG_DELETION;
2458
2459 obj_req->write_state = RBD_OBJ_WRITE_START;
2460 return 0;
2461 }
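
/*
 * Examples of the alloc_size alignment above, assuming a 64 KiB
 * alloc_size: a discard of 4096~204800 shrinks to 65536~131072, while
 * a discard of 10240~102400 rounds to an empty range (off == next_off)
 * and the object request is dropped by returning 1.
 */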
2462
2463 static void __rbd_osd_setup_zeroout_ops(struct ceph_osd_request *osd_req,
2464 int which)
2465 {
2466 struct rbd_obj_request *obj_req = osd_req->r_priv;
2467 u16 opcode;
2468
2469 if (rbd_obj_is_entire(obj_req)) {
2470 if (obj_req->num_img_extents) {
2471 if (!(obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED))
2472 osd_req_op_init(osd_req, which++,
2473 CEPH_OSD_OP_CREATE, 0);
2474 opcode = CEPH_OSD_OP_TRUNCATE;
2475 } else {
2476 rbd_assert(obj_req->flags & RBD_OBJ_FLAG_DELETION);
2477 osd_req_op_init(osd_req, which++,
2478 CEPH_OSD_OP_DELETE, 0);
2479 opcode = 0;
2480 }
2481 } else {
2482 opcode = truncate_or_zero_opcode(obj_req);
2483 }
2484
2485 if (opcode)
2486 osd_req_op_extent_init(osd_req, which, opcode,
2487 obj_req->ex.oe_off, obj_req->ex.oe_len,
2488 0, 0);
2489 }
2490
2491 static int rbd_obj_init_zeroout(struct rbd_obj_request *obj_req)
2492 {
2493 int ret;
2494
2495 /* reverse map the entire object onto the parent */
2496 ret = rbd_obj_calc_img_extents(obj_req, true);
2497 if (ret)
2498 return ret;
2499
2500 if (rbd_obj_copyup_enabled(obj_req))
2501 obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ENABLED;
2502 if (!obj_req->num_img_extents) {
2503 obj_req->flags |= RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT;
2504 if (rbd_obj_is_entire(obj_req))
2505 obj_req->flags |= RBD_OBJ_FLAG_DELETION;
2506 }
2507
2508 obj_req->write_state = RBD_OBJ_WRITE_START;
2509 return 0;
2510 }
2511
2512 static int count_write_ops(struct rbd_obj_request *obj_req)
2513 {
2514 struct rbd_img_request *img_req = obj_req->img_request;
2515
2516 switch (img_req->op_type) {
2517 case OBJ_OP_WRITE:
2518 if (!use_object_map(img_req->rbd_dev) ||
2519 !(obj_req->flags & RBD_OBJ_FLAG_MAY_EXIST))
2520 return 2;
2521
2522 return 1;
2523 case OBJ_OP_DISCARD:
2524 return 1;
2525 case OBJ_OP_ZEROOUT:
2526 if (rbd_obj_is_entire(obj_req) && obj_req->num_img_extents &&
2527 !(obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED))
2528 return 2;
2529
2530 return 1;
2531 default:
2532 BUG();
2533 }
2534 }
2535
2536 static void rbd_osd_setup_write_ops(struct ceph_osd_request *osd_req,
2537 int which)
2538 {
2539 struct rbd_obj_request *obj_req = osd_req->r_priv;
2540
2541 switch (obj_req->img_request->op_type) {
2542 case OBJ_OP_WRITE:
2543 __rbd_osd_setup_write_ops(osd_req, which);
2544 break;
2545 case OBJ_OP_DISCARD:
2546 __rbd_osd_setup_discard_ops(osd_req, which);
2547 break;
2548 case OBJ_OP_ZEROOUT:
2549 __rbd_osd_setup_zeroout_ops(osd_req, which);
2550 break;
2551 default:
2552 BUG();
2553 }
2554 }
2555
2556 /*
2557  * Prune the list of object requests (adjust offset and/or length, drop
2558  * redundant requests).  Prepare object request state machines and image
2559  * request state machine for execution.
2560  */
2561 static int __rbd_img_fill_request(struct rbd_img_request *img_req)
2562 {
2563 struct rbd_obj_request *obj_req, *next_obj_req;
2564 int ret;
2565
2566 for_each_obj_request_safe(img_req, obj_req, next_obj_req) {
2567 switch (img_req->op_type) {
2568 case OBJ_OP_READ:
2569 ret = rbd_obj_init_read(obj_req);
2570 break;
2571 case OBJ_OP_WRITE:
2572 ret = rbd_obj_init_write(obj_req);
2573 break;
2574 case OBJ_OP_DISCARD:
2575 ret = rbd_obj_init_discard(obj_req);
2576 break;
2577 case OBJ_OP_ZEROOUT:
2578 ret = rbd_obj_init_zeroout(obj_req);
2579 break;
2580 default:
2581 BUG();
2582 }
2583 if (ret < 0)
2584 return ret;
2585 if (ret > 0) {
2586 rbd_img_obj_request_del(img_req, obj_req);
2587 continue;
2588 }
2589 }
2590
2591 img_req->state = RBD_IMG_START;
2592 return 0;
2593 }
2594
2595 union rbd_img_fill_iter {
2596 struct ceph_bio_iter bio_iter;
2597 struct ceph_bvec_iter bvec_iter;
2598 };
2599
2600 struct rbd_img_fill_ctx {
2601 enum obj_request_type pos_type;
2602 union rbd_img_fill_iter *pos;
2603 union rbd_img_fill_iter iter;
2604 ceph_object_extent_fn_t set_pos_fn;
2605 ceph_object_extent_fn_t count_fn;
2606 ceph_object_extent_fn_t copy_fn;
2607 };
2608
2609 static struct ceph_object_extent *alloc_object_extent(void *arg)
2610 {
2611 struct rbd_img_request *img_req = arg;
2612 struct rbd_obj_request *obj_req;
2613
2614 obj_req = rbd_obj_request_create();
2615 if (!obj_req)
2616 return NULL;
2617
2618 rbd_img_obj_request_add(img_req, obj_req);
2619 return &obj_req->ex;
2620 }
2621
2622 /*
2623  * While su != os && sc == 1 is technically not fancy (it's the same
2624  * layout as su == os && sc == 1), we can't use the nocopy path for it
2625  * because ->set_pos_fn() should be called only once per object.
2626  * ceph_file_to_extents() invokes the action callback once per stripe
2627  * unit, so it would be called multiple times per object in that case.
2628  */
2629 static bool rbd_layout_is_fancy(struct ceph_file_layout *l)
2630 {
2631 return l->stripe_unit != l->object_size;
2632 }
2633
2634 static int rbd_img_fill_request_nocopy(struct rbd_img_request *img_req,
2635 struct ceph_file_extent *img_extents,
2636 u32 num_img_extents,
2637 struct rbd_img_fill_ctx *fctx)
2638 {
2639 u32 i;
2640 int ret;
2641
2642 img_req->data_type = fctx->pos_type;
2643
2644 /*
2645  * Create object requests and set each object request's starting
2646  * position in the provided bio (list) or bio_vec array.
2647  */
2648 fctx->iter = *fctx->pos;
2649 for (i = 0; i < num_img_extents; i++) {
2650 ret = ceph_file_to_extents(&img_req->rbd_dev->layout,
2651 img_extents[i].fe_off,
2652 img_extents[i].fe_len,
2653 &img_req->object_extents,
2654 alloc_object_extent, img_req,
2655 fctx->set_pos_fn, &fctx->iter);
2656 if (ret)
2657 return ret;
2658 }
2659
2660 return __rbd_img_fill_request(img_req);
2661 }
2662
2663 /*
2664  * Map a list of image extents to a list of object extents, create the
2665  * corresponding object requests (normally each to a different object,
2666  * but not always) and add them to @img_req.  For each object request,
2667  * set up its data descriptor to point to the corresponding chunk(s) of
2668  * @fctx->pos data buffer.
2669  *
2670  * Because ceph_file_to_extents() will merge adjacent object extents
2671  * together, each object request's data descriptor may point to multiple
2672  * different chunks of @fctx->pos data buffer.
2673  *
2674  * @fctx->pos data buffer is assumed to be large enough.
2675  */
2676 static int rbd_img_fill_request(struct rbd_img_request *img_req,
2677 struct ceph_file_extent *img_extents,
2678 u32 num_img_extents,
2679 struct rbd_img_fill_ctx *fctx)
2680 {
2681 struct rbd_device *rbd_dev = img_req->rbd_dev;
2682 struct rbd_obj_request *obj_req;
2683 u32 i;
2684 int ret;
2685
2686 if (fctx->pos_type == OBJ_REQUEST_NODATA ||
2687 !rbd_layout_is_fancy(&rbd_dev->layout))
2688 return rbd_img_fill_request_nocopy(img_req, img_extents,
2689 num_img_extents, fctx);
2690
2691 img_req->data_type = OBJ_REQUEST_OWN_BVECS;
2692
2693 /*
2694  * Create object requests and determine ->bvec_count for each object
2695  * request.  Note that the sum of ->bvec_count over all object requests
2696  * may be greater than the number of bio_vecs in the provided bio (list)
2697  * or bio_vec array because when mapped, those bio_vecs can straddle
2698  * stripe unit boundaries.
2699  */
2700 fctx->iter = *fctx->pos;
2701 for (i = 0; i < num_img_extents; i++) {
2702 ret = ceph_file_to_extents(&rbd_dev->layout,
2703 img_extents[i].fe_off,
2704 img_extents[i].fe_len,
2705 &img_req->object_extents,
2706 alloc_object_extent, img_req,
2707 fctx->count_fn, &fctx->iter);
2708 if (ret)
2709 return ret;
2710 }
2711
2712 for_each_obj_request(img_req, obj_req) {
2713 obj_req->bvec_pos.bvecs = kmalloc_array(obj_req->bvec_count,
2714 sizeof(*obj_req->bvec_pos.bvecs),
2715 GFP_NOIO);
2716 if (!obj_req->bvec_pos.bvecs)
2717 return -ENOMEM;
2718 }
2719
2720 /*
2721  * Fill in each object request's private bio_vec array, splitting and
2722  * rearranging the provided bio_vecs in stripe unit chunks as needed.
2723  */
2724 fctx->iter = *fctx->pos;
2725 for (i = 0; i < num_img_extents; i++) {
2726 ret = ceph_iterate_extents(&rbd_dev->layout,
2727 img_extents[i].fe_off,
2728 img_extents[i].fe_len,
2729 &img_req->object_extents,
2730 fctx->copy_fn, &fctx->iter);
2731 if (ret)
2732 return ret;
2733 }
2734
2735 return __rbd_img_fill_request(img_req);
2736 }
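
/*
 * The fancy-striping path above is deliberately two-pass: the first
 * ceph_file_to_extents() walk only counts bio_vecs per object request
 * (fctx->count_fn), so each ->bvec_pos.bvecs array can be sized
 * exactly with one allocation; the second ceph_iterate_extents() walk
 * (fctx->copy_fn) then fills those arrays without allocating in the
 * middle of the I/O path.
 */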
2737
2738 static int rbd_img_fill_nodata(struct rbd_img_request *img_req,
2739 u64 off, u64 len)
2740 {
2741 struct ceph_file_extent ex = { off, len };
2742 union rbd_img_fill_iter dummy = {};
2743 struct rbd_img_fill_ctx fctx = {
2744 .pos_type = OBJ_REQUEST_NODATA,
2745 .pos = &dummy,
2746 };
2747
2748 return rbd_img_fill_request(img_req, &ex, 1, &fctx);
2749 }
2750
2751 static void set_bio_pos(struct ceph_object_extent *ex, u32 bytes, void *arg)
2752 {
2753 struct rbd_obj_request *obj_req =
2754 container_of(ex, struct rbd_obj_request, ex);
2755 struct ceph_bio_iter *it = arg;
2756
2757 dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
2758 obj_req->bio_pos = *it;
2759 ceph_bio_iter_advance(it, bytes);
2760 }
2761
2762 static void count_bio_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
2763 {
2764 struct rbd_obj_request *obj_req =
2765 container_of(ex, struct rbd_obj_request, ex);
2766 struct ceph_bio_iter *it = arg;
2767
2768 dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
2769 ceph_bio_iter_advance_step(it, bytes, ({
2770 obj_req->bvec_count++;
2771 }));
2772
2773 }
2774
2775 static void copy_bio_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
2776 {
2777 struct rbd_obj_request *obj_req =
2778 container_of(ex, struct rbd_obj_request, ex);
2779 struct ceph_bio_iter *it = arg;
2780
2781 dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
2782 ceph_bio_iter_advance_step(it, bytes, ({
2783 obj_req->bvec_pos.bvecs[obj_req->bvec_idx++] = bv;
2784 obj_req->bvec_pos.iter.bi_size += bv.bv_len;
2785 }));
2786 }
2787
2788 static int __rbd_img_fill_from_bio(struct rbd_img_request *img_req,
2789 struct ceph_file_extent *img_extents,
2790 u32 num_img_extents,
2791 struct ceph_bio_iter *bio_pos)
2792 {
2793 struct rbd_img_fill_ctx fctx = {
2794 .pos_type = OBJ_REQUEST_BIO,
2795 .pos = (union rbd_img_fill_iter *)bio_pos,
2796 .set_pos_fn = set_bio_pos,
2797 .count_fn = count_bio_bvecs,
2798 .copy_fn = copy_bio_bvecs,
2799 };
2800
2801 return rbd_img_fill_request(img_req, img_extents, num_img_extents,
2802 &fctx);
2803 }
2804
2805 static int rbd_img_fill_from_bio(struct rbd_img_request *img_req,
2806 u64 off, u64 len, struct bio *bio)
2807 {
2808 struct ceph_file_extent ex = { off, len };
2809 struct ceph_bio_iter it = { .bio = bio, .iter = bio->bi_iter };
2810
2811 return __rbd_img_fill_from_bio(img_req, &ex, 1, &it);
2812 }
2813
2814 static void set_bvec_pos(struct ceph_object_extent *ex, u32 bytes, void *arg)
2815 {
2816 struct rbd_obj_request *obj_req =
2817 container_of(ex, struct rbd_obj_request, ex);
2818 struct ceph_bvec_iter *it = arg;
2819
2820 obj_req->bvec_pos = *it;
2821 ceph_bvec_iter_shorten(&obj_req->bvec_pos, bytes);
2822 ceph_bvec_iter_advance(it, bytes);
2823 }
2824
2825 static void count_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
2826 {
2827 struct rbd_obj_request *obj_req =
2828 container_of(ex, struct rbd_obj_request, ex);
2829 struct ceph_bvec_iter *it = arg;
2830
2831 ceph_bvec_iter_advance_step(it, bytes, ({
2832 obj_req->bvec_count++;
2833 }));
2834 }
2835
2836 static void copy_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
2837 {
2838 struct rbd_obj_request *obj_req =
2839 container_of(ex, struct rbd_obj_request, ex);
2840 struct ceph_bvec_iter *it = arg;
2841
2842 ceph_bvec_iter_advance_step(it, bytes, ({
2843 obj_req->bvec_pos.bvecs[obj_req->bvec_idx++] = bv;
2844 obj_req->bvec_pos.iter.bi_size += bv.bv_len;
2845 }));
2846 }
2847
2848 static int __rbd_img_fill_from_bvecs(struct rbd_img_request *img_req,
2849 struct ceph_file_extent *img_extents,
2850 u32 num_img_extents,
2851 struct ceph_bvec_iter *bvec_pos)
2852 {
2853 struct rbd_img_fill_ctx fctx = {
2854 .pos_type = OBJ_REQUEST_BVECS,
2855 .pos = (union rbd_img_fill_iter *)bvec_pos,
2856 .set_pos_fn = set_bvec_pos,
2857 .count_fn = count_bvecs,
2858 .copy_fn = copy_bvecs,
2859 };
2860
2861 return rbd_img_fill_request(img_req, img_extents, num_img_extents,
2862 &fctx);
2863 }
2864
2865 static int rbd_img_fill_from_bvecs(struct rbd_img_request *img_req,
2866 struct ceph_file_extent *img_extents,
2867 u32 num_img_extents,
2868 struct bio_vec *bvecs)
2869 {
2870 struct ceph_bvec_iter it = {
2871 .bvecs = bvecs,
2872 .iter = { .bi_size = ceph_file_extents_bytes(img_extents,
2873 num_img_extents) },
2874 };
2875
2876 return __rbd_img_fill_from_bvecs(img_req, img_extents, num_img_extents,
2877 &it);
2878 }
2879
2880 static void rbd_img_handle_request_work(struct work_struct *work)
2881 {
2882 struct rbd_img_request *img_req =
2883 container_of(work, struct rbd_img_request, work);
2884
2885 rbd_img_handle_request(img_req, img_req->work_result);
2886 }
2887
2888 static void rbd_img_schedule(struct rbd_img_request *img_req, int result)
2889 {
2890 INIT_WORK(&img_req->work, rbd_img_handle_request_work);
2891 img_req->work_result = result;
2892 queue_work(rbd_wq, &img_req->work);
2893 }
2894
2895 static bool rbd_obj_may_exist(struct rbd_obj_request *obj_req)
2896 {
2897 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2898
2899 if (rbd_object_map_may_exist(rbd_dev, obj_req->ex.oe_objno)) {
2900 obj_req->flags |= RBD_OBJ_FLAG_MAY_EXIST;
2901 return true;
2902 }
2903
2904 dout("%s %p objno %llu assuming dne\n", __func__, obj_req,
2905 obj_req->ex.oe_objno);
2906 return false;
2907 }
2908
2909 static int rbd_obj_read_object(struct rbd_obj_request *obj_req)
2910 {
2911 struct ceph_osd_request *osd_req;
2912 int ret;
2913
2914 osd_req = __rbd_obj_add_osd_request(obj_req, NULL, 1);
2915 if (IS_ERR(osd_req))
2916 return PTR_ERR(osd_req);
2917
2918 osd_req_op_extent_init(osd_req, 0, CEPH_OSD_OP_READ,
2919 obj_req->ex.oe_off, obj_req->ex.oe_len, 0, 0);
2920 rbd_osd_setup_data(osd_req, 0);
2921 rbd_osd_format_read(osd_req);
2922
2923 ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO);
2924 if (ret)
2925 return ret;
2926
2927 rbd_osd_submit(osd_req);
2928 return 0;
2929 }
2930
2931 static int rbd_obj_read_from_parent(struct rbd_obj_request *obj_req)
2932 {
2933 struct rbd_img_request *img_req = obj_req->img_request;
2934 struct rbd_img_request *child_img_req;
2935 int ret;
2936
2937 child_img_req = rbd_img_request_create(img_req->rbd_dev->parent,
2938 OBJ_OP_READ, NULL);
2939 if (!child_img_req)
2940 return -ENOMEM;
2941
2942 __set_bit(IMG_REQ_CHILD, &child_img_req->flags);
2943 child_img_req->obj_request = obj_req;
2944
2945 dout("%s child_img_req %p for obj_req %p\n", __func__, child_img_req,
2946 obj_req);
2947
2948 if (!rbd_img_is_write(img_req)) {
2949 switch (img_req->data_type) {
2950 case OBJ_REQUEST_BIO:
2951 ret = __rbd_img_fill_from_bio(child_img_req,
2952 obj_req->img_extents,
2953 obj_req->num_img_extents,
2954 &obj_req->bio_pos);
2955 break;
2956 case OBJ_REQUEST_BVECS:
2957 case OBJ_REQUEST_OWN_BVECS:
2958 ret = __rbd_img_fill_from_bvecs(child_img_req,
2959 obj_req->img_extents,
2960 obj_req->num_img_extents,
2961 &obj_req->bvec_pos);
2962 break;
2963 default:
2964 BUG();
2965 }
2966 } else {
2967 ret = rbd_img_fill_from_bvecs(child_img_req,
2968 obj_req->img_extents,
2969 obj_req->num_img_extents,
2970 obj_req->copyup_bvecs);
2971 }
2972 if (ret) {
2973 rbd_img_request_put(child_img_req);
2974 return ret;
2975 }
2976
2977 /* avoid parent chain recursion */
2978 rbd_img_schedule(child_img_req, 0);
2979 return 0;
2980 }
2981
2982 static bool rbd_obj_advance_read(struct rbd_obj_request *obj_req, int *result)
2983 {
2984 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2985 int ret;
2986
2987 again:
2988 switch (obj_req->read_state) {
2989 case RBD_OBJ_READ_START:
2990 rbd_assert(!*result);
2991
2992 if (!rbd_obj_may_exist(obj_req)) {
2993 *result = -ENOENT;
2994 obj_req->read_state = RBD_OBJ_READ_OBJECT;
2995 goto again;
2996 }
2997
2998 ret = rbd_obj_read_object(obj_req);
2999 if (ret) {
3000 *result = ret;
3001 return true;
3002 }
3003 obj_req->read_state = RBD_OBJ_READ_OBJECT;
3004 return false;
3005 case RBD_OBJ_READ_OBJECT:
3006 if (*result == -ENOENT && rbd_dev->parent_overlap) {
3007 /* reverse map this object extent onto the parent */
3008 ret = rbd_obj_calc_img_extents(obj_req, false);
3009 if (ret) {
3010 *result = ret;
3011 return true;
3012 }
3013 if (obj_req->num_img_extents) {
3014 ret = rbd_obj_read_from_parent(obj_req);
3015 if (ret) {
3016 *result = ret;
3017 return true;
3018 }
3019 obj_req->read_state = RBD_OBJ_READ_PARENT;
3020 return false;
3021 }
3022 }
3023
3024 /*
3025  * -ENOENT means a hole in the image -- zero-fill the entire
3026  * length of the request.  A short read also implies zero-fill
3027  * to the end of the request.
3028  */
3029 if (*result == -ENOENT) {
3030 rbd_obj_zero_range(obj_req, 0, obj_req->ex.oe_len);
3031 *result = 0;
3032 } else if (*result >= 0) {
3033 if (*result < obj_req->ex.oe_len)
3034 rbd_obj_zero_range(obj_req, *result,
3035 obj_req->ex.oe_len - *result);
3036 else
3037 rbd_assert(*result == obj_req->ex.oe_len);
3038 *result = 0;
3039 }
3040 return true;
3041 case RBD_OBJ_READ_PARENT:
3042 /*
3043  * The parent image is read only up to the overlap -- zero-fill
3044  * from the overlap to the end of the request.
3045  */
3046 if (!*result) {
3047 u32 obj_overlap = rbd_obj_img_extents_bytes(obj_req);
3048
3049 if (obj_overlap < obj_req->ex.oe_len)
3050 rbd_obj_zero_range(obj_req, obj_overlap,
3051 obj_req->ex.oe_len - obj_overlap);
3052 }
3053 return true;
3054 default:
3055 BUG();
3056 }
3057 }
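
/*
 * The read state machine above, in summary:
 *
 *   RBD_OBJ_READ_START -> RBD_OBJ_READ_OBJECT
 *       OSD read submitted, or -ENOENT synthesized straight from the
 *       object map if the object can't exist
 *   RBD_OBJ_READ_OBJECT -> done, or -> RBD_OBJ_READ_PARENT
 *       holes and short reads are zero-filled; -ENOENT within the
 *       parent overlap redirects to the parent image
 *   RBD_OBJ_READ_PARENT -> done
 *       anything past the overlap is zero-filled
 */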
3058
3059 static bool rbd_obj_write_is_noop(struct rbd_obj_request *obj_req)
3060 {
3061 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3062
3063 if (rbd_object_map_may_exist(rbd_dev, obj_req->ex.oe_objno))
3064 obj_req->flags |= RBD_OBJ_FLAG_MAY_EXIST;
3065
3066 if (!(obj_req->flags & RBD_OBJ_FLAG_MAY_EXIST) &&
3067 (obj_req->flags & RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT)) {
3068 dout("%s %p noop for nonexistent\n", __func__, obj_req);
3069 return true;
3070 }
3071
3072 return false;
3073 }
3074
3075 /*
3076  * Return:
3077  *   0 - object map update sent
3078  *   1 - object map update isn't needed
3079  *  <0 - error occurred
3080  */
3081 static int rbd_obj_write_pre_object_map(struct rbd_obj_request *obj_req)
3082 {
3083 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3084 u8 new_state;
3085
3086 if (!(rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
3087 return 1;
3088
3089 if (obj_req->flags & RBD_OBJ_FLAG_DELETION)
3090 new_state = OBJECT_PENDING;
3091 else
3092 new_state = OBJECT_EXISTS;
3093
3094 return rbd_object_map_update(obj_req, CEPH_NOSNAP, new_state, NULL);
3095 }
3096
3097 static int rbd_obj_write_object(struct rbd_obj_request *obj_req)
3098 {
3099 struct ceph_osd_request *osd_req;
3100 int num_ops = count_write_ops(obj_req);
3101 int which = 0;
3102 int ret;
3103
3104 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED)
3105 num_ops++; /* stat */
3106
3107 osd_req = rbd_obj_add_osd_request(obj_req, num_ops);
3108 if (IS_ERR(osd_req))
3109 return PTR_ERR(osd_req);
3110
3111 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED) {
3112 ret = rbd_osd_setup_stat(osd_req, which++);
3113 if (ret)
3114 return ret;
3115 }
3116
3117 rbd_osd_setup_write_ops(osd_req, which);
3118 rbd_osd_format_write(osd_req);
3119
3120 ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO);
3121 if (ret)
3122 return ret;
3123
3124 rbd_osd_submit(osd_req);
3125 return 0;
3126 }
3127
3128 /*
3129  * copyup_bvecs pages are never highmem pages
3130  */
3131 static bool is_zero_bvecs(struct bio_vec *bvecs, u32 bytes)
3132 {
3133 struct ceph_bvec_iter it = {
3134 .bvecs = bvecs,
3135 .iter = { .bi_size = bytes },
3136 };
3137
3138 ceph_bvec_iter_advance_step(&it, bytes, ({
3139 if (memchr_inv(page_address(bv.bv_page) + bv.bv_offset, 0,
3140 bv.bv_len))
3141 return false;
3142 }));
3143 return true;
3144 }
3145
3146 #define MODS_ONLY U32_MAX
3147
3148 static int rbd_obj_copyup_empty_snapc(struct rbd_obj_request *obj_req,
3149 u32 bytes)
3150 {
3151 struct ceph_osd_request *osd_req;
3152 int ret;
3153
3154 dout("%s obj_req %p bytes %u\n", __func__, obj_req, bytes);
3155 rbd_assert(bytes > 0 && bytes != MODS_ONLY);
3156
3157 osd_req = __rbd_obj_add_osd_request(obj_req, &rbd_empty_snapc, 1);
3158 if (IS_ERR(osd_req))
3159 return PTR_ERR(osd_req);
3160
3161 ret = rbd_osd_setup_copyup(osd_req, 0, bytes);
3162 if (ret)
3163 return ret;
3164
3165 rbd_osd_format_write(osd_req);
3166
3167 ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO);
3168 if (ret)
3169 return ret;
3170
3171 rbd_osd_submit(osd_req);
3172 return 0;
3173 }
3174
3175 static int rbd_obj_copyup_current_snapc(struct rbd_obj_request *obj_req,
3176 u32 bytes)
3177 {
3178 struct ceph_osd_request *osd_req;
3179 int num_ops = count_write_ops(obj_req);
3180 int which = 0;
3181 int ret;
3182
3183 dout("%s obj_req %p bytes %u\n", __func__, obj_req, bytes);
3184
3185 if (bytes != MODS_ONLY)
3186 num_ops++; /* copyup */
3187
3188 osd_req = rbd_obj_add_osd_request(obj_req, num_ops);
3189 if (IS_ERR(osd_req))
3190 return PTR_ERR(osd_req);
3191
3192 if (bytes != MODS_ONLY) {
3193 ret = rbd_osd_setup_copyup(osd_req, which++, bytes);
3194 if (ret)
3195 return ret;
3196 }
3197
3198 rbd_osd_setup_write_ops(osd_req, which);
3199 rbd_osd_format_write(osd_req);
3200
3201 ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO);
3202 if (ret)
3203 return ret;
3204
3205 rbd_osd_submit(osd_req);
3206 return 0;
3207 }
3208
3209 static int setup_copyup_bvecs(struct rbd_obj_request *obj_req, u64 obj_overlap)
3210 {
3211 u32 i;
3212
3213 rbd_assert(!obj_req->copyup_bvecs);
3214 obj_req->copyup_bvec_count = calc_pages_for(0, obj_overlap);
3215 obj_req->copyup_bvecs = kcalloc(obj_req->copyup_bvec_count,
3216 sizeof(*obj_req->copyup_bvecs),
3217 GFP_NOIO);
3218 if (!obj_req->copyup_bvecs)
3219 return -ENOMEM;
3220
3221 for (i = 0; i < obj_req->copyup_bvec_count; i++) {
3222 unsigned int len = min(obj_overlap, (u64)PAGE_SIZE);
3223
3224 obj_req->copyup_bvecs[i].bv_page = alloc_page(GFP_NOIO);
3225 if (!obj_req->copyup_bvecs[i].bv_page)
3226 return -ENOMEM;
3227
3228 obj_req->copyup_bvecs[i].bv_offset = 0;
3229 obj_req->copyup_bvecs[i].bv_len = len;
3230 obj_overlap -= len;
3231 }
3232
3233 rbd_assert(!obj_overlap);
3234 return 0;
3235 }
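
/*
 * For example, an obj_overlap of 10000 bytes yields
 * calc_pages_for(0, 10000) == 3 bvecs: two full 4096-byte pages and a
 * final 1808-byte one, with the rbd_assert() above checking that the
 * lengths add up exactly.
 */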
3236
3237 /*
3238  * The target object doesn't exist.  Read the data for the entire
3239  * target object up to the overlap point (if any) from the parent,
3240  * so we can use it for a copyup.
3241  */
3242 static int rbd_obj_copyup_read_parent(struct rbd_obj_request *obj_req)
3243 {
3244 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3245 int ret;
3246
3247 rbd_assert(obj_req->num_img_extents);
3248 prune_extents(obj_req->img_extents, &obj_req->num_img_extents,
3249 rbd_dev->parent_overlap);
3250 if (!obj_req->num_img_extents) {
3251 /*
3252  * The overlap has become 0 (most likely because the
3253  * image has been flattened).  Re-submit the original
3254  * request -- pass MODS_ONLY since the copyup isn't
3255  * needed anymore.
3256  */
3257 return rbd_obj_copyup_current_snapc(obj_req, MODS_ONLY);
3258 }
3259
3260 ret = setup_copyup_bvecs(obj_req, rbd_obj_img_extents_bytes(obj_req));
3261 if (ret)
3262 return ret;
3263
3264 return rbd_obj_read_from_parent(obj_req);
3265 }
3266
3267 static void rbd_obj_copyup_object_maps(struct rbd_obj_request *obj_req)
3268 {
3269 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3270 struct ceph_snap_context *snapc = obj_req->img_request->snapc;
3271 u8 new_state;
3272 u32 i;
3273 int ret;
3274
3275 rbd_assert(!obj_req->pending.result && !obj_req->pending.num_pending);
3276
3277 if (!(rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
3278 return;
3279
3280 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ZEROS)
3281 return;
3282
3283 for (i = 0; i < snapc->num_snaps; i++) {
3284 if ((rbd_dev->header.features & RBD_FEATURE_FAST_DIFF) &&
3285 i + 1 < snapc->num_snaps)
3286 new_state = OBJECT_EXISTS_CLEAN;
3287 else
3288 new_state = OBJECT_EXISTS;
3289
3290 ret = rbd_object_map_update(obj_req, snapc->snaps[i],
3291 new_state, NULL);
3292 if (ret < 0) {
3293 obj_req->pending.result = ret;
3294 return;
3295 }
3296
3297 rbd_assert(!ret);
3298 obj_req->pending.num_pending++;
3299 }
3300 }
3301
3302 static void rbd_obj_copyup_write_object(struct rbd_obj_request *obj_req)
3303 {
3304 u32 bytes = rbd_obj_img_extents_bytes(obj_req);
3305 int ret;
3306
3307 rbd_assert(!obj_req->pending.result && !obj_req->pending.num_pending);
3308
3309 /*
3310  * Only send non-zero copyup data to save some I/O and network
3311  * bandwidth -- zero copyup data is equivalent to the object not
3312  * existing.
3313  */
3314 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ZEROS)
3315 bytes = 0;
3316
3317 if (obj_req->img_request->snapc->num_snaps && bytes > 0) {
3318 /*
3319  * Send a copyup request with an empty snapshot context to
3320  * deep-copyup the object through all existing snapshots.
3321  * A second request with the current snapshot context will
3322  * be sent for the actual modification.
3323  */
3324 ret = rbd_obj_copyup_empty_snapc(obj_req, bytes);
3325 if (ret) {
3326 obj_req->pending.result = ret;
3327 return;
3328 }
3329
3330 obj_req->pending.num_pending++;
3331 bytes = MODS_ONLY;
3332 }
3333
3334 ret = rbd_obj_copyup_current_snapc(obj_req, bytes);
3335 if (ret) {
3336 obj_req->pending.result = ret;
3337 return;
3338 }
3339
3340 obj_req->pending.num_pending++;
3341 }
3342
3343 static bool rbd_obj_advance_copyup(struct rbd_obj_request *obj_req, int *result)
3344 {
3345 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3346 int ret;
3347
3348 again:
3349 switch (obj_req->copyup_state) {
3350 case RBD_OBJ_COPYUP_START:
3351 rbd_assert(!*result);
3352
3353 ret = rbd_obj_copyup_read_parent(obj_req);
3354 if (ret) {
3355 *result = ret;
3356 return true;
3357 }
3358 if (obj_req->num_img_extents)
3359 obj_req->copyup_state = RBD_OBJ_COPYUP_READ_PARENT;
3360 else
3361 obj_req->copyup_state = RBD_OBJ_COPYUP_WRITE_OBJECT;
3362 return false;
3363 case RBD_OBJ_COPYUP_READ_PARENT:
3364 if (*result)
3365 return true;
3366
3367 if (is_zero_bvecs(obj_req->copyup_bvecs,
3368 rbd_obj_img_extents_bytes(obj_req))) {
3369 dout("%s %p detected zeros\n", __func__, obj_req);
3370 obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ZEROS;
3371 }
3372
3373 rbd_obj_copyup_object_maps(obj_req);
3374 if (!obj_req->pending.num_pending) {
3375 *result = obj_req->pending.result;
3376 obj_req->copyup_state = RBD_OBJ_COPYUP_OBJECT_MAPS;
3377 goto again;
3378 }
3379 obj_req->copyup_state = __RBD_OBJ_COPYUP_OBJECT_MAPS;
3380 return false;
3381 case __RBD_OBJ_COPYUP_OBJECT_MAPS:
3382 if (!pending_result_dec(&obj_req->pending, result))
3383 return false;
3384 /* fall through */
3385 case RBD_OBJ_COPYUP_OBJECT_MAPS:
3386 if (*result) {
3387 rbd_warn(rbd_dev, "snap object map update failed: %d",
3388 *result);
3389 return true;
3390 }
3391
3392 rbd_obj_copyup_write_object(obj_req);
3393 if (!obj_req->pending.num_pending) {
3394 *result = obj_req->pending.result;
3395 obj_req->copyup_state = RBD_OBJ_COPYUP_WRITE_OBJECT;
3396 goto again;
3397 }
3398 obj_req->copyup_state = __RBD_OBJ_COPYUP_WRITE_OBJECT;
3399 return false;
3400 case __RBD_OBJ_COPYUP_WRITE_OBJECT:
3401 if (!pending_result_dec(&obj_req->pending, result))
3402 return false;
3403 /* fall through */
3404 case RBD_OBJ_COPYUP_WRITE_OBJECT:
3405 return true;
3406 default:
3407 BUG();
3408 }
3409 }
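
/*
 * The copyup sub-state machine above, in summary:
 *
 *   RBD_OBJ_COPYUP_START -> RBD_OBJ_COPYUP_READ_PARENT
 *       parent data read scheduled (or skip straight to WRITE_OBJECT
 *       if the parent overlap has vanished)
 *   RBD_OBJ_COPYUP_READ_PARENT -> (__)RBD_OBJ_COPYUP_OBJECT_MAPS
 *       snapshot object map updates sent, if needed
 *   RBD_OBJ_COPYUP_OBJECT_MAPS -> (__)RBD_OBJ_COPYUP_WRITE_OBJECT
 *       deep-copyup and/or modification requests sent
 *   RBD_OBJ_COPYUP_WRITE_OBJECT -> done
 *
 * The __-prefixed states wait for all pending replies via
 * pending_result_dec() before falling through.
 */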
3410
3411 /*
3412  * Return:
3413  *   0 - object map update sent
3414  *   1 - object map update isn't needed
3415  *  <0 - error occurred
3416  */
3417 static int rbd_obj_write_post_object_map(struct rbd_obj_request *obj_req)
3418 {
3419 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3420 u8 current_state = OBJECT_PENDING;
3421
3422 if (!(rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
3423 return 1;
3424
3425 if (!(obj_req->flags & RBD_OBJ_FLAG_DELETION))
3426 return 1;
3427
3428 return rbd_object_map_update(obj_req, CEPH_NOSNAP, OBJECT_NONEXISTENT,
3429 &current_state);
3430 }
3431
3432 static bool rbd_obj_advance_write(struct rbd_obj_request *obj_req, int *result)
3433 {
3434 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3435 int ret;
3436
3437 again:
3438 switch (obj_req->write_state) {
3439 case RBD_OBJ_WRITE_START:
3440 rbd_assert(!*result);
3441
3442 if (rbd_obj_write_is_noop(obj_req))
3443 return true;
3444
3445 ret = rbd_obj_write_pre_object_map(obj_req);
3446 if (ret < 0) {
3447 *result = ret;
3448 return true;
3449 }
3450 obj_req->write_state = RBD_OBJ_WRITE_PRE_OBJECT_MAP;
3451 if (ret > 0)
3452 goto again;
3453 return false;
3454 case RBD_OBJ_WRITE_PRE_OBJECT_MAP:
3455 if (*result) {
3456 rbd_warn(rbd_dev, "pre object map update failed: %d",
3457 *result);
3458 return true;
3459 }
3460 ret = rbd_obj_write_object(obj_req);
3461 if (ret) {
3462 *result = ret;
3463 return true;
3464 }
3465 obj_req->write_state = RBD_OBJ_WRITE_OBJECT;
3466 return false;
3467 case RBD_OBJ_WRITE_OBJECT:
3468 if (*result == -ENOENT) {
3469 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED) {
3470 *result = 0;
3471 obj_req->copyup_state = RBD_OBJ_COPYUP_START;
3472 obj_req->write_state = __RBD_OBJ_WRITE_COPYUP;
3473 goto again;
3474 }
3475 /*
3476  * On a non-existent object:
3477  *   delete - -ENOENT, truncate/zero - 0
3478  */
3479 if (obj_req->flags & RBD_OBJ_FLAG_DELETION)
3480 *result = 0;
3481 }
3482 if (*result)
3483 return true;
3484
3485 obj_req->write_state = RBD_OBJ_WRITE_COPYUP;
3486 goto again;
3487 case __RBD_OBJ_WRITE_COPYUP:
3488 if (!rbd_obj_advance_copyup(obj_req, result))
3489 return false;
3490 /* fall through */
3491 case RBD_OBJ_WRITE_COPYUP:
3492 if (*result) {
3493 rbd_warn(rbd_dev, "copyup failed: %d", *result);
3494 return true;
3495 }
3496 ret = rbd_obj_write_post_object_map(obj_req);
3497 if (ret < 0) {
3498 *result = ret;
3499 return true;
3500 }
3501 obj_req->write_state = RBD_OBJ_WRITE_POST_OBJECT_MAP;
3502 if (ret > 0)
3503 goto again;
3504 return false;
3505 case RBD_OBJ_WRITE_POST_OBJECT_MAP:
3506 if (*result)
3507 rbd_warn(rbd_dev, "post object map update failed: %d",
3508 *result);
3509 return true;
3510 default:
3511 BUG();
3512 }
3513 }
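
/*
 * The write state machine above, in summary:
 *
 *   RBD_OBJ_WRITE_START -> RBD_OBJ_WRITE_PRE_OBJECT_MAP
 *       HEAD object map update (skipped when not needed)
 *   RBD_OBJ_WRITE_PRE_OBJECT_MAP -> RBD_OBJ_WRITE_OBJECT
 *       the actual write/discard/zeroout OSD request
 *   RBD_OBJ_WRITE_OBJECT -> (__)RBD_OBJ_WRITE_COPYUP
 *       -ENOENT with copyup enabled diverts into the copyup
 *       sub-state machine
 *   RBD_OBJ_WRITE_COPYUP -> RBD_OBJ_WRITE_POST_OBJECT_MAP
 *       OBJECT_PENDING -> OBJECT_NONEXISTENT update for deletions
 *   RBD_OBJ_WRITE_POST_OBJECT_MAP -> done
 */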
3514
3515 /*
3516  * Return true if @obj_req is completed.
3517  */
3518 static bool __rbd_obj_handle_request(struct rbd_obj_request *obj_req,
3519 int *result)
3520 {
3521 struct rbd_img_request *img_req = obj_req->img_request;
3522 struct rbd_device *rbd_dev = img_req->rbd_dev;
3523 bool done;
3524
3525 mutex_lock(&obj_req->state_mutex);
3526 if (!rbd_img_is_write(img_req))
3527 done = rbd_obj_advance_read(obj_req, result);
3528 else
3529 done = rbd_obj_advance_write(obj_req, result);
3530 mutex_unlock(&obj_req->state_mutex);
3531
3532 if (done && *result) {
3533 rbd_assert(*result < 0);
3534 rbd_warn(rbd_dev, "%s at objno %llu %llu~%llu result %d",
3535 obj_op_name(img_req->op_type), obj_req->ex.oe_objno,
3536 obj_req->ex.oe_off, obj_req->ex.oe_len, *result);
3537 }
3538 return done;
3539 }
3540
3541 /*
3542  * This is open-coded in rbd_img_handle_request() to avoid parent
3543  * chain recursion.
3544  */
3545 static void rbd_obj_handle_request(struct rbd_obj_request *obj_req, int result)
3546 {
3547 if (__rbd_obj_handle_request(obj_req, &result))
3548 rbd_img_handle_request(obj_req->img_request, result);
3549 }
3550
3551 static bool need_exclusive_lock(struct rbd_img_request *img_req)
3552 {
3553 struct rbd_device *rbd_dev = img_req->rbd_dev;
3554
3555 if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK))
3556 return false;
3557
3558 if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
3559 return false;
3560
3561 rbd_assert(!test_bit(IMG_REQ_CHILD, &img_req->flags));
3562 if (rbd_dev->opts->lock_on_read ||
3563 (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
3564 return true;
3565
3566 return rbd_img_is_write(img_req);
3567 }
3568
3569 static bool rbd_lock_add_request(struct rbd_img_request *img_req)
3570 {
3571 struct rbd_device *rbd_dev = img_req->rbd_dev;
3572 bool locked;
3573
3574 lockdep_assert_held(&rbd_dev->lock_rwsem);
3575 locked = rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED;
3576 spin_lock(&rbd_dev->lock_lists_lock);
3577 rbd_assert(list_empty(&img_req->lock_item));
3578 if (!locked)
3579 list_add_tail(&img_req->lock_item, &rbd_dev->acquiring_list);
3580 else
3581 list_add_tail(&img_req->lock_item, &rbd_dev->running_list);
3582 spin_unlock(&rbd_dev->lock_lists_lock);
3583 return locked;
3584 }
3585
3586 static void rbd_lock_del_request(struct rbd_img_request *img_req)
3587 {
3588 struct rbd_device *rbd_dev = img_req->rbd_dev;
3589 bool need_wakeup;
3590
3591 lockdep_assert_held(&rbd_dev->lock_rwsem);
3592 spin_lock(&rbd_dev->lock_lists_lock);
3593 rbd_assert(!list_empty(&img_req->lock_item));
3594 list_del_init(&img_req->lock_item);
3595 need_wakeup = (rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING &&
3596 list_empty(&rbd_dev->running_list));
3597 spin_unlock(&rbd_dev->lock_lists_lock);
3598 if (need_wakeup)
3599 complete(&rbd_dev->releasing_wait);
3600 }
3601
3602 static int rbd_img_exclusive_lock(struct rbd_img_request *img_req)
3603 {
3604 struct rbd_device *rbd_dev = img_req->rbd_dev;
3605
3606 if (!need_exclusive_lock(img_req))
3607 return 1;
3608
3609 if (rbd_lock_add_request(img_req))
3610 return 1;
3611
3612 if (rbd_dev->opts->exclusive) {
3613 WARN_ON(1);
3614 return -EROFS;
3615 }
3616
3617 /*
3618  * Note the use of mod_delayed_work() in rbd_acquire_lock()
3619  * and cancel_delayed_work() in wake_lock_waiters().
3620  */
3621 dout("%s rbd_dev %p queueing lock_dwork\n", __func__, rbd_dev);
3622 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
3623 return 0;
3624 }
3625
3626 static void rbd_img_object_requests(struct rbd_img_request *img_req)
3627 {
3628 struct rbd_obj_request *obj_req;
3629
3630 rbd_assert(!img_req->pending.result && !img_req->pending.num_pending);
3631
3632 for_each_obj_request(img_req, obj_req) {
3633 int result = 0;
3634
3635 if (__rbd_obj_handle_request(obj_req, &result)) {
3636 if (result) {
3637 img_req->pending.result = result;
3638 return;
3639 }
3640 } else {
3641 img_req->pending.num_pending++;
3642 }
3643 }
3644 }
3645
3646 static bool rbd_img_advance(struct rbd_img_request *img_req, int *result)
3647 {
3648 struct rbd_device *rbd_dev = img_req->rbd_dev;
3649 int ret;
3650
3651 again:
3652 switch (img_req->state) {
3653 case RBD_IMG_START:
3654 rbd_assert(!*result);
3655
3656 ret = rbd_img_exclusive_lock(img_req);
3657 if (ret < 0) {
3658 *result = ret;
3659 return true;
3660 }
3661 img_req->state = RBD_IMG_EXCLUSIVE_LOCK;
3662 if (ret > 0)
3663 goto again;
3664 return false;
3665 case RBD_IMG_EXCLUSIVE_LOCK:
3666 if (*result)
3667 return true;
3668
3669 rbd_assert(!need_exclusive_lock(img_req) ||
3670 __rbd_is_lock_owner(rbd_dev));
3671
3672 rbd_img_object_requests(img_req);
3673 if (!img_req->pending.num_pending) {
3674 *result = img_req->pending.result;
3675 img_req->state = RBD_IMG_OBJECT_REQUESTS;
3676 goto again;
3677 }
3678 img_req->state = __RBD_IMG_OBJECT_REQUESTS;
3679 return false;
3680 case __RBD_IMG_OBJECT_REQUESTS:
3681 if (!pending_result_dec(&img_req->pending, result))
3682 return false;
3683 /* fall through */
3684 case RBD_IMG_OBJECT_REQUESTS:
3685 return true;
3686 default:
3687 BUG();
3688 }
3689 }
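
/*
 * The image request state machine above, in summary:
 *
 *   RBD_IMG_START -> RBD_IMG_EXCLUSIVE_LOCK
 *       completes immediately if no exclusive lock is needed or it is
 *       already owned; otherwise waits for lock acquisition
 *   RBD_IMG_EXCLUSIVE_LOCK -> (__)RBD_IMG_OBJECT_REQUESTS
 *       all object request state machines are kicked; the __ state
 *       waits for them to complete via pending_result_dec()
 *   RBD_IMG_OBJECT_REQUESTS -> done
 */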
3690
3691 /*
3692  * Return true if @img_req is completed.
3693  */
3694 static bool __rbd_img_handle_request(struct rbd_img_request *img_req,
3695 int *result)
3696 {
3697 struct rbd_device *rbd_dev = img_req->rbd_dev;
3698 bool done;
3699
3700 if (need_exclusive_lock(img_req)) {
3701 down_read(&rbd_dev->lock_rwsem);
3702 mutex_lock(&img_req->state_mutex);
3703 done = rbd_img_advance(img_req, result);
3704 if (done)
3705 rbd_lock_del_request(img_req);
3706 mutex_unlock(&img_req->state_mutex);
3707 up_read(&rbd_dev->lock_rwsem);
3708 } else {
3709 mutex_lock(&img_req->state_mutex);
3710 done = rbd_img_advance(img_req, result);
3711 mutex_unlock(&img_req->state_mutex);
3712 }
3713
3714 if (done && *result) {
3715 rbd_assert(*result < 0);
3716 rbd_warn(rbd_dev, "%s%s result %d",
3717 test_bit(IMG_REQ_CHILD, &img_req->flags) ? "child " : "",
3718 obj_op_name(img_req->op_type), *result);
3719 }
3720 return done;
3721 }
3722
3723 static void rbd_img_handle_request(struct rbd_img_request *img_req, int result)
3724 {
3725 again:
3726 if (!__rbd_img_handle_request(img_req, &result))
3727 return;
3728
3729 if (test_bit(IMG_REQ_CHILD, &img_req->flags)) {
3730 struct rbd_obj_request *obj_req = img_req->obj_request;
3731
3732 rbd_img_request_put(img_req);
3733 if (__rbd_obj_handle_request(obj_req, &result)) {
3734 img_req = obj_req->img_request;
3735 goto again;
3736 }
3737 } else {
3738 struct request *rq = img_req->rq;
3739
3740 rbd_img_request_put(img_req);
3741 blk_mq_end_request(rq, errno_to_blk_status(result));
3742 }
3743 }
3744
3745 static const struct rbd_client_id rbd_empty_cid;
3746
3747 static bool rbd_cid_equal(const struct rbd_client_id *lhs,
3748 const struct rbd_client_id *rhs)
3749 {
3750 return lhs->gid == rhs->gid && lhs->handle == rhs->handle;
3751 }
3752
3753 static struct rbd_client_id rbd_get_cid(struct rbd_device *rbd_dev)
3754 {
3755 struct rbd_client_id cid;
3756
3757 mutex_lock(&rbd_dev->watch_mutex);
3758 cid.gid = ceph_client_gid(rbd_dev->rbd_client->client);
3759 cid.handle = rbd_dev->watch_cookie;
3760 mutex_unlock(&rbd_dev->watch_mutex);
3761 return cid;
3762 }
3763
3764 /*
3765  * lock_rwsem must be held for write
3766  */
3767 static void rbd_set_owner_cid(struct rbd_device *rbd_dev,
3768 const struct rbd_client_id *cid)
3769 {
3770 dout("%s rbd_dev %p %llu-%llu -> %llu-%llu\n", __func__, rbd_dev,
3771 rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle,
3772 cid->gid, cid->handle);
3773 rbd_dev->owner_cid = *cid;
3774 }
3775
3776 static void format_lock_cookie(struct rbd_device *rbd_dev, char *buf)
3777 {
3778 mutex_lock(&rbd_dev->watch_mutex);
3779 sprintf(buf, "%s %llu", RBD_LOCK_COOKIE_PREFIX, rbd_dev->watch_cookie);
3780 mutex_unlock(&rbd_dev->watch_mutex);
3781 }
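
/*
 * The resulting cookie looks like "auto 93889778471424" (a sketch,
 * assuming the RBD_LOCK_COOKIE_PREFIX of "auto" defined earlier in
 * this file): prefix, space, then the watch cookie in decimal.  A u64
 * takes at most 20 digits, so the 32-byte buffers used by callers are
 * large enough.
 */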
3782
3783 static void __rbd_lock(struct rbd_device *rbd_dev, const char *cookie)
3784 {
3785 struct rbd_client_id cid = rbd_get_cid(rbd_dev);
3786
3787 rbd_dev->lock_state = RBD_LOCK_STATE_LOCKED;
3788 strcpy(rbd_dev->lock_cookie, cookie);
3789 rbd_set_owner_cid(rbd_dev, &cid);
3790 queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work);
3791 }
3792
3793 /*
3794  * lock_rwsem must be held for write
3795  */
3796 static int rbd_lock(struct rbd_device *rbd_dev)
3797 {
3798 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3799 char cookie[32];
3800 int ret;
3801
3802 WARN_ON(__rbd_is_lock_owner(rbd_dev) ||
3803 rbd_dev->lock_cookie[0] != '\0');
3804
3805 format_lock_cookie(rbd_dev, cookie);
3806 ret = ceph_cls_lock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
3807 RBD_LOCK_NAME, CEPH_CLS_LOCK_EXCLUSIVE, cookie,
3808 RBD_LOCK_TAG, "", 0);
3809 if (ret)
3810 return ret;
3811
3812 __rbd_lock(rbd_dev, cookie);
3813 return 0;
3814 }
3815
3816 /*
3817  * lock_rwsem must be held for write
3818  */
3819 static void rbd_unlock(struct rbd_device *rbd_dev)
3820 {
3821 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3822 int ret;
3823
3824 WARN_ON(!__rbd_is_lock_owner(rbd_dev) ||
3825 rbd_dev->lock_cookie[0] == '\0');
3826
3827 ret = ceph_cls_unlock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
3828 RBD_LOCK_NAME, rbd_dev->lock_cookie);
3829 if (ret && ret != -ENOENT)
3830 rbd_warn(rbd_dev, "failed to unlock header: %d", ret);
3831
3832 /* treat errors as the image is unlocked */
3833 rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
3834 rbd_dev->lock_cookie[0] = '\0';
3835 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
3836 queue_work(rbd_dev->task_wq, &rbd_dev->released_lock_work);
3837 }
3838
3839 static int __rbd_notify_op_lock(struct rbd_device *rbd_dev,
3840 enum rbd_notify_op notify_op,
3841 struct page ***preply_pages,
3842 size_t *preply_len)
3843 {
3844 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3845 struct rbd_client_id cid = rbd_get_cid(rbd_dev);
3846 char buf[4 + 8 + 8 + CEPH_ENCODING_START_BLK_LEN];
3847 int buf_size = sizeof(buf);
3848 void *p = buf;
3849
3850 dout("%s rbd_dev %p notify_op %d\n", __func__, rbd_dev, notify_op);
3851
3852 /* encode *LockPayload NotifyMessage (op + ClientId) */
3853 ceph_start_encoding(&p, 2, 1, buf_size - CEPH_ENCODING_START_BLK_LEN);
3854 ceph_encode_32(&p, notify_op);
3855 ceph_encode_64(&p, cid.gid);
3856 ceph_encode_64(&p, cid.handle);
3857
3858 return ceph_osdc_notify(osdc, &rbd_dev->header_oid,
3859 &rbd_dev->header_oloc, buf, buf_size,
3860 RBD_NOTIFY_TIMEOUT, preply_pages, preply_len);
3861 }
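
/*
 * Wire format of the NotifyMessage payload encoded above, matching the
 * declared buffer size of 4 + 8 + 8 + CEPH_ENCODING_START_BLK_LEN:
 *
 *   encoding block header (struct_v 2, compat 1, payload length)
 *   le32 notify_op
 *   le64 gid     \  ClientId of the sender
 *   le64 handle  /
 */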
3862
3863 static void rbd_notify_op_lock(struct rbd_device *rbd_dev,
3864 enum rbd_notify_op notify_op)
3865 {
3866 struct page **reply_pages;
3867 size_t reply_len;
3868
3869 __rbd_notify_op_lock(rbd_dev, notify_op, &reply_pages, &reply_len);
3870 ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
3871 }
3872
3873 static void rbd_notify_acquired_lock(struct work_struct *work)
3874 {
3875 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
3876 acquired_lock_work);
3877
3878 rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_ACQUIRED_LOCK);
3879 }
3880
3881 static void rbd_notify_released_lock(struct work_struct *work)
3882 {
3883 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
3884 released_lock_work);
3885
3886 rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_RELEASED_LOCK);
3887 }
3888
3889 static int rbd_request_lock(struct rbd_device *rbd_dev)
3890 {
3891 struct page **reply_pages;
3892 size_t reply_len;
3893 bool lock_owner_responded = false;
3894 int ret;
3895
3896 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3897
3898 ret = __rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_REQUEST_LOCK,
3899 &reply_pages, &reply_len);
3900 if (ret && ret != -ETIMEDOUT) {
3901 rbd_warn(rbd_dev, "failed to request lock: %d", ret);
3902 goto out;
3903 }
3904
3905 if (reply_len > 0 && reply_len <= PAGE_SIZE) {
3906 void *p = page_address(reply_pages[0]);
3907 void *const end = p + reply_len;
3908 u32 n;
3909
3910 ceph_decode_32_safe(&p, end, n, e_inval);
3911 while (n--) {
3912 u8 struct_v;
3913 u32 len;
3914
3915 ceph_decode_need(&p, end, 8 + 8, e_inval);
3916 p += 8 + 8; /* skip gid and cookie */
3917
3918 ceph_decode_32_safe(&p, end, len, e_inval);
3919 if (!len)
3920 continue;
3921
3922 if (lock_owner_responded) {
3923 rbd_warn(rbd_dev,
3924 "duplicate lock owners detected");
3925 ret = -EIO;
3926 goto out;
3927 }
3928
3929 lock_owner_responded = true;
3930 ret = ceph_start_decoding(&p, end, 1, "ResponseMessage",
3931 &struct_v, &len);
3932 if (ret) {
3933 rbd_warn(rbd_dev,
3934 "failed to decode ResponseMessage: %d",
3935 ret);
3936 goto e_inval;
3937 }
3938
3939 ret = ceph_decode_32(&p);
3940 }
3941 }
3942
3943 if (!lock_owner_responded) {
3944 rbd_warn(rbd_dev, "no lock owners detected");
3945 ret = -ETIMEDOUT;
3946 }
3947
3948 out:
3949 ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
3950 return ret;
3951
3952 e_inval:
3953 ret = -EINVAL;
3954 goto out;
3955 }
3956
3957 /*
3958  * Wake either image request state machine(s) or rbd_add_acquire_lock()
3959  * (i.e. "rbd map").
3960  */
3961 static void wake_lock_waiters(struct rbd_device *rbd_dev, int result)
3962 {
3963 struct rbd_img_request *img_req;
3964
3965 dout("%s rbd_dev %p result %d\n", __func__, rbd_dev, result);
3966 lockdep_assert_held_write(&rbd_dev->lock_rwsem);
3967
3968 cancel_delayed_work(&rbd_dev->lock_dwork);
3969 if (!completion_done(&rbd_dev->acquire_wait)) {
3970 rbd_assert(list_empty(&rbd_dev->acquiring_list) &&
3971 list_empty(&rbd_dev->running_list));
3972 rbd_dev->acquire_err = result;
3973 complete_all(&rbd_dev->acquire_wait);
3974 return;
3975 }
3976
3977 list_for_each_entry(img_req, &rbd_dev->acquiring_list, lock_item) {
3978 mutex_lock(&img_req->state_mutex);
3979 rbd_assert(img_req->state == RBD_IMG_EXCLUSIVE_LOCK);
3980 rbd_img_schedule(img_req, result);
3981 mutex_unlock(&img_req->state_mutex);
3982 }
3983
3984 list_splice_tail_init(&rbd_dev->acquiring_list, &rbd_dev->running_list);
3985 }
3986
3987 static int get_lock_owner_info(struct rbd_device *rbd_dev,
3988 struct ceph_locker **lockers, u32 *num_lockers)
3989 {
3990 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3991 u8 lock_type;
3992 char *lock_tag;
3993 int ret;
3994
3995 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3996
3997 ret = ceph_cls_lock_info(osdc, &rbd_dev->header_oid,
3998 &rbd_dev->header_oloc, RBD_LOCK_NAME,
3999 &lock_type, &lock_tag, lockers, num_lockers);
4000 if (ret)
4001 return ret;
4002
4003 if (*num_lockers == 0) {
4004 dout("%s rbd_dev %p no lockers detected\n", __func__, rbd_dev);
4005 goto out;
4006 }
4007
4008 if (strcmp(lock_tag, RBD_LOCK_TAG)) {
4009 rbd_warn(rbd_dev, "locked by external mechanism, tag %s",
4010 lock_tag);
4011 ret = -EBUSY;
4012 goto out;
4013 }
4014
4015 if (lock_type == CEPH_CLS_LOCK_SHARED) {
4016 rbd_warn(rbd_dev, "shared lock type detected");
4017 ret = -EBUSY;
4018 goto out;
4019 }
4020
4021 if (strncmp((*lockers)[0].id.cookie, RBD_LOCK_COOKIE_PREFIX,
4022 strlen(RBD_LOCK_COOKIE_PREFIX))) {
4023 rbd_warn(rbd_dev, "locked by external mechanism, cookie %s",
4024 (*lockers)[0].id.cookie);
4025 ret = -EBUSY;
4026 goto out;
4027 }
4028
4029 out:
4030 kfree(lock_tag);
4031 return ret;
4032 }
4033
4034 static int find_watcher(struct rbd_device *rbd_dev,
4035 const struct ceph_locker *locker)
4036 {
4037 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4038 struct ceph_watch_item *watchers;
4039 u32 num_watchers;
4040 u64 cookie;
4041 int i;
4042 int ret;
4043
4044 ret = ceph_osdc_list_watchers(osdc, &rbd_dev->header_oid,
4045 &rbd_dev->header_oloc, &watchers,
4046 &num_watchers);
4047 if (ret)
4048 return ret;
4049
4050 sscanf(locker->id.cookie, RBD_LOCK_COOKIE_PREFIX " %llu", &cookie);
4051 for (i = 0; i < num_watchers; i++) {
4052 if (!memcmp(&watchers[i].addr, &locker->info.addr,
4053 sizeof(locker->info.addr)) &&
4054 watchers[i].cookie == cookie) {
4055 struct rbd_client_id cid = {
4056 .gid = le64_to_cpu(watchers[i].name.num),
4057 .handle = cookie,
4058 };
4059
4060 dout("%s rbd_dev %p found cid %llu-%llu\n", __func__,
4061 rbd_dev, cid.gid, cid.handle);
4062 rbd_set_owner_cid(rbd_dev, &cid);
4063 ret = 1;
4064 goto out;
4065 }
4066 }
4067
4068 dout("%s rbd_dev %p no watchers\n", __func__, rbd_dev);
4069 ret = 0;
4070 out:
4071 kfree(watchers);
4072 return ret;
4073 }
4074
4075 /*
4076  * lock_rwsem must be held for write
4077  */
4078 static int rbd_try_lock(struct rbd_device *rbd_dev)
4079 {
4080 struct ceph_client *client = rbd_dev->rbd_client->client;
4081 struct ceph_locker *lockers;
4082 u32 num_lockers;
4083 int ret;
4084
4085 for (;;) {
4086 ret = rbd_lock(rbd_dev);
4087 if (ret != -EBUSY)
4088 return ret;
4089
4090 /* determine if the current lock holder is still alive */
4091 ret = get_lock_owner_info(rbd_dev, &lockers, &num_lockers);
4092 if (ret)
4093 return ret;
4094
4095 if (num_lockers == 0)
4096 goto again;
4097
4098 ret = find_watcher(rbd_dev, lockers);
4099 if (ret)
4100 goto out;
4101
4102 rbd_warn(rbd_dev, "breaking header lock owned by %s%llu",
4103 ENTITY_NAME(lockers[0].id.name));
4104
4105 ret = ceph_monc_blacklist_add(&client->monc,
4106 &lockers[0].info.addr);
4107 if (ret) {
4108 rbd_warn(rbd_dev, "blacklist of %s%llu failed: %d",
4109 ENTITY_NAME(lockers[0].id.name), ret);
4110 goto out;
4111 }
4112
4113 ret = ceph_cls_break_lock(&client->osdc, &rbd_dev->header_oid,
4114 &rbd_dev->header_oloc, RBD_LOCK_NAME,
4115 lockers[0].id.cookie,
4116 &lockers[0].id.name);
4117 if (ret && ret != -ENOENT)
4118 goto out;
4119
4120 again:
4121 ceph_free_lockers(lockers, num_lockers);
4122 }
4123
4124 out:
4125 ceph_free_lockers(lockers, num_lockers);
4126 return ret;
4127 }
4128
4129 static int rbd_post_acquire_action(struct rbd_device *rbd_dev)
4130 {
4131 int ret;
4132
4133 if (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP) {
4134 ret = rbd_object_map_open(rbd_dev);
4135 if (ret)
4136 return ret;
4137 }
4138
4139 return 0;
4140 }
4141
4142 /*
4143  * Return:
4144  *   0 - lock acquired
4145  *   1 - caller should call rbd_request_lock()
4146  *  <0 - error occurred
4147  */
4148 static int rbd_try_acquire_lock(struct rbd_device *rbd_dev)
4149 {
4150 int ret;
4151
4152 down_read(&rbd_dev->lock_rwsem);
4153 dout("%s rbd_dev %p read lock_state %d\n", __func__, rbd_dev,
4154 rbd_dev->lock_state);
4155 if (__rbd_is_lock_owner(rbd_dev)) {
4156 up_read(&rbd_dev->lock_rwsem);
4157 return 0;
4158 }
4159
4160 up_read(&rbd_dev->lock_rwsem);
4161 down_write(&rbd_dev->lock_rwsem);
4162 dout("%s rbd_dev %p write lock_state %d\n", __func__, rbd_dev,
4163 rbd_dev->lock_state);
4164 if (__rbd_is_lock_owner(rbd_dev)) {
4165 up_write(&rbd_dev->lock_rwsem);
4166 return 0;
4167 }
4168
4169 ret = rbd_try_lock(rbd_dev);
4170 if (ret < 0) {
4171 rbd_warn(rbd_dev, "failed to lock header: %d", ret);
4172 if (ret == -EBLACKLISTED)
4173 goto out;
4174
4175 ret = 1;
4176 }
4177 if (ret > 0) {
4178 up_write(&rbd_dev->lock_rwsem);
4179 return ret;
4180 }
4181
4182 rbd_assert(rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED);
4183 rbd_assert(list_empty(&rbd_dev->running_list));
4184
4185 ret = rbd_post_acquire_action(rbd_dev);
4186 if (ret) {
4187 rbd_warn(rbd_dev, "post-acquire action failed: %d", ret);
4188 /*
4189  * Can't stay in RBD_LOCK_STATE_LOCKED because
4190  * rbd_lock_add_request() would let the request through,
4191  * assuming that e.g. object map is locked and loaded.
4192  */
4193 rbd_unlock(rbd_dev);
4194 }
4195
4196 out:
4197 wake_lock_waiters(rbd_dev, ret);
4198 up_write(&rbd_dev->lock_rwsem);
4199 return ret;
4200 }
4201
4202 static void rbd_acquire_lock(struct work_struct *work)
4203 {
4204 struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
4205 struct rbd_device, lock_dwork);
4206 int ret;
4207
4208 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4209 again:
4210 ret = rbd_try_acquire_lock(rbd_dev);
4211 if (ret <= 0) {
4212 dout("%s rbd_dev %p ret %d - done\n", __func__, rbd_dev, ret);
4213 return;
4214 }
4215
4216 ret = rbd_request_lock(rbd_dev);
4217 if (ret == -ETIMEDOUT) {
4218 goto again;
4219 } else if (ret == -EROFS) {
4220 rbd_warn(rbd_dev, "peer will not release lock");
4221 down_write(&rbd_dev->lock_rwsem);
4222 wake_lock_waiters(rbd_dev, ret);
4223 up_write(&rbd_dev->lock_rwsem);
4224 } else if (ret < 0) {
4225 rbd_warn(rbd_dev, "error requesting lock: %d", ret);
4226 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
4227 RBD_RETRY_DELAY);
4228 } else {
4229 /*
4230  * lock owner acked, but resend if we don't see them
4231  * release the lock
4232  */
4233 dout("%s rbd_dev %p requeueing lock_dwork\n", __func__,
4234 rbd_dev);
4235 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
4236 msecs_to_jiffies(2 * RBD_NOTIFY_TIMEOUT * MSEC_PER_SEC));
4237 }
4238 }
4239
4240 static bool rbd_quiesce_lock(struct rbd_device *rbd_dev)
4241 {
4242 bool need_wait;
4243
4244 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4245 lockdep_assert_held_write(&rbd_dev->lock_rwsem);
4246
4247 if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED)
4248 return false;
4249
4250 /*
4251  * Ensure that all in-flight IO is flushed.
4252  */
4253 rbd_dev->lock_state = RBD_LOCK_STATE_RELEASING;
4254 rbd_assert(!completion_done(&rbd_dev->releasing_wait));
4255 need_wait = !list_empty(&rbd_dev->running_list);
4256 downgrade_write(&rbd_dev->lock_rwsem);
4257 if (need_wait)
4258 wait_for_completion(&rbd_dev->releasing_wait);
4259 up_read(&rbd_dev->lock_rwsem);
4260
4261 down_write(&rbd_dev->lock_rwsem);
4262 if (rbd_dev->lock_state != RBD_LOCK_STATE_RELEASING)
4263 return false;
4264
4265 rbd_assert(list_empty(&rbd_dev->running_list));
4266 return true;
4267 }
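
/*
 * Note the downgrade_write() above: completing an in-flight request
 * takes lock_rwsem for read in __rbd_img_handle_request(), so waiting
 * for the running list to drain with the write lock held would
 * deadlock.  The lock state is re-checked after the write lock is
 * re-acquired because it may have changed while the lock was dropped.
 */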
4268
4269 static void rbd_pre_release_action(struct rbd_device *rbd_dev)
4270 {
4271 if (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP)
4272 rbd_object_map_close(rbd_dev);
4273 }
4274
4275 static void __rbd_release_lock(struct rbd_device *rbd_dev)
4276 {
4277 rbd_assert(list_empty(&rbd_dev->running_list));
4278
4279 rbd_pre_release_action(rbd_dev);
4280 rbd_unlock(rbd_dev);
4281 }
4282
4283 /*
4284  * lock_rwsem must be held for write
4285  */
4286 static void rbd_release_lock(struct rbd_device *rbd_dev)
4287 {
4288 if (!rbd_quiesce_lock(rbd_dev))
4289 return;
4290
4291 __rbd_release_lock(rbd_dev);
4292
4293 /*
4294 * Give others a chance to grab the lock - we would re-acquire
4295 * almost immediately if we got new IO while draining the running
4296 * list otherwise.  We need to ack our own notifications, so this
4297 * cancel_delayed_work() isn't guaranteed to stick: lock_dwork may
4298 * be requeued by our own RELEASED_LOCK handling.
4299 */
4300 cancel_delayed_work(&rbd_dev->lock_dwork);
4301 }
4302
4303 static void rbd_release_lock_work(struct work_struct *work)
4304 {
4305 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
4306 unlock_work);
4307
4308 down_write(&rbd_dev->lock_rwsem);
4309 rbd_release_lock(rbd_dev);
4310 up_write(&rbd_dev->lock_rwsem);
4311 }
4312
4313 static void maybe_kick_acquire(struct rbd_device *rbd_dev)
4314 {
4315 bool have_requests;
4316
4317 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4318 if (__rbd_is_lock_owner(rbd_dev))
4319 return;
4320
4321 spin_lock(&rbd_dev->lock_lists_lock);
4322 have_requests = !list_empty(&rbd_dev->acquiring_list);
4323 spin_unlock(&rbd_dev->lock_lists_lock);
4324 if (have_requests || delayed_work_pending(&rbd_dev->lock_dwork)) {
4325 dout("%s rbd_dev %p kicking lock_dwork\n", __func__, rbd_dev);
4326 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
4327 }
4328 }
4329
4330 static void rbd_handle_acquired_lock(struct rbd_device *rbd_dev, u8 struct_v,
4331 void **p)
4332 {
4333 struct rbd_client_id cid = { 0 };
4334
4335 if (struct_v >= 2) {
4336 cid.gid = ceph_decode_64(p);
4337 cid.handle = ceph_decode_64(p);
4338 }
4339
4340 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
4341 cid.handle);
4342 if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
4343 down_write(&rbd_dev->lock_rwsem);
4344 if (rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
4345 /*
4346 * we already know that the remote client is
4347 * the owner
4348 */
4349 up_write(&rbd_dev->lock_rwsem);
4350 return;
4351 }
4352
4353 rbd_set_owner_cid(rbd_dev, &cid);
4354 downgrade_write(&rbd_dev->lock_rwsem);
4355 } else {
4356 down_read(&rbd_dev->lock_rwsem);
4357 }
4358
4359 maybe_kick_acquire(rbd_dev);
4360 up_read(&rbd_dev->lock_rwsem);
4361 }
4362
4363 static void rbd_handle_released_lock(struct rbd_device *rbd_dev, u8 struct_v,
4364 void **p)
4365 {
4366 struct rbd_client_id cid = { 0 };
4367
4368 if (struct_v >= 2) {
4369 cid.gid = ceph_decode_64(p);
4370 cid.handle = ceph_decode_64(p);
4371 }
4372
4373 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
4374 cid.handle);
4375 if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
4376 down_write(&rbd_dev->lock_rwsem);
4377 if (!rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
4378 dout("%s rbd_dev %p unexpected owner, cid %llu-%llu != owner_cid %llu-%llu\n",
4379 __func__, rbd_dev, cid.gid, cid.handle,
4380 rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle);
4381 up_write(&rbd_dev->lock_rwsem);
4382 return;
4383 }
4384
4385 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
4386 downgrade_write(&rbd_dev->lock_rwsem);
4387 } else {
4388 down_read(&rbd_dev->lock_rwsem);
4389 }
4390
4391 maybe_kick_acquire(rbd_dev);
4392 up_read(&rbd_dev->lock_rwsem);
4393 }
4394
4395 /*
4396 * Returns result for ResponseMessage to be encoded (<= 0), or 1 if no
4397 * ResponseMessage is needed.
4398 */
4399 static int rbd_handle_request_lock(struct rbd_device *rbd_dev, u8 struct_v,
4400 void **p)
4401 {
4402 struct rbd_client_id my_cid = rbd_get_cid(rbd_dev);
4403 struct rbd_client_id cid = { 0 };
4404 int result = 1;
4405
4406 if (struct_v >= 2) {
4407 cid.gid = ceph_decode_64(p);
4408 cid.handle = ceph_decode_64(p);
4409 }
4410
4411 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
4412 cid.handle);
4413 if (rbd_cid_equal(&cid, &my_cid))
4414 return result;
4415
4416 down_read(&rbd_dev->lock_rwsem);
4417 if (__rbd_is_lock_owner(rbd_dev)) {
4418 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED &&
4419 rbd_cid_equal(&rbd_dev->owner_cid, &rbd_empty_cid))
4420 goto out_unlock;
4421
4422 /*
4423 * encode ResponseMessage(0) so the peer can detect
4424 * a missing owner
4425 */
4426 result = 0;
4427
4428 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) {
4429 if (!rbd_dev->opts->exclusive) {
4430 dout("%s rbd_dev %p queueing unlock_work\n",
4431 __func__, rbd_dev);
4432 queue_work(rbd_dev->task_wq,
4433 &rbd_dev->unlock_work);
4434 } else {
4435 /* refuse to release the lock */
4436 result = -EROFS;
4437 }
4438 }
4439 }
4440
4441 out_unlock:
4442 up_read(&rbd_dev->lock_rwsem);
4443 return result;
4444 }
4445
4446 static void __rbd_acknowledge_notify(struct rbd_device *rbd_dev,
4447 u64 notify_id, u64 cookie, s32 *result)
4448 {
4449 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4450 char buf[4 + CEPH_ENCODING_START_BLK_LEN];
4451 int buf_size = sizeof(buf);
4452 int ret;
4453
4454 if (result) {
4455 void *p = buf;
4456
4457 /* encode ResponseMessage */
4458 ceph_start_encoding(&p, 1, 1,
4459 buf_size - CEPH_ENCODING_START_BLK_LEN);
4460 ceph_encode_32(&p, *result);
4461 } else {
4462 buf_size = 0;
4463 }
4464
4465 ret = ceph_osdc_notify_ack(osdc, &rbd_dev->header_oid,
4466 &rbd_dev->header_oloc, notify_id, cookie,
4467 buf, buf_size);
4468 if (ret)
4469 rbd_warn(rbd_dev, "acknowledge_notify failed: %d", ret);
4470 }
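/*
 * Wire layout of the ack payload built above when a result is
 * supplied (CEPH_ENCODING_START_BLK_LEN is the 6-byte start block:
 * u8 struct_v, u8 struct_compat, le32 struct_len):
 *
 *	u8   struct_v = 1
 *	u8   struct_compat = 1
 *	le32 struct_len = 4
 *	le32 result
 *
 * Without a result, a zero-length ack is sent instead.
 */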
4471
4472 static void rbd_acknowledge_notify(struct rbd_device *rbd_dev, u64 notify_id,
4473 u64 cookie)
4474 {
4475 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4476 __rbd_acknowledge_notify(rbd_dev, notify_id, cookie, NULL);
4477 }
4478
4479 static void rbd_acknowledge_notify_result(struct rbd_device *rbd_dev,
4480 u64 notify_id, u64 cookie, s32 result)
4481 {
4482 dout("%s rbd_dev %p result %d\n", __func__, rbd_dev, result);
4483 __rbd_acknowledge_notify(rbd_dev, notify_id, cookie, &result);
4484 }
4485
4486 static void rbd_watch_cb(void *arg, u64 notify_id, u64 cookie,
4487 u64 notifier_id, void *data, size_t data_len)
4488 {
4489 struct rbd_device *rbd_dev = arg;
4490 void *p = data;
4491 void *const end = p + data_len;
4492 u8 struct_v = 0;
4493 u32 len;
4494 u32 notify_op;
4495 int ret;
4496
4497 dout("%s rbd_dev %p cookie %llu notify_id %llu data_len %zu\n",
4498 __func__, rbd_dev, cookie, notify_id, data_len);
4499 if (data_len) {
4500 ret = ceph_start_decoding(&p, end, 1, "NotifyMessage",
4501 &struct_v, &len);
4502 if (ret) {
4503 rbd_warn(rbd_dev, "failed to decode NotifyMessage: %d",
4504 ret);
4505 return;
4506 }
4507
4508 notify_op = ceph_decode_32(&p);
4509 } else {
4510 /* legacy notification for header updates */
4511 notify_op = RBD_NOTIFY_OP_HEADER_UPDATE;
4512 len = 0;
4513 }
4514
4515 dout("%s rbd_dev %p notify_op %u\n", __func__, rbd_dev, notify_op);
4516 switch (notify_op) {
4517 case RBD_NOTIFY_OP_ACQUIRED_LOCK:
4518 rbd_handle_acquired_lock(rbd_dev, struct_v, &p);
4519 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4520 break;
4521 case RBD_NOTIFY_OP_RELEASED_LOCK:
4522 rbd_handle_released_lock(rbd_dev, struct_v, &p);
4523 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4524 break;
4525 case RBD_NOTIFY_OP_REQUEST_LOCK:
4526 ret = rbd_handle_request_lock(rbd_dev, struct_v, &p);
4527 if (ret <= 0)
4528 rbd_acknowledge_notify_result(rbd_dev, notify_id,
4529 cookie, ret);
4530 else
4531 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4532 break;
4533 case RBD_NOTIFY_OP_HEADER_UPDATE:
4534 ret = rbd_dev_refresh(rbd_dev);
4535 if (ret)
4536 rbd_warn(rbd_dev, "refresh failed: %d", ret);
4537
4538 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4539 break;
4540 default:
4541 if (rbd_is_lock_owner(rbd_dev))
4542 rbd_acknowledge_notify_result(rbd_dev, notify_id,
4543 cookie, -EOPNOTSUPP);
4544 else
4545 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4546 break;
4547 }
4548 }
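/*
 * Dispatch summary: lock acquire/release notifications update the
 * cached owner cid and are acked without a payload; REQUEST_LOCK is
 * acked with an encoded result when the handler returns <= 0;
 * HEADER_UPDATE triggers a refresh.  Unknown ops are acked with
 * -EOPNOTSUPP only by the lock owner, presumably so that a
 * non-owner does not fail a notification meant for the owner.
 */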
4549
4550 static void __rbd_unregister_watch(struct rbd_device *rbd_dev);
4551
4552 static void rbd_watch_errcb(void *arg, u64 cookie, int err)
4553 {
4554 struct rbd_device *rbd_dev = arg;
4555
4556 rbd_warn(rbd_dev, "encountered watch error: %d", err);
4557
4558 down_write(&rbd_dev->lock_rwsem);
4559 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
4560 up_write(&rbd_dev->lock_rwsem);
4561
4562 mutex_lock(&rbd_dev->watch_mutex);
4563 if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED) {
4564 __rbd_unregister_watch(rbd_dev);
4565 rbd_dev->watch_state = RBD_WATCH_STATE_ERROR;
4566
4567 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->watch_dwork, 0);
4568 }
4569 mutex_unlock(&rbd_dev->watch_mutex);
4570 }
4571
4572 /*
4573 * watch_mutex must be locked
4574 */
4575 static int __rbd_register_watch(struct rbd_device *rbd_dev)
4576 {
4577 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4578 struct ceph_osd_linger_request *handle;
4579
4580 rbd_assert(!rbd_dev->watch_handle);
4581 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4582
4583 handle = ceph_osdc_watch(osdc, &rbd_dev->header_oid,
4584 &rbd_dev->header_oloc, rbd_watch_cb,
4585 rbd_watch_errcb, rbd_dev);
4586 if (IS_ERR(handle))
4587 return PTR_ERR(handle);
4588
4589 rbd_dev->watch_handle = handle;
4590 return 0;
4591 }
4592
4593 /*
4594 * watch_mutex must be locked
4595 */
4596 static void __rbd_unregister_watch(struct rbd_device *rbd_dev)
4597 {
4598 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4599 int ret;
4600
4601 rbd_assert(rbd_dev->watch_handle);
4602 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4603
4604 ret = ceph_osdc_unwatch(osdc, rbd_dev->watch_handle);
4605 if (ret)
4606 rbd_warn(rbd_dev, "failed to unwatch: %d", ret);
4607
4608 rbd_dev->watch_handle = NULL;
4609 }
4610
4611 static int rbd_register_watch(struct rbd_device *rbd_dev)
4612 {
4613 int ret;
4614
4615 mutex_lock(&rbd_dev->watch_mutex);
4616 rbd_assert(rbd_dev->watch_state == RBD_WATCH_STATE_UNREGISTERED);
4617 ret = __rbd_register_watch(rbd_dev);
4618 if (ret)
4619 goto out;
4620
4621 rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
4622 rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
4623
4624 out:
4625 mutex_unlock(&rbd_dev->watch_mutex);
4626 return ret;
4627 }
4628
4629 static void cancel_tasks_sync(struct rbd_device *rbd_dev)
4630 {
4631 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4632
4633 cancel_work_sync(&rbd_dev->acquired_lock_work);
4634 cancel_work_sync(&rbd_dev->released_lock_work);
4635 cancel_delayed_work_sync(&rbd_dev->lock_dwork);
4636 cancel_work_sync(&rbd_dev->unlock_work);
4637 }
4638
4639 /*
4640 * header_rwsem must not be held to avoid a deadlock with
4641 * rbd_dev_refresh() when flushing notifies.
4642 */
4643 static void rbd_unregister_watch(struct rbd_device *rbd_dev)
4644 {
4645 cancel_tasks_sync(rbd_dev);
4646
4647 mutex_lock(&rbd_dev->watch_mutex);
4648 if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED)
4649 __rbd_unregister_watch(rbd_dev);
4650 rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
4651 mutex_unlock(&rbd_dev->watch_mutex);
4652
4653 cancel_delayed_work_sync(&rbd_dev->watch_dwork);
4654 ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
4655 }
4656
4657 /*
4658 * lock_rwsem must be held for write
4659 */
4660 static void rbd_reacquire_lock(struct rbd_device *rbd_dev)
4661 {
4662 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4663 char cookie[32];
4664 int ret;
4665
4666 if (!rbd_quiesce_lock(rbd_dev))
4667 return;
4668
4669 format_lock_cookie(rbd_dev, cookie);
4670 ret = ceph_cls_set_cookie(osdc, &rbd_dev->header_oid,
4671 &rbd_dev->header_oloc, RBD_LOCK_NAME,
4672 CEPH_CLS_LOCK_EXCLUSIVE, rbd_dev->lock_cookie,
4673 RBD_LOCK_TAG, cookie);
4674 if (ret) {
4675 if (ret != -EOPNOTSUPP)
4676 rbd_warn(rbd_dev, "failed to update lock cookie: %d",
4677 ret);
4678
4679 /*
4680 * Lock cookie cannot be updated on older OSDs, so do
4681 * a manual release and queue an acquire.
4682 */
4683 __rbd_release_lock(rbd_dev);
4684 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
4685 } else {
4686 __rbd_lock(rbd_dev, cookie);
4687 wake_lock_waiters(rbd_dev, 0);
4688 }
4689 }
4690
4691 static void rbd_reregister_watch(struct work_struct *work)
4692 {
4693 struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
4694 struct rbd_device, watch_dwork);
4695 int ret;
4696
4697 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4698
4699 mutex_lock(&rbd_dev->watch_mutex);
4700 if (rbd_dev->watch_state != RBD_WATCH_STATE_ERROR) {
4701 mutex_unlock(&rbd_dev->watch_mutex);
4702 return;
4703 }
4704
4705 ret = __rbd_register_watch(rbd_dev);
4706 if (ret) {
4707 rbd_warn(rbd_dev, "failed to reregister watch: %d", ret);
4708 if (ret != -EBLACKLISTED && ret != -ENOENT) {
4709 queue_delayed_work(rbd_dev->task_wq,
4710 &rbd_dev->watch_dwork,
4711 RBD_RETRY_DELAY);
4712 mutex_unlock(&rbd_dev->watch_mutex);
4713 return;
4714 }
4715
4716 mutex_unlock(&rbd_dev->watch_mutex);
4717 down_write(&rbd_dev->lock_rwsem);
4718 wake_lock_waiters(rbd_dev, ret);
4719 up_write(&rbd_dev->lock_rwsem);
4720 return;
4721 }
4722
4723 rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
4724 rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
4725 mutex_unlock(&rbd_dev->watch_mutex);
4726
4727 down_write(&rbd_dev->lock_rwsem);
4728 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED)
4729 rbd_reacquire_lock(rbd_dev);
4730 up_write(&rbd_dev->lock_rwsem);
4731
4732 ret = rbd_dev_refresh(rbd_dev);
4733 if (ret)
4734 rbd_warn(rbd_dev, "reregistration refresh failed: %d", ret);
4735 }
4736
4737 /*
4738 * Synchronous osd object method call.  Returns the number of bytes
4739 * returned in the outbound buffer, or a negative error code.
4740 */
4741 static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
4742 struct ceph_object_id *oid,
4743 struct ceph_object_locator *oloc,
4744 const char *method_name,
4745 const void *outbound,
4746 size_t outbound_size,
4747 void *inbound,
4748 size_t inbound_size)
4749 {
4750 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4751 struct page *req_page = NULL;
4752 struct page *reply_page;
4753 int ret;
4754
4755 /*
4756 * Method calls are ultimately read operations.  The result
4757 * should be placed into the inbound buffer provided.  They
4758 * also supply outbound data--parameters for the object
4759 * method.  Currently if this is present it will be a
4760 * snapshot id.
4761 */
4762 if (outbound) {
4763 if (outbound_size > PAGE_SIZE)
4764 return -E2BIG;
4765
4766 req_page = alloc_page(GFP_KERNEL);
4767 if (!req_page)
4768 return -ENOMEM;
4769
4770 memcpy(page_address(req_page), outbound, outbound_size);
4771 }
4772
4773 reply_page = alloc_page(GFP_KERNEL);
4774 if (!reply_page) {
4775 if (req_page)
4776 __free_page(req_page);
4777 return -ENOMEM;
4778 }
4779
4780 ret = ceph_osdc_call(osdc, oid, oloc, RBD_DRV_NAME, method_name,
4781 CEPH_OSD_FLAG_READ, req_page, outbound_size,
4782 &reply_page, &inbound_size);
4783 if (!ret) {
4784 memcpy(inbound, page_address(reply_page), inbound_size);
4785 ret = inbound_size;
4786 }
4787
4788 if (req_page)
4789 __free_page(req_page);
4790 __free_page(reply_page);
4791 return ret;
4792 }
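/*
 * Illustrative call (this mirrors _rbd_dev_v2_snap_size() further
 * below; it is not additional functionality):
 *
 *	__le64 snapid = cpu_to_le64(CEPH_NOSNAP);
 *	struct {
 *		u8 order;
 *		__le64 size;
 *	} __attribute__ ((packed)) size_buf = { 0 };
 *
 *	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
 *				  &rbd_dev->header_oloc, "get_size",
 *				  &snapid, sizeof(snapid),
 *				  &size_buf, sizeof(size_buf));
 *
 * A non-negative return is the number of reply bytes copied into
 * the inbound buffer; both directions are effectively limited to
 * PAGE_SIZE.
 */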
4793
4794 static void rbd_queue_workfn(struct work_struct *work)
4795 {
4796 struct request *rq = blk_mq_rq_from_pdu(work);
4797 struct rbd_device *rbd_dev = rq->q->queuedata;
4798 struct rbd_img_request *img_request;
4799 struct ceph_snap_context *snapc = NULL;
4800 u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
4801 u64 length = blk_rq_bytes(rq);
4802 enum obj_operation_type op_type;
4803 u64 mapping_size;
4804 int result;
4805
4806 switch (req_op(rq)) {
4807 case REQ_OP_DISCARD:
4808 op_type = OBJ_OP_DISCARD;
4809 break;
4810 case REQ_OP_WRITE_ZEROES:
4811 op_type = OBJ_OP_ZEROOUT;
4812 break;
4813 case REQ_OP_WRITE:
4814 op_type = OBJ_OP_WRITE;
4815 break;
4816 case REQ_OP_READ:
4817 op_type = OBJ_OP_READ;
4818 break;
4819 default:
4820 dout("%s: non-fs request type %d\n", __func__, req_op(rq));
4821 result = -EIO;
4822 goto err;
4823 }
4824
4825 /* Ignore/skip any zero-length requests */
4826
4827 if (!length) {
4828 dout("%s: zero-length request\n", __func__);
4829 result = 0;
4830 goto err_rq;
4831 }
4832
4833 if (op_type != OBJ_OP_READ && rbd_dev->spec->snap_id != CEPH_NOSNAP) {
4834 rbd_warn(rbd_dev, "%s on read-only snapshot",
4835 obj_op_name(op_type));
4836 result = -EIO;
4837 goto err;
4838 }
4839
4840 /*
4841 * Quit early if the mapped snapshot no longer exists.  It's
4842 * still possible the snapshot will have disappeared by the
4843 * time our request arrives at the osd, but there's no sense in
4844 * sending it if we already know.
4845 */
4846 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
4847 dout("request for non-existent snapshot");
4848 rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
4849 result = -ENXIO;
4850 goto err_rq;
4851 }
4852
4853 if (offset && length > U64_MAX - offset + 1) {
4854 rbd_warn(rbd_dev, "bad request range (%llu~%llu)", offset,
4855 length);
4856 result = -EINVAL;
4857 goto err_rq;
4858 }
4859
4860 blk_mq_start_request(rq);
4861
4862 down_read(&rbd_dev->header_rwsem);
4863 mapping_size = rbd_dev->mapping.size;
4864 if (op_type != OBJ_OP_READ) {
4865 snapc = rbd_dev->header.snapc;
4866 ceph_get_snap_context(snapc);
4867 }
4868 up_read(&rbd_dev->header_rwsem);
4869
4870 if (offset + length > mapping_size) {
4871 rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset,
4872 length, mapping_size);
4873 result = -EIO;
4874 goto err_rq;
4875 }
4876
4877 img_request = rbd_img_request_create(rbd_dev, op_type, snapc);
4878 if (!img_request) {
4879 result = -ENOMEM;
4880 goto err_rq;
4881 }
4882 img_request->rq = rq;
4883 snapc = NULL;
4884
4885 dout("%s rbd_dev %p img_req %p %s %llu~%llu\n", __func__, rbd_dev,
4886 img_request, obj_op_name(op_type), offset, length);
4887
4888 if (op_type == OBJ_OP_DISCARD || op_type == OBJ_OP_ZEROOUT)
4889 result = rbd_img_fill_nodata(img_request, offset, length);
4890 else
4891 result = rbd_img_fill_from_bio(img_request, offset, length,
4892 rq->bio);
4893 if (result)
4894 goto err_img_request;
4895
4896 rbd_img_handle_request(img_request, 0);
4897 return;
4898
4899 err_img_request:
4900 rbd_img_request_put(img_request);
4901 err_rq:
4902 if (result)
4903 rbd_warn(rbd_dev, "%s %llx at %llx result %d",
4904 obj_op_name(op_type), length, offset, result);
4905 ceph_put_snap_context(snapc);
4906 err:
4907 blk_mq_end_request(rq, errno_to_blk_status(result));
4908 }
4909
4910 static blk_status_t rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
4911 const struct blk_mq_queue_data *bd)
4912 {
4913 struct request *rq = bd->rq;
4914 struct work_struct *work = blk_mq_rq_to_pdu(rq);
4915
4916 queue_work(rbd_wq, work);
4917 return BLK_STS_OK;
4918 }
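/*
 * The work_struct queued above lives in the request PDU
 * (tag_set.cmd_size is sizeof(struct work_struct), see
 * rbd_init_disk()) and was initialized to run rbd_queue_workfn()
 * by rbd_init_request(), so every request is serviced from the
 * rbd_wq workqueue rather than in rbd_queue_rq() context.
 */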
4919
4920 static void rbd_free_disk(struct rbd_device *rbd_dev)
4921 {
4922 blk_cleanup_queue(rbd_dev->disk->queue);
4923 blk_mq_free_tag_set(&rbd_dev->tag_set);
4924 put_disk(rbd_dev->disk);
4925 rbd_dev->disk = NULL;
4926 }
4927
4928 static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
4929 struct ceph_object_id *oid,
4930 struct ceph_object_locator *oloc,
4931 void *buf, int buf_len)
4932
4933 {
4934 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4935 struct ceph_osd_request *req;
4936 struct page **pages;
4937 int num_pages = calc_pages_for(0, buf_len);
4938 int ret;
4939
4940 req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL);
4941 if (!req)
4942 return -ENOMEM;
4943
4944 ceph_oid_copy(&req->r_base_oid, oid);
4945 ceph_oloc_copy(&req->r_base_oloc, oloc);
4946 req->r_flags = CEPH_OSD_FLAG_READ;
4947
4948 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
4949 if (IS_ERR(pages)) {
4950 ret = PTR_ERR(pages);
4951 goto out_req;
4952 }
4953
4954 osd_req_op_extent_init(req, 0, CEPH_OSD_OP_READ, 0, buf_len, 0, 0);
4955 osd_req_op_extent_osd_data_pages(req, 0, pages, buf_len, 0, false,
4956 true);
4957
4958 ret = ceph_osdc_alloc_messages(req, GFP_KERNEL);
4959 if (ret)
4960 goto out_req;
4961
4962 ceph_osdc_start_request(osdc, req, false);
4963 ret = ceph_osdc_wait_request(osdc, req);
4964 if (ret >= 0)
4965 ceph_copy_from_page_vector(pages, buf, 0, ret);
4966
4967 out_req:
4968 ceph_osdc_put_request(req);
4969 return ret;
4970 }
4971
4972 /*
4973 * Read the complete header for the given rbd device.  On successful
4974 * return, the rbd_dev->header field will contain up-to-date
4975 * information about the image.
4976 */
4977 static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
4978 {
4979 struct rbd_image_header_ondisk *ondisk = NULL;
4980 u32 snap_count = 0;
4981 u64 names_size = 0;
4982 u32 want_count;
4983 int ret;
4984
4985 /*
4986 * The complete header will include an array of its 64-bit
4987 * snapshot ids, followed by the names of those snapshots as
4988 * a contiguous block of NUL-terminated strings.  (The header
4989 * has a variable number of snapshots, so the read is retried
4990 * until the snapshot count it reports stops changing.)
4991 */
4992 do {
4993 size_t size;
4994
4995 kfree(ondisk);
4996
4997 size = sizeof (*ondisk);
4998 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
4999 size += names_size;
5000 ondisk = kmalloc(size, GFP_KERNEL);
5001 if (!ondisk)
5002 return -ENOMEM;
5003
5004 ret = rbd_obj_read_sync(rbd_dev, &rbd_dev->header_oid,
5005 &rbd_dev->header_oloc, ondisk, size);
5006 if (ret < 0)
5007 goto out;
5008 if ((size_t)ret < size) {
5009 ret = -ENXIO;
5010 rbd_warn(rbd_dev, "short header read (want %zd got %d)",
5011 size, ret);
5012 goto out;
5013 }
5014 if (!rbd_dev_ondisk_valid(ondisk)) {
5015 ret = -ENXIO;
5016 rbd_warn(rbd_dev, "invalid header");
5017 goto out;
5018 }
5019
5020 names_size = le64_to_cpu(ondisk->snap_names_len);
5021 want_count = snap_count;
5022 snap_count = le32_to_cpu(ondisk->snap_count);
5023 } while (snap_count != want_count);
5024
5025 ret = rbd_header_from_disk(rbd_dev, ondisk);
5026 out:
5027 kfree(ondisk);
5028
5029 return ret;
5030 }
5031
5032 /*
5033 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
5034 * has disappeared from the (just updated) snapshot context.
5035 */
5036 static void rbd_exists_validate(struct rbd_device *rbd_dev)
5037 {
5038 u64 snap_id;
5039
5040 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
5041 return;
5042
5043 snap_id = rbd_dev->spec->snap_id;
5044 if (snap_id == CEPH_NOSNAP)
5045 return;
5046
5047 if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
5048 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
5049 }
5050
5051 static void rbd_dev_update_size(struct rbd_device *rbd_dev)
5052 {
5053 sector_t size;
5054
5055 /*
5056 * If EXISTS is not set, rbd_dev->disk may be NULL, so don't
5057 * try to update its size.  If REMOVING is set, updating size
5058 * is just useless work since the device can't be opened.
5059 */
5060 if (test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags) &&
5061 !test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags)) {
5062 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
5063 dout("setting size to %llu sectors", (unsigned long long)size);
5064 set_capacity(rbd_dev->disk, size);
5065 revalidate_disk(rbd_dev->disk);
5066 }
5067 }
5068
5069 static int rbd_dev_refresh(struct rbd_device *rbd_dev)
5070 {
5071 u64 mapping_size;
5072 int ret;
5073
5074 down_write(&rbd_dev->header_rwsem);
5075 mapping_size = rbd_dev->mapping.size;
5076
5077 ret = rbd_dev_header_info(rbd_dev);
5078 if (ret)
5079 goto out;
5080
5081 /*
5082 * If there is a parent, see if it has disappeared due to the
5083 * mapped image getting flattened.
5084 */
5085 if (rbd_dev->parent) {
5086 ret = rbd_dev_v2_parent_info(rbd_dev);
5087 if (ret)
5088 goto out;
5089 }
5090
5091 if (rbd_dev->spec->snap_id == CEPH_NOSNAP) {
5092 rbd_dev->mapping.size = rbd_dev->header.image_size;
5093 } else {
5094 /* validate mapped snapshot's EXISTS flag */
5095 rbd_exists_validate(rbd_dev);
5096 }
5097
5098 out:
5099 up_write(&rbd_dev->header_rwsem);
5100 if (!ret && mapping_size != rbd_dev->mapping.size)
5101 rbd_dev_update_size(rbd_dev);
5102
5103 return ret;
5104 }
5105
5106 static int rbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
5107 unsigned int hctx_idx, unsigned int numa_node)
5108 {
5109 struct work_struct *work = blk_mq_rq_to_pdu(rq);
5110
5111 INIT_WORK(work, rbd_queue_workfn);
5112 return 0;
5113 }
5114
5115 static const struct blk_mq_ops rbd_mq_ops = {
5116 .queue_rq = rbd_queue_rq,
5117 .init_request = rbd_init_request,
5118 };
5119
5120 static int rbd_init_disk(struct rbd_device *rbd_dev)
5121 {
5122 struct gendisk *disk;
5123 struct request_queue *q;
5124 unsigned int objset_bytes =
5125 rbd_dev->layout.object_size * rbd_dev->layout.stripe_count;
5126 int err;
5127
5128 /* create gendisk info */
5129 disk = alloc_disk(single_major ?
5130 (1 << RBD_SINGLE_MAJOR_PART_SHIFT) :
5131 RBD_MINORS_PER_MAJOR);
5132 if (!disk)
5133 return -ENOMEM;
5134
5135 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
5136 rbd_dev->dev_id);
5137 disk->major = rbd_dev->major;
5138 disk->first_minor = rbd_dev->minor;
5139 if (single_major)
5140 disk->flags |= GENHD_FL_EXT_DEVT;
5141 disk->fops = &rbd_bd_ops;
5142 disk->private_data = rbd_dev;
5143
5144 memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set));
5145 rbd_dev->tag_set.ops = &rbd_mq_ops;
5146 rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;
5147 rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
5148 rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
5149 rbd_dev->tag_set.nr_hw_queues = 1;
5150 rbd_dev->tag_set.cmd_size = sizeof(struct work_struct);
5151
5152 err = blk_mq_alloc_tag_set(&rbd_dev->tag_set);
5153 if (err)
5154 goto out_disk;
5155
5156 q = blk_mq_init_queue(&rbd_dev->tag_set);
5157 if (IS_ERR(q)) {
5158 err = PTR_ERR(q);
5159 goto out_tag_set;
5160 }
5161
5162 blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
5163 /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */
5164
5165 blk_queue_max_hw_sectors(q, objset_bytes >> SECTOR_SHIFT);
5166 q->limits.max_sectors = queue_max_hw_sectors(q);
5167 blk_queue_max_segments(q, USHRT_MAX);
5168 blk_queue_max_segment_size(q, UINT_MAX);
5169 blk_queue_io_min(q, rbd_dev->opts->alloc_size);
5170 blk_queue_io_opt(q, rbd_dev->opts->alloc_size);
5171
5172 if (rbd_dev->opts->trim) {
5173 blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
5174 q->limits.discard_granularity = rbd_dev->opts->alloc_size;
5175 blk_queue_max_discard_sectors(q, objset_bytes >> SECTOR_SHIFT);
5176 blk_queue_max_write_zeroes_sectors(q, objset_bytes >> SECTOR_SHIFT);
5177 }
5178
5179 if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
5180 q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
5181
5182 /*
5183 * disk_release() expects a queue ref from add_disk() and will
5184 * put it.  Hold an extra ref until add_disk() is called.
5185 */
5186 WARN_ON(!blk_get_queue(q));
5187 disk->queue = q;
5188 q->queuedata = rbd_dev;
5189
5190 rbd_dev->disk = disk;
5191
5192 return 0;
5193 out_tag_set:
5194 blk_mq_free_tag_set(&rbd_dev->tag_set);
5195 out_disk:
5196 put_disk(disk);
5197 return err;
5198 }
5199
5200 /*
5201 * sysfs device attributes
5202 */
5203
5204 static struct rbd_device *dev_to_rbd_dev(struct device *dev)
5205 {
5206 return container_of(dev, struct rbd_device, dev);
5207 }
5208
5209 static ssize_t rbd_size_show(struct device *dev,
5210 struct device_attribute *attr, char *buf)
5211 {
5212 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5213
5214 return sprintf(buf, "%llu\n",
5215 (unsigned long long)rbd_dev->mapping.size);
5216 }
5217
5218 /*
5219 * Note this shows the features for whatever's mapped, which is not
5220 * necessarily the base image.
5221 */
5222 static ssize_t rbd_features_show(struct device *dev,
5223 struct device_attribute *attr, char *buf)
5224 {
5225 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5226
5227 return sprintf(buf, "0x%016llx\n",
5228 (unsigned long long)rbd_dev->mapping.features);
5229 }
5230
5231 static ssize_t rbd_major_show(struct device *dev,
5232 struct device_attribute *attr, char *buf)
5233 {
5234 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5235
5236 if (rbd_dev->major)
5237 return sprintf(buf, "%d\n", rbd_dev->major);
5238
5239 return sprintf(buf, "(none)\n");
5240 }
5241
5242 static ssize_t rbd_minor_show(struct device *dev,
5243 struct device_attribute *attr, char *buf)
5244 {
5245 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5246
5247 return sprintf(buf, "%d\n", rbd_dev->minor);
5248 }
5249
5250 static ssize_t rbd_client_addr_show(struct device *dev,
5251 struct device_attribute *attr, char *buf)
5252 {
5253 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5254 struct ceph_entity_addr *client_addr =
5255 ceph_client_addr(rbd_dev->rbd_client->client);
5256
5257 return sprintf(buf, "%pISpc/%u\n", &client_addr->in_addr,
5258 le32_to_cpu(client_addr->nonce));
5259 }
5260
5261 static ssize_t rbd_client_id_show(struct device *dev,
5262 struct device_attribute *attr, char *buf)
5263 {
5264 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5265
5266 return sprintf(buf, "client%lld\n",
5267 ceph_client_gid(rbd_dev->rbd_client->client));
5268 }
5269
5270 static ssize_t rbd_cluster_fsid_show(struct device *dev,
5271 struct device_attribute *attr, char *buf)
5272 {
5273 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5274
5275 return sprintf(buf, "%pU\n", &rbd_dev->rbd_client->client->fsid);
5276 }
5277
5278 static ssize_t rbd_config_info_show(struct device *dev,
5279 struct device_attribute *attr, char *buf)
5280 {
5281 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5282
5283 return sprintf(buf, "%s\n", rbd_dev->config_info);
5284 }
5285
5286 static ssize_t rbd_pool_show(struct device *dev,
5287 struct device_attribute *attr, char *buf)
5288 {
5289 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5290
5291 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
5292 }
5293
5294 static ssize_t rbd_pool_id_show(struct device *dev,
5295 struct device_attribute *attr, char *buf)
5296 {
5297 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5298
5299 return sprintf(buf, "%llu\n",
5300 (unsigned long long) rbd_dev->spec->pool_id);
5301 }
5302
5303 static ssize_t rbd_pool_ns_show(struct device *dev,
5304 struct device_attribute *attr, char *buf)
5305 {
5306 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5307
5308 return sprintf(buf, "%s\n", rbd_dev->spec->pool_ns ?: "");
5309 }
5310
5311 static ssize_t rbd_name_show(struct device *dev,
5312 struct device_attribute *attr, char *buf)
5313 {
5314 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5315
5316 if (rbd_dev->spec->image_name)
5317 return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
5318
5319 return sprintf(buf, "(unknown)\n");
5320 }
5321
5322 static ssize_t rbd_image_id_show(struct device *dev,
5323 struct device_attribute *attr, char *buf)
5324 {
5325 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5326
5327 return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
5328 }
5329
5330 /*
5331 * Shows the name of the snapshot the device is mapped to, or
5332 * RBD_SNAP_HEAD_NAME ("-") if the base image is mapped.
5333 */
5334 static ssize_t rbd_snap_show(struct device *dev,
5335 struct device_attribute *attr,
5336 char *buf)
5337 {
5338 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5339
5340 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
5341 }
5342
5343 static ssize_t rbd_snap_id_show(struct device *dev,
5344 struct device_attribute *attr, char *buf)
5345 {
5346 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5347
5348 return sprintf(buf, "%llu\n", rbd_dev->spec->snap_id);
5349 }
5350
5351 /*
5352 * For a v2 (i.e. layered) image, shows the chain of parent
5353 * images, separated by empty lines.  For v1 images or if
5354 * there is no parent, shows "(no parent image)".
5355 */
5356 static ssize_t rbd_parent_show(struct device *dev,
5357 struct device_attribute *attr,
5358 char *buf)
5359 {
5360 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5361 ssize_t count = 0;
5362
5363 if (!rbd_dev->parent)
5364 return sprintf(buf, "(no parent image)\n");
5365
5366 for ( ; rbd_dev->parent; rbd_dev = rbd_dev->parent) {
5367 struct rbd_spec *spec = rbd_dev->parent_spec;
5368
5369 count += sprintf(&buf[count], "%s"
5370 "pool_id %llu\npool_name %s\n"
5371 "pool_ns %s\n"
5372 "image_id %s\nimage_name %s\n"
5373 "snap_id %llu\nsnap_name %s\n"
5374 "overlap %llu\n",
5375 !count ? "" : "\n",
5376 spec->pool_id, spec->pool_name,
5377 spec->pool_ns ?: "",
5378 spec->image_id, spec->image_name ?: "(unknown)",
5379 spec->snap_id, spec->snap_name,
5380 rbd_dev->parent_overlap);
5381 }
5382
5383 return count;
5384 }
5385
5386 static ssize_t rbd_image_refresh(struct device *dev,
5387 struct device_attribute *attr,
5388 const char *buf,
5389 size_t size)
5390 {
5391 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5392 int ret;
5393
5394 ret = rbd_dev_refresh(rbd_dev);
5395 if (ret)
5396 return ret;
5397
5398 return size;
5399 }
5400
5401 static DEVICE_ATTR(size, 0444, rbd_size_show, NULL);
5402 static DEVICE_ATTR(features, 0444, rbd_features_show, NULL);
5403 static DEVICE_ATTR(major, 0444, rbd_major_show, NULL);
5404 static DEVICE_ATTR(minor, 0444, rbd_minor_show, NULL);
5405 static DEVICE_ATTR(client_addr, 0444, rbd_client_addr_show, NULL);
5406 static DEVICE_ATTR(client_id, 0444, rbd_client_id_show, NULL);
5407 static DEVICE_ATTR(cluster_fsid, 0444, rbd_cluster_fsid_show, NULL);
5408 static DEVICE_ATTR(config_info, 0400, rbd_config_info_show, NULL);
5409 static DEVICE_ATTR(pool, 0444, rbd_pool_show, NULL);
5410 static DEVICE_ATTR(pool_id, 0444, rbd_pool_id_show, NULL);
5411 static DEVICE_ATTR(pool_ns, 0444, rbd_pool_ns_show, NULL);
5412 static DEVICE_ATTR(name, 0444, rbd_name_show, NULL);
5413 static DEVICE_ATTR(image_id, 0444, rbd_image_id_show, NULL);
5414 static DEVICE_ATTR(refresh, 0200, NULL, rbd_image_refresh);
5415 static DEVICE_ATTR(current_snap, 0444, rbd_snap_show, NULL);
5416 static DEVICE_ATTR(snap_id, 0444, rbd_snap_id_show, NULL);
5417 static DEVICE_ATTR(parent, 0444, rbd_parent_show, NULL);
5418
5419 static struct attribute *rbd_attrs[] = {
5420 &dev_attr_size.attr,
5421 &dev_attr_features.attr,
5422 &dev_attr_major.attr,
5423 &dev_attr_minor.attr,
5424 &dev_attr_client_addr.attr,
5425 &dev_attr_client_id.attr,
5426 &dev_attr_cluster_fsid.attr,
5427 &dev_attr_config_info.attr,
5428 &dev_attr_pool.attr,
5429 &dev_attr_pool_id.attr,
5430 &dev_attr_pool_ns.attr,
5431 &dev_attr_name.attr,
5432 &dev_attr_image_id.attr,
5433 &dev_attr_current_snap.attr,
5434 &dev_attr_snap_id.attr,
5435 &dev_attr_parent.attr,
5436 &dev_attr_refresh.attr,
5437 NULL
5438 };
5439
5440 static struct attribute_group rbd_attr_group = {
5441 .attrs = rbd_attrs,
5442 };
5443
5444 static const struct attribute_group *rbd_attr_groups[] = {
5445 &rbd_attr_group,
5446 NULL
5447 };
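/*
 * These attributes appear under /sys/bus/rbd/devices/<dev-id>/.
 * An illustrative session (the device id and size shown are made
 * up):
 *
 *	$ cat /sys/bus/rbd/devices/0/size
 *	10737418240
 *	$ echo 1 > /sys/bus/rbd/devices/0/refresh
 */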
5448
5449 static void rbd_dev_release(struct device *dev);
5450
5451 static const struct device_type rbd_device_type = {
5452 .name = "rbd",
5453 .groups = rbd_attr_groups,
5454 .release = rbd_dev_release,
5455 };
5456
5457 static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
5458 {
5459 kref_get(&spec->kref);
5460
5461 return spec;
5462 }
5463
5464 static void rbd_spec_free(struct kref *kref);
5465 static void rbd_spec_put(struct rbd_spec *spec)
5466 {
5467 if (spec)
5468 kref_put(&spec->kref, rbd_spec_free);
5469 }
5470
5471 static struct rbd_spec *rbd_spec_alloc(void)
5472 {
5473 struct rbd_spec *spec;
5474
5475 spec = kzalloc(sizeof (*spec), GFP_KERNEL);
5476 if (!spec)
5477 return NULL;
5478
5479 spec->pool_id = CEPH_NOPOOL;
5480 spec->snap_id = CEPH_NOSNAP;
5481 kref_init(&spec->kref);
5482
5483 return spec;
5484 }
5485
5486 static void rbd_spec_free(struct kref *kref)
5487 {
5488 struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
5489
5490 kfree(spec->pool_name);
5491 kfree(spec->pool_ns);
5492 kfree(spec->image_id);
5493 kfree(spec->image_name);
5494 kfree(spec->snap_name);
5495 kfree(spec);
5496 }
5497
5498 static void rbd_dev_free(struct rbd_device *rbd_dev)
5499 {
5500 WARN_ON(rbd_dev->watch_state != RBD_WATCH_STATE_UNREGISTERED);
5501 WARN_ON(rbd_dev->lock_state != RBD_LOCK_STATE_UNLOCKED);
5502
5503 ceph_oid_destroy(&rbd_dev->header_oid);
5504 ceph_oloc_destroy(&rbd_dev->header_oloc);
5505 kfree(rbd_dev->config_info);
5506
5507 rbd_put_client(rbd_dev->rbd_client);
5508 rbd_spec_put(rbd_dev->spec);
5509 kfree(rbd_dev->opts);
5510 kfree(rbd_dev);
5511 }
5512
5513 static void rbd_dev_release(struct device *dev)
5514 {
5515 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5516 bool need_put = !!rbd_dev->opts;
5517
5518 if (need_put) {
5519 destroy_workqueue(rbd_dev->task_wq);
5520 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
5521 }
5522
5523 rbd_dev_free(rbd_dev);
5524
5525 /*
5526 * This is racy, but way better than putting module_put()
5527 * outside of the release callback.  The race window is small,
5528 * so dropping the module reference here is fine.
5529 */
5530 if (need_put)
5531 module_put(THIS_MODULE);
5532 }
5533
5534 static struct rbd_device *__rbd_dev_create(struct rbd_client *rbdc,
5535 struct rbd_spec *spec)
5536 {
5537 struct rbd_device *rbd_dev;
5538
5539 rbd_dev = kzalloc(sizeof(*rbd_dev), GFP_KERNEL);
5540 if (!rbd_dev)
5541 return NULL;
5542
5543 spin_lock_init(&rbd_dev->lock);
5544 INIT_LIST_HEAD(&rbd_dev->node);
5545 init_rwsem(&rbd_dev->header_rwsem);
5546
5547 rbd_dev->header.data_pool_id = CEPH_NOPOOL;
5548 ceph_oid_init(&rbd_dev->header_oid);
5549 rbd_dev->header_oloc.pool = spec->pool_id;
5550 if (spec->pool_ns) {
5551 WARN_ON(!*spec->pool_ns);
5552 rbd_dev->header_oloc.pool_ns =
5553 ceph_find_or_create_string(spec->pool_ns,
5554 strlen(spec->pool_ns));
5555 }
5556
5557 mutex_init(&rbd_dev->watch_mutex);
5558 rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
5559 INIT_DELAYED_WORK(&rbd_dev->watch_dwork, rbd_reregister_watch);
5560
5561 init_rwsem(&rbd_dev->lock_rwsem);
5562 rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
5563 INIT_WORK(&rbd_dev->acquired_lock_work, rbd_notify_acquired_lock);
5564 INIT_WORK(&rbd_dev->released_lock_work, rbd_notify_released_lock);
5565 INIT_DELAYED_WORK(&rbd_dev->lock_dwork, rbd_acquire_lock);
5566 INIT_WORK(&rbd_dev->unlock_work, rbd_release_lock_work);
5567 spin_lock_init(&rbd_dev->lock_lists_lock);
5568 INIT_LIST_HEAD(&rbd_dev->acquiring_list);
5569 INIT_LIST_HEAD(&rbd_dev->running_list);
5570 init_completion(&rbd_dev->acquire_wait);
5571 init_completion(&rbd_dev->releasing_wait);
5572
5573 spin_lock_init(&rbd_dev->object_map_lock);
5574
5575 rbd_dev->dev.bus = &rbd_bus_type;
5576 rbd_dev->dev.type = &rbd_device_type;
5577 rbd_dev->dev.parent = &rbd_root_dev;
5578 device_initialize(&rbd_dev->dev);
5579
5580 rbd_dev->rbd_client = rbdc;
5581 rbd_dev->spec = spec;
5582
5583 return rbd_dev;
5584 }
5585
5586 /*
5587 * Create a mapping rbd_dev.
5588 */
5589 static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
5590 struct rbd_spec *spec,
5591 struct rbd_options *opts)
5592 {
5593 struct rbd_device *rbd_dev;
5594
5595 rbd_dev = __rbd_dev_create(rbdc, spec);
5596 if (!rbd_dev)
5597 return NULL;
5598
5599 rbd_dev->opts = opts;
5600
5601 /* get an id and fill in device name */
5602 rbd_dev->dev_id = ida_simple_get(&rbd_dev_id_ida, 0,
5603 minor_to_rbd_dev_id(1 << MINORBITS),
5604 GFP_KERNEL);
5605 if (rbd_dev->dev_id < 0)
5606 goto fail_rbd_dev;
5607
5608 sprintf(rbd_dev->name, RBD_DRV_NAME "%d", rbd_dev->dev_id);
5609 rbd_dev->task_wq = alloc_ordered_workqueue("%s-tasks", WQ_MEM_RECLAIM,
5610 rbd_dev->name);
5611 if (!rbd_dev->task_wq)
5612 goto fail_dev_id;
5613
5614 /* counterpart of the module_put() in rbd_dev_release() */
5615 __module_get(THIS_MODULE);
5616
5617 dout("%s rbd_dev %p dev_id %d\n", __func__, rbd_dev, rbd_dev->dev_id);
5618 return rbd_dev;
5619
5620 fail_dev_id:
5621 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
5622 fail_rbd_dev:
5623 rbd_dev_free(rbd_dev);
5624 return NULL;
5625 }
5626
5627 static void rbd_dev_destroy(struct rbd_device *rbd_dev)
5628 {
5629 if (rbd_dev)
5630 put_device(&rbd_dev->dev);
5631 }
5632
5633 /*
5634 * Get the size and object order for an image snapshot, or if
5635 * snap_id is CEPH_NOSNAP, get this information for the base
5636 * image.
5637 */
5638 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
5639 u8 *order, u64 *snap_size)
5640 {
5641 __le64 snapid = cpu_to_le64(snap_id);
5642 int ret;
5643 struct {
5644 u8 order;
5645 __le64 size;
5646 } __attribute__ ((packed)) size_buf = { 0 };
5647
5648 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5649 &rbd_dev->header_oloc, "get_size",
5650 &snapid, sizeof(snapid),
5651 &size_buf, sizeof(size_buf));
5652 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5653 if (ret < 0)
5654 return ret;
5655 if (ret < sizeof (size_buf))
5656 return -ERANGE;
5657
5658 if (order) {
5659 *order = size_buf.order;
5660 dout(" order %u", (unsigned int)*order);
5661 }
5662 *snap_size = le64_to_cpu(size_buf.size);
5663
5664 dout(" snap_id 0x%016llx snap_size = %llu\n",
5665 (unsigned long long)snap_id,
5666 (unsigned long long)*snap_size);
5667
5668 return 0;
5669 }
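/*
 * The "get_size" reply decoded above is a packed (u8 order,
 * le64 size) pair; order is the log2 of the object size, so an
 * image with the default order of 22 is striped over 4 MiB objects.
 */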
5670
5671 static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
5672 {
5673 return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
5674 &rbd_dev->header.obj_order,
5675 &rbd_dev->header.image_size);
5676 }
5677
5678 static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
5679 {
5680 size_t size;
5681 void *reply_buf;
5682 int ret;
5683 void *p;
5684
5685 /* Response will be an encoded string, which includes a length */
5686 size = sizeof(__le32) + RBD_OBJ_PREFIX_LEN_MAX;
5687 reply_buf = kzalloc(size, GFP_KERNEL);
5688 if (!reply_buf)
5689 return -ENOMEM;
5690
5691 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5692 &rbd_dev->header_oloc, "get_object_prefix",
5693 NULL, 0, reply_buf, size);
5694 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5695 if (ret < 0)
5696 goto out;
5697
5698 p = reply_buf;
5699 rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
5700 p + ret, NULL, GFP_NOIO);
5701 ret = 0;
5702
5703 if (IS_ERR(rbd_dev->header.object_prefix)) {
5704 ret = PTR_ERR(rbd_dev->header.object_prefix);
5705 rbd_dev->header.object_prefix = NULL;
5706 } else {
5707 dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
5708 }
5709 out:
5710 kfree(reply_buf);
5711
5712 return ret;
5713 }
5714
5715 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
5716 u64 *snap_features)
5717 {
5718 __le64 snapid = cpu_to_le64(snap_id);
5719 struct {
5720 __le64 features;
5721 __le64 incompat;
5722 } __attribute__ ((packed)) features_buf = { 0 };
5723 u64 unsup;
5724 int ret;
5725
5726 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5727 &rbd_dev->header_oloc, "get_features",
5728 &snapid, sizeof(snapid),
5729 &features_buf, sizeof(features_buf));
5730 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5731 if (ret < 0)
5732 return ret;
5733 if (ret < sizeof (features_buf))
5734 return -ERANGE;
5735
5736 unsup = le64_to_cpu(features_buf.incompat) & ~RBD_FEATURES_SUPPORTED;
5737 if (unsup) {
5738 rbd_warn(rbd_dev, "image uses unsupported features: 0x%llx",
5739 unsup);
5740 return -ENXIO;
5741 }
5742
5743 *snap_features = le64_to_cpu(features_buf.features);
5744
5745 dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
5746 (unsigned long long)snap_id,
5747 (unsigned long long)*snap_features,
5748 (unsigned long long)le64_to_cpu(features_buf.incompat));
5749
5750 return 0;
5751 }
5752
5753 static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
5754 {
5755 return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
5756 &rbd_dev->header.features);
5757 }
5758
5759 /*
5760 * These are generic image flags, but since they are used only for
5761 * object map, store them in rbd_dev->object_map_flags.
5762 *
5763 * For the same reason, this function is called only on object map
5764 * (re)load and not on header refresh.
5765 */
5766 static int rbd_dev_v2_get_flags(struct rbd_device *rbd_dev)
5767 {
5768 __le64 snapid = cpu_to_le64(rbd_dev->spec->snap_id);
5769 __le64 flags;
5770 int ret;
5771
5772 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5773 &rbd_dev->header_oloc, "get_flags",
5774 &snapid, sizeof(snapid),
5775 &flags, sizeof(flags));
5776 if (ret < 0)
5777 return ret;
5778 if (ret < sizeof(flags))
5779 return -EBADMSG;
5780
5781 rbd_dev->object_map_flags = le64_to_cpu(flags);
5782 return 0;
5783 }
5784
5785 struct parent_image_info {
5786 u64 pool_id;
5787 const char *pool_ns;
5788 const char *image_id;
5789 u64 snap_id;
5790
5791 bool has_overlap;
5792 u64 overlap;
5793 };
5794
5795 /*
5796 * The caller is responsible for @pii.
5797 */
5798 static int decode_parent_image_spec(void **p, void *end,
5799 struct parent_image_info *pii)
5800 {
5801 u8 struct_v;
5802 u32 struct_len;
5803 int ret;
5804
5805 ret = ceph_start_decoding(p, end, 1, "ParentImageSpec",
5806 &struct_v, &struct_len);
5807 if (ret)
5808 return ret;
5809
5810 ceph_decode_64_safe(p, end, pii->pool_id, e_inval);
5811 pii->pool_ns = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL);
5812 if (IS_ERR(pii->pool_ns)) {
5813 ret = PTR_ERR(pii->pool_ns);
5814 pii->pool_ns = NULL;
5815 return ret;
5816 }
5817 pii->image_id = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL);
5818 if (IS_ERR(pii->image_id)) {
5819 ret = PTR_ERR(pii->image_id);
5820 pii->image_id = NULL;
5821 return ret;
5822 }
5823 ceph_decode_64_safe(p, end, pii->snap_id, e_inval);
5824 return 0;
5825
5826 e_inval:
5827 return -EINVAL;
5828 }
5829
5830 static int __get_parent_info(struct rbd_device *rbd_dev,
5831 struct page *req_page,
5832 struct page *reply_page,
5833 struct parent_image_info *pii)
5834 {
5835 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
5836 size_t reply_len = PAGE_SIZE;
5837 void *p, *end;
5838 int ret;
5839
5840 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
5841 "rbd", "parent_get", CEPH_OSD_FLAG_READ,
5842 req_page, sizeof(u64), &reply_page, &reply_len);
5843 if (ret)
5844 return ret == -EOPNOTSUPP ? 1 : ret;
5845
5846 p = page_address(reply_page);
5847 end = p + reply_len;
5848 ret = decode_parent_image_spec(&p, end, pii);
5849 if (ret)
5850 return ret;
5851
5852 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
5853 "rbd", "parent_overlap_get", CEPH_OSD_FLAG_READ,
5854 req_page, sizeof(u64), &reply_page, &reply_len);
5855 if (ret)
5856 return ret;
5857
5858 p = page_address(reply_page);
5859 end = p + reply_len;
5860 ceph_decode_8_safe(&p, end, pii->has_overlap, e_inval);
5861 if (pii->has_overlap)
5862 ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
5863
5864 return 0;
5865
5866 e_inval:
5867 return -EINVAL;
5868 }
5869
5870 /*
5871 * The caller is responsible for @pii.
5872 */
5873 static int __get_parent_info_legacy(struct rbd_device *rbd_dev,
5874 struct page *req_page,
5875 struct page *reply_page,
5876 struct parent_image_info *pii)
5877 {
5878 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
5879 size_t reply_len = PAGE_SIZE;
5880 void *p, *end;
5881 int ret;
5882
5883 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
5884 "rbd", "get_parent", CEPH_OSD_FLAG_READ,
5885 req_page, sizeof(u64), &reply_page, &reply_len);
5886 if (ret)
5887 return ret;
5888
5889 p = page_address(reply_page);
5890 end = p + reply_len;
5891 ceph_decode_64_safe(&p, end, pii->pool_id, e_inval);
5892 pii->image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
5893 if (IS_ERR(pii->image_id)) {
5894 ret = PTR_ERR(pii->image_id);
5895 pii->image_id = NULL;
5896 return ret;
5897 }
5898 ceph_decode_64_safe(&p, end, pii->snap_id, e_inval);
5899 pii->has_overlap = true;
5900 ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
5901
5902 return 0;
5903
5904 e_inval:
5905 return -EINVAL;
5906 }
5907
5908 static int get_parent_info(struct rbd_device *rbd_dev,
5909 struct parent_image_info *pii)
5910 {
5911 struct page *req_page, *reply_page;
5912 void *p;
5913 int ret;
5914
5915 req_page = alloc_page(GFP_KERNEL);
5916 if (!req_page)
5917 return -ENOMEM;
5918
5919 reply_page = alloc_page(GFP_KERNEL);
5920 if (!reply_page) {
5921 __free_page(req_page);
5922 return -ENOMEM;
5923 }
5924
5925 p = page_address(req_page);
5926 ceph_encode_64(&p, rbd_dev->spec->snap_id);
5927 ret = __get_parent_info(rbd_dev, req_page, reply_page, pii);
5928 if (ret > 0)
5929 ret = __get_parent_info_legacy(rbd_dev, req_page, reply_page,
5930 pii);
5931
5932 __free_page(req_page);
5933 __free_page(reply_page);
5934 return ret;
5935 }
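/*
 * __get_parent_info() uses the "parent_get" and "parent_overlap_get"
 * class methods and reports 1 when the OSD returns -EOPNOTSUPP, in
 * which case get_parent_info() falls back to the legacy "get_parent"
 * method, which always encodes an overlap.
 */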
5936
5937 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
5938 {
5939 struct rbd_spec *parent_spec;
5940 struct parent_image_info pii = { 0 };
5941 int ret;
5942
5943 parent_spec = rbd_spec_alloc();
5944 if (!parent_spec)
5945 return -ENOMEM;
5946
5947 ret = get_parent_info(rbd_dev, &pii);
5948 if (ret)
5949 goto out_err;
5950
5951 dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n",
5952 __func__, pii.pool_id, pii.pool_ns, pii.image_id, pii.snap_id,
5953 pii.has_overlap, pii.overlap);
5954
5955 if (pii.pool_id == CEPH_NOPOOL || !pii.has_overlap) {
5956 /*
5957 * Either the parent never existed, or we have
5958 * record of it but the image got flattened so it no
5959 * longer has a parent.  When the parent of a
5960 * layered image disappears we immediately set the
5961 * overlap to 0.  The effect of this is that all new
5962 * requests will be treated as if the image had no
5963 * parent.
5964 *
5965 * If !pii.has_overlap, the parent image spec is not
5966 * applicable.  It's there to avoid duplication in each
5967 * snapshot record.
5968 */
5969 if (rbd_dev->parent_overlap) {
5970 rbd_dev->parent_overlap = 0;
5971 rbd_dev_parent_put(rbd_dev);
5972 pr_info("%s: clone image has been flattened\n",
5973 rbd_dev->disk->disk_name);
5974 }
5975
5976 goto out;
5977 }
5978
5979 /* The ceph file layout needs to fit pool id in 32 bits */
5980
5981 ret = -EIO;
5982 if (pii.pool_id > (u64)U32_MAX) {
5983 rbd_warn(NULL, "parent pool id too large (%llu > %u)",
5984 (unsigned long long)pii.pool_id, U32_MAX);
5985 goto out_err;
5986 }
5987
5988 /*
5989 * The parent won't change (except when the clone is
5990 * flattened, already handled that).  So we only need to
5991 * record the parent spec we have not already done so.
5992 */
5993 if (!rbd_dev->parent_spec) {
5994 parent_spec->pool_id = pii.pool_id;
5995 if (pii.pool_ns && *pii.pool_ns) {
5996 parent_spec->pool_ns = pii.pool_ns;
5997 pii.pool_ns = NULL;
5998 }
5999 parent_spec->image_id = pii.image_id;
6000 pii.image_id = NULL;
6001 parent_spec->snap_id = pii.snap_id;
6002
6003 rbd_dev->parent_spec = parent_spec;
6004 parent_spec = NULL;
6005 }
6006
6007 /*
6008 * We always update the parent overlap.  If it's zero we issue
6009 * a warning, as we will proceed as if there was no parent.
6010 */
6011 if (!pii.overlap) {
6012 if (parent_spec) {
6013 /* refresh, careful to warn just once */
6014 if (rbd_dev->parent_overlap)
6015 rbd_warn(rbd_dev,
6016 "clone now standalone (overlap became 0)");
6017 } else {
6018 /* initial probe */
6019 rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
6020 }
6021 }
6022 rbd_dev->parent_overlap = pii.overlap;
6023
6024 out:
6025 ret = 0;
6026 out_err:
6027 kfree(pii.pool_ns);
6028 kfree(pii.image_id);
6029 rbd_spec_put(parent_spec);
6030 return ret;
6031 }
6032
6033 static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
6034 {
6035 struct {
6036 __le64 stripe_unit;
6037 __le64 stripe_count;
6038 } __attribute__ ((packed)) striping_info_buf = { 0 };
6039 size_t size = sizeof (striping_info_buf);
6040 void *p;
6041 int ret;
6042
6043 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
6044 &rbd_dev->header_oloc, "get_stripe_unit_count",
6045 NULL, 0, &striping_info_buf, size);
6046 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
6047 if (ret < 0)
6048 return ret;
6049 if (ret < size)
6050 return -ERANGE;
6051
6052 p = &striping_info_buf;
6053 rbd_dev->header.stripe_unit = ceph_decode_64(&p);
6054 rbd_dev->header.stripe_count = ceph_decode_64(&p);
6055 return 0;
6056 }
6057
6058 static int rbd_dev_v2_data_pool(struct rbd_device *rbd_dev)
6059 {
6060 __le64 data_pool_id;
6061 int ret;
6062
6063 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
6064 &rbd_dev->header_oloc, "get_data_pool",
6065 NULL, 0, &data_pool_id, sizeof(data_pool_id));
6066 if (ret < 0)
6067 return ret;
6068 if (ret < sizeof(data_pool_id))
6069 return -EBADMSG;
6070
6071 rbd_dev->header.data_pool_id = le64_to_cpu(data_pool_id);
6072 WARN_ON(rbd_dev->header.data_pool_id == CEPH_NOPOOL);
6073 return 0;
6074 }
6075
6076 static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
6077 {
6078 CEPH_DEFINE_OID_ONSTACK(oid);
6079 size_t image_id_size;
6080 char *image_id;
6081 void *p;
6082 void *end;
6083 size_t size;
6084 void *reply_buf = NULL;
6085 size_t len = 0;
6086 char *image_name = NULL;
6087 int ret;
6088
6089 rbd_assert(!rbd_dev->spec->image_name);
6090
6091 len = strlen(rbd_dev->spec->image_id);
6092 image_id_size = sizeof (__le32) + len;
6093 image_id = kmalloc(image_id_size, GFP_KERNEL);
6094 if (!image_id)
6095 return NULL;
6096
6097 p = image_id;
6098 end = image_id + image_id_size;
6099 ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
6100
6101 size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
6102 reply_buf = kmalloc(size, GFP_KERNEL);
6103 if (!reply_buf)
6104 goto out;
6105
6106 ceph_oid_printf(&oid, "%s", RBD_DIRECTORY);
6107 ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc,
6108 "dir_get_name", image_id, image_id_size,
6109 reply_buf, size);
6110 if (ret < 0)
6111 goto out;
6112 p = reply_buf;
6113 end = reply_buf + ret;
6114
6115 image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
6116 if (IS_ERR(image_name))
6117 image_name = NULL;
6118 else
6119 dout("%s: name is %s len is %zd\n", __func__, image_name, len);
6120 out:
6121 kfree(reply_buf);
6122 kfree(image_id);
6123
6124 return image_name;
6125 }
6126
6127 static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
6128 {
6129 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
6130 const char *snap_name;
6131 u32 which = 0;
6132
6133 /* Skip over names until we find the one we are looking for */
6134
6135 snap_name = rbd_dev->header.snap_names;
6136 while (which < snapc->num_snaps) {
6137 if (!strcmp(name, snap_name))
6138 return snapc->snaps[which];
6139 snap_name += strlen(snap_name) + 1;
6140 which++;
6141 }
6142 return CEPH_NOSNAP;
6143 }
6144
6145 static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
6146 {
6147 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
6148 u32 which;
6149 bool found = false;
6150 u64 snap_id;
6151
6152 for (which = 0; !found && which < snapc->num_snaps; which++) {
6153 const char *snap_name;
6154
6155 snap_id = snapc->snaps[which];
6156 snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
6157 if (IS_ERR(snap_name)) {
6158 /* ignore no-longer existing snapshots */
6159 if (PTR_ERR(snap_name) == -ENOENT)
6160 continue;
6161 else
6162 break;
6163 }
6164 found = !strcmp(name, snap_name);
6165 kfree(snap_name);
6166 }
6167 return found ? snap_id : CEPH_NOSNAP;
6168 }
6169
6170 /*
6171 * Assume name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
6172 * no snapshot by that name is found, or if an error occurs.
6173 */
6174 static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
6175 {
6176 if (rbd_dev->image_format == 1)
6177 return rbd_v1_snap_id_by_name(rbd_dev, name);
6178
6179 return rbd_v2_snap_id_by_name(rbd_dev, name);
6180 }
6181
6182 /*
6183 * An image being mapped will have everything but the snap id.
6184 */
6185 static int rbd_spec_fill_snap_id(struct rbd_device *rbd_dev)
6186 {
6187 struct rbd_spec *spec = rbd_dev->spec;
6188
6189 rbd_assert(spec->pool_id != CEPH_NOPOOL && spec->pool_name);
6190 rbd_assert(spec->image_id && spec->image_name);
6191 rbd_assert(spec->snap_name);
6192
6193 if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
6194 u64 snap_id;
6195
6196 snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
6197 if (snap_id == CEPH_NOSNAP)
6198 return -ENOENT;
6199
6200 spec->snap_id = snap_id;
6201 } else {
6202 spec->snap_id = CEPH_NOSNAP;
6203 }
6204
6205 return 0;
6206 }
6207
6208 /*
6209 * A parent image will have all ids but none of the names.
6210 *
6211 * All names in an rbd spec are dynamically allocated.  It's OK if we
6212 * can't figure out the name for an image id.
6213 */
6214 static int rbd_spec_fill_names(struct rbd_device *rbd_dev)
6215 {
6216 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
6217 struct rbd_spec *spec = rbd_dev->spec;
6218 const char *pool_name;
6219 const char *image_name;
6220 const char *snap_name;
6221 int ret;
6222
6223 rbd_assert(spec->pool_id != CEPH_NOPOOL);
6224 rbd_assert(spec->image_id);
6225 rbd_assert(spec->snap_id != CEPH_NOSNAP);
6226
6227 /* Get the pool name; we have to make our own copy of this */
6228
6229 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
6230 if (!pool_name) {
6231 rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
6232 return -EIO;
6233 }
6234 pool_name = kstrdup(pool_name, GFP_KERNEL);
6235 if (!pool_name)
6236 return -ENOMEM;
6237
6238 /* Fetch the image name; tolerate failure here */
6239
6240 image_name = rbd_dev_image_name(rbd_dev);
6241 if (!image_name)
6242 rbd_warn(rbd_dev, "unable to get image name");
6243
6244 /* Fetch the snapshot name */
6245
6246 snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
6247 if (IS_ERR(snap_name)) {
6248 ret = PTR_ERR(snap_name);
6249 goto out_err;
6250 }
6251
6252 spec->pool_name = pool_name;
6253 spec->image_name = image_name;
6254 spec->snap_name = snap_name;
6255
6256 return 0;
6257
6258 out_err:
6259 kfree(image_name);
6260 kfree(pool_name);
6261 return ret;
6262 }
6263
6264 static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
6265 {
6266 size_t size;
6267 int ret;
6268 void *reply_buf;
6269 void *p;
6270 void *end;
6271 u64 seq;
6272 u32 snap_count;
6273 struct ceph_snap_context *snapc;
6274 u32 i;
6275
6276
6277 /*
6278 * We'll need room for the seq value (maximum snapshot id),
6279 * snapshot count, and array of that many snapshot ids.
6280 * For now, though, we just need the size of the array.
6281 */
6282 size = sizeof (__le64) + sizeof (__le32) +
6283 RBD_MAX_SNAP_COUNT * sizeof (__le64);
6284 reply_buf = kzalloc(size, GFP_KERNEL);
6285 if (!reply_buf)
6286 return -ENOMEM;
6287
6288 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
6289 &rbd_dev->header_oloc, "get_snapcontext",
6290 NULL, 0, reply_buf, size);
6291 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
6292 if (ret < 0)
6293 goto out;
6294
6295 p = reply_buf;
6296 end = reply_buf + ret;
6297 ret = -ERANGE;
6298 ceph_decode_64_safe(&p, end, seq, out);
6299 ceph_decode_32_safe(&p, end, snap_count, out);
6300
6301 /*
6302 * Make sure the reported number of snapshot ids wouldn't go
6303 * beyond the end of our buffer.  But before checking that,
6304 * make sure the computed size of the snapshot context we
6305 * allocate is representable in a size_t.
6306 */
6307 if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
6308 / sizeof (u64)) {
6309 ret = -EINVAL;
6310 goto out;
6311 }
6312 if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
6313 goto out;
6314 ret = 0;
6315
6316 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
6317 if (!snapc) {
6318 ret = -ENOMEM;
6319 goto out;
6320 }
6321 snapc->seq = seq;
6322 for (i = 0; i < snap_count; i++)
6323 snapc->snaps[i] = ceph_decode_64(&p);
6324
6325 ceph_put_snap_context(rbd_dev->header.snapc);
6326 rbd_dev->header.snapc = snapc;
6327
6328 dout(" snap context seq = %llu, snap_count = %u\n",
6329 (unsigned long long)seq, (unsigned int)snap_count);
6330 out:
6331 kfree(reply_buf);
6332
6333 return ret;
6334 }
6335
6336 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
6337 u64 snap_id)
6338 {
6339 size_t size;
6340 void *reply_buf;
6341 __le64 snapid;
6342 int ret;
6343 void *p;
6344 void *end;
6345 char *snap_name;
6346
6347 size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
6348 reply_buf = kmalloc(size, GFP_KERNEL);
6349 if (!reply_buf)
6350 return ERR_PTR(-ENOMEM);
6351
6352 snapid = cpu_to_le64(snap_id);
6353 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
6354 &rbd_dev->header_oloc, "get_snapshot_name",
6355 &snapid, sizeof(snapid), reply_buf, size);
6356 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
6357 if (ret < 0) {
6358 snap_name = ERR_PTR(ret);
6359 goto out;
6360 }
6361
6362 p = reply_buf;
6363 end = reply_buf + ret;
6364 snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
6365 if (IS_ERR(snap_name))
6366 goto out;
6367
6368 dout(" snap_id 0x%016llx snap_name = %s\n",
6369 (unsigned long long)snap_id, snap_name);
6370 out:
6371 kfree(reply_buf);
6372
6373 return snap_name;
6374 }
6375
6376 static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
6377 {
6378 bool first_time = rbd_dev->header.object_prefix == NULL;
6379 int ret;
6380
6381 ret = rbd_dev_v2_image_size(rbd_dev);
6382 if (ret)
6383 return ret;
6384
6385 if (first_time) {
6386 ret = rbd_dev_v2_header_onetime(rbd_dev);
6387 if (ret)
6388 return ret;
6389 }
6390
6391 ret = rbd_dev_v2_snap_context(rbd_dev);
6392 if (ret && first_time) {
6393 kfree(rbd_dev->header.object_prefix);
6394 rbd_dev->header.object_prefix = NULL;
6395 }
6396
6397 return ret;
6398 }
6399
6400 static int rbd_dev_header_info(struct rbd_device *rbd_dev)
6401 {
6402 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
6403
6404 if (rbd_dev->image_format == 1)
6405 return rbd_dev_v1_header_info(rbd_dev);
6406
6407 return rbd_dev_v2_header_info(rbd_dev);
6408 }
6409
6410 /*
6411 * Skips over white space at *buf, and updates *buf to point to the
6412 * first found non-space character (if any).  Returns the length of
6413 * the token (string of non-white space characters) found.  Note
6414 * that *buf must be terminated with '\0'.
6415 */
6416 static inline size_t next_token(const char **buf)
6417 {
6418 /*
6419 * These are the characters that produce nonzero for
6420 * isspace() in the "C" and "POSIX" locales.
6421 */
6422 const char *spaces = " \f\n\r\t\v";
6423
6424 *buf += strspn(*buf, spaces);
6425
6426 return strcspn(*buf, spaces);
6427 }
6428
6429 /*
6430 * Finds the next token in *buf, dynamically allocates a buffer big
6431 * enough to hold a copy of it, and copies the token into the new
6432 * buffer.  The copy is guaranteed to be terminated with '\0'.  Note
6433 * that a duplicate buffer is created even for a zero-length token.
6434 *
6435 * Returns a pointer to the newly-allocated duplicate, or a null
6436 * pointer if memory for the duplicate was not available.  If
6437 * the lenp argument is a non-null pointer, the length of the token
6438 * (not including the '\0') is returned in *lenp.
6439 *
6440 * If successful, the *buf pointer will be updated to point beyond
6441 * the end of the found token.
6442 *
6443 * Note: uses GFP_KERNEL for allocation.
6444 */
6445 static inline char *dup_token(const char **buf, size_t *lenp)
6446 {
6447 char *dup;
6448 size_t len;
6449
6450 len = next_token(buf);
6451 dup = kmemdup(*buf, len + 1, GFP_KERNEL);
6452 if (!dup)
6453 return NULL;
6454 *(dup + len) = '\0';
6455 *buf += len;
6456
6457 if (lenp)
6458 *lenp = len;
6459
6460 return dup;
6461 }
6462
6463 /*
6464 * Parse the options provided for an "rbd add" (i.e., rbd image
6465 * mapping) request.  These arrive via a write to /sys/bus/rbd/add,
6466 * and the data written is passed here via a NUL-terminated buffer.
6467 * Returns 0 if successful or an error code otherwise.
6468 *
6469 * The information extracted from these options is recorded in
6470 * the other parameters which return dynamically-allocated
6471 * structures:
6472 *  ceph_opts
6473 *      The address of a pointer that will refer to a ceph options
6474 *      structure.  Caller must release the returned pointer using
6475 *      ceph_destroy_options() when it is no longer needed.
6476 *  rbd_opts
6477 *      Address of an rbd options pointer.  Fully initialized by
6478 *      this function; caller must release with kfree().
6479 *  spec
6480 *      Address of an rbd image specification pointer.  Fully
6481 *      initialized by this function based on parsed options.
6482 *      Caller must release with rbd_spec_put().
6483 *
6484 * The options passed take this form:
6485 *  <mon_addrs> <options> <pool_name> <image_name> [<snap_id>]
6486 * where:
6487 *  <mon_addrs>
6488 *      A comma-separated list of one or more monitor addresses.
6489 *      A monitor address is an ip address, optionally followed
6490 *      by a port number (separated by a colon).
6491 *        I.e.:  ip1[:port1][,ip2[:port2]...]
6492 *  <options>
6493 *      A comma-separated list of ceph and/or rbd options.
6494 *  <pool_name>
6495 *      The name of the rados pool containing the rbd image.
6496 *  <image_name>
6497 *      The name of the image in that pool to map.
6498 *  <snap_id>
6499 *      An optional snapshot id.  If provided, the mapping will
6500 *      present data from the image at the time that snapshot was
6501 *      created.  The image head is used if no snapshot id is
6502 *      provided.  Snapshot mappings are always read-only.
6503 */
6504 static int rbd_add_parse_args(const char *buf,
6505 struct ceph_options **ceph_opts,
6506 struct rbd_options **opts,
6507 struct rbd_spec **rbd_spec)
6508 {
6509 size_t len;
6510 char *options;
6511 const char *mon_addrs;
6512 char *snap_name;
6513 size_t mon_addrs_size;
6514 struct parse_rbd_opts_ctx pctx = { 0 };
6515 struct ceph_options *copts;
6516 int ret;
6517
6518
6519
6520 len = next_token(&buf);
6521 if (!len) {
6522 rbd_warn(NULL, "no monitor address(es) provided");
6523 return -EINVAL;
6524 }
6525 mon_addrs = buf;
6526 mon_addrs_size = len + 1;
6527 buf += len;
6528
6529 ret = -EINVAL;
6530 options = dup_token(&buf, NULL);
6531 if (!options)
6532 return -ENOMEM;
6533 if (!*options) {
6534 rbd_warn(NULL, "no options provided");
6535 goto out_err;
6536 }
6537
6538 pctx.spec = rbd_spec_alloc();
6539 if (!pctx.spec)
6540 goto out_mem;
6541
6542 pctx.spec->pool_name = dup_token(&buf, NULL);
6543 if (!pctx.spec->pool_name)
6544 goto out_mem;
6545 if (!*pctx.spec->pool_name) {
6546 rbd_warn(NULL, "no pool name provided");
6547 goto out_err;
6548 }
6549
6550 pctx.spec->image_name = dup_token(&buf, NULL);
6551 if (!pctx.spec->image_name)
6552 goto out_mem;
6553 if (!*pctx.spec->image_name) {
6554 rbd_warn(NULL, "no image name provided");
6555 goto out_err;
6556 }
6557
6558
6559
6560
6561
6562 len = next_token(&buf);
6563 if (!len) {
6564 buf = RBD_SNAP_HEAD_NAME;
6565 len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
6566 } else if (len > RBD_MAX_SNAP_NAME_LEN) {
6567 ret = -ENAMETOOLONG;
6568 goto out_err;
6569 }
6570 snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
6571 if (!snap_name)
6572 goto out_mem;
6573 *(snap_name + len) = '\0';
6574 pctx.spec->snap_name = snap_name;
6575
6576
6577
6578 pctx.opts = kzalloc(sizeof(*pctx.opts), GFP_KERNEL);
6579 if (!pctx.opts)
6580 goto out_mem;
6581
6582 pctx.opts->read_only = RBD_READ_ONLY_DEFAULT;
6583 pctx.opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;
6584 pctx.opts->alloc_size = RBD_ALLOC_SIZE_DEFAULT;
6585 pctx.opts->lock_timeout = RBD_LOCK_TIMEOUT_DEFAULT;
6586 pctx.opts->lock_on_read = RBD_LOCK_ON_READ_DEFAULT;
6587 pctx.opts->exclusive = RBD_EXCLUSIVE_DEFAULT;
6588 pctx.opts->trim = RBD_TRIM_DEFAULT;
6589
6590 copts = ceph_parse_options(options, mon_addrs,
6591 mon_addrs + mon_addrs_size - 1,
6592 parse_rbd_opts_token, &pctx);
6593 if (IS_ERR(copts)) {
6594 ret = PTR_ERR(copts);
6595 goto out_err;
6596 }
6597 kfree(options);
6598
6599 *ceph_opts = copts;
6600 *opts = pctx.opts;
6601 *rbd_spec = pctx.spec;
6602
6603 return 0;
6604 out_mem:
6605 ret = -ENOMEM;
6606 out_err:
6607 kfree(pctx.opts);
6608 rbd_spec_put(pctx.spec);
6609 kfree(options);
6610
6611 return ret;
6612 }
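
/*
 * Illustrative example (editor's addition, not in the original
 * source): the buffer parsed above is what userspace writes to the
 * bus "add" file, e.g.
 *
 *	$ echo "1.2.3.4:6789 name=admin,secret=<key> rbd myimage" \
 *		> /sys/bus/rbd/add
 *
 * which yields mon_addrs "1.2.3.4:6789", options
 * "name=admin,secret=<key>", pool "rbd", image "myimage" and, with no
 * fifth token present, the default snapshot name RBD_SNAP_HEAD_NAME.
 */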

static void rbd_dev_image_unlock(struct rbd_device *rbd_dev)
{
	down_write(&rbd_dev->lock_rwsem);
	if (__rbd_is_lock_owner(rbd_dev))
		__rbd_release_lock(rbd_dev);
	up_write(&rbd_dev->lock_rwsem);
}

/*
 * If the wait is interrupted, an error is returned even if the lock
 * was successfully acquired.  rbd_dev_image_unlock() will release it
 * if needed.
 */
static int rbd_add_acquire_lock(struct rbd_device *rbd_dev)
{
	long ret;

	if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK)) {
		if (!rbd_dev->opts->exclusive && !rbd_dev->opts->lock_on_read)
			return 0;

		rbd_warn(rbd_dev, "exclusive-lock feature is not enabled");
		return -EINVAL;
	}

	if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
		return 0;

	rbd_assert(!rbd_is_lock_owner(rbd_dev));
	queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
	ret = wait_for_completion_killable_timeout(&rbd_dev->acquire_wait,
			ceph_timeout_jiffies(rbd_dev->opts->lock_timeout));
	if (ret > 0) {
		ret = rbd_dev->acquire_err;
	} else {
		cancel_delayed_work_sync(&rbd_dev->lock_dwork);
		if (!ret)
			ret = -ETIMEDOUT;
	}

	if (ret) {
		rbd_warn(rbd_dev, "failed to acquire exclusive lock: %ld", ret);
		return ret;
	}

	/*
	 * The lock may have been released by now, unless automatic lock
	 * transitions are disabled.
	 */
	rbd_assert(!rbd_dev->opts->exclusive || rbd_is_lock_owner(rbd_dev));
	return 0;
}

/*
 * An rbd format 2 image has a unique identifier, distinct from the
 * name given to it by the user.  Internally, that identifier is
 * what's used to specify the names of objects related to the image.
 *
 * A special "rbd id" object is used to map an rbd image name to its
 * id.  If that object doesn't exist, then there is no v2 rbd image
 * with the supplied name.
 *
 * This function will record the given rbd_dev's image id field if
 * it can be determined, and in that case will return 0.  If any
 * errors occur a negative errno will be returned and the rbd_dev's
 * image_id field will be unchanged (and should be NULL).
 */
static int rbd_dev_image_id(struct rbd_device *rbd_dev)
{
	int ret;
	size_t size;
	CEPH_DEFINE_OID_ONSTACK(oid);
	void *response;
	char *image_id;

	/*
	 * When probing a parent image, the image id is already
	 * known (and the image name likely is not).  There's no need
	 * to fetch the image id again in this case.  We do still
	 * need to set the image format though.
	 */
	if (rbd_dev->spec->image_id) {
		rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;

		return 0;
	}

	/*
	 * First, see if the format 2 image id object exists, and if
	 * so, get the image's persistent id from it.
	 */
	ret = ceph_oid_aprintf(&oid, GFP_KERNEL, "%s%s", RBD_ID_PREFIX,
			       rbd_dev->spec->image_name);
	if (ret)
		return ret;

	dout("rbd id object name is %s\n", oid.name);

	/* Response will be an encoded string, which includes a length */
	size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
	response = kzalloc(size, GFP_NOIO);
	if (!response) {
		ret = -ENOMEM;
		goto out;
	}

	/* If it doesn't exist we'll assume it's a format 1 image */

	ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc,
				  "get_id", NULL, 0,
				  response, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret == -ENOENT) {
		image_id = kstrdup("", GFP_KERNEL);
		ret = image_id ? 0 : -ENOMEM;
		if (!ret)
			rbd_dev->image_format = 1;
	} else if (ret >= 0) {
		void *p = response;

		image_id = ceph_extract_encoded_string(&p, p + ret,
						       NULL, GFP_NOIO);
		ret = PTR_ERR_OR_ZERO(image_id);
		if (!ret)
			rbd_dev->image_format = 2;
	}

	if (!ret) {
		rbd_dev->spec->image_id = image_id;
		dout("image_id is %s\n", image_id);
	}
out:
	kfree(response);
	ceph_oid_destroy(&oid);
	return ret;
}
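
/*
 * Example of the naming scheme used above (editor's addition): for a
 * format 2 image called "myimage", the id object is named
 * "rbd_id.myimage" (RBD_ID_PREFIX followed by the image name), and its
 * "get_id" class method returns the persistent image id.  A format 1
 * image has no such object, hence the -ENOENT fallback above.
 */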

/*
 * Undo whatever state changes are made by v1 or v2 header info
 * call.
 */
static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
{
	struct rbd_image_header *header;

	rbd_dev_parent_put(rbd_dev);
	rbd_object_map_free(rbd_dev);
	rbd_dev_mapping_clear(rbd_dev);

	/* Free dynamic fields from the header, then zero it out */

	header = &rbd_dev->header;
	ceph_put_snap_context(header->snapc);
	kfree(header->snap_sizes);
	kfree(header->snap_names);
	kfree(header->object_prefix);
	memset(header, 0, sizeof (*header));
}

static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
{
	int ret;

	ret = rbd_dev_v2_object_prefix(rbd_dev);
	if (ret)
		goto out_err;

	/*
	 * Get and check features for the image.  Currently the
	 * features are assumed to never change.
	 */
	ret = rbd_dev_v2_features(rbd_dev);
	if (ret)
		goto out_err;

	/* If the image supports fancy striping, get its parameters */

	if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
		ret = rbd_dev_v2_striping_info(rbd_dev);
		if (ret < 0)
			goto out_err;
	}

	if (rbd_dev->header.features & RBD_FEATURE_DATA_POOL) {
		ret = rbd_dev_v2_data_pool(rbd_dev);
		if (ret)
			goto out_err;
	}

	rbd_init_layout(rbd_dev);
	return 0;

out_err:
	rbd_dev->header.features = 0;
	kfree(rbd_dev->header.object_prefix);
	rbd_dev->header.object_prefix = NULL;
	return ret;
}

/*
 * @depth is rbd_dev_image_probe() -> rbd_dev_probe_parent() recursion
 * depth of the @rbd_dev image.  The chain is walked one parent at a
 * time; probing fails with -EINVAL once the chain exceeds
 * RBD_MAX_PARENT_CHAIN_LEN.
 */
static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
{
	struct rbd_device *parent = NULL;
	int ret;

	if (!rbd_dev->parent_spec)
		return 0;

	if (++depth > RBD_MAX_PARENT_CHAIN_LEN) {
		pr_info("parent chain is too long (%d)\n", depth);
		ret = -EINVAL;
		goto out_err;
	}

	parent = __rbd_dev_create(rbd_dev->rbd_client, rbd_dev->parent_spec);
	if (!parent) {
		ret = -ENOMEM;
		goto out_err;
	}

	/*
	 * Images related by parent/child relationships always share
	 * rbd_client and spec/parent_spec, so bump their refcounts.
	 */
	__rbd_get_client(rbd_dev->rbd_client);
	rbd_spec_get(rbd_dev->parent_spec);

	ret = rbd_dev_image_probe(parent, depth);
	if (ret < 0)
		goto out_err;

	rbd_dev->parent = parent;
	atomic_set(&rbd_dev->parent_ref, 1);
	return 0;

out_err:
	rbd_dev_unparent(rbd_dev);
	rbd_dev_destroy(parent);
	return ret;
}

static void rbd_dev_device_release(struct rbd_device *rbd_dev)
{
	clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	rbd_free_disk(rbd_dev);
	if (!single_major)
		unregister_blkdev(rbd_dev->major, rbd_dev->name);
}

/*
 * rbd_dev->header_rwsem must be held for write on entry; it is
 * dropped here on both the success and error paths.
 */
static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
{
	int ret;

	/* Record our major and minor device numbers. */

	if (!single_major) {
		ret = register_blkdev(0, rbd_dev->name);
		if (ret < 0)
			goto err_out_unlock;

		rbd_dev->major = ret;
		rbd_dev->minor = 0;
	} else {
		rbd_dev->major = rbd_major;
		rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
	}

	/* Set up the blkdev mapping. */

	ret = rbd_init_disk(rbd_dev);
	if (ret)
		goto err_out_blkdev;

	set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
	set_disk_ro(rbd_dev->disk, rbd_dev->opts->read_only);

	ret = dev_set_name(&rbd_dev->dev, "%d", rbd_dev->dev_id);
	if (ret)
		goto err_out_disk;

	set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	up_write(&rbd_dev->header_rwsem);
	return 0;

err_out_disk:
	rbd_free_disk(rbd_dev);
err_out_blkdev:
	if (!single_major)
		unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_unlock:
	up_write(&rbd_dev->header_rwsem);
	return ret;
}

static int rbd_dev_header_name(struct rbd_device *rbd_dev)
{
	struct rbd_spec *spec = rbd_dev->spec;
	int ret;

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (rbd_dev->image_format == 1)
		ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
				       spec->image_name, RBD_SUFFIX);
	else
		ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
				       RBD_HEADER_PREFIX, spec->image_id);

	return ret;
}
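
/*
 * Illustrative example (editor's addition): for an image named
 * "myimage" whose image id is "1234abcd", the header object is
 * "myimage.rbd" for a format 1 image (RBD_SUFFIX) and
 * "rbd_header.1234abcd" for a format 2 image (RBD_HEADER_PREFIX).
 */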

static void rbd_dev_image_release(struct rbd_device *rbd_dev)
{
	if (rbd_dev->opts)
		rbd_unregister_watch(rbd_dev);

	rbd_dev_unprobe(rbd_dev);
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;
}

/*
 * Probe for the existence of the header object for the given rbd
 * device.  If this image is the one being mapped (i.e., not a
 * parent), initiate a watch on its header object before using that
 * object to get detailed information about the rbd image.
 *
 * On success the caller is responsible for tearing the probed state
 * back down with rbd_dev_image_release(); on error everything set up
 * here is undone before returning.
 */
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
{
	int ret;

	/*
	 * Get the id from the image id object.  Unless there's an
	 * error, rbd_dev->spec->image_id will be filled in with
	 * a dynamically-allocated string, and rbd_dev->image_format
	 * will be set to either 1 or 2.
	 */
	ret = rbd_dev_image_id(rbd_dev);
	if (ret)
		return ret;

	ret = rbd_dev_header_name(rbd_dev);
	if (ret)
		goto err_out_format;

	if (!depth) {
		ret = rbd_register_watch(rbd_dev);
		if (ret) {
			if (ret == -ENOENT)
				pr_info("image %s/%s%s%s does not exist\n",
					rbd_dev->spec->pool_name,
					rbd_dev->spec->pool_ns ?: "",
					rbd_dev->spec->pool_ns ? "/" : "",
					rbd_dev->spec->image_name);
			goto err_out_format;
		}
	}

	if (!depth)
		down_write(&rbd_dev->header_rwsem);

	ret = rbd_dev_header_info(rbd_dev);
	if (ret)
		goto err_out_probe;

	/*
	 * If this image is the one being mapped, we have pool name and
	 * id, image name and id, and snap name - need to fill snap id.
	 * Otherwise this is a parent image, identified by pool, image
	 * and snap ids - need to fill in names for those ids.
	 */
	if (!depth)
		ret = rbd_spec_fill_snap_id(rbd_dev);
	else
		ret = rbd_spec_fill_names(rbd_dev);
	if (ret) {
		if (ret == -ENOENT)
			pr_info("snap %s/%s%s%s@%s does not exist\n",
				rbd_dev->spec->pool_name,
				rbd_dev->spec->pool_ns ?: "",
				rbd_dev->spec->pool_ns ? "/" : "",
				rbd_dev->spec->image_name,
				rbd_dev->spec->snap_name);
		goto err_out_probe;
	}

	ret = rbd_dev_mapping_set(rbd_dev);
	if (ret)
		goto err_out_probe;

	if (rbd_dev->spec->snap_id != CEPH_NOSNAP &&
	    (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP)) {
		ret = rbd_object_map_load(rbd_dev);
		if (ret)
			goto err_out_probe;
	}

	if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
		ret = rbd_dev_v2_parent_info(rbd_dev);
		if (ret)
			goto err_out_probe;
	}

	ret = rbd_dev_probe_parent(rbd_dev, depth);
	if (ret)
		goto err_out_probe;

	dout("discovered format %u image, header name is %s\n",
		rbd_dev->image_format, rbd_dev->header_oid.name);
	return 0;

err_out_probe:
	if (!depth)
		up_write(&rbd_dev->header_rwsem);
	if (!depth)
		rbd_unregister_watch(rbd_dev);
	rbd_dev_unprobe(rbd_dev);
err_out_format:
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;
	return ret;
}

static ssize_t do_rbd_add(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct ceph_options *ceph_opts = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct rbd_spec *spec = NULL;
	struct rbd_client *rbdc;
	int rc;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	/* parse add command */
	rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
	if (rc < 0)
		goto out;

	rbdc = rbd_get_client(ceph_opts);
	if (IS_ERR(rbdc)) {
		rc = PTR_ERR(rbdc);
		goto err_out_args;
	}

	/* pick the pool */
	rc = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, spec->pool_name);
	if (rc < 0) {
		if (rc == -ENOENT)
			pr_info("pool %s does not exist\n", spec->pool_name);
		goto err_out_client;
	}
	spec->pool_id = (u64)rc;

	rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts);
	if (!rbd_dev) {
		rc = -ENOMEM;
		goto err_out_client;
	}
	rbdc = NULL;		/* rbd_dev now owns this */
	spec = NULL;		/* rbd_dev now owns this */
	rbd_opts = NULL;	/* rbd_dev now owns this */

	rbd_dev->config_info = kstrdup(buf, GFP_KERNEL);
	if (!rbd_dev->config_info) {
		rc = -ENOMEM;
		goto err_out_rbd_dev;
	}

	rc = rbd_dev_image_probe(rbd_dev, 0);
	if (rc < 0)
		goto err_out_rbd_dev;

	/* If we are mapping a snapshot it will be read-only */
	if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
		rbd_dev->opts->read_only = true;

	if (rbd_dev->opts->alloc_size > rbd_dev->layout.object_size) {
		rbd_warn(rbd_dev, "alloc_size adjusted to %u",
			 rbd_dev->layout.object_size);
		rbd_dev->opts->alloc_size = rbd_dev->layout.object_size;
	}

	rc = rbd_dev_device_setup(rbd_dev);
	if (rc)
		goto err_out_image_probe;

	rc = rbd_add_acquire_lock(rbd_dev);
	if (rc)
		goto err_out_image_lock;

	/* Everything's ready.  Announce the disk to the world. */

	rc = device_add(&rbd_dev->dev);
	if (rc)
		goto err_out_image_lock;

	add_disk(rbd_dev->disk);
	/* see rbd_init_disk() */
	blk_put_queue(rbd_dev->disk->queue);

	spin_lock(&rbd_dev_list_lock);
	list_add_tail(&rbd_dev->node, &rbd_dev_list);
	spin_unlock(&rbd_dev_list_lock);

	pr_info("%s: capacity %llu features 0x%llx\n", rbd_dev->disk->disk_name,
		(unsigned long long)get_capacity(rbd_dev->disk) << SECTOR_SHIFT,
		rbd_dev->header.features);
	rc = count;
out:
	module_put(THIS_MODULE);
	return rc;

err_out_image_lock:
	rbd_dev_image_unlock(rbd_dev);
	rbd_dev_device_release(rbd_dev);
err_out_image_probe:
	rbd_dev_image_release(rbd_dev);
err_out_rbd_dev:
	rbd_dev_destroy(rbd_dev);
err_out_client:
	rbd_put_client(rbdc);
err_out_args:
	rbd_spec_put(spec);
	kfree(rbd_opts);
	goto out;
}

static ssize_t add_store(struct bus_type *bus, const char *buf, size_t count)
{
	if (single_major)
		return -EINVAL;

	return do_rbd_add(bus, buf, count);
}

static ssize_t add_single_major_store(struct bus_type *bus, const char *buf,
				      size_t count)
{
	return do_rbd_add(bus, buf, count);
}
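
/*
 * Editor's note: with the single_major module parameter set, all
 * mappings share one major number and must go through
 * add_single_major_store()/remove_single_major_store(); the plain
 * "add" and "remove" files then refuse with -EINVAL, as enforced
 * above and in remove_store() below.
 */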

static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
{
	while (rbd_dev->parent) {
		struct rbd_device *first = rbd_dev;
		struct rbd_device *second = first->parent;
		struct rbd_device *third;

		/*
		 * Follow to the parent with no grandparent and
		 * remove it.
		 */
		while (second && (third = second->parent)) {
			first = second;
			second = third;
		}
		rbd_assert(second);
		rbd_dev_image_release(second);
		rbd_dev_destroy(second);
		first->parent = NULL;
		first->parent_overlap = 0;

		rbd_assert(first->parent_spec);
		rbd_spec_put(first->parent_spec);
		first->parent_spec = NULL;
	}
}

static ssize_t do_rbd_remove(struct bus_type *bus,
			     const char *buf,
			     size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct list_head *tmp;
	int dev_id;
	char opt_buf[6];
	bool force = false;
	int ret;

	dev_id = -1;
	opt_buf[0] = '\0';
	sscanf(buf, "%d %5s", &dev_id, opt_buf);
	if (dev_id < 0) {
		pr_err("dev_id out of range\n");
		return -EINVAL;
	}
	if (opt_buf[0] != '\0') {
		if (!strcmp(opt_buf, "force")) {
			force = true;
		} else {
			pr_err("bad remove option at '%s'\n", opt_buf);
			return -EINVAL;
		}
	}

	ret = -ENOENT;
	spin_lock(&rbd_dev_list_lock);
	list_for_each(tmp, &rbd_dev_list) {
		rbd_dev = list_entry(tmp, struct rbd_device, node);
		if (rbd_dev->dev_id == dev_id) {
			ret = 0;
			break;
		}
	}
	if (!ret) {
		spin_lock_irq(&rbd_dev->lock);
		if (rbd_dev->open_count && !force)
			ret = -EBUSY;
		else if (test_and_set_bit(RBD_DEV_FLAG_REMOVING,
					  &rbd_dev->flags))
			ret = -EINPROGRESS;
		spin_unlock_irq(&rbd_dev->lock);
	}
	spin_unlock(&rbd_dev_list_lock);
	if (ret)
		return ret;

	if (force) {
		/*
		 * Prevent new IO from being queued and wait for existing
		 * IO to complete/fail.
		 */
		blk_mq_freeze_queue(rbd_dev->disk->queue);
		blk_set_queue_dying(rbd_dev->disk->queue);
	}

	del_gendisk(rbd_dev->disk);
	spin_lock(&rbd_dev_list_lock);
	list_del_init(&rbd_dev->node);
	spin_unlock(&rbd_dev_list_lock);
	device_del(&rbd_dev->dev);

	rbd_dev_image_unlock(rbd_dev);
	rbd_dev_device_release(rbd_dev);
	rbd_dev_image_release(rbd_dev);
	rbd_dev_destroy(rbd_dev);
	return count;
}
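
/*
 * Illustrative example (editor's addition): unmapping the device with
 * id 0, forcing removal even while the device is still open:
 *
 *	$ echo "0 force" > /sys/bus/rbd/remove
 *
 * Without "force", removing an open device fails with -EBUSY.
 */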

static ssize_t remove_store(struct bus_type *bus, const char *buf, size_t count)
{
	if (single_major)
		return -EINVAL;

	return do_rbd_remove(bus, buf, count);
}

static ssize_t remove_single_major_store(struct bus_type *bus, const char *buf,
					 size_t count)
{
	return do_rbd_remove(bus, buf, count);
}

/*
 * create control files in sysfs
 * /sys/bus/rbd/...
 */
static int __init rbd_sysfs_init(void)
{
	int ret;

	ret = device_register(&rbd_root_dev);
	if (ret < 0)
		return ret;

	ret = bus_register(&rbd_bus_type);
	if (ret < 0)
		device_unregister(&rbd_root_dev);

	return ret;
}

static void __exit rbd_sysfs_cleanup(void)
{
	bus_unregister(&rbd_bus_type);
	device_unregister(&rbd_root_dev);
}

static int __init rbd_slab_init(void)
{
	rbd_assert(!rbd_img_request_cache);
	rbd_img_request_cache = KMEM_CACHE(rbd_img_request, 0);
	if (!rbd_img_request_cache)
		return -ENOMEM;

	rbd_assert(!rbd_obj_request_cache);
	rbd_obj_request_cache = KMEM_CACHE(rbd_obj_request, 0);
	if (!rbd_obj_request_cache)
		goto out_err;

	return 0;

out_err:
	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;
	return -ENOMEM;
}

static void rbd_slab_exit(void)
{
	rbd_assert(rbd_obj_request_cache);
	kmem_cache_destroy(rbd_obj_request_cache);
	rbd_obj_request_cache = NULL;

	rbd_assert(rbd_img_request_cache);
	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;
}

static int __init rbd_init(void)
{
	int rc;

	if (!libceph_compatible(NULL)) {
		rbd_warn(NULL, "libceph incompatibility (quitting)");
		return -EINVAL;
	}

	rc = rbd_slab_init();
	if (rc)
		return rc;

	/*
	 * The number of active work items is limited by the number of
	 * rbd devices * queue depth, so leave @max_active at default.
	 */
	rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
	if (!rbd_wq) {
		rc = -ENOMEM;
		goto err_out_slab;
	}

	if (single_major) {
		rbd_major = register_blkdev(0, RBD_DRV_NAME);
		if (rbd_major < 0) {
			rc = rbd_major;
			goto err_out_wq;
		}
	}

	rc = rbd_sysfs_init();
	if (rc)
		goto err_out_blkdev;

	if (single_major)
		pr_info("loaded (major %d)\n", rbd_major);
	else
		pr_info("loaded\n");

	return 0;

err_out_blkdev:
	if (single_major)
		unregister_blkdev(rbd_major, RBD_DRV_NAME);
err_out_wq:
	destroy_workqueue(rbd_wq);
err_out_slab:
	rbd_slab_exit();
	return rc;
}

static void __exit rbd_exit(void)
{
	ida_destroy(&rbd_dev_id_ida);
	rbd_sysfs_cleanup();
	if (single_major)
		unregister_blkdev(rbd_major, RBD_DRV_NAME);
	destroy_workqueue(rbd_wq);
	rbd_slab_exit();
}

module_init(rbd_init);
module_exit(rbd_exit);

MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
/* following authorship retained from original osdblk.c */
MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");

MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
MODULE_LICENSE("GPL");