net/ceph/osd_client.c

DEFINITIONS

This source file includes the following definitions:
  1. rwsem_is_wrlocked
  2. verify_osdc_locked
  3. verify_osdc_wrlocked
  4. verify_osd_locked
  5. verify_lreq_locked
  6. verify_osdc_locked
  7. verify_osdc_wrlocked
  8. verify_osd_locked
  9. verify_lreq_locked
  10. calc_layout
  11. ceph_osd_data_init
  12. ceph_osd_data_pages_init
  13. ceph_osd_data_pagelist_init
  14. ceph_osd_data_bio_init
  15. ceph_osd_data_bvecs_init
  16. osd_req_op_raw_data_in
  17. osd_req_op_extent_osd_data
  18. osd_req_op_raw_data_in_pages
  19. osd_req_op_extent_osd_data_pages
  20. osd_req_op_extent_osd_data_pagelist
  21. osd_req_op_extent_osd_data_bio
  22. osd_req_op_extent_osd_data_bvecs
  23. osd_req_op_extent_osd_data_bvec_pos
  24. osd_req_op_cls_request_info_pagelist
  25. osd_req_op_cls_request_data_pagelist
  26. osd_req_op_cls_request_data_pages
  27. osd_req_op_cls_request_data_bvecs
  28. osd_req_op_cls_response_data_pages
  29. ceph_osd_data_length
  30. ceph_osd_data_release
  31. osd_req_op_data_release
  32. target_init
  33. target_copy
  34. target_destroy
  35. request_release_checks
  36. ceph_osdc_release_request
  37. ceph_osdc_get_request
  38. ceph_osdc_put_request
  39. request_init
  40. request_reinit
  41. ceph_osdc_alloc_request
  42. ceph_oloc_encoding_size
  43. __ceph_osdc_alloc_messages
  44. osd_req_opcode_valid
  45. get_num_data_items
  46. ceph_osdc_alloc_messages
  47. _osd_req_op_init
  48. osd_req_op_init
  49. osd_req_op_extent_init
  50. osd_req_op_extent_update
  51. osd_req_op_extent_dup_last
  52. osd_req_op_cls_init
  53. osd_req_op_xattr_init
  54. osd_req_op_watch_init
  55. osd_req_op_alloc_hint_init
  56. ceph_osdc_msg_data_add
  57. osd_req_encode_op
  58. ceph_osdc_new_request
  59. DEFINE_RB_FUNCS
  60. osd_homeless
  61. osd_registered
  62. osd_init
  63. osd_cleanup
  64. create_osd
  65. get_osd
  66. put_osd
  67. DEFINE_RB_FUNCS
  68. maybe_move_osd_to_lru
  69. __remove_osd_from_lru
  70. close_osd
  71. reopen_osd
  72. lookup_create_osd
  73. link_request
  74. unlink_request
  75. __pool_full
  76. have_pool_full
  77. pool_full
  78. target_should_be_paused
  79. alloc_spg_mapping
  80. free_spg_mapping
  81. DEFINE_RB_FUNCS2
  82. hoid_get_effective_key
  83. compare_names
  84. hoid_compare
  85. decode_hoid
  86. hoid_encoding_size
  87. encode_hoid
  88. free_hoid
  89. alloc_backoff
  90. free_backoff
  91. lookup_containing_backoff
  92. DEFINE_RB_FUNCS
  93. hoid_fill_from_target
  94. should_plug_request
  95. setup_request_data
  96. encode_pgid
  97. encode_spgid
  98. encode_oloc
  99. encode_request_partial
  100. encode_request_finish
  101. send_request
  102. maybe_request_map
  103. __submit_request
  104. account_request
  105. submit_request
  106. finish_request
  107. __complete_request
  108. complete_request_workfn
  109. complete_request
  110. cancel_map_check
  111. cancel_request
  112. abort_request
  113. abort_fn
  114. ceph_osdc_abort_requests
  115. ceph_osdc_clear_abort_err
  116. update_epoch_barrier
  117. ceph_osdc_update_epoch_barrier
  118. abort_on_full_fn
  119. ceph_osdc_abort_on_full
  120. check_pool_dne
  121. map_check_cb
  122. send_map_check
  123. linger_release
  124. linger_put
  125. linger_get
  126. linger_alloc
  127. DEFINE_RB_INSDEL_FUNCS
  128. unlink_linger
  129. __linger_registered
  130. linger_registered
  131. linger_register
  132. linger_unregister
  133. cancel_linger_request
  134. lwork_alloc
  135. lwork_free
  136. lwork_queue
  137. do_watch_notify
  138. do_watch_error
  139. queue_watch_error
  140. linger_reg_commit_complete
  141. linger_commit_cb
  142. normalize_watch_error
  143. linger_reconnect_cb
  144. send_linger
  145. linger_ping_cb
  146. send_linger_ping
  147. linger_submit
  148. cancel_linger_map_check
  149. __linger_cancel
  150. linger_cancel
  151. check_linger_pool_dne
  152. linger_map_check_cb
  153. send_linger_map_check
  154. linger_reg_commit_wait
  155. linger_notify_finish_wait
  156. handle_timeout
  157. handle_osds_timeout
  158. ceph_oloc_decode
  159. ceph_redirect_decode
  160. decode_MOSDOpReply
  161. handle_reply
  162. set_pool_was_full
  163. pool_cleared_full
  164. recalc_linger_target
  165. scan_requests
  166. handle_one_map
  167. kick_requests
  168. ceph_osdc_handle_map
  169. kick_osd_requests
  170. osd_fault
  171. decode_MOSDBackoff
  172. create_backoff_message
  173. handle_backoff_block
  174. target_contained_by
  175. handle_backoff_unblock
  176. handle_backoff
  177. handle_watch_notify
  178. ceph_osdc_start_request
  179. ceph_osdc_cancel_request
  180. wait_request_timeout
  181. ceph_osdc_wait_request
  182. ceph_osdc_sync
  183. alloc_linger_request
  184. alloc_watch_request
  185. ceph_osdc_watch
  186. ceph_osdc_unwatch
  187. osd_req_op_notify_ack_init
  188. ceph_osdc_notify_ack
  189. osd_req_op_notify_init
  190. ceph_osdc_notify
  191. ceph_osdc_watch_check
  192. decode_watcher
  193. decode_watchers
  194. ceph_osdc_list_watchers
  195. ceph_osdc_flush_notifies
  196. ceph_osdc_maybe_request_map
  197. ceph_osdc_call
  198. ceph_osdc_reopen_osds
  199. ceph_osdc_init
  200. ceph_osdc_stop
  201. ceph_osdc_readpages
  202. ceph_osdc_writepages
  203. osd_req_op_copy_from_init
  204. ceph_osdc_copy_from
  205. ceph_osdc_setup
  206. ceph_osdc_cleanup
  207. dispatch
  208. get_reply
  209. alloc_msg_with_page_vector
  210. alloc_msg
  211. get_osd_con
  212. put_osd_con
  213. get_authorizer
  214. add_authorizer_challenge
  215. verify_authorizer_reply
  216. invalidate_authorizer
  217. osd_reencode_message
  218. osd_sign_message
  219. osd_check_message_signature

// SPDX-License-Identifier: GPL-2.0

#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#ifdef CONFIG_BLOCK
#include <linux/bio.h>
#endif

#include <linux/ceph/ceph_features.h>
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/pagelist.h>
#include <linux/ceph/striper.h>

#define OSD_OPREPLY_FRONT_LEN   512

static struct kmem_cache        *ceph_osd_request_cache;

static const struct ceph_connection_operations osd_con_ops;

/*
 * Implement client access to a distributed object storage cluster.
 *
 * All data objects are stored within a cluster/cloud of OSDs, or
 * "object storage devices."  (Note that Ceph OSDs have _nothing_ to
 * do with the T10 OSD extensions to SCSI.)  Ceph OSDs are simply
 * remote daemons serving up and coordinating consistent and safe
 * access to storage.
 *
 * Cluster membership and the mapping of data objects onto storage devices
 * are described by the osd map.
 *
 * We keep track of pending OSD requests (read, write), resubmit
 * requests to different OSDs when the cluster topology/data layout
 * change, or retry the affected requests when the communications
 * channel with an OSD is reset.
 */

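/*
 * Illustrative sketch (not built): the typical lifecycle of a request
 * against this API, essentially what ceph_osdc_readpages() below does.
 * The helper name and its arguments are hypothetical; the osd_client
 * calls are the ones defined in this file.
 */
#if 0
static int example_object_read(struct ceph_osd_client *osdc,
                               struct ceph_file_layout *layout,
                               struct ceph_vino vino, u64 off, u64 len,
                               struct page **pages)
{
        struct ceph_osd_request *req;
        int ret;

        req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 0, 1,
                                    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
                                    NULL, 0, 0, false);
        if (IS_ERR(req))
                return PTR_ERR(req);

        /* attach the destination pages to the read op */
        osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false, false);

        ceph_osdc_start_request(osdc, req, false);
        ret = ceph_osdc_wait_request(osdc, req);
        ceph_osdc_put_request(req);
        return ret;
}
#endif
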
static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req);
static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req);
static void link_linger(struct ceph_osd *osd,
                        struct ceph_osd_linger_request *lreq);
static void unlink_linger(struct ceph_osd *osd,
                          struct ceph_osd_linger_request *lreq);
static void clear_backoffs(struct ceph_osd *osd);

#if 1
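/*
 * Best-effort check that @sem is write-locked: if down_read_trylock()
 * succeeds, the semaphore cannot currently be held for write, so a
 * failed trylock is taken to mean "write-locked".
 */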
static inline bool rwsem_is_wrlocked(struct rw_semaphore *sem)
{
        bool wrlocked = true;

        if (unlikely(down_read_trylock(sem))) {
                wrlocked = false;
                up_read(sem);
        }

        return wrlocked;
}
static inline void verify_osdc_locked(struct ceph_osd_client *osdc)
{
        WARN_ON(!rwsem_is_locked(&osdc->lock));
}
static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc)
{
        WARN_ON(!rwsem_is_wrlocked(&osdc->lock));
}
static inline void verify_osd_locked(struct ceph_osd *osd)
{
        struct ceph_osd_client *osdc = osd->o_osdc;

        WARN_ON(!(mutex_is_locked(&osd->lock) &&
                  rwsem_is_locked(&osdc->lock)) &&
                !rwsem_is_wrlocked(&osdc->lock));
}
static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq)
{
        WARN_ON(!mutex_is_locked(&lreq->lock));
}
#else
static inline void verify_osdc_locked(struct ceph_osd_client *osdc) { }
static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc) { }
static inline void verify_osd_locked(struct ceph_osd *osd) { }
static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq) { }
#endif

/*
 * calculate the mapping of a file extent onto an object, and fill out the
 * request accordingly.  shorten extent as necessary if it crosses an
 * object boundary.
 *
 * fill osd op in request message.
 */
static int calc_layout(struct ceph_file_layout *layout, u64 off, u64 *plen,
                        u64 *objnum, u64 *objoff, u64 *objlen)
{
        u64 orig_len = *plen;
        u32 xlen;

        /* object extent? */
        ceph_calc_file_object_mapping(layout, off, orig_len, objnum,
                                          objoff, &xlen);
        *objlen = xlen;
        if (*objlen < orig_len) {
                *plen = *objlen;
                dout(" skipping last %llu, final file extent %llu~%llu\n",
                     orig_len - *plen, off, *plen);
        }

        dout("calc_layout objnum=%llx %llu~%llu\n", *objnum, *objoff, *objlen);
        return 0;
}
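
/*
 * Worked example (assuming the default layout of 4M objects with
 * stripe_unit == object_size and stripe_count == 1): a file extent
 * 6M~4M maps to objnum 1, objoff 2M and is cut at the object
 * boundary, so *plen comes back as 2M and the caller must issue a
 * second request for the remainder.
 */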

static void ceph_osd_data_init(struct ceph_osd_data *osd_data)
{
        memset(osd_data, 0, sizeof (*osd_data));
        osd_data->type = CEPH_OSD_DATA_TYPE_NONE;
}

/*
 * Consumes @pages if @own_pages is true.
 */
static void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data,
                        struct page **pages, u64 length, u32 alignment,
                        bool pages_from_pool, bool own_pages)
{
        osd_data->type = CEPH_OSD_DATA_TYPE_PAGES;
        osd_data->pages = pages;
        osd_data->length = length;
        osd_data->alignment = alignment;
        osd_data->pages_from_pool = pages_from_pool;
        osd_data->own_pages = own_pages;
}

/*
 * Consumes a ref on @pagelist.
 */
static void ceph_osd_data_pagelist_init(struct ceph_osd_data *osd_data,
                        struct ceph_pagelist *pagelist)
{
        osd_data->type = CEPH_OSD_DATA_TYPE_PAGELIST;
        osd_data->pagelist = pagelist;
}

#ifdef CONFIG_BLOCK
static void ceph_osd_data_bio_init(struct ceph_osd_data *osd_data,
                                   struct ceph_bio_iter *bio_pos,
                                   u32 bio_length)
{
        osd_data->type = CEPH_OSD_DATA_TYPE_BIO;
        osd_data->bio_pos = *bio_pos;
        osd_data->bio_length = bio_length;
}
#endif /* CONFIG_BLOCK */

static void ceph_osd_data_bvecs_init(struct ceph_osd_data *osd_data,
                                     struct ceph_bvec_iter *bvec_pos,
                                     u32 num_bvecs)
{
        osd_data->type = CEPH_OSD_DATA_TYPE_BVECS;
        osd_data->bvec_pos = *bvec_pos;
        osd_data->num_bvecs = num_bvecs;
}

static struct ceph_osd_data *
osd_req_op_raw_data_in(struct ceph_osd_request *osd_req, unsigned int which)
{
        BUG_ON(which >= osd_req->r_num_ops);

        return &osd_req->r_ops[which].raw_data_in;
}

struct ceph_osd_data *
osd_req_op_extent_osd_data(struct ceph_osd_request *osd_req,
                        unsigned int which)
{
        return osd_req_op_data(osd_req, which, extent, osd_data);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data);

void osd_req_op_raw_data_in_pages(struct ceph_osd_request *osd_req,
                        unsigned int which, struct page **pages,
                        u64 length, u32 alignment,
                        bool pages_from_pool, bool own_pages)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_raw_data_in(osd_req, which);
        ceph_osd_data_pages_init(osd_data, pages, length, alignment,
                                pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_raw_data_in_pages);

void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *osd_req,
                        unsigned int which, struct page **pages,
                        u64 length, u32 alignment,
                        bool pages_from_pool, bool own_pages)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
        ceph_osd_data_pages_init(osd_data, pages, length, alignment,
                                pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_pages);

void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *osd_req,
                        unsigned int which, struct ceph_pagelist *pagelist)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
        ceph_osd_data_pagelist_init(osd_data, pagelist);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_pagelist);

#ifdef CONFIG_BLOCK
void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req,
                                    unsigned int which,
                                    struct ceph_bio_iter *bio_pos,
                                    u32 bio_length)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
        ceph_osd_data_bio_init(osd_data, bio_pos, bio_length);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_bio);
#endif /* CONFIG_BLOCK */

void osd_req_op_extent_osd_data_bvecs(struct ceph_osd_request *osd_req,
                                      unsigned int which,
                                      struct bio_vec *bvecs, u32 num_bvecs,
                                      u32 bytes)
{
        struct ceph_osd_data *osd_data;
        struct ceph_bvec_iter it = {
                .bvecs = bvecs,
                .iter = { .bi_size = bytes },
        };

        osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
        ceph_osd_data_bvecs_init(osd_data, &it, num_bvecs);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_bvecs);

void osd_req_op_extent_osd_data_bvec_pos(struct ceph_osd_request *osd_req,
                                         unsigned int which,
                                         struct ceph_bvec_iter *bvec_pos)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
        ceph_osd_data_bvecs_init(osd_data, bvec_pos, 0);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_bvec_pos);

static void osd_req_op_cls_request_info_pagelist(
                        struct ceph_osd_request *osd_req,
                        unsigned int which, struct ceph_pagelist *pagelist)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, cls, request_info);
        ceph_osd_data_pagelist_init(osd_data, pagelist);
}

void osd_req_op_cls_request_data_pagelist(
                        struct ceph_osd_request *osd_req,
                        unsigned int which, struct ceph_pagelist *pagelist)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, cls, request_data);
        ceph_osd_data_pagelist_init(osd_data, pagelist);
        osd_req->r_ops[which].cls.indata_len += pagelist->length;
        osd_req->r_ops[which].indata_len += pagelist->length;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_pagelist);

void osd_req_op_cls_request_data_pages(struct ceph_osd_request *osd_req,
                        unsigned int which, struct page **pages, u64 length,
                        u32 alignment, bool pages_from_pool, bool own_pages)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, cls, request_data);
        ceph_osd_data_pages_init(osd_data, pages, length, alignment,
                                pages_from_pool, own_pages);
        osd_req->r_ops[which].cls.indata_len += length;
        osd_req->r_ops[which].indata_len += length;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_pages);

void osd_req_op_cls_request_data_bvecs(struct ceph_osd_request *osd_req,
                                       unsigned int which,
                                       struct bio_vec *bvecs, u32 num_bvecs,
                                       u32 bytes)
{
        struct ceph_osd_data *osd_data;
        struct ceph_bvec_iter it = {
                .bvecs = bvecs,
                .iter = { .bi_size = bytes },
        };

        osd_data = osd_req_op_data(osd_req, which, cls, request_data);
        ceph_osd_data_bvecs_init(osd_data, &it, num_bvecs);
        osd_req->r_ops[which].cls.indata_len += bytes;
        osd_req->r_ops[which].indata_len += bytes;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_bvecs);

void osd_req_op_cls_response_data_pages(struct ceph_osd_request *osd_req,
                        unsigned int which, struct page **pages, u64 length,
                        u32 alignment, bool pages_from_pool, bool own_pages)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, cls, response_data);
        ceph_osd_data_pages_init(osd_data, pages, length, alignment,
                                pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_cls_response_data_pages);

static u64 ceph_osd_data_length(struct ceph_osd_data *osd_data)
{
        switch (osd_data->type) {
        case CEPH_OSD_DATA_TYPE_NONE:
                return 0;
        case CEPH_OSD_DATA_TYPE_PAGES:
                return osd_data->length;
        case CEPH_OSD_DATA_TYPE_PAGELIST:
                return (u64)osd_data->pagelist->length;
#ifdef CONFIG_BLOCK
        case CEPH_OSD_DATA_TYPE_BIO:
                return (u64)osd_data->bio_length;
#endif /* CONFIG_BLOCK */
        case CEPH_OSD_DATA_TYPE_BVECS:
                return osd_data->bvec_pos.iter.bi_size;
        default:
                WARN(true, "unrecognized data type %d\n", (int)osd_data->type);
                return 0;
        }
}

static void ceph_osd_data_release(struct ceph_osd_data *osd_data)
{
        if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES && osd_data->own_pages) {
                int num_pages;

                num_pages = calc_pages_for((u64)osd_data->alignment,
                                                (u64)osd_data->length);
                ceph_release_page_vector(osd_data->pages, num_pages);
        } else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) {
                ceph_pagelist_release(osd_data->pagelist);
        }
        ceph_osd_data_init(osd_data);
}

static void osd_req_op_data_release(struct ceph_osd_request *osd_req,
                        unsigned int which)
{
        struct ceph_osd_req_op *op;

        BUG_ON(which >= osd_req->r_num_ops);
        op = &osd_req->r_ops[which];

        switch (op->op) {
        case CEPH_OSD_OP_READ:
        case CEPH_OSD_OP_WRITE:
        case CEPH_OSD_OP_WRITEFULL:
                ceph_osd_data_release(&op->extent.osd_data);
                break;
        case CEPH_OSD_OP_CALL:
                ceph_osd_data_release(&op->cls.request_info);
                ceph_osd_data_release(&op->cls.request_data);
                ceph_osd_data_release(&op->cls.response_data);
                break;
        case CEPH_OSD_OP_SETXATTR:
        case CEPH_OSD_OP_CMPXATTR:
                ceph_osd_data_release(&op->xattr.osd_data);
                break;
        case CEPH_OSD_OP_STAT:
                ceph_osd_data_release(&op->raw_data_in);
                break;
        case CEPH_OSD_OP_NOTIFY_ACK:
                ceph_osd_data_release(&op->notify_ack.request_data);
                break;
        case CEPH_OSD_OP_NOTIFY:
                ceph_osd_data_release(&op->notify.request_data);
                ceph_osd_data_release(&op->notify.response_data);
                break;
        case CEPH_OSD_OP_LIST_WATCHERS:
                ceph_osd_data_release(&op->list_watchers.response_data);
                break;
        case CEPH_OSD_OP_COPY_FROM:
                ceph_osd_data_release(&op->copy_from.osd_data);
                break;
        default:
                break;
        }
}

/*
 * Assumes @t is zero-initialized.
 */
static void target_init(struct ceph_osd_request_target *t)
{
        ceph_oid_init(&t->base_oid);
        ceph_oloc_init(&t->base_oloc);
        ceph_oid_init(&t->target_oid);
        ceph_oloc_init(&t->target_oloc);

        ceph_osds_init(&t->acting);
        ceph_osds_init(&t->up);
        t->size = -1;
        t->min_size = -1;

        t->osd = CEPH_HOMELESS_OSD;
}

static void target_copy(struct ceph_osd_request_target *dest,
                        const struct ceph_osd_request_target *src)
{
        ceph_oid_copy(&dest->base_oid, &src->base_oid);
        ceph_oloc_copy(&dest->base_oloc, &src->base_oloc);
        ceph_oid_copy(&dest->target_oid, &src->target_oid);
        ceph_oloc_copy(&dest->target_oloc, &src->target_oloc);

        dest->pgid = src->pgid; /* struct */
        dest->spgid = src->spgid; /* struct */
        dest->pg_num = src->pg_num;
        dest->pg_num_mask = src->pg_num_mask;
        ceph_osds_copy(&dest->acting, &src->acting);
        ceph_osds_copy(&dest->up, &src->up);
        dest->size = src->size;
        dest->min_size = src->min_size;
        dest->sort_bitwise = src->sort_bitwise;

        dest->flags = src->flags;
        dest->paused = src->paused;

        dest->epoch = src->epoch;
        dest->last_force_resend = src->last_force_resend;

        dest->osd = src->osd;
}

static void target_destroy(struct ceph_osd_request_target *t)
{
        ceph_oid_destroy(&t->base_oid);
        ceph_oloc_destroy(&t->base_oloc);
        ceph_oid_destroy(&t->target_oid);
        ceph_oloc_destroy(&t->target_oloc);
}

/*
 * requests
 */
static void request_release_checks(struct ceph_osd_request *req)
{
        WARN_ON(!RB_EMPTY_NODE(&req->r_node));
        WARN_ON(!RB_EMPTY_NODE(&req->r_mc_node));
        WARN_ON(!list_empty(&req->r_private_item));
        WARN_ON(req->r_osd);
}

static void ceph_osdc_release_request(struct kref *kref)
{
        struct ceph_osd_request *req = container_of(kref,
                                            struct ceph_osd_request, r_kref);
        unsigned int which;

        dout("%s %p (r_request %p r_reply %p)\n", __func__, req,
             req->r_request, req->r_reply);
        request_release_checks(req);

        if (req->r_request)
                ceph_msg_put(req->r_request);
        if (req->r_reply)
                ceph_msg_put(req->r_reply);

        for (which = 0; which < req->r_num_ops; which++)
                osd_req_op_data_release(req, which);

        target_destroy(&req->r_t);
        ceph_put_snap_context(req->r_snapc);

        if (req->r_mempool)
                mempool_free(req, req->r_osdc->req_mempool);
        else if (req->r_num_ops <= CEPH_OSD_SLAB_OPS)
                kmem_cache_free(ceph_osd_request_cache, req);
        else
                kfree(req);
}

void ceph_osdc_get_request(struct ceph_osd_request *req)
{
        dout("%s %p (was %d)\n", __func__, req,
             kref_read(&req->r_kref));
        kref_get(&req->r_kref);
}
EXPORT_SYMBOL(ceph_osdc_get_request);

void ceph_osdc_put_request(struct ceph_osd_request *req)
{
        if (req) {
                dout("%s %p (was %d)\n", __func__, req,
                     kref_read(&req->r_kref));
                kref_put(&req->r_kref, ceph_osdc_release_request);
        }
}
EXPORT_SYMBOL(ceph_osdc_put_request);

static void request_init(struct ceph_osd_request *req)
{
        /* req only, each op is zeroed in _osd_req_op_init() */
        memset(req, 0, sizeof(*req));

        kref_init(&req->r_kref);
        init_completion(&req->r_completion);
        RB_CLEAR_NODE(&req->r_node);
        RB_CLEAR_NODE(&req->r_mc_node);
        INIT_LIST_HEAD(&req->r_private_item);

        target_init(&req->r_t);
}

/*
 * This is ugly, but it allows us to reuse linger registration and ping
 * requests, keeping the structure of the code around send_linger{_ping}()
 * reasonable.  Setting up a min_nr=2 mempool for each linger request
 * and dealing with copying ops (this blasts req only, watch op remains
 * intact) isn't any better.
 */
static void request_reinit(struct ceph_osd_request *req)
{
        struct ceph_osd_client *osdc = req->r_osdc;
        bool mempool = req->r_mempool;
        unsigned int num_ops = req->r_num_ops;
        u64 snapid = req->r_snapid;
        struct ceph_snap_context *snapc = req->r_snapc;
        bool linger = req->r_linger;
        struct ceph_msg *request_msg = req->r_request;
        struct ceph_msg *reply_msg = req->r_reply;

        dout("%s req %p\n", __func__, req);
        WARN_ON(kref_read(&req->r_kref) != 1);
        request_release_checks(req);

        WARN_ON(kref_read(&request_msg->kref) != 1);
        WARN_ON(kref_read(&reply_msg->kref) != 1);
        target_destroy(&req->r_t);

        request_init(req);
        req->r_osdc = osdc;
        req->r_mempool = mempool;
        req->r_num_ops = num_ops;
        req->r_snapid = snapid;
        req->r_snapc = snapc;
        req->r_linger = linger;
        req->r_request = request_msg;
        req->r_reply = reply_msg;
}

struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
                                               struct ceph_snap_context *snapc,
                                               unsigned int num_ops,
                                               bool use_mempool,
                                               gfp_t gfp_flags)
{
        struct ceph_osd_request *req;

        if (use_mempool) {
                BUG_ON(num_ops > CEPH_OSD_SLAB_OPS);
                req = mempool_alloc(osdc->req_mempool, gfp_flags);
        } else if (num_ops <= CEPH_OSD_SLAB_OPS) {
                req = kmem_cache_alloc(ceph_osd_request_cache, gfp_flags);
        } else {
                BUG_ON(num_ops > CEPH_OSD_MAX_OPS);
                req = kmalloc(struct_size(req, r_ops, num_ops), gfp_flags);
        }
        if (unlikely(!req))
                return NULL;

        request_init(req);
        req->r_osdc = osdc;
        req->r_mempool = use_mempool;
        req->r_num_ops = num_ops;
        req->r_snapid = CEPH_NOSNAP;
        req->r_snapc = ceph_get_snap_context(snapc);

        dout("%s req %p\n", __func__, req);
        return req;
}
EXPORT_SYMBOL(ceph_osdc_alloc_request);

static int ceph_oloc_encoding_size(const struct ceph_object_locator *oloc)
{
        return 8 + 4 + 4 + 4 + (oloc->pool_ns ? oloc->pool_ns->len : 0);
}

static int __ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp,
                                      int num_request_data_items,
                                      int num_reply_data_items)
{
        struct ceph_osd_client *osdc = req->r_osdc;
        struct ceph_msg *msg;
        int msg_size;

        WARN_ON(req->r_request || req->r_reply);
        WARN_ON(ceph_oid_empty(&req->r_base_oid));
        WARN_ON(ceph_oloc_empty(&req->r_base_oloc));

        /* create request message */
        msg_size = CEPH_ENCODING_START_BLK_LEN +
                        CEPH_PGID_ENCODING_LEN + 1; /* spgid */
        msg_size += 4 + 4 + 4; /* hash, osdmap_epoch, flags */
        msg_size += CEPH_ENCODING_START_BLK_LEN +
                        sizeof(struct ceph_osd_reqid); /* reqid */
        msg_size += sizeof(struct ceph_blkin_trace_info); /* trace */
        msg_size += 4 + sizeof(struct ceph_timespec); /* client_inc, mtime */
        msg_size += CEPH_ENCODING_START_BLK_LEN +
                        ceph_oloc_encoding_size(&req->r_base_oloc); /* oloc */
        msg_size += 4 + req->r_base_oid.name_len; /* oid */
        msg_size += 2 + req->r_num_ops * sizeof(struct ceph_osd_op);
        msg_size += 8; /* snapid */
        msg_size += 8; /* snap_seq */
        msg_size += 4 + 8 * (req->r_snapc ? req->r_snapc->num_snaps : 0);
        msg_size += 4 + 8; /* retry_attempt, features */

        if (req->r_mempool)
                msg = ceph_msgpool_get(&osdc->msgpool_op, msg_size,
                                       num_request_data_items);
        else
                msg = ceph_msg_new2(CEPH_MSG_OSD_OP, msg_size,
                                    num_request_data_items, gfp, true);
        if (!msg)
                return -ENOMEM;

        memset(msg->front.iov_base, 0, msg->front.iov_len);
        req->r_request = msg;

        /* create reply message */
        msg_size = OSD_OPREPLY_FRONT_LEN;
        msg_size += req->r_base_oid.name_len;
        msg_size += req->r_num_ops * sizeof(struct ceph_osd_op);

        if (req->r_mempool)
                msg = ceph_msgpool_get(&osdc->msgpool_op_reply, msg_size,
                                       num_reply_data_items);
        else
                msg = ceph_msg_new2(CEPH_MSG_OSD_OPREPLY, msg_size,
                                    num_reply_data_items, gfp, true);
        if (!msg)
                return -ENOMEM;

        req->r_reply = msg;

        return 0;
}

static bool osd_req_opcode_valid(u16 opcode)
{
        switch (opcode) {
#define GENERATE_CASE(op, opcode, str)  case CEPH_OSD_OP_##op: return true;
__CEPH_FORALL_OSD_OPS(GENERATE_CASE)
#undef GENERATE_CASE
        default:
                return false;
        }
}
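
/*
 * The switch above is built with an X-macro: __CEPH_FORALL_OSD_OPS()
 * (defined in <linux/ceph/rados.h>) expands GENERATE_CASE once per
 * known opcode, yielding a case label for every valid op.
 */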

static void get_num_data_items(struct ceph_osd_request *req,
                               int *num_request_data_items,
                               int *num_reply_data_items)
{
        struct ceph_osd_req_op *op;

        *num_request_data_items = 0;
        *num_reply_data_items = 0;

        for (op = req->r_ops; op != &req->r_ops[req->r_num_ops]; op++) {
                switch (op->op) {
                /* request */
                case CEPH_OSD_OP_WRITE:
                case CEPH_OSD_OP_WRITEFULL:
                case CEPH_OSD_OP_SETXATTR:
                case CEPH_OSD_OP_CMPXATTR:
                case CEPH_OSD_OP_NOTIFY_ACK:
                case CEPH_OSD_OP_COPY_FROM:
                        *num_request_data_items += 1;
                        break;

                /* reply */
                case CEPH_OSD_OP_STAT:
                case CEPH_OSD_OP_READ:
                case CEPH_OSD_OP_LIST_WATCHERS:
                        *num_reply_data_items += 1;
                        break;

                /* both */
                case CEPH_OSD_OP_NOTIFY:
                        *num_request_data_items += 1;
                        *num_reply_data_items += 1;
                        break;
                case CEPH_OSD_OP_CALL:
                        *num_request_data_items += 2;
                        *num_reply_data_items += 1;
                        break;

                default:
                        WARN_ON(!osd_req_opcode_valid(op->op));
                        break;
                }
        }
}

/*
 * oid, oloc and OSD op opcode(s) must be filled in before this function
 * is called.
 */
int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp)
{
        int num_request_data_items, num_reply_data_items;

        get_num_data_items(req, &num_request_data_items, &num_reply_data_items);
        return __ceph_osdc_alloc_messages(req, gfp, num_request_data_items,
                                          num_reply_data_items);
}
EXPORT_SYMBOL(ceph_osdc_alloc_messages);
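
/*
 * Illustrative sketch (not built) of the contract above: fill in the
 * base oid/oloc and init the op(s) before allocating messages.  The
 * helper and its arguments are hypothetical; compare ceph_osdc_call()
 * below for the real pattern.
 */
#if 0
static struct ceph_osd_request *
example_stat_request(struct ceph_osd_client *osdc, s64 pool,
                     const char *name)
{
        struct ceph_osd_request *req;
        int ret;

        req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOFS);
        if (!req)
                return ERR_PTR(-ENOMEM);

        /* oid, oloc and opcode must be set before allocating messages */
        req->r_base_oloc.pool = pool;
        ceph_oid_printf(&req->r_base_oid, "%s", name);
        osd_req_op_init(req, 0, CEPH_OSD_OP_STAT, 0);

        ret = ceph_osdc_alloc_messages(req, GFP_NOFS);
        if (ret) {
                ceph_osdc_put_request(req);
                return ERR_PTR(ret);
        }
        return req;
}
#endif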

/*
 * This is an osd op init function for opcodes that have no data or
 * other information associated with them.  It also serves as a
 * common init routine for all the other init functions, below.
 */
static struct ceph_osd_req_op *
_osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which,
                 u16 opcode, u32 flags)
{
        struct ceph_osd_req_op *op;

        BUG_ON(which >= osd_req->r_num_ops);
        BUG_ON(!osd_req_opcode_valid(opcode));

        op = &osd_req->r_ops[which];
        memset(op, 0, sizeof (*op));
        op->op = opcode;
        op->flags = flags;

        return op;
}

void osd_req_op_init(struct ceph_osd_request *osd_req,
                     unsigned int which, u16 opcode, u32 flags)
{
        (void)_osd_req_op_init(osd_req, which, opcode, flags);
}
EXPORT_SYMBOL(osd_req_op_init);

void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
                                unsigned int which, u16 opcode,
                                u64 offset, u64 length,
                                u64 truncate_size, u32 truncate_seq)
{
        struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
                                                      opcode, 0);
        size_t payload_len = 0;

        BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
               opcode != CEPH_OSD_OP_WRITEFULL && opcode != CEPH_OSD_OP_ZERO &&
               opcode != CEPH_OSD_OP_TRUNCATE);

        op->extent.offset = offset;
        op->extent.length = length;
        op->extent.truncate_size = truncate_size;
        op->extent.truncate_seq = truncate_seq;
        if (opcode == CEPH_OSD_OP_WRITE || opcode == CEPH_OSD_OP_WRITEFULL)
                payload_len += length;

        op->indata_len = payload_len;
}
EXPORT_SYMBOL(osd_req_op_extent_init);

void osd_req_op_extent_update(struct ceph_osd_request *osd_req,
                                unsigned int which, u64 length)
{
        struct ceph_osd_req_op *op;
        u64 previous;

        BUG_ON(which >= osd_req->r_num_ops);
        op = &osd_req->r_ops[which];
        previous = op->extent.length;

        if (length == previous)
                return;         /* Nothing to do */
        BUG_ON(length > previous);

        op->extent.length = length;
        if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL)
                op->indata_len -= previous - length;
}
EXPORT_SYMBOL(osd_req_op_extent_update);

void osd_req_op_extent_dup_last(struct ceph_osd_request *osd_req,
                                unsigned int which, u64 offset_inc)
{
        struct ceph_osd_req_op *op, *prev_op;

        BUG_ON(which + 1 >= osd_req->r_num_ops);

        prev_op = &osd_req->r_ops[which];
        op = _osd_req_op_init(osd_req, which + 1, prev_op->op, prev_op->flags);
        /* dup previous one */
        op->indata_len = prev_op->indata_len;
        op->outdata_len = prev_op->outdata_len;
        op->extent = prev_op->extent;
        /* adjust offset */
        op->extent.offset += offset_inc;
        op->extent.length -= offset_inc;

        if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL)
                op->indata_len -= offset_inc;
}
EXPORT_SYMBOL(osd_req_op_extent_dup_last);

int osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
                        const char *class, const char *method)
{
        struct ceph_osd_req_op *op;
        struct ceph_pagelist *pagelist;
        size_t payload_len = 0;
        size_t size;
        int ret;

        op = _osd_req_op_init(osd_req, which, CEPH_OSD_OP_CALL, 0);

        pagelist = ceph_pagelist_alloc(GFP_NOFS);
        if (!pagelist)
                return -ENOMEM;

        op->cls.class_name = class;
        size = strlen(class);
        BUG_ON(size > (size_t) U8_MAX);
        op->cls.class_len = size;
        ret = ceph_pagelist_append(pagelist, class, size);
        if (ret)
                goto err_pagelist_free;
        payload_len += size;

        op->cls.method_name = method;
        size = strlen(method);
        BUG_ON(size > (size_t) U8_MAX);
        op->cls.method_len = size;
        ret = ceph_pagelist_append(pagelist, method, size);
        if (ret)
                goto err_pagelist_free;
        payload_len += size;

        osd_req_op_cls_request_info_pagelist(osd_req, which, pagelist);
        op->indata_len = payload_len;
        return 0;

err_pagelist_free:
        ceph_pagelist_release(pagelist);
        return ret;
}
EXPORT_SYMBOL(osd_req_op_cls_init);
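
/*
 * Illustrative sketch (not built): a CEPH_OSD_OP_CALL with input on a
 * pagelist, mirroring what ceph_osdc_call() below does.  "lock" and
 * "get_info" are the rados lock class and method used by
 * cls_lock_client.c; the helper itself is hypothetical.
 */
#if 0
static int example_cls_call_setup(struct ceph_osd_request *req,
                                  struct ceph_pagelist *in_data)
{
        int ret;

        ret = osd_req_op_cls_init(req, 0, "lock", "get_info");
        if (ret)
                return ret;

        /* bumps both cls.indata_len and the op's indata_len */
        osd_req_op_cls_request_data_pagelist(req, 0, in_data);
        return 0;
}
#endif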

int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
                          u16 opcode, const char *name, const void *value,
                          size_t size, u8 cmp_op, u8 cmp_mode)
{
        struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
                                                      opcode, 0);
        struct ceph_pagelist *pagelist;
        size_t payload_len;
        int ret;

        BUG_ON(opcode != CEPH_OSD_OP_SETXATTR && opcode != CEPH_OSD_OP_CMPXATTR);

        pagelist = ceph_pagelist_alloc(GFP_NOFS);
        if (!pagelist)
                return -ENOMEM;

        payload_len = strlen(name);
        op->xattr.name_len = payload_len;
        ret = ceph_pagelist_append(pagelist, name, payload_len);
        if (ret)
                goto err_pagelist_free;

        op->xattr.value_len = size;
        ret = ceph_pagelist_append(pagelist, value, size);
        if (ret)
                goto err_pagelist_free;
        payload_len += size;

        op->xattr.cmp_op = cmp_op;
        op->xattr.cmp_mode = cmp_mode;

        ceph_osd_data_pagelist_init(&op->xattr.osd_data, pagelist);
        op->indata_len = payload_len;
        return 0;

err_pagelist_free:
        ceph_pagelist_release(pagelist);
        return ret;
}
EXPORT_SYMBOL(osd_req_op_xattr_init);

/*
 * @watch_opcode: CEPH_OSD_WATCH_OP_*
 */
static void osd_req_op_watch_init(struct ceph_osd_request *req, int which,
                                  u64 cookie, u8 watch_opcode)
{
        struct ceph_osd_req_op *op;

        op = _osd_req_op_init(req, which, CEPH_OSD_OP_WATCH, 0);
        op->watch.cookie = cookie;
        op->watch.op = watch_opcode;
        op->watch.gen = 0;
}

void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req,
                                unsigned int which,
                                u64 expected_object_size,
                                u64 expected_write_size)
{
        struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
                                                      CEPH_OSD_OP_SETALLOCHINT,
                                                      0);

        op->alloc_hint.expected_object_size = expected_object_size;
        op->alloc_hint.expected_write_size = expected_write_size;

        /*
         * CEPH_OSD_OP_SETALLOCHINT op is advisory and therefore deemed
         * not worth a feature bit.  Set FAILOK per-op flag to make
         * sure older osds don't trip over an unsupported opcode.
         */
        op->flags |= CEPH_OSD_OP_FLAG_FAILOK;
}
EXPORT_SYMBOL(osd_req_op_alloc_hint_init);
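
/*
 * Typical use (as seen in clients such as rbd): a writer that fills
 * whole objects hints with expected_object_size == expected_write_size
 * == object_size, letting the OSD's backing store preallocate
 * accordingly.
 */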

static void ceph_osdc_msg_data_add(struct ceph_msg *msg,
                                struct ceph_osd_data *osd_data)
{
        u64 length = ceph_osd_data_length(osd_data);

        if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
                BUG_ON(length > (u64) SIZE_MAX);
                if (length)
                        ceph_msg_data_add_pages(msg, osd_data->pages,
                                        length, osd_data->alignment, false);
        } else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) {
                BUG_ON(!length);
                ceph_msg_data_add_pagelist(msg, osd_data->pagelist);
#ifdef CONFIG_BLOCK
        } else if (osd_data->type == CEPH_OSD_DATA_TYPE_BIO) {
                ceph_msg_data_add_bio(msg, &osd_data->bio_pos, length);
#endif
        } else if (osd_data->type == CEPH_OSD_DATA_TYPE_BVECS) {
                ceph_msg_data_add_bvecs(msg, &osd_data->bvec_pos);
        } else {
                BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_NONE);
        }
}

static u32 osd_req_encode_op(struct ceph_osd_op *dst,
                             const struct ceph_osd_req_op *src)
{
        switch (src->op) {
        case CEPH_OSD_OP_STAT:
                break;
        case CEPH_OSD_OP_READ:
        case CEPH_OSD_OP_WRITE:
        case CEPH_OSD_OP_WRITEFULL:
        case CEPH_OSD_OP_ZERO:
        case CEPH_OSD_OP_TRUNCATE:
                dst->extent.offset = cpu_to_le64(src->extent.offset);
                dst->extent.length = cpu_to_le64(src->extent.length);
                dst->extent.truncate_size =
                        cpu_to_le64(src->extent.truncate_size);
                dst->extent.truncate_seq =
                        cpu_to_le32(src->extent.truncate_seq);
                break;
        case CEPH_OSD_OP_CALL:
                dst->cls.class_len = src->cls.class_len;
                dst->cls.method_len = src->cls.method_len;
                dst->cls.indata_len = cpu_to_le32(src->cls.indata_len);
                break;
        case CEPH_OSD_OP_WATCH:
                dst->watch.cookie = cpu_to_le64(src->watch.cookie);
                dst->watch.ver = cpu_to_le64(0);
                dst->watch.op = src->watch.op;
                dst->watch.gen = cpu_to_le32(src->watch.gen);
                break;
        case CEPH_OSD_OP_NOTIFY_ACK:
                break;
        case CEPH_OSD_OP_NOTIFY:
                dst->notify.cookie = cpu_to_le64(src->notify.cookie);
                break;
        case CEPH_OSD_OP_LIST_WATCHERS:
                break;
        case CEPH_OSD_OP_SETALLOCHINT:
                dst->alloc_hint.expected_object_size =
                    cpu_to_le64(src->alloc_hint.expected_object_size);
                dst->alloc_hint.expected_write_size =
                    cpu_to_le64(src->alloc_hint.expected_write_size);
                break;
        case CEPH_OSD_OP_SETXATTR:
        case CEPH_OSD_OP_CMPXATTR:
                dst->xattr.name_len = cpu_to_le32(src->xattr.name_len);
                dst->xattr.value_len = cpu_to_le32(src->xattr.value_len);
                dst->xattr.cmp_op = src->xattr.cmp_op;
                dst->xattr.cmp_mode = src->xattr.cmp_mode;
                break;
        case CEPH_OSD_OP_CREATE:
        case CEPH_OSD_OP_DELETE:
                break;
        case CEPH_OSD_OP_COPY_FROM:
                dst->copy_from.snapid = cpu_to_le64(src->copy_from.snapid);
                dst->copy_from.src_version =
                        cpu_to_le64(src->copy_from.src_version);
                dst->copy_from.flags = src->copy_from.flags;
                dst->copy_from.src_fadvise_flags =
                        cpu_to_le32(src->copy_from.src_fadvise_flags);
                break;
        default:
                pr_err("unsupported osd opcode %s\n",
                        ceph_osd_op_name(src->op));
                WARN_ON(1);

                return 0;
        }

        dst->op = cpu_to_le16(src->op);
        dst->flags = cpu_to_le32(src->flags);
        dst->payload_len = cpu_to_le32(src->indata_len);

        return src->indata_len;
}

/*
 * build new request AND message, calculate layout, and adjust file
 * extent as needed.
 *
 * if the file was recently truncated, we include information about its
 * old and new size so that the object can be updated appropriately.  (we
 * avoid synchronously deleting truncated objects because it's slow.)
 */
struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
                                               struct ceph_file_layout *layout,
                                               struct ceph_vino vino,
                                               u64 off, u64 *plen,
                                               unsigned int which, int num_ops,
                                               int opcode, int flags,
                                               struct ceph_snap_context *snapc,
                                               u32 truncate_seq,
                                               u64 truncate_size,
                                               bool use_mempool)
{
        struct ceph_osd_request *req;
        u64 objnum = 0;
        u64 objoff = 0;
        u64 objlen = 0;
        int r;

        BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
               opcode != CEPH_OSD_OP_ZERO && opcode != CEPH_OSD_OP_TRUNCATE &&
               opcode != CEPH_OSD_OP_CREATE && opcode != CEPH_OSD_OP_DELETE);

        req = ceph_osdc_alloc_request(osdc, snapc, num_ops, use_mempool,
                                        GFP_NOFS);
        if (!req) {
                r = -ENOMEM;
                goto fail;
        }

        /* calculate max write size */
        r = calc_layout(layout, off, plen, &objnum, &objoff, &objlen);
        if (r)
                goto fail;

        if (opcode == CEPH_OSD_OP_CREATE || opcode == CEPH_OSD_OP_DELETE) {
                osd_req_op_init(req, which, opcode, 0);
        } else {
                u32 object_size = layout->object_size;
                u32 object_base = off - objoff;
                if (!(truncate_seq == 1 && truncate_size == -1ULL)) {
                        if (truncate_size <= object_base) {
                                truncate_size = 0;
                        } else {
                                truncate_size -= object_base;
                                if (truncate_size > object_size)
                                        truncate_size = object_size;
                        }
                }
                osd_req_op_extent_init(req, which, opcode, objoff, objlen,
                                       truncate_size, truncate_seq);
        }

        req->r_flags = flags;
        req->r_base_oloc.pool = layout->pool_id;
        req->r_base_oloc.pool_ns = ceph_try_get_string(layout->pool_ns);
        ceph_oid_printf(&req->r_base_oid, "%llx.%08llx", vino.ino, objnum);

        req->r_snapid = vino.snap;
        if (flags & CEPH_OSD_FLAG_WRITE)
                req->r_data_offset = off;

        if (num_ops > 1)
                /*
                 * This is a special case for ceph_writepages_start(), but it
                 * also covers ceph_uninline_data().  If more multi-op request
                 * use cases emerge, we will need a separate helper.
                 */
                r = __ceph_osdc_alloc_messages(req, GFP_NOFS, num_ops, 0);
        else
                r = ceph_osdc_alloc_messages(req, GFP_NOFS);
        if (r)
                goto fail;

        return req;

fail:
        ceph_osdc_put_request(req);
        return ERR_PTR(r);
}
EXPORT_SYMBOL(ceph_osdc_new_request);
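
/*
 * Worked example for the truncate adjustment above (4M objects): for
 * a write at off 5M, objoff is 1M and object_base is 4M.  A file-level
 * truncate_size of 6M thus becomes 2M within this object, and would be
 * capped at object_size if it exceeded it.
 */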

/*
 * We keep osd requests in an rbtree, sorted by ->r_tid.
 */
DEFINE_RB_FUNCS(request, struct ceph_osd_request, r_tid, r_node)
DEFINE_RB_FUNCS(request_mc, struct ceph_osd_request, r_tid, r_mc_node)
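/*
 * DEFINE_RB_FUNCS() generates the insert_request()/lookup_request()/
 * erase_request() helpers (and their _mc counterparts) used below.
 */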

/*
 * Call @fn on each OSD request as long as @fn returns 0.
 */
static void for_each_request(struct ceph_osd_client *osdc,
                        int (*fn)(struct ceph_osd_request *req, void *arg),
                        void *arg)
{
        struct rb_node *n, *p;

        for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
                struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);

                for (p = rb_first(&osd->o_requests); p; ) {
                        struct ceph_osd_request *req =
                            rb_entry(p, struct ceph_osd_request, r_node);

                        p = rb_next(p);
                        if (fn(req, arg))
                                return;
                }
        }

        for (p = rb_first(&osdc->homeless_osd.o_requests); p; ) {
                struct ceph_osd_request *req =
                    rb_entry(p, struct ceph_osd_request, r_node);

                p = rb_next(p);
                if (fn(req, arg))
                        return;
        }
}
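
/*
 * Sketch of a for_each_request() callback (compare abort_fn() below):
 * return 0 to keep iterating, non-zero to stop the walk.  The counting
 * example is hypothetical.
 */
#if 0
static int example_count_fn(struct ceph_osd_request *req, void *arg)
{
        int *count = arg;

        (*count)++;
        return 0;       /* keep iterating */
}
#endif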

static bool osd_homeless(struct ceph_osd *osd)
{
        return osd->o_osd == CEPH_HOMELESS_OSD;
}

static bool osd_registered(struct ceph_osd *osd)
{
        verify_osdc_locked(osd->o_osdc);

        return !RB_EMPTY_NODE(&osd->o_node);
}

/*
 * Assumes @osd is zero-initialized.
 */
static void osd_init(struct ceph_osd *osd)
{
        refcount_set(&osd->o_ref, 1);
        RB_CLEAR_NODE(&osd->o_node);
        osd->o_requests = RB_ROOT;
        osd->o_linger_requests = RB_ROOT;
        osd->o_backoff_mappings = RB_ROOT;
        osd->o_backoffs_by_id = RB_ROOT;
        INIT_LIST_HEAD(&osd->o_osd_lru);
        INIT_LIST_HEAD(&osd->o_keepalive_item);
        osd->o_incarnation = 1;
        mutex_init(&osd->lock);
}

static void osd_cleanup(struct ceph_osd *osd)
{
        WARN_ON(!RB_EMPTY_NODE(&osd->o_node));
        WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests));
        WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests));
        WARN_ON(!RB_EMPTY_ROOT(&osd->o_backoff_mappings));
        WARN_ON(!RB_EMPTY_ROOT(&osd->o_backoffs_by_id));
        WARN_ON(!list_empty(&osd->o_osd_lru));
        WARN_ON(!list_empty(&osd->o_keepalive_item));

        if (osd->o_auth.authorizer) {
                WARN_ON(osd_homeless(osd));
                ceph_auth_destroy_authorizer(osd->o_auth.authorizer);
        }
}

/*
 * Track open sessions with osds.
 */
static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
{
        struct ceph_osd *osd;

        WARN_ON(onum == CEPH_HOMELESS_OSD);

        osd = kzalloc(sizeof(*osd), GFP_NOIO | __GFP_NOFAIL);
        osd_init(osd);
        osd->o_osdc = osdc;
        osd->o_osd = onum;

        ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr);

        return osd;
}

static struct ceph_osd *get_osd(struct ceph_osd *osd)
{
        if (refcount_inc_not_zero(&osd->o_ref)) {
                dout("get_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref)-1,
                     refcount_read(&osd->o_ref));
                return osd;
        } else {
                dout("get_osd %p FAIL\n", osd);
                return NULL;
        }
}

static void put_osd(struct ceph_osd *osd)
{
        dout("put_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref),
             refcount_read(&osd->o_ref) - 1);
        if (refcount_dec_and_test(&osd->o_ref)) {
                osd_cleanup(osd);
                kfree(osd);
        }
}

DEFINE_RB_FUNCS(osd, struct ceph_osd, o_osd, o_node)

static void __move_osd_to_lru(struct ceph_osd *osd)
{
        struct ceph_osd_client *osdc = osd->o_osdc;

        dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
        BUG_ON(!list_empty(&osd->o_osd_lru));

        spin_lock(&osdc->osd_lru_lock);
        list_add_tail(&osd->o_osd_lru, &osdc->osd_lru);
        spin_unlock(&osdc->osd_lru_lock);

        osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl;
}

static void maybe_move_osd_to_lru(struct ceph_osd *osd)
{
        if (RB_EMPTY_ROOT(&osd->o_requests) &&
            RB_EMPTY_ROOT(&osd->o_linger_requests))
                __move_osd_to_lru(osd);
}

static void __remove_osd_from_lru(struct ceph_osd *osd)
{
        struct ceph_osd_client *osdc = osd->o_osdc;

        dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

        spin_lock(&osdc->osd_lru_lock);
        if (!list_empty(&osd->o_osd_lru))
                list_del_init(&osd->o_osd_lru);
        spin_unlock(&osdc->osd_lru_lock);
}

/*
 * Close the connection and assign any leftover requests to the
 * homeless session.
 */
static void close_osd(struct ceph_osd *osd)
{
        struct ceph_osd_client *osdc = osd->o_osdc;
        struct rb_node *n;

        verify_osdc_wrlocked(osdc);
        dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

        ceph_con_close(&osd->o_con);

        for (n = rb_first(&osd->o_requests); n; ) {
                struct ceph_osd_request *req =
                    rb_entry(n, struct ceph_osd_request, r_node);

                n = rb_next(n); /* unlink_request() */

                dout(" reassigning req %p tid %llu\n", req, req->r_tid);
                unlink_request(osd, req);
                link_request(&osdc->homeless_osd, req);
        }
        for (n = rb_first(&osd->o_linger_requests); n; ) {
                struct ceph_osd_linger_request *lreq =
                    rb_entry(n, struct ceph_osd_linger_request, node);

                n = rb_next(n); /* unlink_linger() */

                dout(" reassigning lreq %p linger_id %llu\n", lreq,
                     lreq->linger_id);
                unlink_linger(osd, lreq);
                link_linger(&osdc->homeless_osd, lreq);
        }
        clear_backoffs(osd);

        __remove_osd_from_lru(osd);
        erase_osd(&osdc->osds, osd);
        put_osd(osd);
}
1344 /*
1345  * Reset the OSD connection.
1346  */
1347 static int reopen_osd(struct ceph_osd *osd)
1348 {
1349         struct ceph_entity_addr *peer_addr;
1350 
1351         dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
1352 
1353         if (RB_EMPTY_ROOT(&osd->o_requests) &&
1354             RB_EMPTY_ROOT(&osd->o_linger_requests)) {
1355                 close_osd(osd);
1356                 return -ENODEV;
1357         }
1358 
1359         peer_addr = &osd->o_osdc->osdmap->osd_addr[osd->o_osd];
1360         if (!memcmp(peer_addr, &osd->o_con.peer_addr, sizeof(*peer_addr)) &&
1361                         !ceph_con_opened(&osd->o_con)) {
1362                 struct rb_node *n;
1363 
1364                 dout("osd addr hasn't changed and connection never opened, "
1365                      "letting msgr retry\n");
1366                 /* touch each r_stamp for handle_timeout()'s benefit */
1367                 for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {
1368                         struct ceph_osd_request *req =
1369                             rb_entry(n, struct ceph_osd_request, r_node);
1370                         req->r_stamp = jiffies;
1371                 }
1372 
1373                 return -EAGAIN;
1374         }
1375 
1376         ceph_con_close(&osd->o_con);
1377         ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd, peer_addr);
1378         osd->o_incarnation++;
1379 
1380         return 0;
1381 }
1382 
1383 static struct ceph_osd *lookup_create_osd(struct ceph_osd_client *osdc, int o,
1384                                           bool wrlocked)
1385 {
1386         struct ceph_osd *osd;
1387 
1388         if (wrlocked)
1389                 verify_osdc_wrlocked(osdc);
1390         else
1391                 verify_osdc_locked(osdc);
1392 
1393         if (o != CEPH_HOMELESS_OSD)
1394                 osd = lookup_osd(&osdc->osds, o);
1395         else
1396                 osd = &osdc->homeless_osd;
1397         if (!osd) {
1398                 if (!wrlocked)
1399                         return ERR_PTR(-EAGAIN);
1400 
1401                 osd = create_osd(osdc, o);
1402                 insert_osd(&osdc->osds, osd);
1403                 ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd,
1404                               &osdc->osdmap->osd_addr[osd->o_osd]);
1405         }
1406 
1407         dout("%s osdc %p osd%d -> osd %p\n", __func__, osdc, o, osd);
1408         return osd;
1409 }
1410 
1411 /*
1412  * Create request <-> OSD session relation.
1413  *
1414  * @req has to be assigned a tid, @osd may be homeless.
1415  */
1416 static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req)
1417 {
1418         verify_osd_locked(osd);
1419         WARN_ON(!req->r_tid || req->r_osd);
1420         dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
1421              req, req->r_tid);
1422 
1423         if (!osd_homeless(osd))
1424                 __remove_osd_from_lru(osd);
1425         else
1426                 atomic_inc(&osd->o_osdc->num_homeless);
1427 
1428         get_osd(osd);
1429         insert_request(&osd->o_requests, req);
1430         req->r_osd = osd;
1431 }
1432 
1433 static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req)
1434 {
1435         verify_osd_locked(osd);
1436         WARN_ON(req->r_osd != osd);
1437         dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
1438              req, req->r_tid);
1439 
1440         req->r_osd = NULL;
1441         erase_request(&osd->o_requests, req);
1442         put_osd(osd);
1443 
1444         if (!osd_homeless(osd))
1445                 maybe_move_osd_to_lru(osd);
1446         else
1447                 atomic_dec(&osd->o_osdc->num_homeless);
1448 }
1449 
1450 static bool __pool_full(struct ceph_pg_pool_info *pi)
1451 {
1452         return pi->flags & CEPH_POOL_FLAG_FULL;
1453 }
1454 
1455 static bool have_pool_full(struct ceph_osd_client *osdc)
1456 {
1457         struct rb_node *n;
1458 
1459         for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
1460                 struct ceph_pg_pool_info *pi =
1461                     rb_entry(n, struct ceph_pg_pool_info, node);
1462 
1463                 if (__pool_full(pi))
1464                         return true;
1465         }
1466 
1467         return false;
1468 }
1469 
1470 static bool pool_full(struct ceph_osd_client *osdc, s64 pool_id)
1471 {
1472         struct ceph_pg_pool_info *pi;
1473 
1474         pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
1475         if (!pi)
1476                 return false;
1477 
1478         return __pool_full(pi);
1479 }
1480 
1481 /*
1482  * Returns whether a request should be blocked from being sent
1483  * based on the current osdmap and osd_client settings.
1484  */
1485 static bool target_should_be_paused(struct ceph_osd_client *osdc,
1486                                     const struct ceph_osd_request_target *t,
1487                                     struct ceph_pg_pool_info *pi)
1488 {
1489         bool pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
1490         bool pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
1491                        ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
1492                        __pool_full(pi);
1493 
1494         WARN_ON(pi->id != t->target_oloc.pool);
1495         return ((t->flags & CEPH_OSD_FLAG_READ) && pauserd) ||
1496                ((t->flags & CEPH_OSD_FLAG_WRITE) && pausewr) ||
1497                (osdc->osdmap->epoch < osdc->epoch_barrier);
1498 }
1499 
1500 enum calc_target_result {
1501         CALC_TARGET_NO_ACTION = 0,
1502         CALC_TARGET_NEED_RESEND,
1503         CALC_TARGET_POOL_DNE,
1504 };
1505 
1506 static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
1507                                            struct ceph_osd_request_target *t,
1508                                            bool any_change)
1509 {
1510         struct ceph_pg_pool_info *pi;
1511         struct ceph_pg pgid, last_pgid;
1512         struct ceph_osds up, acting;
1513         bool force_resend = false;
1514         bool unpaused = false;
1515         bool legacy_change = false;
1516         bool split = false;
1517         bool sort_bitwise = ceph_osdmap_flag(osdc, CEPH_OSDMAP_SORTBITWISE);
1518         bool recovery_deletes = ceph_osdmap_flag(osdc,
1519                                                  CEPH_OSDMAP_RECOVERY_DELETES);
1520         enum calc_target_result ct_res;
1521 
1522         t->epoch = osdc->osdmap->epoch;
1523         pi = ceph_pg_pool_by_id(osdc->osdmap, t->base_oloc.pool);
1524         if (!pi) {
1525                 t->osd = CEPH_HOMELESS_OSD;
1526                 ct_res = CALC_TARGET_POOL_DNE;
1527                 goto out;
1528         }
1529 
1530         if (osdc->osdmap->epoch == pi->last_force_request_resend) {
1531                 if (t->last_force_resend < pi->last_force_request_resend) {
1532                         t->last_force_resend = pi->last_force_request_resend;
1533                         force_resend = true;
1534                 } else if (t->last_force_resend == 0) {
1535                         force_resend = true;
1536                 }
1537         }
1538 
1539         /* apply tiering */
1540         ceph_oid_copy(&t->target_oid, &t->base_oid);
1541         ceph_oloc_copy(&t->target_oloc, &t->base_oloc);
1542         if ((t->flags & CEPH_OSD_FLAG_IGNORE_OVERLAY) == 0) {
1543                 if (t->flags & CEPH_OSD_FLAG_READ && pi->read_tier >= 0)
1544                         t->target_oloc.pool = pi->read_tier;
1545                 if (t->flags & CEPH_OSD_FLAG_WRITE && pi->write_tier >= 0)
1546                         t->target_oloc.pool = pi->write_tier;
1547 
1548                 pi = ceph_pg_pool_by_id(osdc->osdmap, t->target_oloc.pool);
1549                 if (!pi) {
1550                         t->osd = CEPH_HOMELESS_OSD;
1551                         ct_res = CALC_TARGET_POOL_DNE;
1552                         goto out;
1553                 }
1554         }
1555 
1556         __ceph_object_locator_to_pg(pi, &t->target_oid, &t->target_oloc, &pgid);
1557         last_pgid.pool = pgid.pool;
1558         last_pgid.seed = ceph_stable_mod(pgid.seed, t->pg_num, t->pg_num_mask);
1559 
1560         ceph_pg_to_up_acting_osds(osdc->osdmap, pi, &pgid, &up, &acting);
1561         if (any_change &&
1562             ceph_is_new_interval(&t->acting,
1563                                  &acting,
1564                                  &t->up,
1565                                  &up,
1566                                  t->size,
1567                                  pi->size,
1568                                  t->min_size,
1569                                  pi->min_size,
1570                                  t->pg_num,
1571                                  pi->pg_num,
1572                                  t->sort_bitwise,
1573                                  sort_bitwise,
1574                                  t->recovery_deletes,
1575                                  recovery_deletes,
1576                                  &last_pgid))
1577                 force_resend = true;
1578 
1579         if (t->paused && !target_should_be_paused(osdc, t, pi)) {
1580                 t->paused = false;
1581                 unpaused = true;
1582         }
1583         legacy_change = ceph_pg_compare(&t->pgid, &pgid) ||
1584                         ceph_osds_changed(&t->acting, &acting, any_change);
1585         if (t->pg_num)
1586                 split = ceph_pg_is_split(&last_pgid, t->pg_num, pi->pg_num);
1587 
1588         if (legacy_change || force_resend || split) {
1589                 t->pgid = pgid; /* struct */
1590                 ceph_pg_to_primary_shard(osdc->osdmap, pi, &pgid, &t->spgid);
1591                 ceph_osds_copy(&t->acting, &acting);
1592                 ceph_osds_copy(&t->up, &up);
1593                 t->size = pi->size;
1594                 t->min_size = pi->min_size;
1595                 t->pg_num = pi->pg_num;
1596                 t->pg_num_mask = pi->pg_num_mask;
1597                 t->sort_bitwise = sort_bitwise;
1598                 t->recovery_deletes = recovery_deletes;
1599 
1600                 t->osd = acting.primary;
1601         }
1602 
1603         if (unpaused || legacy_change || force_resend || split)
1604                 ct_res = CALC_TARGET_NEED_RESEND;
1605         else
1606                 ct_res = CALC_TARGET_NO_ACTION;
1607 
1608 out:
1609         dout("%s t %p -> %d%d%d%d ct_res %d osd%d\n", __func__, t, unpaused,
1610              legacy_change, force_resend, split, ct_res, t->osd);
1611         return ct_res;
1612 }
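/*
 * Editorial note: the last_pgid computation above folds the 32-bit
 * placement seed into pg_num buckets with ceph_stable_mod(), which is
 * stable across PG splits.  A sketch of the idea, assuming pg_num_mask is
 * the next power of two above pg_num, minus one (illustrative; see the
 * ceph osdmap headers for the authoritative definition):
 */
#if 0	/* illustrative sketch, not compiled */
static int stable_mod_sketch(int x, int b, int bmask)
{
        if ((x & bmask) < b)
                return x & bmask;        /* bucket exists in [0, b) */
        else
                return x & (bmask >> 1); /* fall back to parent bucket */
}
#endif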
1613 
1614 static struct ceph_spg_mapping *alloc_spg_mapping(void)
1615 {
1616         struct ceph_spg_mapping *spg;
1617 
1618         spg = kmalloc(sizeof(*spg), GFP_NOIO);
1619         if (!spg)
1620                 return NULL;
1621 
1622         RB_CLEAR_NODE(&spg->node);
1623         spg->backoffs = RB_ROOT;
1624         return spg;
1625 }
1626 
1627 static void free_spg_mapping(struct ceph_spg_mapping *spg)
1628 {
1629         WARN_ON(!RB_EMPTY_NODE(&spg->node));
1630         WARN_ON(!RB_EMPTY_ROOT(&spg->backoffs));
1631 
1632         kfree(spg);
1633 }
1634 
1635 /*
1636  * rbtree of ceph_spg_mapping for handling map<spg_t, ...>, similar to
1637  * ceph_pg_mapping.  Used to track OSD backoffs -- a backoff [range] is
1638  * defined only within a specific spgid; it does not pass anything to
1639  * children on split, or to another primary.
1640  */
1641 DEFINE_RB_FUNCS2(spg_mapping, struct ceph_spg_mapping, spgid, ceph_spg_compare,
1642                  RB_BYPTR, const struct ceph_spg *, node)
1643 
1644 static u64 hoid_get_bitwise_key(const struct ceph_hobject_id *hoid)
1645 {
1646         return hoid->is_max ? 0x100000000ull : hoid->hash_reverse_bits;
1647 }
1648 
1649 static void hoid_get_effective_key(const struct ceph_hobject_id *hoid,
1650                                    void **pkey, size_t *pkey_len)
1651 {
1652         if (hoid->key_len) {
1653                 *pkey = hoid->key;
1654                 *pkey_len = hoid->key_len;
1655         } else {
1656                 *pkey = hoid->oid;
1657                 *pkey_len = hoid->oid_len;
1658         }
1659 }
1660 
1661 static int compare_names(const void *name1, size_t name1_len,
1662                          const void *name2, size_t name2_len)
1663 {
1664         int ret;
1665 
1666         ret = memcmp(name1, name2, min(name1_len, name2_len));
1667         if (!ret) {
1668                 if (name1_len < name2_len)
1669                         ret = -1;
1670                 else if (name1_len > name2_len)
1671                         ret = 1;
1672         }
1673         return ret;
1674 }
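/*
 * Editorial note: compare_names() is memcmp() ordering with length as the
 * tiebreaker, so a prefix sorts before any of its extensions.  For example
 * (illustrative values):
 *
 *     compare_names("ab", 2, "abc", 3)  < 0   ("ab" is a proper prefix)
 *     compare_names("ab", 2, "aa", 2)   > 0   (differs at byte 1)
 *     compare_names("ab", 2, "ab", 2)  == 0
 */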
1675 
1676 static int hoid_compare(const struct ceph_hobject_id *lhs,
1677                         const struct ceph_hobject_id *rhs)
1678 {
1679         void *effective_key1, *effective_key2;
1680         size_t effective_key1_len, effective_key2_len;
1681         int ret;
1682 
1683         if (lhs->is_max < rhs->is_max)
1684                 return -1;
1685         if (lhs->is_max > rhs->is_max)
1686                 return 1;
1687 
1688         if (lhs->pool < rhs->pool)
1689                 return -1;
1690         if (lhs->pool > rhs->pool)
1691                 return 1;
1692 
1693         if (hoid_get_bitwise_key(lhs) < hoid_get_bitwise_key(rhs))
1694                 return -1;
1695         if (hoid_get_bitwise_key(lhs) > hoid_get_bitwise_key(rhs))
1696                 return 1;
1697 
1698         ret = compare_names(lhs->nspace, lhs->nspace_len,
1699                             rhs->nspace, rhs->nspace_len);
1700         if (ret)
1701                 return ret;
1702 
1703         hoid_get_effective_key(lhs, &effective_key1, &effective_key1_len);
1704         hoid_get_effective_key(rhs, &effective_key2, &effective_key2_len);
1705         ret = compare_names(effective_key1, effective_key1_len,
1706                             effective_key2, effective_key2_len);
1707         if (ret)
1708                 return ret;
1709 
1710         ret = compare_names(lhs->oid, lhs->oid_len, rhs->oid, rhs->oid_len);
1711         if (ret)
1712                 return ret;
1713 
1714         if (lhs->snapid < rhs->snapid)
1715                 return -1;
1716         if (lhs->snapid > rhs->snapid)
1717                 return 1;
1718 
1719         return 0;
1720 }
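/*
 * Editorial note: hoid_compare() defines the bitwise hobject_t sort order
 * by comparing, in sequence: is_max, pool, the reversed hash bits
 * (hoid_get_bitwise_key()), namespace, effective key (key if set, else
 * oid), oid, and finally snapid.  The first unequal field decides;
 * equality of all fields yields 0.
 */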
1721 
1722 /*
1723  * For decoding ->begin and ->end of MOSDBackoff only -- no MIN/MAX
1724  * compat stuff here.
1725  *
1726  * Assumes @hoid is zero-initialized.
1727  */
1728 static int decode_hoid(void **p, void *end, struct ceph_hobject_id *hoid)
1729 {
1730         u8 struct_v;
1731         u32 struct_len;
1732         int ret;
1733 
1734         ret = ceph_start_decoding(p, end, 4, "hobject_t", &struct_v,
1735                                   &struct_len);
1736         if (ret)
1737                 return ret;
1738 
1739         if (struct_v < 4) {
1740                 pr_err("got struct_v %d < 4 of hobject_t\n", struct_v);
1741                 goto e_inval;
1742         }
1743 
1744         hoid->key = ceph_extract_encoded_string(p, end, &hoid->key_len,
1745                                                 GFP_NOIO);
1746         if (IS_ERR(hoid->key)) {
1747                 ret = PTR_ERR(hoid->key);
1748                 hoid->key = NULL;
1749                 return ret;
1750         }
1751 
1752         hoid->oid = ceph_extract_encoded_string(p, end, &hoid->oid_len,
1753                                                 GFP_NOIO);
1754         if (IS_ERR(hoid->oid)) {
1755                 ret = PTR_ERR(hoid->oid);
1756                 hoid->oid = NULL;
1757                 return ret;
1758         }
1759 
1760         ceph_decode_64_safe(p, end, hoid->snapid, e_inval);
1761         ceph_decode_32_safe(p, end, hoid->hash, e_inval);
1762         ceph_decode_8_safe(p, end, hoid->is_max, e_inval);
1763 
1764         hoid->nspace = ceph_extract_encoded_string(p, end, &hoid->nspace_len,
1765                                                    GFP_NOIO);
1766         if (IS_ERR(hoid->nspace)) {
1767                 ret = PTR_ERR(hoid->nspace);
1768                 hoid->nspace = NULL;
1769                 return ret;
1770         }
1771 
1772         ceph_decode_64_safe(p, end, hoid->pool, e_inval);
1773 
1774         ceph_hoid_build_hash_cache(hoid);
1775         return 0;
1776 
1777 e_inval:
1778         return -EINVAL;
1779 }
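/*
 * Editorial note: decode_hoid() leans on the ceph_decode_*_safe() macros,
 * which bounds-check against @end and jump to the supplied label on a
 * short buffer.  A minimal sketch of the pattern for a hypothetical
 * two-field payload (decode_pair_sketch is illustrative, not part of this
 * file):
 */
#if 0	/* illustrative sketch, not compiled */
static int decode_pair_sketch(void **p, void *end, u32 *a, u64 *b)
{
        ceph_decode_32_safe(p, end, *a, e_inval);  /* bounds-checked u32 */
        ceph_decode_64_safe(p, end, *b, e_inval);  /* bounds-checked u64 */
        return 0;

e_inval:
        return -EINVAL;
}
#endif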
1780 
1781 static int hoid_encoding_size(const struct ceph_hobject_id *hoid)
1782 {
1783         return 8 + 4 + 1 + 8 + /* snapid, hash, is_max, pool */
1784                4 + hoid->key_len + 4 + hoid->oid_len + 4 + hoid->nspace_len;
1785 }
1786 
1787 static void encode_hoid(void **p, void *end, const struct ceph_hobject_id *hoid)
1788 {
1789         ceph_start_encoding(p, 4, 3, hoid_encoding_size(hoid));
1790         ceph_encode_string(p, end, hoid->key, hoid->key_len);
1791         ceph_encode_string(p, end, hoid->oid, hoid->oid_len);
1792         ceph_encode_64(p, hoid->snapid);
1793         ceph_encode_32(p, hoid->hash);
1794         ceph_encode_8(p, hoid->is_max);
1795         ceph_encode_string(p, end, hoid->nspace, hoid->nspace_len);
1796         ceph_encode_64(p, hoid->pool);
1797 }
1798 
1799 static void free_hoid(struct ceph_hobject_id *hoid)
1800 {
1801         if (hoid) {
1802                 kfree(hoid->key);
1803                 kfree(hoid->oid);
1804                 kfree(hoid->nspace);
1805                 kfree(hoid);
1806         }
1807 }
1808 
1809 static struct ceph_osd_backoff *alloc_backoff(void)
1810 {
1811         struct ceph_osd_backoff *backoff;
1812 
1813         backoff = kzalloc(sizeof(*backoff), GFP_NOIO);
1814         if (!backoff)
1815                 return NULL;
1816 
1817         RB_CLEAR_NODE(&backoff->spg_node);
1818         RB_CLEAR_NODE(&backoff->id_node);
1819         return backoff;
1820 }
1821 
1822 static void free_backoff(struct ceph_osd_backoff *backoff)
1823 {
1824         WARN_ON(!RB_EMPTY_NODE(&backoff->spg_node));
1825         WARN_ON(!RB_EMPTY_NODE(&backoff->id_node));
1826 
1827         free_hoid(backoff->begin);
1828         free_hoid(backoff->end);
1829         kfree(backoff);
1830 }
1831 
1832 /*
1833  * Within a specific spgid, backoffs are managed by ->begin hoid.
1834  */
1835 DEFINE_RB_INSDEL_FUNCS2(backoff, struct ceph_osd_backoff, begin, hoid_compare,
1836                         RB_BYVAL, spg_node);
1837 
1838 static struct ceph_osd_backoff *lookup_containing_backoff(struct rb_root *root,
1839                                             const struct ceph_hobject_id *hoid)
1840 {
1841         struct rb_node *n = root->rb_node;
1842 
1843         while (n) {
1844                 struct ceph_osd_backoff *cur =
1845                     rb_entry(n, struct ceph_osd_backoff, spg_node);
1846                 int cmp;
1847 
1848                 cmp = hoid_compare(hoid, cur->begin);
1849                 if (cmp < 0) {
1850                         n = n->rb_left;
1851                 } else if (cmp > 0) {
1852                         if (hoid_compare(hoid, cur->end) < 0)
1853                                 return cur;
1854 
1855                         n = n->rb_right;
1856                 } else {
1857                         return cur;
1858                 }
1859         }
1860 
1861         return NULL;
1862 }
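/*
 * Editorial note: lookup_containing_backoff() assumes the backoffs in
 * @root are non-overlapping and keyed by ->begin.  When @hoid sorts after
 * some node's ->begin, the only interval that can still contain it is that
 * node's own [begin, end) -- hence the single extra hoid_compare() against
 * cur->end before descending right.  The same logic over plain integers
 * (illustrative only):
 */
#if 0	/* illustrative sketch, not compiled */
struct range { int begin, end; };	/* sorted by .begin, disjoint */

static const struct range *containing(const struct range *r, int n, int x)
{
        int lo = 0, hi = n - 1;

        while (lo <= hi) {
                int mid = lo + (hi - lo) / 2;

                if (x < r[mid].begin)
                        hi = mid - 1;           /* go left */
                else if (x >= r[mid].end)
                        lo = mid + 1;           /* past this range, go right */
                else
                        return &r[mid];         /* begin <= x < end */
        }
        return NULL;
}
#endif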
1863 
1864 /*
1865  * Each backoff has a unique id within its OSD session.
1866  */
1867 DEFINE_RB_FUNCS(backoff_by_id, struct ceph_osd_backoff, id, id_node)
1868 
1869 static void clear_backoffs(struct ceph_osd *osd)
1870 {
1871         while (!RB_EMPTY_ROOT(&osd->o_backoff_mappings)) {
1872                 struct ceph_spg_mapping *spg =
1873                     rb_entry(rb_first(&osd->o_backoff_mappings),
1874                              struct ceph_spg_mapping, node);
1875 
1876                 while (!RB_EMPTY_ROOT(&spg->backoffs)) {
1877                         struct ceph_osd_backoff *backoff =
1878                             rb_entry(rb_first(&spg->backoffs),
1879                                      struct ceph_osd_backoff, spg_node);
1880 
1881                         erase_backoff(&spg->backoffs, backoff);
1882                         erase_backoff_by_id(&osd->o_backoffs_by_id, backoff);
1883                         free_backoff(backoff);
1884                 }
1885                 erase_spg_mapping(&osd->o_backoff_mappings, spg);
1886                 free_spg_mapping(spg);
1887         }
1888 }
1889 
1890 /*
1891  * Set up a temporary, non-owning view into @t.
1892  */
1893 static void hoid_fill_from_target(struct ceph_hobject_id *hoid,
1894                                   const struct ceph_osd_request_target *t)
1895 {
1896         hoid->key = NULL;
1897         hoid->key_len = 0;
1898         hoid->oid = t->target_oid.name;
1899         hoid->oid_len = t->target_oid.name_len;
1900         hoid->snapid = CEPH_NOSNAP;
1901         hoid->hash = t->pgid.seed;
1902         hoid->is_max = false;
1903         if (t->target_oloc.pool_ns) {
1904                 hoid->nspace = t->target_oloc.pool_ns->str;
1905                 hoid->nspace_len = t->target_oloc.pool_ns->len;
1906         } else {
1907                 hoid->nspace = NULL;
1908                 hoid->nspace_len = 0;
1909         }
1910         hoid->pool = t->target_oloc.pool;
1911         ceph_hoid_build_hash_cache(hoid);
1912 }
1913 
1914 static bool should_plug_request(struct ceph_osd_request *req)
1915 {
1916         struct ceph_osd *osd = req->r_osd;
1917         struct ceph_spg_mapping *spg;
1918         struct ceph_osd_backoff *backoff;
1919         struct ceph_hobject_id hoid;
1920 
1921         spg = lookup_spg_mapping(&osd->o_backoff_mappings, &req->r_t.spgid);
1922         if (!spg)
1923                 return false;
1924 
1925         hoid_fill_from_target(&hoid, &req->r_t);
1926         backoff = lookup_containing_backoff(&spg->backoffs, &hoid);
1927         if (!backoff)
1928                 return false;
1929 
1930         dout("%s req %p tid %llu backoff osd%d spgid %llu.%xs%d id %llu\n",
1931              __func__, req, req->r_tid, osd->o_osd, backoff->spgid.pgid.pool,
1932              backoff->spgid.pgid.seed, backoff->spgid.shard, backoff->id);
1933         return true;
1934 }
1935 
1936 /*
1937  * Keep get_num_data_items() in sync with this function.
1938  */
1939 static void setup_request_data(struct ceph_osd_request *req)
1940 {
1941         struct ceph_msg *request_msg = req->r_request;
1942         struct ceph_msg *reply_msg = req->r_reply;
1943         struct ceph_osd_req_op *op;
1944 
1945         if (request_msg->num_data_items || reply_msg->num_data_items)
1946                 return;
1947 
1948         WARN_ON(request_msg->data_length || reply_msg->data_length);
1949         for (op = req->r_ops; op != &req->r_ops[req->r_num_ops]; op++) {
1950                 switch (op->op) {
1951                 /* request */
1952                 case CEPH_OSD_OP_WRITE:
1953                 case CEPH_OSD_OP_WRITEFULL:
1954                         WARN_ON(op->indata_len != op->extent.length);
1955                         ceph_osdc_msg_data_add(request_msg,
1956                                                &op->extent.osd_data);
1957                         break;
1958                 case CEPH_OSD_OP_SETXATTR:
1959                 case CEPH_OSD_OP_CMPXATTR:
1960                         WARN_ON(op->indata_len != op->xattr.name_len +
1961                                                   op->xattr.value_len);
1962                         ceph_osdc_msg_data_add(request_msg,
1963                                                &op->xattr.osd_data);
1964                         break;
1965                 case CEPH_OSD_OP_NOTIFY_ACK:
1966                         ceph_osdc_msg_data_add(request_msg,
1967                                                &op->notify_ack.request_data);
1968                         break;
1969                 case CEPH_OSD_OP_COPY_FROM:
1970                         ceph_osdc_msg_data_add(request_msg,
1971                                                &op->copy_from.osd_data);
1972                         break;
1973 
1974                 /* reply */
1975                 case CEPH_OSD_OP_STAT:
1976                         ceph_osdc_msg_data_add(reply_msg,
1977                                                &op->raw_data_in);
1978                         break;
1979                 case CEPH_OSD_OP_READ:
1980                         ceph_osdc_msg_data_add(reply_msg,
1981                                                &op->extent.osd_data);
1982                         break;
1983                 case CEPH_OSD_OP_LIST_WATCHERS:
1984                         ceph_osdc_msg_data_add(reply_msg,
1985                                                &op->list_watchers.response_data);
1986                         break;
1987 
1988                 /* both */
1989                 case CEPH_OSD_OP_CALL:
1990                         WARN_ON(op->indata_len != op->cls.class_len +
1991                                                   op->cls.method_len +
1992                                                   op->cls.indata_len);
1993                         ceph_osdc_msg_data_add(request_msg,
1994                                                &op->cls.request_info);
1995                         /* optional, can be NONE */
1996                         ceph_osdc_msg_data_add(request_msg,
1997                                                &op->cls.request_data);
1998                         /* optional, can be NONE */
1999                         ceph_osdc_msg_data_add(reply_msg,
2000                                                &op->cls.response_data);
2001                         break;
2002                 case CEPH_OSD_OP_NOTIFY:
2003                         ceph_osdc_msg_data_add(request_msg,
2004                                                &op->notify.request_data);
2005                         ceph_osdc_msg_data_add(reply_msg,
2006                                                &op->notify.response_data);
2007                         break;
2008                 }
2009         }
2010 }
2011 
2012 static void encode_pgid(void **p, const struct ceph_pg *pgid)
2013 {
2014         ceph_encode_8(p, 1);
2015         ceph_encode_64(p, pgid->pool);
2016         ceph_encode_32(p, pgid->seed);
2017         ceph_encode_32(p, -1); /* preferred */
2018 }
2019 
2020 static void encode_spgid(void **p, const struct ceph_spg *spgid)
2021 {
2022         ceph_start_encoding(p, 1, 1, CEPH_PGID_ENCODING_LEN + 1);
2023         encode_pgid(p, &spgid->pgid);
2024         ceph_encode_8(p, spgid->shard);
2025 }
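/*
 * Editorial note: assuming CEPH_ENCODING_START_BLK_LEN covers the
 * struct_v/struct_compat/struct_len preamble emitted by
 * ceph_start_encoding(), the spg_t encoding above is, byte for byte: that
 * preamble, the raw pgid (version byte, 64-bit pool, 32-bit seed, 32-bit
 * "preferred" always -1), and the trailing 8-bit shard.
 */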
2026 
2027 static void encode_oloc(void **p, void *end,
2028                         const struct ceph_object_locator *oloc)
2029 {
2030         ceph_start_encoding(p, 5, 4, ceph_oloc_encoding_size(oloc));
2031         ceph_encode_64(p, oloc->pool);
2032         ceph_encode_32(p, -1); /* preferred */
2033         ceph_encode_32(p, 0);  /* key len */
2034         if (oloc->pool_ns)
2035                 ceph_encode_string(p, end, oloc->pool_ns->str,
2036                                    oloc->pool_ns->len);
2037         else
2038                 ceph_encode_32(p, 0);
2039 }
2040 
2041 static void encode_request_partial(struct ceph_osd_request *req,
2042                                    struct ceph_msg *msg)
2043 {
2044         void *p = msg->front.iov_base;
2045         void *const end = p + msg->front_alloc_len;
2046         u32 data_len = 0;
2047         int i;
2048 
2049         if (req->r_flags & CEPH_OSD_FLAG_WRITE) {
2050                 /* snapshots aren't writeable */
2051                 WARN_ON(req->r_snapid != CEPH_NOSNAP);
2052         } else {
2053                 WARN_ON(req->r_mtime.tv_sec || req->r_mtime.tv_nsec ||
2054                         req->r_data_offset || req->r_snapc);
2055         }
2056 
2057         setup_request_data(req);
2058 
2059         encode_spgid(&p, &req->r_t.spgid); /* actual spg */
2060         ceph_encode_32(&p, req->r_t.pgid.seed); /* raw hash */
2061         ceph_encode_32(&p, req->r_osdc->osdmap->epoch);
2062         ceph_encode_32(&p, req->r_flags);
2063 
2064         /* reqid */
2065         ceph_start_encoding(&p, 2, 2, sizeof(struct ceph_osd_reqid));
2066         memset(p, 0, sizeof(struct ceph_osd_reqid));
2067         p += sizeof(struct ceph_osd_reqid);
2068 
2069         /* trace */
2070         memset(p, 0, sizeof(struct ceph_blkin_trace_info));
2071         p += sizeof(struct ceph_blkin_trace_info);
2072 
2073         ceph_encode_32(&p, 0); /* client_inc, always 0 */
2074         ceph_encode_timespec64(p, &req->r_mtime);
2075         p += sizeof(struct ceph_timespec);
2076 
2077         encode_oloc(&p, end, &req->r_t.target_oloc);
2078         ceph_encode_string(&p, end, req->r_t.target_oid.name,
2079                            req->r_t.target_oid.name_len);
2080 
2081         /* ops, can imply data */
2082         ceph_encode_16(&p, req->r_num_ops);
2083         for (i = 0; i < req->r_num_ops; i++) {
2084                 data_len += osd_req_encode_op(p, &req->r_ops[i]);
2085                 p += sizeof(struct ceph_osd_op);
2086         }
2087 
2088         ceph_encode_64(&p, req->r_snapid); /* snapid */
2089         if (req->r_snapc) {
2090                 ceph_encode_64(&p, req->r_snapc->seq);
2091                 ceph_encode_32(&p, req->r_snapc->num_snaps);
2092                 for (i = 0; i < req->r_snapc->num_snaps; i++)
2093                         ceph_encode_64(&p, req->r_snapc->snaps[i]);
2094         } else {
2095                 ceph_encode_64(&p, 0); /* snap_seq */
2096                 ceph_encode_32(&p, 0); /* snaps len */
2097         }
2098 
2099         ceph_encode_32(&p, req->r_attempts); /* retry_attempt */
2100         BUG_ON(p > end - 8); /* space for features */
2101 
2102         msg->hdr.version = cpu_to_le16(8); /* MOSDOp v8 */
2103         /* front_len is finalized in encode_request_finish() */
2104         msg->front.iov_len = p - msg->front.iov_base;
2105         msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
2106         msg->hdr.data_len = cpu_to_le32(data_len);
2107         /*
2108          * The header "data_off" is a hint to the receiver allowing it
2109          * to align received data into its buffers such that there's no
2110          * need to re-copy it before writing it to disk (direct I/O).
2111          */
2112         msg->hdr.data_off = cpu_to_le16(req->r_data_offset);
2113 
2114         dout("%s req %p msg %p oid %s oid_len %d\n", __func__, req, msg,
2115              req->r_t.target_oid.name, req->r_t.target_oid.name_len);
2116 }
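/*
 * Editorial note: encode_request_partial() deliberately stops short.  The
 * BUG_ON(p > end - 8) above reserves room for a trailing 64-bit features
 * word, and the front is only finalized once the peer is known:
 * encode_request_finish() either appends the peer features (luminous OSDs)
 * or reencodes the whole front as MOSDOp v4 for older peers.
 */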
2117 
2118 static void encode_request_finish(struct ceph_msg *msg)
2119 {
2120         void *p = msg->front.iov_base;
2121         void *const partial_end = p + msg->front.iov_len;
2122         void *const end = p + msg->front_alloc_len;
2123 
2124         if (CEPH_HAVE_FEATURE(msg->con->peer_features, RESEND_ON_SPLIT)) {
2125                 /* luminous OSD -- encode features and be done */
2126                 p = partial_end;
2127                 ceph_encode_64(&p, msg->con->peer_features);
2128         } else {
2129                 struct {
2130                         char spgid[CEPH_ENCODING_START_BLK_LEN +
2131                                    CEPH_PGID_ENCODING_LEN + 1];
2132                         __le32 hash;
2133                         __le32 epoch;
2134                         __le32 flags;
2135                         char reqid[CEPH_ENCODING_START_BLK_LEN +
2136                                    sizeof(struct ceph_osd_reqid)];
2137                         char trace[sizeof(struct ceph_blkin_trace_info)];
2138                         __le32 client_inc;
2139                         struct ceph_timespec mtime;
2140                 } __packed head;
2141                 struct ceph_pg pgid;
2142                 void *oloc, *oid, *tail;
2143                 int oloc_len, oid_len, tail_len;
2144                 int len;
2145 
2146                 /*
2147                  * Pre-luminous OSD -- reencode v8 into v4 using @head
2148                  * as a temporary buffer.  Encode the raw PG; the rest
2149                  * is just a matter of moving oloc, oid and tail blobs
2150                  * around.
2151                  */
2152                 memcpy(&head, p, sizeof(head));
2153                 p += sizeof(head);
2154 
2155                 oloc = p;
2156                 p += CEPH_ENCODING_START_BLK_LEN;
2157                 pgid.pool = ceph_decode_64(&p);
2158                 p += 4 + 4; /* preferred, key len */
2159                 len = ceph_decode_32(&p);
2160                 p += len;   /* nspace */
2161                 oloc_len = p - oloc;
2162 
2163                 oid = p;
2164                 len = ceph_decode_32(&p);
2165                 p += len;
2166                 oid_len = p - oid;
2167 
2168                 tail = p;
2169                 tail_len = partial_end - p;
2170 
2171                 p = msg->front.iov_base;
2172                 ceph_encode_copy(&p, &head.client_inc, sizeof(head.client_inc));
2173                 ceph_encode_copy(&p, &head.epoch, sizeof(head.epoch));
2174                 ceph_encode_copy(&p, &head.flags, sizeof(head.flags));
2175                 ceph_encode_copy(&p, &head.mtime, sizeof(head.mtime));
2176 
2177                 /* reassert_version */
2178                 memset(p, 0, sizeof(struct ceph_eversion));
2179                 p += sizeof(struct ceph_eversion);
2180 
2181                 BUG_ON(p >= oloc);
2182                 memmove(p, oloc, oloc_len);
2183                 p += oloc_len;
2184 
2185                 pgid.seed = le32_to_cpu(head.hash);
2186                 encode_pgid(&p, &pgid); /* raw pg */
2187 
2188                 BUG_ON(p >= oid);
2189                 memmove(p, oid, oid_len);
2190                 p += oid_len;
2191 
2192                 /* tail -- ops, snapid, snapc, retry_attempt */
2193                 BUG_ON(p >= tail);
2194                 memmove(p, tail, tail_len);
2195                 p += tail_len;
2196 
2197                 msg->hdr.version = cpu_to_le16(4); /* MOSDOp v4 */
2198         }
2199 
2200         BUG_ON(p > end);
2201         msg->front.iov_len = p - msg->front.iov_base;
2202         msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
2203 
2204         dout("%s msg %p tid %llu %u+%u+%u v%d\n", __func__, msg,
2205              le64_to_cpu(msg->hdr.tid), le32_to_cpu(msg->hdr.front_len),
2206              le32_to_cpu(msg->hdr.middle_len), le32_to_cpu(msg->hdr.data_len),
2207              le16_to_cpu(msg->hdr.version));
2208 }
2209 
2210 /*
2211  * @req has to be assigned a tid and registered.
2212  */
2213 static void send_request(struct ceph_osd_request *req)
2214 {
2215         struct ceph_osd *osd = req->r_osd;
2216 
2217         verify_osd_locked(osd);
2218         WARN_ON(osd->o_osd != req->r_t.osd);
2219 
2220         /* backoff? */
2221         if (should_plug_request(req))
2222                 return;
2223 
2224         /*
2225          * We may have a previously queued request message hanging
2226          * around.  Cancel it to avoid corrupting the msgr.
2227          */
2228         if (req->r_sent)
2229                 ceph_msg_revoke(req->r_request);
2230 
2231         req->r_flags |= CEPH_OSD_FLAG_KNOWN_REDIR;
2232         if (req->r_attempts)
2233                 req->r_flags |= CEPH_OSD_FLAG_RETRY;
2234         else
2235                 WARN_ON(req->r_flags & CEPH_OSD_FLAG_RETRY);
2236 
2237         encode_request_partial(req, req->r_request);
2238 
2239         dout("%s req %p tid %llu to pgid %llu.%x spgid %llu.%xs%d osd%d e%u flags 0x%x attempt %d\n",
2240              __func__, req, req->r_tid, req->r_t.pgid.pool, req->r_t.pgid.seed,
2241              req->r_t.spgid.pgid.pool, req->r_t.spgid.pgid.seed,
2242              req->r_t.spgid.shard, osd->o_osd, req->r_t.epoch, req->r_flags,
2243              req->r_attempts);
2244 
2245         req->r_t.paused = false;
2246         req->r_stamp = jiffies;
2247         req->r_attempts++;
2248 
2249         req->r_sent = osd->o_incarnation;
2250         req->r_request->hdr.tid = cpu_to_le64(req->r_tid);
2251         ceph_con_send(&osd->o_con, ceph_msg_get(req->r_request));
2252 }
2253 
2254 static void maybe_request_map(struct ceph_osd_client *osdc)
2255 {
2256         bool continuous = false;
2257 
2258         verify_osdc_locked(osdc);
2259         WARN_ON(!osdc->osdmap->epoch);
2260 
2261         if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
2262             ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD) ||
2263             ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
2264                 dout("%s osdc %p continuous\n", __func__, osdc);
2265                 continuous = true;
2266         } else {
2267                 dout("%s osdc %p onetime\n", __func__, osdc);
2268         }
2269 
2270         if (ceph_monc_want_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
2271                                osdc->osdmap->epoch + 1, continuous))
2272                 ceph_monc_renew_subs(&osdc->client->monc);
2273 }
2274 
2275 static void complete_request(struct ceph_osd_request *req, int err);
2276 static void send_map_check(struct ceph_osd_request *req);
2277 
2278 static void __submit_request(struct ceph_osd_request *req, bool wrlocked)
2279 {
2280         struct ceph_osd_client *osdc = req->r_osdc;
2281         struct ceph_osd *osd;
2282         enum calc_target_result ct_res;
2283         int err = 0;
2284         bool need_send = false;
2285         bool promoted = false;
2286 
2287         WARN_ON(req->r_tid);
2288         dout("%s req %p wrlocked %d\n", __func__, req, wrlocked);
2289 
2290 again:
2291         ct_res = calc_target(osdc, &req->r_t, false);
2292         if (ct_res == CALC_TARGET_POOL_DNE && !wrlocked)
2293                 goto promote;
2294 
2295         osd = lookup_create_osd(osdc, req->r_t.osd, wrlocked);
2296         if (IS_ERR(osd)) {
2297                 WARN_ON(PTR_ERR(osd) != -EAGAIN || wrlocked);
2298                 goto promote;
2299         }
2300 
2301         if (osdc->abort_err) {
2302                 dout("req %p abort_err %d\n", req, osdc->abort_err);
2303                 err = osdc->abort_err;
2304         } else if (osdc->osdmap->epoch < osdc->epoch_barrier) {
2305                 dout("req %p epoch %u barrier %u\n", req, osdc->osdmap->epoch,
2306                      osdc->epoch_barrier);
2307                 req->r_t.paused = true;
2308                 maybe_request_map(osdc);
2309         } else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
2310                    ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
2311                 dout("req %p pausewr\n", req);
2312                 req->r_t.paused = true;
2313                 maybe_request_map(osdc);
2314         } else if ((req->r_flags & CEPH_OSD_FLAG_READ) &&
2315                    ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
2316                 dout("req %p pauserd\n", req);
2317                 req->r_t.paused = true;
2318                 maybe_request_map(osdc);
2319         } else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
2320                    !(req->r_flags & (CEPH_OSD_FLAG_FULL_TRY |
2321                                      CEPH_OSD_FLAG_FULL_FORCE)) &&
2322                    (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
2323                     pool_full(osdc, req->r_t.base_oloc.pool))) {
2324                 dout("req %p full/pool_full\n", req);
2325                 if (ceph_test_opt(osdc->client, ABORT_ON_FULL)) {
2326                         err = -ENOSPC;
2327                 } else {
2328                         pr_warn_ratelimited("FULL or reached pool quota\n");
2329                         req->r_t.paused = true;
2330                         maybe_request_map(osdc);
2331                 }
2332         } else if (!osd_homeless(osd)) {
2333                 need_send = true;
2334         } else {
2335                 maybe_request_map(osdc);
2336         }
2337 
2338         mutex_lock(&osd->lock);
2339         /*
2340          * Assign the tid atomically with send_request() to protect
2341          * multiple writes to the same object from racing with each
2342          * other, resulting in out of order ops on the OSDs.
2343          */
2344         req->r_tid = atomic64_inc_return(&osdc->last_tid);
2345         link_request(osd, req);
2346         if (need_send)
2347                 send_request(req);
2348         else if (err)
2349                 complete_request(req, err);
2350         mutex_unlock(&osd->lock);
2351 
2352         if (!err && ct_res == CALC_TARGET_POOL_DNE)
2353                 send_map_check(req);
2354 
2355         if (promoted)
2356                 downgrade_write(&osdc->lock);
2357         return;
2358 
2359 promote:
2360         up_read(&osdc->lock);
2361         down_write(&osdc->lock);
2362         wrlocked = true;
2363         promoted = true;
2364         goto again;
2365 }
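/*
 * Editorial note: the promote: path above is the usual rwsem
 * upgrade-by-retry shape -- there is no atomic read-to-write upgrade, so
 * the read lock is dropped, the write lock taken, and the target
 * recomputed from scratch, since the osdmap may have changed in the
 * window.  A minimal sketch (needs_write_access() and the work body are
 * hypothetical):
 */
#if 0	/* illustrative sketch, not compiled */
static void do_with_promotion(struct rw_semaphore *sem, bool wrlocked)
{
        bool promoted = false;

again:
        if (!wrlocked && needs_write_access()) {
                up_read(sem);
                down_write(sem);
                wrlocked = true;
                promoted = true;
                goto again;	/* re-validate everything under the new lock */
        }

        /* ... do the actual work ... */

        if (promoted)
                downgrade_write(sem);	/* hand back a read lock */
}
#endif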
2366 
2367 static void account_request(struct ceph_osd_request *req)
2368 {
2369         WARN_ON(req->r_flags & (CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK));
2370         WARN_ON(!(req->r_flags & (CEPH_OSD_FLAG_READ | CEPH_OSD_FLAG_WRITE)));
2371 
2372         req->r_flags |= CEPH_OSD_FLAG_ONDISK;
2373         atomic_inc(&req->r_osdc->num_requests);
2374 
2375         req->r_start_stamp = jiffies;
2376 }
2377 
2378 static void submit_request(struct ceph_osd_request *req, bool wrlocked)
2379 {
2380         ceph_osdc_get_request(req);
2381         account_request(req);
2382         __submit_request(req, wrlocked);
2383 }
2384 
2385 static void finish_request(struct ceph_osd_request *req)
2386 {
2387         struct ceph_osd_client *osdc = req->r_osdc;
2388 
2389         WARN_ON(lookup_request_mc(&osdc->map_checks, req->r_tid));
2390         dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
2391 
2392         if (req->r_osd)
2393                 unlink_request(req->r_osd, req);
2394         atomic_dec(&osdc->num_requests);
2395 
2396         /*
2397          * If an OSD has failed or returned and a request has been sent
2398          * twice, it's possible to get a reply and end up here while the
2399          * request message is queued for delivery.  We will ignore the
2400          * reply, so not a big deal, but better to try and catch it.
2401          */
2402         ceph_msg_revoke(req->r_request);
2403         ceph_msg_revoke_incoming(req->r_reply);
2404 }
2405 
2406 static void __complete_request(struct ceph_osd_request *req)
2407 {
2408         dout("%s req %p tid %llu cb %ps result %d\n", __func__, req,
2409              req->r_tid, req->r_callback, req->r_result);
2410 
2411         if (req->r_callback)
2412                 req->r_callback(req);
2413         complete_all(&req->r_completion);
2414         ceph_osdc_put_request(req);
2415 }
2416 
2417 static void complete_request_workfn(struct work_struct *work)
2418 {
2419         struct ceph_osd_request *req =
2420             container_of(work, struct ceph_osd_request, r_complete_work);
2421 
2422         __complete_request(req);
2423 }
2424 
2425 /*
2426  * This is open-coded in handle_reply().
2427  */
2428 static void complete_request(struct ceph_osd_request *req, int err)
2429 {
2430         dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err);
2431 
2432         req->r_result = err;
2433         finish_request(req);
2434 
2435         INIT_WORK(&req->r_complete_work, complete_request_workfn);
2436         queue_work(req->r_osdc->completion_wq, &req->r_complete_work);
2437 }
2438 
2439 static void cancel_map_check(struct ceph_osd_request *req)
2440 {
2441         struct ceph_osd_client *osdc = req->r_osdc;
2442         struct ceph_osd_request *lookup_req;
2443 
2444         verify_osdc_wrlocked(osdc);
2445 
2446         lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid);
2447         if (!lookup_req)
2448                 return;
2449 
2450         WARN_ON(lookup_req != req);
2451         erase_request_mc(&osdc->map_checks, req);
2452         ceph_osdc_put_request(req);
2453 }
2454 
2455 static void cancel_request(struct ceph_osd_request *req)
2456 {
2457         dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
2458 
2459         cancel_map_check(req);
2460         finish_request(req);
2461         complete_all(&req->r_completion);
2462         ceph_osdc_put_request(req);
2463 }
2464 
2465 static void abort_request(struct ceph_osd_request *req, int err)
2466 {
2467         dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err);
2468 
2469         cancel_map_check(req);
2470         complete_request(req, err);
2471 }
2472 
2473 static int abort_fn(struct ceph_osd_request *req, void *arg)
2474 {
2475         int err = *(int *)arg;
2476 
2477         abort_request(req, err);
2478         return 0; /* continue iteration */
2479 }
2480 
2481 /*
2482  * Abort all in-flight requests with @err and arrange for all future
2483  * requests to be failed immediately.
2484  */
2485 void ceph_osdc_abort_requests(struct ceph_osd_client *osdc, int err)
2486 {
2487         dout("%s osdc %p err %d\n", __func__, osdc, err);
2488         down_write(&osdc->lock);
2489         for_each_request(osdc, abort_fn, &err);
2490         osdc->abort_err = err;
2491         up_write(&osdc->lock);
2492 }
2493 EXPORT_SYMBOL(ceph_osdc_abort_requests);
2494 
2495 void ceph_osdc_clear_abort_err(struct ceph_osd_client *osdc)
2496 {
2497         down_write(&osdc->lock);
2498         osdc->abort_err = 0;
2499         up_write(&osdc->lock);
2500 }
2501 EXPORT_SYMBOL(ceph_osdc_clear_abort_err);
2502 
2503 static void update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb)
2504 {
2505         if (likely(eb > osdc->epoch_barrier)) {
2506                 dout("updating epoch_barrier from %u to %u\n",
2507                                 osdc->epoch_barrier, eb);
2508                 osdc->epoch_barrier = eb;
2509                 /* Request map if we're not at the barrier yet */
2510                 if (eb > osdc->osdmap->epoch)
2511                         maybe_request_map(osdc);
2512         }
2513 }
2514 
2515 void ceph_osdc_update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb)
2516 {
2517         down_read(&osdc->lock);
2518         if (unlikely(eb > osdc->epoch_barrier)) {
2519                 up_read(&osdc->lock);
2520                 down_write(&osdc->lock);
2521                 update_epoch_barrier(osdc, eb);
2522                 up_write(&osdc->lock);
2523         } else {
2524                 up_read(&osdc->lock);
2525         }
2526 }
2527 EXPORT_SYMBOL(ceph_osdc_update_epoch_barrier);
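/*
 * Editorial note: ceph_osdc_update_epoch_barrier() peeks at the barrier
 * under the read lock and escalates to the write lock only when an update
 * looks necessary.  The rwsem is dropped between the two acquisitions, so
 * update_epoch_barrier() re-checks @eb against the current barrier under
 * the write lock -- the read-side test is a fast-path optimization, not
 * the authoritative check.
 */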
2528 
2529 /*
2530  * We can end up releasing caps as a result of abort_request().
2531  * In that case, we probably want to ensure that the cap release message
2532  * has an updated epoch barrier in it, so set the epoch barrier prior to
2533  * aborting the first request.
2534  */
2535 static int abort_on_full_fn(struct ceph_osd_request *req, void *arg)
2536 {
2537         struct ceph_osd_client *osdc = req->r_osdc;
2538         bool *victims = arg;
2539 
2540         if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
2541             (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
2542              pool_full(osdc, req->r_t.base_oloc.pool))) {
2543                 if (!*victims) {
2544                         update_epoch_barrier(osdc, osdc->osdmap->epoch);
2545                         *victims = true;
2546                 }
2547                 abort_request(req, -ENOSPC);
2548         }
2549 
2550         return 0; /* continue iteration */
2551 }
2552 
2553 /*
2554  * Drop all pending requests that are stalled waiting on a full condition to
2555  * clear, and complete them with -ENOSPC as the return code. Set the
2556  * osdc->epoch_barrier to the latest map epoch that we've seen if any were
2557  * cancelled.
2558  */
2559 static void ceph_osdc_abort_on_full(struct ceph_osd_client *osdc)
2560 {
2561         bool victims = false;
2562 
2563         if (ceph_test_opt(osdc->client, ABORT_ON_FULL) &&
2564             (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || have_pool_full(osdc)))
2565                 for_each_request(osdc, abort_on_full_fn, &victims);
2566 }
2567 
2568 static void check_pool_dne(struct ceph_osd_request *req)
2569 {
2570         struct ceph_osd_client *osdc = req->r_osdc;
2571         struct ceph_osdmap *map = osdc->osdmap;
2572 
2573         verify_osdc_wrlocked(osdc);
2574         WARN_ON(!map->epoch);
2575 
2576         if (req->r_attempts) {
2577                 /*
2578                  * We sent a request earlier, which means that
2579                  * previously the pool existed, and now it does not
2580                  * (i.e., it was deleted).
2581                  */
2582                 req->r_map_dne_bound = map->epoch;
2583                 dout("%s req %p tid %llu pool disappeared\n", __func__, req,
2584                      req->r_tid);
2585         } else {
2586                 dout("%s req %p tid %llu map_dne_bound %u have %u\n", __func__,
2587                      req, req->r_tid, req->r_map_dne_bound, map->epoch);
2588         }
2589 
2590         if (req->r_map_dne_bound) {
2591                 if (map->epoch >= req->r_map_dne_bound) {
2592                         /* we had a new enough map */
2593                         pr_info_ratelimited("tid %llu pool does not exist\n",
2594                                             req->r_tid);
2595                         complete_request(req, -ENOENT);
2596                 }
2597         } else {
2598                 send_map_check(req);
2599         }
2600 }
2601 
2602 static void map_check_cb(struct ceph_mon_generic_request *greq)
2603 {
2604         struct ceph_osd_client *osdc = &greq->monc->client->osdc;
2605         struct ceph_osd_request *req;
2606         u64 tid = greq->private_data;
2607 
2608         WARN_ON(greq->result || !greq->u.newest);
2609 
2610         down_write(&osdc->lock);
2611         req = lookup_request_mc(&osdc->map_checks, tid);
2612         if (!req) {
2613                 dout("%s tid %llu dne\n", __func__, tid);
2614                 goto out_unlock;
2615         }
2616 
2617         dout("%s req %p tid %llu map_dne_bound %u newest %llu\n", __func__,
2618              req, req->r_tid, req->r_map_dne_bound, greq->u.newest);
2619         if (!req->r_map_dne_bound)
2620                 req->r_map_dne_bound = greq->u.newest;
2621         erase_request_mc(&osdc->map_checks, req);
2622         check_pool_dne(req);
2623 
2624         ceph_osdc_put_request(req);
2625 out_unlock:
2626         up_write(&osdc->lock);
2627 }
2628 
2629 static void send_map_check(struct ceph_osd_request *req)
2630 {
2631         struct ceph_osd_client *osdc = req->r_osdc;
2632         struct ceph_osd_request *lookup_req;
2633         int ret;
2634 
2635         verify_osdc_wrlocked(osdc);
2636 
2637         lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid);
2638         if (lookup_req) {
2639                 WARN_ON(lookup_req != req);
2640                 return;
2641         }
2642 
2643         ceph_osdc_get_request(req);
2644         insert_request_mc(&osdc->map_checks, req);
2645         ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap",
2646                                           map_check_cb, req->r_tid);
2647         WARN_ON(ret);
2648 }
2649 
2650 /*
2651  * lingering requests, watch/notify v2 infrastructure
2652  */
2653 static void linger_release(struct kref *kref)
2654 {
2655         struct ceph_osd_linger_request *lreq =
2656             container_of(kref, struct ceph_osd_linger_request, kref);
2657 
2658         dout("%s lreq %p reg_req %p ping_req %p\n", __func__, lreq,
2659              lreq->reg_req, lreq->ping_req);
2660         WARN_ON(!RB_EMPTY_NODE(&lreq->node));
2661         WARN_ON(!RB_EMPTY_NODE(&lreq->osdc_node));
2662         WARN_ON(!RB_EMPTY_NODE(&lreq->mc_node));
2663         WARN_ON(!list_empty(&lreq->scan_item));
2664         WARN_ON(!list_empty(&lreq->pending_lworks));
2665         WARN_ON(lreq->osd);
2666 
2667         if (lreq->reg_req)
2668                 ceph_osdc_put_request(lreq->reg_req);
2669         if (lreq->ping_req)
2670                 ceph_osdc_put_request(lreq->ping_req);
2671         target_destroy(&lreq->t);
2672         kfree(lreq);
2673 }
2674 
2675 static void linger_put(struct ceph_osd_linger_request *lreq)
2676 {
2677         if (lreq)
2678                 kref_put(&lreq->kref, linger_release);
2679 }
2680 
2681 static struct ceph_osd_linger_request *
2682 linger_get(struct ceph_osd_linger_request *lreq)
2683 {
2684         kref_get(&lreq->kref);
2685         return lreq;
2686 }
2687 
2688 static struct ceph_osd_linger_request *
2689 linger_alloc(struct ceph_osd_client *osdc)
2690 {
2691         struct ceph_osd_linger_request *lreq;
2692 
2693         lreq = kzalloc(sizeof(*lreq), GFP_NOIO);
2694         if (!lreq)
2695                 return NULL;
2696 
2697         kref_init(&lreq->kref);
2698         mutex_init(&lreq->lock);
2699         RB_CLEAR_NODE(&lreq->node);
2700         RB_CLEAR_NODE(&lreq->osdc_node);
2701         RB_CLEAR_NODE(&lreq->mc_node);
2702         INIT_LIST_HEAD(&lreq->scan_item);
2703         INIT_LIST_HEAD(&lreq->pending_lworks);
2704         init_completion(&lreq->reg_commit_wait);
2705         init_completion(&lreq->notify_finish_wait);
2706 
2707         lreq->osdc = osdc;
2708         target_init(&lreq->t);
2709 
2710         dout("%s lreq %p\n", __func__, lreq);
2711         return lreq;
2712 }
2713 
2714 DEFINE_RB_INSDEL_FUNCS(linger, struct ceph_osd_linger_request, linger_id, node)
2715 DEFINE_RB_FUNCS(linger_osdc, struct ceph_osd_linger_request, linger_id, osdc_node)
2716 DEFINE_RB_FUNCS(linger_mc, struct ceph_osd_linger_request, linger_id, mc_node)
2717 
2718 /*
2719  * Create linger request <-> OSD session relation.
2720  *
2721  * @lreq has to be registered, @osd may be homeless.
2722  */
2723 static void link_linger(struct ceph_osd *osd,
2724                         struct ceph_osd_linger_request *lreq)
2725 {
2726         verify_osd_locked(osd);
2727         WARN_ON(!lreq->linger_id || lreq->osd);
2728         dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd,
2729              osd->o_osd, lreq, lreq->linger_id);
2730 
2731         if (!osd_homeless(osd))
2732                 __remove_osd_from_lru(osd);
2733         else
2734                 atomic_inc(&osd->o_osdc->num_homeless);
2735 
2736         get_osd(osd);
2737         insert_linger(&osd->o_linger_requests, lreq);
2738         lreq->osd = osd;
2739 }
2740 
2741 static void unlink_linger(struct ceph_osd *osd,
2742                           struct ceph_osd_linger_request *lreq)
2743 {
2744         verify_osd_locked(osd);
2745         WARN_ON(lreq->osd != osd);
2746         dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd,
2747              osd->o_osd, lreq, lreq->linger_id);
2748 
2749         lreq->osd = NULL;
2750         erase_linger(&osd->o_linger_requests, lreq);
2751         put_osd(osd);
2752 
2753         if (!osd_homeless(osd))
2754                 maybe_move_osd_to_lru(osd);
2755         else
2756                 atomic_dec(&osd->o_osdc->num_homeless);
2757 }
2758 
2759 static bool __linger_registered(struct ceph_osd_linger_request *lreq)
2760 {
2761         verify_osdc_locked(lreq->osdc);
2762 
2763         return !RB_EMPTY_NODE(&lreq->osdc_node);
2764 }
2765 
2766 static bool linger_registered(struct ceph_osd_linger_request *lreq)
2767 {
2768         struct ceph_osd_client *osdc = lreq->osdc;
2769         bool registered;
2770 
2771         down_read(&osdc->lock);
2772         registered = __linger_registered(lreq);
2773         up_read(&osdc->lock);
2774 
2775         return registered;
2776 }
2777 
2778 static void linger_register(struct ceph_osd_linger_request *lreq)
2779 {
2780         struct ceph_osd_client *osdc = lreq->osdc;
2781 
2782         verify_osdc_wrlocked(osdc);
2783         WARN_ON(lreq->linger_id);
2784 
2785         linger_get(lreq);
2786         lreq->linger_id = ++osdc->last_linger_id;
2787         insert_linger_osdc(&osdc->linger_requests, lreq);
2788 }
2789 
2790 static void linger_unregister(struct ceph_osd_linger_request *lreq)
2791 {
2792         struct ceph_osd_client *osdc = lreq->osdc;
2793 
2794         verify_osdc_wrlocked(osdc);
2795 
2796         erase_linger_osdc(&osdc->linger_requests, lreq);
2797         linger_put(lreq);
2798 }
2799 
2800 static void cancel_linger_request(struct ceph_osd_request *req)
2801 {
2802         struct ceph_osd_linger_request *lreq = req->r_priv;
2803 
2804         WARN_ON(!req->r_linger);
2805         cancel_request(req);
2806         linger_put(lreq);
2807 }
2808 
2809 struct linger_work {
2810         struct work_struct work;
2811         struct ceph_osd_linger_request *lreq;
2812         struct list_head pending_item;
2813         unsigned long queued_stamp;
2814 
2815         union {
2816                 struct {
2817                         u64 notify_id;
2818                         u64 notifier_id;
2819                         void *payload; /* points into @msg front */
2820                         size_t payload_len;
2821 
2822                         struct ceph_msg *msg; /* for ceph_msg_put() */
2823                 } notify;
2824                 struct {
2825                         int err;
2826                 } error;
2827         };
2828 };
2829 
2830 static struct linger_work *lwork_alloc(struct ceph_osd_linger_request *lreq,
2831                                        work_func_t workfn)
2832 {
2833         struct linger_work *lwork;
2834 
2835         lwork = kzalloc(sizeof(*lwork), GFP_NOIO);
2836         if (!lwork)
2837                 return NULL;
2838 
2839         INIT_WORK(&lwork->work, workfn);
2840         INIT_LIST_HEAD(&lwork->pending_item);
2841         lwork->lreq = linger_get(lreq);
2842 
2843         return lwork;
2844 }
2845 
2846 static void lwork_free(struct linger_work *lwork)
2847 {
2848         struct ceph_osd_linger_request *lreq = lwork->lreq;
2849 
2850         mutex_lock(&lreq->lock);
2851         list_del(&lwork->pending_item);
2852         mutex_unlock(&lreq->lock);
2853 
2854         linger_put(lreq);
2855         kfree(lwork);
2856 }
2857 
2858 static void lwork_queue(struct linger_work *lwork)
2859 {
2860         struct ceph_osd_linger_request *lreq = lwork->lreq;
2861         struct ceph_osd_client *osdc = lreq->osdc;
2862 
2863         verify_lreq_locked(lreq);
2864         WARN_ON(!list_empty(&lwork->pending_item));
2865 
2866         lwork->queued_stamp = jiffies;
2867         list_add_tail(&lwork->pending_item, &lreq->pending_lworks);
2868         queue_work(osdc->notify_wq, &lwork->work);
2869 }
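
/*
 * A sketch of the linger_work flow, mirroring queue_watch_error()
 * below (the error value is illustrative):
 *
 *	lwork = lwork_alloc(lreq, do_watch_error);   takes an lreq ref
 *	if (lwork) {
 *		lwork->error.err = -ENOTCONN;
 *		lwork_queue(lwork);                  lreq->lock must be held
 *	}
 *
 * The work handler finishes with lwork_free(), which unlinks
 * pending_item under lreq->lock and drops the lreq reference.
 */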
2870 
2871 static void do_watch_notify(struct work_struct *w)
2872 {
2873         struct linger_work *lwork = container_of(w, struct linger_work, work);
2874         struct ceph_osd_linger_request *lreq = lwork->lreq;
2875 
2876         if (!linger_registered(lreq)) {
2877                 dout("%s lreq %p not registered\n", __func__, lreq);
2878                 goto out;
2879         }
2880 
2881         WARN_ON(!lreq->is_watch);
2882         dout("%s lreq %p notify_id %llu notifier_id %llu payload_len %zu\n",
2883              __func__, lreq, lwork->notify.notify_id, lwork->notify.notifier_id,
2884              lwork->notify.payload_len);
2885         lreq->wcb(lreq->data, lwork->notify.notify_id, lreq->linger_id,
2886                   lwork->notify.notifier_id, lwork->notify.payload,
2887                   lwork->notify.payload_len);
2888 
2889 out:
2890         ceph_msg_put(lwork->notify.msg);
2891         lwork_free(lwork);
2892 }
2893 
2894 static void do_watch_error(struct work_struct *w)
2895 {
2896         struct linger_work *lwork = container_of(w, struct linger_work, work);
2897         struct ceph_osd_linger_request *lreq = lwork->lreq;
2898 
2899         if (!linger_registered(lreq)) {
2900                 dout("%s lreq %p not registered\n", __func__, lreq);
2901                 goto out;
2902         }
2903 
2904         dout("%s lreq %p err %d\n", __func__, lreq, lwork->error.err);
2905         lreq->errcb(lreq->data, lreq->linger_id, lwork->error.err);
2906 
2907 out:
2908         lwork_free(lwork);
2909 }
2910 
2911 static void queue_watch_error(struct ceph_osd_linger_request *lreq)
2912 {
2913         struct linger_work *lwork;
2914 
2915         lwork = lwork_alloc(lreq, do_watch_error);
2916         if (!lwork) {
2917                 pr_err("failed to allocate error-lwork\n");
2918                 return;
2919         }
2920 
2921         lwork->error.err = lreq->last_error;
2922         lwork_queue(lwork);
2923 }
2924 
2925 static void linger_reg_commit_complete(struct ceph_osd_linger_request *lreq,
2926                                        int result)
2927 {
2928         if (!completion_done(&lreq->reg_commit_wait)) {
2929                 lreq->reg_commit_error = (result <= 0 ? result : 0);
2930                 complete_all(&lreq->reg_commit_wait);
2931         }
2932 }
2933 
2934 static void linger_commit_cb(struct ceph_osd_request *req)
2935 {
2936         struct ceph_osd_linger_request *lreq = req->r_priv;
2937 
2938         mutex_lock(&lreq->lock);
2939         dout("%s lreq %p linger_id %llu result %d\n", __func__, lreq,
2940              lreq->linger_id, req->r_result);
2941         linger_reg_commit_complete(lreq, req->r_result);
2942         lreq->committed = true;
2943 
2944         if (!lreq->is_watch) {
2945                 struct ceph_osd_data *osd_data =
2946                     osd_req_op_data(req, 0, notify, response_data);
2947                 void *p = page_address(osd_data->pages[0]);
2948 
2949                 WARN_ON(req->r_ops[0].op != CEPH_OSD_OP_NOTIFY ||
2950                         osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
2951 
2952                 /* make note of the notify_id */
2953                 if (req->r_ops[0].outdata_len >= sizeof(u64)) {
2954                         lreq->notify_id = ceph_decode_64(&p);
2955                         dout("lreq %p notify_id %llu\n", lreq,
2956                              lreq->notify_id);
2957                 } else {
2958                         dout("lreq %p no notify_id\n", lreq);
2959                 }
2960         }
2961 
2962         mutex_unlock(&lreq->lock);
2963         linger_put(lreq);
2964 }
2965 
2966 static int normalize_watch_error(int err)
2967 {
2968         /*
2969          * Translate ENOENT -> ENOTCONN so that a delete->disconnection
2970          * notification and a failure to reconnect because we raced with
2971          * the delete appear the same to the user.
2972          */
2973         if (err == -ENOENT)
2974                 err = -ENOTCONN;
2975 
2976         return err;
2977 }
2978 
2979 static void linger_reconnect_cb(struct ceph_osd_request *req)
2980 {
2981         struct ceph_osd_linger_request *lreq = req->r_priv;
2982 
2983         mutex_lock(&lreq->lock);
2984         dout("%s lreq %p linger_id %llu result %d last_error %d\n", __func__,
2985              lreq, lreq->linger_id, req->r_result, lreq->last_error);
2986         if (req->r_result < 0) {
2987                 if (!lreq->last_error) {
2988                         lreq->last_error = normalize_watch_error(req->r_result);
2989                         queue_watch_error(lreq);
2990                 }
2991         }
2992 
2993         mutex_unlock(&lreq->lock);
2994         linger_put(lreq);
2995 }
2996 
2997 static void send_linger(struct ceph_osd_linger_request *lreq)
2998 {
2999         struct ceph_osd_request *req = lreq->reg_req;
3000         struct ceph_osd_req_op *op = &req->r_ops[0];
3001 
3002         verify_osdc_wrlocked(req->r_osdc);
3003         dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
3004 
3005         if (req->r_osd)
3006                 cancel_linger_request(req);
3007 
3008         request_reinit(req);
3009         ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
3010         ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
3011         req->r_flags = lreq->t.flags;
3012         req->r_mtime = lreq->mtime;
3013 
3014         mutex_lock(&lreq->lock);
3015         if (lreq->is_watch && lreq->committed) {
3016                 WARN_ON(op->op != CEPH_OSD_OP_WATCH ||
3017                         op->watch.cookie != lreq->linger_id);
3018                 op->watch.op = CEPH_OSD_WATCH_OP_RECONNECT;
3019                 op->watch.gen = ++lreq->register_gen;
3020                 dout("lreq %p reconnect register_gen %u\n", lreq,
3021                      op->watch.gen);
3022                 req->r_callback = linger_reconnect_cb;
3023         } else {
3024                 if (!lreq->is_watch)
3025                         lreq->notify_id = 0;
3026                 else
3027                         WARN_ON(op->watch.op != CEPH_OSD_WATCH_OP_WATCH);
3028                 dout("lreq %p register\n", lreq);
3029                 req->r_callback = linger_commit_cb;
3030         }
3031         mutex_unlock(&lreq->lock);
3032 
3033         req->r_priv = linger_get(lreq);
3034         req->r_linger = true;
3035 
3036         submit_request(req, true);
3037 }
3038 
3039 static void linger_ping_cb(struct ceph_osd_request *req)
3040 {
3041         struct ceph_osd_linger_request *lreq = req->r_priv;
3042 
3043         mutex_lock(&lreq->lock);
3044         dout("%s lreq %p linger_id %llu result %d ping_sent %lu last_error %d\n",
3045              __func__, lreq, lreq->linger_id, req->r_result, lreq->ping_sent,
3046              lreq->last_error);
3047         if (lreq->register_gen == req->r_ops[0].watch.gen) {
3048                 if (!req->r_result) {
3049                         lreq->watch_valid_thru = lreq->ping_sent;
3050                 } else if (!lreq->last_error) {
3051                         lreq->last_error = normalize_watch_error(req->r_result);
3052                         queue_watch_error(lreq);
3053                 }
3054         } else {
3055                 dout("lreq %p register_gen %u ignoring old pong %u\n", lreq,
3056                      lreq->register_gen, req->r_ops[0].watch.gen);
3057         }
3058 
3059         mutex_unlock(&lreq->lock);
3060         linger_put(lreq);
3061 }
3062 
3063 static void send_linger_ping(struct ceph_osd_linger_request *lreq)
3064 {
3065         struct ceph_osd_client *osdc = lreq->osdc;
3066         struct ceph_osd_request *req = lreq->ping_req;
3067         struct ceph_osd_req_op *op = &req->r_ops[0];
3068 
3069         if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
3070                 dout("%s PAUSERD\n", __func__);
3071                 return;
3072         }
3073 
3074         lreq->ping_sent = jiffies;
3075         dout("%s lreq %p linger_id %llu ping_sent %lu register_gen %u\n",
3076              __func__, lreq, lreq->linger_id, lreq->ping_sent,
3077              lreq->register_gen);
3078 
3079         if (req->r_osd)
3080                 cancel_linger_request(req);
3081 
3082         request_reinit(req);
3083         target_copy(&req->r_t, &lreq->t);
3084 
3085         WARN_ON(op->op != CEPH_OSD_OP_WATCH ||
3086                 op->watch.cookie != lreq->linger_id ||
3087                 op->watch.op != CEPH_OSD_WATCH_OP_PING);
3088         op->watch.gen = lreq->register_gen;
3089         req->r_callback = linger_ping_cb;
3090         req->r_priv = linger_get(lreq);
3091         req->r_linger = true;
3092 
3093         ceph_osdc_get_request(req);
3094         account_request(req);
3095         req->r_tid = atomic64_inc_return(&osdc->last_tid);
3096         link_request(lreq->osd, req);
3097         send_request(req);
3098 }
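
/*
 * Each ping carries the current register_gen, letting linger_ping_cb()
 * above discard pongs that raced with a reconnect.  On success the
 * watch is known to have been valid as of ping_sent; a consumer sketch
 * (roughly what ceph_osdc_watch_check(), later in this file, reports):
 *
 *	unsigned long age = jiffies - lreq->watch_valid_thru;
 *
 * i.e. how long ago the watch was last confirmed alive.
 */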
3099 
3100 static void linger_submit(struct ceph_osd_linger_request *lreq)
3101 {
3102         struct ceph_osd_client *osdc = lreq->osdc;
3103         struct ceph_osd *osd;
3104 
3105         down_write(&osdc->lock);
3106         linger_register(lreq);
3107         if (lreq->is_watch) {
3108                 lreq->reg_req->r_ops[0].watch.cookie = lreq->linger_id;
3109                 lreq->ping_req->r_ops[0].watch.cookie = lreq->linger_id;
3110         } else {
3111                 lreq->reg_req->r_ops[0].notify.cookie = lreq->linger_id;
3112         }
3113 
3114         calc_target(osdc, &lreq->t, false);
3115         osd = lookup_create_osd(osdc, lreq->t.osd, true);
3116         link_linger(osd, lreq);
3117 
3118         send_linger(lreq);
3119         up_write(&osdc->lock);
3120 }
3121 
3122 static void cancel_linger_map_check(struct ceph_osd_linger_request *lreq)
3123 {
3124         struct ceph_osd_client *osdc = lreq->osdc;
3125         struct ceph_osd_linger_request *lookup_lreq;
3126 
3127         verify_osdc_wrlocked(osdc);
3128 
3129         lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks,
3130                                        lreq->linger_id);
3131         if (!lookup_lreq)
3132                 return;
3133 
3134         WARN_ON(lookup_lreq != lreq);
3135         erase_linger_mc(&osdc->linger_map_checks, lreq);
3136         linger_put(lreq);
3137 }
3138 
3139 /*
3140  * @lreq has to be both registered and linked.
3141  */
3142 static void __linger_cancel(struct ceph_osd_linger_request *lreq)
3143 {
3144         if (lreq->is_watch && lreq->ping_req->r_osd)
3145                 cancel_linger_request(lreq->ping_req);
3146         if (lreq->reg_req->r_osd)
3147                 cancel_linger_request(lreq->reg_req);
3148         cancel_linger_map_check(lreq);
3149         unlink_linger(lreq->osd, lreq);
3150         linger_unregister(lreq);
3151 }
3152 
3153 static void linger_cancel(struct ceph_osd_linger_request *lreq)
3154 {
3155         struct ceph_osd_client *osdc = lreq->osdc;
3156 
3157         down_write(&osdc->lock);
3158         if (__linger_registered(lreq))
3159                 __linger_cancel(lreq);
3160         up_write(&osdc->lock);
3161 }
3162 
3163 static void send_linger_map_check(struct ceph_osd_linger_request *lreq);
3164 
3165 static void check_linger_pool_dne(struct ceph_osd_linger_request *lreq)
3166 {
3167         struct ceph_osd_client *osdc = lreq->osdc;
3168         struct ceph_osdmap *map = osdc->osdmap;
3169 
3170         verify_osdc_wrlocked(osdc);
3171         WARN_ON(!map->epoch);
3172 
3173         if (lreq->register_gen) {
3174                 lreq->map_dne_bound = map->epoch;
3175                 dout("%s lreq %p linger_id %llu pool disappeared\n", __func__,
3176                      lreq, lreq->linger_id);
3177         } else {
3178                 dout("%s lreq %p linger_id %llu map_dne_bound %u have %u\n",
3179                      __func__, lreq, lreq->linger_id, lreq->map_dne_bound,
3180                      map->epoch);
3181         }
3182 
3183         if (lreq->map_dne_bound) {
3184                 if (map->epoch >= lreq->map_dne_bound) {
3185                         /* we had a new enough map */
3186                         pr_info("linger_id %llu pool does not exist\n",
3187                                 lreq->linger_id);
3188                         linger_reg_commit_complete(lreq, -ENOENT);
3189                         __linger_cancel(lreq);
3190                 }
3191         } else {
3192                 send_linger_map_check(lreq);
3193         }
3194 }
3195 
3196 static void linger_map_check_cb(struct ceph_mon_generic_request *greq)
3197 {
3198         struct ceph_osd_client *osdc = &greq->monc->client->osdc;
3199         struct ceph_osd_linger_request *lreq;
3200         u64 linger_id = greq->private_data;
3201 
3202         WARN_ON(greq->result || !greq->u.newest);
3203 
3204         down_write(&osdc->lock);
3205         lreq = lookup_linger_mc(&osdc->linger_map_checks, linger_id);
3206         if (!lreq) {
3207                 dout("%s linger_id %llu dne\n", __func__, linger_id);
3208                 goto out_unlock;
3209         }
3210 
3211         dout("%s lreq %p linger_id %llu map_dne_bound %u newest %llu\n",
3212              __func__, lreq, lreq->linger_id, lreq->map_dne_bound,
3213              greq->u.newest);
3214         if (!lreq->map_dne_bound)
3215                 lreq->map_dne_bound = greq->u.newest;
3216         erase_linger_mc(&osdc->linger_map_checks, lreq);
3217         check_linger_pool_dne(lreq);
3218 
3219         linger_put(lreq);
3220 out_unlock:
3221         up_write(&osdc->lock);
3222 }
3223 
3224 static void send_linger_map_check(struct ceph_osd_linger_request *lreq)
3225 {
3226         struct ceph_osd_client *osdc = lreq->osdc;
3227         struct ceph_osd_linger_request *lookup_lreq;
3228         int ret;
3229 
3230         verify_osdc_wrlocked(osdc);
3231 
3232         lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks,
3233                                        lreq->linger_id);
3234         if (lookup_lreq) {
3235                 WARN_ON(lookup_lreq != lreq);
3236                 return;
3237         }
3238 
3239         linger_get(lreq);
3240         insert_linger_mc(&osdc->linger_map_checks, lreq);
3241         ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap",
3242                                           linger_map_check_cb, lreq->linger_id);
3243         WARN_ON(ret);
3244 }
3245 
3246 static int linger_reg_commit_wait(struct ceph_osd_linger_request *lreq)
3247 {
3248         int ret;
3249 
3250         dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
3251         ret = wait_for_completion_interruptible(&lreq->reg_commit_wait);
3252         return ret ?: lreq->reg_commit_error;
3253 }
3254 
3255 static int linger_notify_finish_wait(struct ceph_osd_linger_request *lreq)
3256 {
3257         int ret;
3258 
3259         dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
3260         ret = wait_for_completion_interruptible(&lreq->notify_finish_wait);
3261         return ret ?: lreq->notify_finish_error;
3262 }
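
/*
 * Putting the pieces together, watch registration roughly follows this
 * sequence (a condensed sketch; request setup and error handling are
 * elided):
 *
 *	lreq = linger_alloc(osdc);
 *	lreq->is_watch = true;
 *	...                          fill lreq->t, reg_req and ping_req
 *	linger_submit(lreq);
 *	ret = linger_reg_commit_wait(lreq);
 *	if (ret)
 *		linger_cancel(lreq);
 *
 * Notifies (is_watch == false) additionally use
 * linger_notify_finish_wait() to wait for the NOTIFY_COMPLETE event.
 */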
3263 
3264 /*
3265  * Timeout callback, called every N seconds.  When one or more OSD
3266  * requests have been active for more than N seconds, we send a keepalive
3267  * (tag + timestamp) to their OSDs to ensure any communications channel
3268  * reset is detected.
3269  */
3270 static void handle_timeout(struct work_struct *work)
3271 {
3272         struct ceph_osd_client *osdc =
3273                 container_of(work, struct ceph_osd_client, timeout_work.work);
3274         struct ceph_options *opts = osdc->client->options;
3275         unsigned long cutoff = jiffies - opts->osd_keepalive_timeout;
3276         unsigned long expiry_cutoff = jiffies - opts->osd_request_timeout;
3277         LIST_HEAD(slow_osds);
3278         struct rb_node *n, *p;
3279 
3280         dout("%s osdc %p\n", __func__, osdc);
3281         down_write(&osdc->lock);
3282 
3283         /*
3284          * ping osds that are a bit slow.  this ensures that if there
3285          * is a break in the TCP connection we will notice, and reopen
3286          * a connection with that osd (from the fault callback).
3287          */
3288         for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
3289                 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
3290                 bool found = false;
3291 
3292                 for (p = rb_first(&osd->o_requests); p; ) {
3293                         struct ceph_osd_request *req =
3294                             rb_entry(p, struct ceph_osd_request, r_node);
3295 
3296                         p = rb_next(p); /* abort_request() */
3297 
3298                         if (time_before(req->r_stamp, cutoff)) {
3299                                 dout(" req %p tid %llu on osd%d is laggy\n",
3300                                      req, req->r_tid, osd->o_osd);
3301                                 found = true;
3302                         }
3303                         if (opts->osd_request_timeout &&
3304                             time_before(req->r_start_stamp, expiry_cutoff)) {
3305                                 pr_err_ratelimited("tid %llu on osd%d timeout\n",
3306                                        req->r_tid, osd->o_osd);
3307                                 abort_request(req, -ETIMEDOUT);
3308                         }
3309                 }
3310                 for (p = rb_first(&osd->o_linger_requests); p; p = rb_next(p)) {
3311                         struct ceph_osd_linger_request *lreq =
3312                             rb_entry(p, struct ceph_osd_linger_request, node);
3313 
3314                         dout(" lreq %p linger_id %llu is served by osd%d\n",
3315                              lreq, lreq->linger_id, osd->o_osd);
3316                         found = true;
3317 
3318                         mutex_lock(&lreq->lock);
3319                         if (lreq->is_watch && lreq->committed && !lreq->last_error)
3320                                 send_linger_ping(lreq);
3321                         mutex_unlock(&lreq->lock);
3322                 }
3323 
3324                 if (found)
3325                         list_move_tail(&osd->o_keepalive_item, &slow_osds);
3326         }
3327 
3328         if (opts->osd_request_timeout) {
3329                 for (p = rb_first(&osdc->homeless_osd.o_requests); p; ) {
3330                         struct ceph_osd_request *req =
3331                             rb_entry(p, struct ceph_osd_request, r_node);
3332 
3333                         p = rb_next(p); /* abort_request() */
3334 
3335                         if (time_before(req->r_start_stamp, expiry_cutoff)) {
3336                                 pr_err_ratelimited("tid %llu on osd%d timeout\n",
3337                                        req->r_tid, osdc->homeless_osd.o_osd);
3338                                 abort_request(req, -ETIMEDOUT);
3339                         }
3340                 }
3341         }
3342 
3343         if (atomic_read(&osdc->num_homeless) || !list_empty(&slow_osds))
3344                 maybe_request_map(osdc);
3345 
3346         while (!list_empty(&slow_osds)) {
3347                 struct ceph_osd *osd = list_first_entry(&slow_osds,
3348                                                         struct ceph_osd,
3349                                                         o_keepalive_item);
3350                 list_del_init(&osd->o_keepalive_item);
3351                 ceph_con_keepalive(&osd->o_con);
3352         }
3353 
3354         up_write(&osdc->lock);
3355         schedule_delayed_work(&osdc->timeout_work,
3356                               osdc->client->options->osd_keepalive_timeout);
3357 }
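
/*
 * Both cutoffs above derive from client options.  A sketch of the
 * relevant knobs (option names as accepted by libceph's option
 * parsing; the values are illustrative):
 *
 *	osd_keepalive_timeout    "osdkeepalive=5"          laggy-OSD ping
 *	osd_request_timeout      "osd_request_timeout=60"  abort with
 *	                                                   -ETIMEDOUT
 *
 * With osd_request_timeout left at 0 (the default), requests are never
 * aborted here and stay pending until completed or remapped.
 */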
3358 
3359 static void handle_osds_timeout(struct work_struct *work)
3360 {
3361         struct ceph_osd_client *osdc =
3362                 container_of(work, struct ceph_osd_client,
3363                              osds_timeout_work.work);
3364         unsigned long delay = osdc->client->options->osd_idle_ttl / 4;
3365         struct ceph_osd *osd, *nosd;
3366 
3367         dout("%s osdc %p\n", __func__, osdc);
3368         down_write(&osdc->lock);
3369         list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
3370                 if (time_before(jiffies, osd->lru_ttl))
3371                         break;
3372 
3373                 WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests));
3374                 WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests));
3375                 close_osd(osd);
3376         }
3377 
3378         up_write(&osdc->lock);
3379         schedule_delayed_work(&osdc->osds_timeout_work,
3380                               round_jiffies_relative(delay));
3381 }
3382 
3383 static int ceph_oloc_decode(void **p, void *end,
3384                             struct ceph_object_locator *oloc)
3385 {
3386         u8 struct_v, struct_cv;
3387         u32 len;
3388         void *struct_end;
3389         int ret = 0;
3390 
3391         ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
3392         struct_v = ceph_decode_8(p);
3393         struct_cv = ceph_decode_8(p);
3394         if (struct_v < 3) {
3395                 pr_warn("got v %d < 3 cv %d of ceph_object_locator\n",
3396                         struct_v, struct_cv);
3397                 goto e_inval;
3398         }
3399         if (struct_cv > 6) {
3400                 pr_warn("got v %d cv %d > 6 of ceph_object_locator\n",
3401                         struct_v, struct_cv);
3402                 goto e_inval;
3403         }
3404         len = ceph_decode_32(p);
3405         ceph_decode_need(p, end, len, e_inval);
3406         struct_end = *p + len;
3407 
3408         oloc->pool = ceph_decode_64(p);
3409         *p += 4; /* skip preferred */
3410 
3411         len = ceph_decode_32(p);
3412         if (len > 0) {
3413                 pr_warn("ceph_object_locator::key is set\n");
3414                 goto e_inval;
3415         }
3416 
3417         if (struct_v >= 5) {
3418                 bool changed = false;
3419 
3420                 len = ceph_decode_32(p);
3421                 if (len > 0) {
3422                         ceph_decode_need(p, end, len, e_inval);
3423                         if (!oloc->pool_ns ||
3424                             ceph_compare_string(oloc->pool_ns, *p, len))
3425                                 changed = true;
3426                         *p += len;
3427                 } else {
3428                         if (oloc->pool_ns)
3429                                 changed = true;
3430                 }
3431                 if (changed) {
3432                         /* redirect changes namespace */
3433                         pr_warn("ceph_object_locator::nspace is changed\n");
3434                         goto e_inval;
3435                 }
3436         }
3437 
3438         if (struct_v >= 6) {
3439                 s64 hash = ceph_decode_64(p);
3440                 if (hash != -1) {
3441                         pr_warn("ceph_object_locator::hash is set\n");
3442                         goto e_inval;
3443                 }
3444         }
3445 
3446         /* skip the rest */
3447         *p = struct_end;
3448 out:
3449         return ret;
3450 
3451 e_inval:
3452         ret = -EINVAL;
3453         goto out;
3454 }
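
/*
 * For reference, the object_locator_t encoding consumed above, as
 * inferred from the decode steps (integers little-endian):
 *
 *	u8  struct_v                must be >= 3
 *	u8  struct_cv               must be <= 6
 *	u32 len                     payload length, up to struct_end
 *	u64 pool
 *	u32 preferred               skipped
 *	u32 key_len, key            rejected if non-empty
 *	u32 nspace_len, nspace      v5+; must match oloc->pool_ns
 *	s64 hash                    v6+; must be -1
 */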
3455 
3456 static int ceph_redirect_decode(void **p, void *end,
3457                                 struct ceph_request_redirect *redir)
3458 {
3459         u8 struct_v, struct_cv;
3460         u32 len;
3461         void *struct_end;
3462         int ret;
3463 
3464         ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
3465         struct_v = ceph_decode_8(p);
3466         struct_cv = ceph_decode_8(p);
3467         if (struct_cv > 1) {
3468                 pr_warn("got v %d cv %d > 1 of ceph_request_redirect\n",
3469                         struct_v, struct_cv);
3470                 goto e_inval;
3471         }
3472         len = ceph_decode_32(p);
3473         ceph_decode_need(p, end, len, e_inval);
3474         struct_end = *p + len;
3475 
3476         ret = ceph_oloc_decode(p, end, &redir->oloc);
3477         if (ret)
3478                 goto out;
3479 
3480         len = ceph_decode_32(p);
3481         if (len > 0) {
3482                 pr_warn("ceph_request_redirect::object_name is set\n");
3483                 goto e_inval;
3484         }
3485 
3486         len = ceph_decode_32(p);
3487         *p += len; /* skip osd_instructions */
3488 
3489         /* skip the rest */
3490         *p = struct_end;
3491 out:
3492         return ret;
3493 
3494 e_inval:
3495         ret = -EINVAL;
3496         goto out;
3497 }
3498 
3499 struct MOSDOpReply {
3500         struct ceph_pg pgid;
3501         u64 flags;
3502         int result;
3503         u32 epoch;
3504         int num_ops;
3505         u32 outdata_len[CEPH_OSD_MAX_OPS];
3506         s32 rval[CEPH_OSD_MAX_OPS];
3507         int retry_attempt;
3508         struct ceph_eversion replay_version;
3509         u64 user_version;
3510         struct ceph_request_redirect redirect;
3511 };
3512 
3513 static int decode_MOSDOpReply(const struct ceph_msg *msg, struct MOSDOpReply *m)
3514 {
3515         void *p = msg->front.iov_base;
3516         void *const end = p + msg->front.iov_len;
3517         u16 version = le16_to_cpu(msg->hdr.version);
3518         struct ceph_eversion bad_replay_version;
3519         u8 decode_redir;
3520         u32 len;
3521         int ret;
3522         int i;
3523 
3524         ceph_decode_32_safe(&p, end, len, e_inval);
3525         ceph_decode_need(&p, end, len, e_inval);
3526         p += len; /* skip oid */
3527 
3528         ret = ceph_decode_pgid(&p, end, &m->pgid);
3529         if (ret)
3530                 return ret;
3531 
3532         ceph_decode_64_safe(&p, end, m->flags, e_inval);
3533         ceph_decode_32_safe(&p, end, m->result, e_inval);
3534         ceph_decode_need(&p, end, sizeof(bad_replay_version), e_inval);
3535         memcpy(&bad_replay_version, p, sizeof(bad_replay_version));
3536         p += sizeof(bad_replay_version);
3537         ceph_decode_32_safe(&p, end, m->epoch, e_inval);
3538 
3539         ceph_decode_32_safe(&p, end, m->num_ops, e_inval);
3540         if (m->num_ops > ARRAY_SIZE(m->outdata_len))
3541                 goto e_inval;
3542 
3543         ceph_decode_need(&p, end, m->num_ops * sizeof(struct ceph_osd_op),
3544                          e_inval);
3545         for (i = 0; i < m->num_ops; i++) {
3546                 struct ceph_osd_op *op = p;
3547 
3548                 m->outdata_len[i] = le32_to_cpu(op->payload_len);
3549                 p += sizeof(*op);
3550         }
3551 
3552         ceph_decode_32_safe(&p, end, m->retry_attempt, e_inval);
3553         for (i = 0; i < m->num_ops; i++)
3554                 ceph_decode_32_safe(&p, end, m->rval[i], e_inval);
3555 
3556         if (version >= 5) {
3557                 ceph_decode_need(&p, end, sizeof(m->replay_version), e_inval);
3558                 memcpy(&m->replay_version, p, sizeof(m->replay_version));
3559                 p += sizeof(m->replay_version);
3560                 ceph_decode_64_safe(&p, end, m->user_version, e_inval);
3561         } else {
3562                 m->replay_version = bad_replay_version; /* struct */
3563                 m->user_version = le64_to_cpu(m->replay_version.version);
3564         }
3565 
3566         if (version >= 6) {
3567                 if (version >= 7)
3568                         ceph_decode_8_safe(&p, end, decode_redir, e_inval);
3569                 else
3570                         decode_redir = 1;
3571         } else {
3572                 decode_redir = 0;
3573         }
3574 
3575         if (decode_redir) {
3576                 ret = ceph_redirect_decode(&p, end, &m->redirect);
3577                 if (ret)
3578                         return ret;
3579         } else {
3580                 ceph_oloc_init(&m->redirect.oloc);
3581         }
3582 
3583         return 0;
3584 
3585 e_inval:
3586         return -EINVAL;
3587 }
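
/*
 * The MOSDOpReply front, in the order decoded above (v4+ is assumed,
 * see the retry_attempt WARN_ON in handle_reply() below; v5 added
 * replay_version/user_version, v7 the decode_redir byte):
 *
 *	oid_len, oid, pgid, flags, result, bad_replay_version, epoch,
 *	num_ops, ceph_osd_op[num_ops], retry_attempt, rval[num_ops],
 *	(v5+) replay_version, user_version,
 *	(v7+) decode_redir, (v6+) redirect if decode_redir is set
 */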
3588 
3589 /*
3590  * Handle MOSDOpReply.  Set ->r_result and call the callback if it is
3591  * specified.
3592  */
3593 static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg)
3594 {
3595         struct ceph_osd_client *osdc = osd->o_osdc;
3596         struct ceph_osd_request *req;
3597         struct MOSDOpReply m;
3598         u64 tid = le64_to_cpu(msg->hdr.tid);
3599         u32 data_len = 0;
3600         int ret;
3601         int i;
3602 
3603         dout("%s msg %p tid %llu\n", __func__, msg, tid);
3604 
3605         down_read(&osdc->lock);
3606         if (!osd_registered(osd)) {
3607                 dout("%s osd%d unknown\n", __func__, osd->o_osd);
3608                 goto out_unlock_osdc;
3609         }
3610         WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num));
3611 
3612         mutex_lock(&osd->lock);
3613         req = lookup_request(&osd->o_requests, tid);
3614         if (!req) {
3615                 dout("%s osd%d tid %llu unknown\n", __func__, osd->o_osd, tid);
3616                 goto out_unlock_session;
3617         }
3618 
3619         m.redirect.oloc.pool_ns = req->r_t.target_oloc.pool_ns;
3620         ret = decode_MOSDOpReply(msg, &m);
3621         m.redirect.oloc.pool_ns = NULL;
3622         if (ret) {
3623                 pr_err("failed to decode MOSDOpReply for tid %llu: %d\n",
3624                        req->r_tid, ret);
3625                 ceph_msg_dump(msg);
3626                 goto fail_request;
3627         }
3628         dout("%s req %p tid %llu flags 0x%llx pgid %llu.%x epoch %u attempt %d v %u'%llu uv %llu\n",
3629              __func__, req, req->r_tid, m.flags, m.pgid.pool, m.pgid.seed,
3630              m.epoch, m.retry_attempt, le32_to_cpu(m.replay_version.epoch),
3631              le64_to_cpu(m.replay_version.version), m.user_version);
3632 
3633         if (m.retry_attempt >= 0) {
3634                 if (m.retry_attempt != req->r_attempts - 1) {
3635                         dout("req %p tid %llu retry_attempt %d != %d, ignoring\n",
3636                              req, req->r_tid, m.retry_attempt,
3637                              req->r_attempts - 1);
3638                         goto out_unlock_session;
3639                 }
3640         } else {
3641                 WARN_ON(1); /* MOSDOpReply v4 is assumed */
3642         }
3643 
3644         if (!ceph_oloc_empty(&m.redirect.oloc)) {
3645                 dout("req %p tid %llu redirect pool %lld\n", req, req->r_tid,
3646                      m.redirect.oloc.pool);
3647                 unlink_request(osd, req);
3648                 mutex_unlock(&osd->lock);
3649 
3650                 /*
3651                  * Not ceph_oloc_copy() - changing pool_ns is not
3652                  * supported.
3653                  */
3654                 req->r_t.target_oloc.pool = m.redirect.oloc.pool;
3655                 req->r_flags |= CEPH_OSD_FLAG_REDIRECTED |
3656                                 CEPH_OSD_FLAG_IGNORE_OVERLAY |
3657                                 CEPH_OSD_FLAG_IGNORE_CACHE;
3658                 req->r_tid = 0;
3659                 __submit_request(req, false);
3660                 goto out_unlock_osdc;
3661         }
3662 
3663         if (m.num_ops != req->r_num_ops) {
3664                 pr_err("num_ops %d != %d for tid %llu\n", m.num_ops,
3665                        req->r_num_ops, req->r_tid);
3666                 goto fail_request;
3667         }
3668         for (i = 0; i < req->r_num_ops; i++) {
3669                 dout(" req %p tid %llu op %d rval %d len %u\n", req,
3670                      req->r_tid, i, m.rval[i], m.outdata_len[i]);
3671                 req->r_ops[i].rval = m.rval[i];
3672                 req->r_ops[i].outdata_len = m.outdata_len[i];
3673                 data_len += m.outdata_len[i];
3674         }
3675         if (data_len != le32_to_cpu(msg->hdr.data_len)) {
3676                 pr_err("sum of lens %u != %u for tid %llu\n", data_len,
3677                        le32_to_cpu(msg->hdr.data_len), req->r_tid);
3678                 goto fail_request;
3679         }
3680         dout("%s req %p tid %llu result %d data_len %u\n", __func__,
3681              req, req->r_tid, m.result, data_len);
3682 
3683         /*
3684          * Since we only ever request ONDISK, we should only ever get
3685          * one (type of) reply back.
3686          */
3687         WARN_ON(!(m.flags & CEPH_OSD_FLAG_ONDISK));
3688         req->r_result = m.result ?: data_len;
3689         finish_request(req);
3690         mutex_unlock(&osd->lock);
3691         up_read(&osdc->lock);
3692 
3693         __complete_request(req);
3694         return;
3695 
3696 fail_request:
3697         complete_request(req, -EIO);
3698 out_unlock_session:
3699         mutex_unlock(&osd->lock);
3700 out_unlock_osdc:
3701         up_read(&osdc->lock);
3702 }
3703 
3704 static void set_pool_was_full(struct ceph_osd_client *osdc)
3705 {
3706         struct rb_node *n;
3707 
3708         for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
3709                 struct ceph_pg_pool_info *pi =
3710                     rb_entry(n, struct ceph_pg_pool_info, node);
3711 
3712                 pi->was_full = __pool_full(pi);
3713         }
3714 }
3715 
3716 static bool pool_cleared_full(struct ceph_osd_client *osdc, s64 pool_id)
3717 {
3718         struct ceph_pg_pool_info *pi;
3719 
3720         pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
3721         if (!pi)
3722                 return false;
3723 
3724         return pi->was_full && !__pool_full(pi);
3725 }
3726 
3727 static enum calc_target_result
3728 recalc_linger_target(struct ceph_osd_linger_request *lreq)
3729 {
3730         struct ceph_osd_client *osdc = lreq->osdc;
3731         enum calc_target_result ct_res;
3732 
3733         ct_res = calc_target(osdc, &lreq->t, true);
3734         if (ct_res == CALC_TARGET_NEED_RESEND) {
3735                 struct ceph_osd *osd;
3736 
3737                 osd = lookup_create_osd(osdc, lreq->t.osd, true);
3738                 if (osd != lreq->osd) {
3739                         unlink_linger(lreq->osd, lreq);
3740                         link_linger(osd, lreq);
3741                 }
3742         }
3743 
3744         return ct_res;
3745 }
3746 
3747 /*
3748  * Requeue requests whose mapping to an OSD has changed.
3749  */
3750 static void scan_requests(struct ceph_osd *osd,
3751                           bool force_resend,
3752                           bool cleared_full,
3753                           bool check_pool_cleared_full,
3754                           struct rb_root *need_resend,
3755                           struct list_head *need_resend_linger)
3756 {
3757         struct ceph_osd_client *osdc = osd->o_osdc;
3758         struct rb_node *n;
3759         bool force_resend_writes;
3760 
3761         for (n = rb_first(&osd->o_linger_requests); n; ) {
3762                 struct ceph_osd_linger_request *lreq =
3763                     rb_entry(n, struct ceph_osd_linger_request, node);
3764                 enum calc_target_result ct_res;
3765 
3766                 n = rb_next(n); /* recalc_linger_target() */
3767 
3768                 dout("%s lreq %p linger_id %llu\n", __func__, lreq,
3769                      lreq->linger_id);
3770                 ct_res = recalc_linger_target(lreq);
3771                 switch (ct_res) {
3772                 case CALC_TARGET_NO_ACTION:
3773                         force_resend_writes = cleared_full ||
3774                             (check_pool_cleared_full &&
3775                              pool_cleared_full(osdc, lreq->t.base_oloc.pool));
3776                         if (!force_resend && !force_resend_writes)
3777                                 break;
3778 
3779                         /* fall through */
3780                 case CALC_TARGET_NEED_RESEND:
3781                         cancel_linger_map_check(lreq);
3782                         /*
3783                          * scan_requests() for the previous epoch(s)
3784                          * may have already added it to the list, since
3785                          * it's not unlinked here.
3786                          */
3787                         if (list_empty(&lreq->scan_item))
3788                                 list_add_tail(&lreq->scan_item, need_resend_linger);
3789                         break;
3790                 case CALC_TARGET_POOL_DNE:
3791                         list_del_init(&lreq->scan_item);
3792                         check_linger_pool_dne(lreq);
3793                         break;
3794                 }
3795         }
3796 
3797         for (n = rb_first(&osd->o_requests); n; ) {
3798                 struct ceph_osd_request *req =
3799                     rb_entry(n, struct ceph_osd_request, r_node);
3800                 enum calc_target_result ct_res;
3801 
3802                 n = rb_next(n); /* unlink_request(), check_pool_dne() */
3803 
3804                 dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
3805                 ct_res = calc_target(osdc, &req->r_t, false);
3806                 switch (ct_res) {
3807                 case CALC_TARGET_NO_ACTION:
3808                         force_resend_writes = cleared_full ||
3809                             (check_pool_cleared_full &&
3810                              pool_cleared_full(osdc, req->r_t.base_oloc.pool));
3811                         if (!force_resend &&
3812                             (!(req->r_flags & CEPH_OSD_FLAG_WRITE) ||
3813                              !force_resend_writes))
3814                                 break;
3815 
3816                         /* fall through */
3817                 case CALC_TARGET_NEED_RESEND:
3818                         cancel_map_check(req);
3819                         unlink_request(osd, req);
3820                         insert_request(need_resend, req);
3821                         break;
3822                 case CALC_TARGET_POOL_DNE:
3823                         check_pool_dne(req);
3824                         break;
3825                 }
3826         }
3827 }
3828 
3829 static int handle_one_map(struct ceph_osd_client *osdc,
3830                           void *p, void *end, bool incremental,
3831                           struct rb_root *need_resend,
3832                           struct list_head *need_resend_linger)
3833 {
3834         struct ceph_osdmap *newmap;
3835         struct rb_node *n;
3836         bool skipped_map = false;
3837         bool was_full;
3838 
3839         was_full = ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
3840         set_pool_was_full(osdc);
3841 
3842         if (incremental)
3843                 newmap = osdmap_apply_incremental(&p, end, osdc->osdmap);
3844         else
3845                 newmap = ceph_osdmap_decode(&p, end);
3846         if (IS_ERR(newmap))
3847                 return PTR_ERR(newmap);
3848 
3849         if (newmap != osdc->osdmap) {
3850                 /*
3851                  * Preserve ->was_full before destroying the old map.
3852                  * For pools that weren't in the old map, ->was_full
3853                  * should be false.
3854                  */
3855                 for (n = rb_first(&newmap->pg_pools); n; n = rb_next(n)) {
3856                         struct ceph_pg_pool_info *pi =
3857                             rb_entry(n, struct ceph_pg_pool_info, node);
3858                         struct ceph_pg_pool_info *old_pi;
3859 
3860                         old_pi = ceph_pg_pool_by_id(osdc->osdmap, pi->id);
3861                         if (old_pi)
3862                                 pi->was_full = old_pi->was_full;
3863                         else
3864                                 WARN_ON(pi->was_full);
3865                 }
3866 
3867                 if (osdc->osdmap->epoch &&
3868                     osdc->osdmap->epoch + 1 < newmap->epoch) {
3869                         WARN_ON(incremental);
3870                         skipped_map = true;
3871                 }
3872 
3873                 ceph_osdmap_destroy(osdc->osdmap);
3874                 osdc->osdmap = newmap;
3875         }
3876 
3877         was_full &= !ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
3878         scan_requests(&osdc->homeless_osd, skipped_map, was_full, true,
3879                       need_resend, need_resend_linger);
3880 
3881         for (n = rb_first(&osdc->osds); n; ) {
3882                 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
3883 
3884                 n = rb_next(n); /* close_osd() */
3885 
3886                 scan_requests(osd, skipped_map, was_full, true, need_resend,
3887                               need_resend_linger);
3888                 if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) ||
3889                     memcmp(&osd->o_con.peer_addr,
3890                            ceph_osd_addr(osdc->osdmap, osd->o_osd),
3891                            sizeof(struct ceph_entity_addr)))
3892                         close_osd(osd);
3893         }
3894 
3895         return 0;
3896 }
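
/*
 * A worked example of the epoch handling above, assuming we currently
 * have epoch 100:
 *
 *	incremental map, epoch 101:  applied on top, skipped_map stays
 *	                             false
 *	full map, epoch 103:         epochs 101-102 were never seen, so
 *	                             skipped_map = true
 *
 * skipped_map is fed to scan_requests() as force_resend, since
 * per-target state cannot be trusted across unseen epochs.
 */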
3897 
3898 static void kick_requests(struct ceph_osd_client *osdc,
3899                           struct rb_root *need_resend,
3900                           struct list_head *need_resend_linger)
3901 {
3902         struct ceph_osd_linger_request *lreq, *nlreq;
3903         enum calc_target_result ct_res;
3904         struct rb_node *n;
3905 
3906         /* make sure need_resend targets reflect latest map */
3907         for (n = rb_first(need_resend); n; ) {
3908                 struct ceph_osd_request *req =
3909                     rb_entry(n, struct ceph_osd_request, r_node);
3910 
3911                 n = rb_next(n);
3912 
3913                 if (req->r_t.epoch < osdc->osdmap->epoch) {
3914                         ct_res = calc_target(osdc, &req->r_t, false);
3915                         if (ct_res == CALC_TARGET_POOL_DNE) {
3916                                 erase_request(need_resend, req);
3917                                 check_pool_dne(req);
3918                         }
3919                 }
3920         }
3921 
3922         for (n = rb_first(need_resend); n; ) {
3923                 struct ceph_osd_request *req =
3924                     rb_entry(n, struct ceph_osd_request, r_node);
3925                 struct ceph_osd *osd;
3926 
3927                 n = rb_next(n);
3928                 erase_request(need_resend, req); /* before link_request() */
3929 
3930                 osd = lookup_create_osd(osdc, req->r_t.osd, true);
3931                 link_request(osd, req);
3932                 if (!req->r_linger) {
3933                         if (!osd_homeless(osd) && !req->r_t.paused)
3934                                 send_request(req);
3935                 } else {
3936                         cancel_linger_request(req);
3937                 }
3938         }
3939 
3940         list_for_each_entry_safe(lreq, nlreq, need_resend_linger, scan_item) {
3941                 if (!osd_homeless(lreq->osd))
3942                         send_linger(lreq);
3943 
3944                 list_del_init(&lreq->scan_item);
3945         }
3946 }
3947 
3948 /*
3949  * Process updated osd map.
3950  *
3951  * The message contains any number of incremental and full maps, normally
3952  * indicating some sort of topology change in the cluster.  Kick requests
3953  * off to different OSDs as needed.
3954  */
3955 void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
3956 {
3957         void *p = msg->front.iov_base;
3958         void *const end = p + msg->front.iov_len;
3959         u32 nr_maps, maplen;
3960         u32 epoch;
3961         struct ceph_fsid fsid;
3962         struct rb_root need_resend = RB_ROOT;
3963         LIST_HEAD(need_resend_linger);
3964         bool handled_incremental = false;
3965         bool was_pauserd, was_pausewr;
3966         bool pauserd, pausewr;
3967         int err;
3968 
3969         dout("%s have %u\n", __func__, osdc->osdmap->epoch);
3970         down_write(&osdc->lock);
3971 
3972         /* verify fsid */
3973         ceph_decode_need(&p, end, sizeof(fsid), bad);
3974         ceph_decode_copy(&p, &fsid, sizeof(fsid));
3975         if (ceph_check_fsid(osdc->client, &fsid) < 0)
3976                 goto bad;
3977 
3978         was_pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
3979         was_pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
3980                       ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
3981                       have_pool_full(osdc);
3982 
3983         /* incremental maps */
3984         ceph_decode_32_safe(&p, end, nr_maps, bad);
3985         dout(" %d inc maps\n", nr_maps);
3986         while (nr_maps > 0) {
3987                 ceph_decode_need(&p, end, 2*sizeof(u32), bad);
3988                 epoch = ceph_decode_32(&p);
3989                 maplen = ceph_decode_32(&p);
3990                 ceph_decode_need(&p, end, maplen, bad);
3991                 if (osdc->osdmap->epoch &&
3992                     osdc->osdmap->epoch + 1 == epoch) {
3993                         dout("applying incremental map %u len %d\n",
3994                              epoch, maplen);
3995                         err = handle_one_map(osdc, p, p + maplen, true,
3996                                              &need_resend, &need_resend_linger);
3997                         if (err)
3998                                 goto bad;
3999                         handled_incremental = true;
4000                 } else {
4001                         dout("ignoring incremental map %u len %d\n",
4002                              epoch, maplen);
4003                 }
4004                 p += maplen;
4005                 nr_maps--;
4006         }
4007         if (handled_incremental)
4008                 goto done;
4009 
4010         /* full maps */
4011         ceph_decode_32_safe(&p, end, nr_maps, bad);
4012         dout(" %d full maps\n", nr_maps);
4013         while (nr_maps) {
4014                 ceph_decode_need(&p, end, 2*sizeof(u32), bad);
4015                 epoch = ceph_decode_32(&p);
4016                 maplen = ceph_decode_32(&p);
4017                 ceph_decode_need(&p, end, maplen, bad);
4018                 if (nr_maps > 1) {
4019                         dout("skipping non-latest full map %u len %d\n",
4020                              epoch, maplen);
4021                 } else if (osdc->osdmap->epoch >= epoch) {
4022                         dout("skipping full map %u len %d, "
4023                              "older than our %u\n", epoch, maplen,
4024                              osdc->osdmap->epoch);
4025                 } else {
4026                         dout("taking full map %u len %d\n", epoch, maplen);
4027                         err = handle_one_map(osdc, p, p + maplen, false,
4028                                              &need_resend, &need_resend_linger);
4029                         if (err)
4030                                 goto bad;
4031                 }
4032                 p += maplen;
4033                 nr_maps--;
4034         }
4035 
4036 done:
4037         /*
4038          * subscribe to subsequent osdmap updates if we are full, to
4039          * ensure we find out when we are no longer full and can stop
4040          * returning ENOSPC.
4041          */
4042         pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
4043         pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
4044                   ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
4045                   have_pool_full(osdc);
4046         if (was_pauserd || was_pausewr || pauserd || pausewr ||
4047             osdc->osdmap->epoch < osdc->epoch_barrier)
4048                 maybe_request_map(osdc);
4049 
4050         kick_requests(osdc, &need_resend, &need_resend_linger);
4051 
4052         ceph_osdc_abort_on_full(osdc);
4053         ceph_monc_got_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
4054                           osdc->osdmap->epoch);
4055         up_write(&osdc->lock);
4056         wake_up_all(&osdc->client->auth_wq);
4057         return;
4058 
4059 bad:
4060         pr_err("osdc handle_map corrupt msg\n");
4061         ceph_msg_dump(msg);
4062         up_write(&osdc->lock);
4063 }
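
/*
 * For reference, the MOSDMap front consumed above (layout inferred
 * from the decode steps):
 *
 *	ceph_fsid fsid
 *	u32 nr_inc_maps,  { u32 epoch, u32 len, u8 data[len] } ...
 *	u32 nr_full_maps, { u32 epoch, u32 len, u8 data[len] } ...
 *
 * An incremental is applied only if it is exactly our epoch + 1;
 * otherwise only the newest full map that is newer than ours is taken.
 */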
4064 
4065 /*
4066  * Resubmit requests pending on the given osd.
4067  */
4068 static void kick_osd_requests(struct ceph_osd *osd)
4069 {
4070         struct rb_node *n;
4071 
4072         clear_backoffs(osd);
4073 
4074         for (n = rb_first(&osd->o_requests); n; ) {
4075                 struct ceph_osd_request *req =
4076                     rb_entry(n, struct ceph_osd_request, r_node);
4077 
4078                 n = rb_next(n); /* cancel_linger_request() */
4079 
4080                 if (!req->r_linger) {
4081                         if (!req->r_t.paused)
4082                                 send_request(req);
4083                 } else {
4084                         cancel_linger_request(req);
4085                 }
4086         }
4087         for (n = rb_first(&osd->o_linger_requests); n; n = rb_next(n)) {
4088                 struct ceph_osd_linger_request *lreq =
4089                     rb_entry(n, struct ceph_osd_linger_request, node);
4090 
4091                 send_linger(lreq);
4092         }
4093 }
4094 
4095 /*
4096  * If the osd connection drops, we need to resubmit all requests.
4097  */
4098 static void osd_fault(struct ceph_connection *con)
4099 {
4100         struct ceph_osd *osd = con->private;
4101         struct ceph_osd_client *osdc = osd->o_osdc;
4102 
4103         dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
4104 
4105         down_write(&osdc->lock);
4106         if (!osd_registered(osd)) {
4107                 dout("%s osd%d unknown\n", __func__, osd->o_osd);
4108                 goto out_unlock;
4109         }
4110 
4111         if (!reopen_osd(osd))
4112                 kick_osd_requests(osd);
4113         maybe_request_map(osdc);
4114 
4115 out_unlock:
4116         up_write(&osdc->lock);
4117 }
4118 
4119 struct MOSDBackoff {
4120         struct ceph_spg spgid;
4121         u32 map_epoch;
4122         u8 op;
4123         u64 id;
4124         struct ceph_hobject_id *begin;
4125         struct ceph_hobject_id *end;
4126 };
4127 
4128 static int decode_MOSDBackoff(const struct ceph_msg *msg, struct MOSDBackoff *m)
4129 {
4130         void *p = msg->front.iov_base;
4131         void *const end = p + msg->front.iov_len;
4132         u8 struct_v;
4133         u32 struct_len;
4134         int ret;
4135 
4136         ret = ceph_start_decoding(&p, end, 1, "spg_t", &struct_v, &struct_len);
4137         if (ret)
4138                 return ret;
4139 
4140         ret = ceph_decode_pgid(&p, end, &m->spgid.pgid);
4141         if (ret)
4142                 return ret;
4143 
4144         ceph_decode_8_safe(&p, end, m->spgid.shard, e_inval);
4145         ceph_decode_32_safe(&p, end, m->map_epoch, e_inval);
4146         ceph_decode_8_safe(&p, end, m->op, e_inval);
4147         ceph_decode_64_safe(&p, end, m->id, e_inval);
4148 
4149         m->begin = kzalloc(sizeof(*m->begin), GFP_NOIO);
4150         if (!m->begin)
4151                 return -ENOMEM;
4152 
4153         ret = decode_hoid(&p, end, m->begin);
4154         if (ret) {
4155                 free_hoid(m->begin);
4156                 return ret;
4157         }
4158 
4159         m->end = kzalloc(sizeof(*m->end), GFP_NOIO);
4160         if (!m->end) {
4161                 free_hoid(m->begin);
4162                 return -ENOMEM;
4163         }
4164 
4165         ret = decode_hoid(&p, end, m->end);
4166         if (ret) {
4167                 free_hoid(m->begin);
4168                 free_hoid(m->end);
4169                 return ret;
4170         }
4171 
4172         return 0;
4173 
4174 e_inval:
4175         return -EINVAL;
4176 }
4177 
4178 static struct ceph_msg *create_backoff_message(
4179                                 const struct ceph_osd_backoff *backoff,
4180                                 u32 map_epoch)
4181 {
4182         struct ceph_msg *msg;
4183         void *p, *end;
4184         int msg_size;
4185 
4186         msg_size = CEPH_ENCODING_START_BLK_LEN +
4187                         CEPH_PGID_ENCODING_LEN + 1; /* spgid */
4188         msg_size += 4 + 1 + 8; /* map_epoch, op, id */
4189         msg_size += CEPH_ENCODING_START_BLK_LEN +
4190                         hoid_encoding_size(backoff->begin);
4191         msg_size += CEPH_ENCODING_START_BLK_LEN +
4192                         hoid_encoding_size(backoff->end);
4193 
4194         msg = ceph_msg_new(CEPH_MSG_OSD_BACKOFF, msg_size, GFP_NOIO, true);
4195         if (!msg)
4196                 return NULL;
4197 
4198         p = msg->front.iov_base;
4199         end = p + msg->front_alloc_len;
4200 
4201         encode_spgid(&p, &backoff->spgid);
4202         ceph_encode_32(&p, map_epoch);
4203         ceph_encode_8(&p, CEPH_OSD_BACKOFF_OP_ACK_BLOCK);
4204         ceph_encode_64(&p, backoff->id);
4205         encode_hoid(&p, end, backoff->begin);
4206         encode_hoid(&p, end, backoff->end);
4207         BUG_ON(p != end);
4208 
4209         msg->front.iov_len = p - msg->front.iov_base;
4210         msg->hdr.version = cpu_to_le16(1); /* MOSDBackoff v1 */
4211         msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
4212 
4213         return msg;
4214 }
4215 
4216 static void handle_backoff_block(struct ceph_osd *osd, struct MOSDBackoff *m)
4217 {
4218         struct ceph_spg_mapping *spg;
4219         struct ceph_osd_backoff *backoff;
4220         struct ceph_msg *msg;
4221 
4222         dout("%s osd%d spgid %llu.%xs%d id %llu\n", __func__, osd->o_osd,
4223              m->spgid.pgid.pool, m->spgid.pgid.seed, m->spgid.shard, m->id);
4224 
4225         spg = lookup_spg_mapping(&osd->o_backoff_mappings, &m->spgid);
4226         if (!spg) {
4227                 spg = alloc_spg_mapping();
4228                 if (!spg) {
4229                         pr_err("%s failed to allocate spg\n", __func__);
4230                         return;
4231                 }
4232                 spg->spgid = m->spgid; /* struct */
4233                 insert_spg_mapping(&osd->o_backoff_mappings, spg);
4234         }
4235 
4236         backoff = alloc_backoff();
4237         if (!backoff) {
4238                 pr_err("%s failed to allocate backoff\n", __func__);
4239                 return;
4240         }
4241         backoff->spgid = m->spgid; /* struct */
4242         backoff->id = m->id;
4243         backoff->begin = m->begin;
4244         m->begin = NULL; /* backoff now owns this */
4245         backoff->end = m->end;
4246         m->end = NULL;   /* ditto */
4247 
4248         insert_backoff(&spg->backoffs, backoff);
4249         insert_backoff_by_id(&osd->o_backoffs_by_id, backoff);
4250 
4251         /*
4252          * Ack with original backoff's epoch so that the OSD can
4253          * discard this if there was a PG split.
4254          */
4255         msg = create_backoff_message(backoff, m->map_epoch);
4256         if (!msg) {
4257                 pr_err("%s failed to allocate msg\n", __func__);
4258                 return;
4259         }
4260         ceph_con_send(&osd->o_con, msg);
4261 }
4262 
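/*
 * Note: backoff intervals are half-open -- an object is contained if
 * hoid == begin or begin < hoid < end.
 */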
4263 static bool target_contained_by(const struct ceph_osd_request_target *t,
4264                                 const struct ceph_hobject_id *begin,
4265                                 const struct ceph_hobject_id *end)
4266 {
4267         struct ceph_hobject_id hoid;
4268         int cmp;
4269 
4270         hoid_fill_from_target(&hoid, t);
4271         cmp = hoid_compare(&hoid, begin);
4272         return !cmp || (cmp > 0 && hoid_compare(&hoid, end) < 0);
4273 }
4274 
4275 static void handle_backoff_unblock(struct ceph_osd *osd,
4276                                    const struct MOSDBackoff *m)
4277 {
4278         struct ceph_spg_mapping *spg;
4279         struct ceph_osd_backoff *backoff;
4280         struct rb_node *n;
4281 
4282         dout("%s osd%d spgid %llu.%xs%d id %llu\n", __func__, osd->o_osd,
4283              m->spgid.pgid.pool, m->spgid.pgid.seed, m->spgid.shard, m->id);
4284 
4285         backoff = lookup_backoff_by_id(&osd->o_backoffs_by_id, m->id);
4286         if (!backoff) {
4287                 pr_err("%s osd%d spgid %llu.%xs%d id %llu backoff dne\n",
4288                        __func__, osd->o_osd, m->spgid.pgid.pool,
4289                        m->spgid.pgid.seed, m->spgid.shard, m->id);
4290                 return;
4291         }
4292 
4293         if (hoid_compare(backoff->begin, m->begin) &&
4294             hoid_compare(backoff->end, m->end)) {
4295                 pr_err("%s osd%d spgid %llu.%xs%d id %llu bad range?\n",
4296                        __func__, osd->o_osd, m->spgid.pgid.pool,
4297                        m->spgid.pgid.seed, m->spgid.shard, m->id);
4298                 /* unblock it anyway... */
4299         }
4300 
4301         spg = lookup_spg_mapping(&osd->o_backoff_mappings, &backoff->spgid);
4302         BUG_ON(!spg);
4303 
4304         erase_backoff(&spg->backoffs, backoff);
4305         erase_backoff_by_id(&osd->o_backoffs_by_id, backoff);
4306         free_backoff(backoff);
4307 
4308         if (RB_EMPTY_ROOT(&spg->backoffs)) {
4309                 erase_spg_mapping(&osd->o_backoff_mappings, spg);
4310                 free_spg_mapping(spg);
4311         }
4312 
4313         for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {
4314                 struct ceph_osd_request *req =
4315                     rb_entry(n, struct ceph_osd_request, r_node);
4316 
4317                 if (!ceph_spg_compare(&req->r_t.spgid, &m->spgid)) {
4318                         /*
4319                          * Match against @m, not @backoff -- the PG may
4320                          * have split on the OSD.
4321                          */
4322                         if (target_contained_by(&req->r_t, m->begin, m->end)) {
4323                                 /*
4324                                  * If no other installed backoff applies,
4325                                  * resend.
4326                                  */
4327                                 send_request(req);
4328                         }
4329                 }
4330         }
4331 }
4332 
4333 static void handle_backoff(struct ceph_osd *osd, struct ceph_msg *msg)
4334 {
4335         struct ceph_osd_client *osdc = osd->o_osdc;
4336         struct MOSDBackoff m;
4337         int ret;
4338 
4339         down_read(&osdc->lock);
4340         if (!osd_registered(osd)) {
4341                 dout("%s osd%d unknown\n", __func__, osd->o_osd);
4342                 up_read(&osdc->lock);
4343                 return;
4344         }
4345         WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num));
4346 
4347         mutex_lock(&osd->lock);
4348         ret = decode_MOSDBackoff(msg, &m);
4349         if (ret) {
4350                 pr_err("failed to decode MOSDBackoff: %d\n", ret);
4351                 ceph_msg_dump(msg);
4352                 goto out_unlock;
4353         }
4354 
4355         switch (m.op) {
4356         case CEPH_OSD_BACKOFF_OP_BLOCK:
4357                 handle_backoff_block(osd, &m);
4358                 break;
4359         case CEPH_OSD_BACKOFF_OP_UNBLOCK:
4360                 handle_backoff_unblock(osd, &m);
4361                 break;
4362         default:
4363                 pr_err("%s osd%d unknown op %d\n", __func__, osd->o_osd, m.op);
4364         }
4365 
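        /*
         * If handle_backoff_block() took ownership of m.begin and
         * m.end, it NULLed them out; free_hoid() ignores NULL, so
         * these frees are safe for both ops.
         */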
4366         free_hoid(m.begin);
4367         free_hoid(m.end);
4368 
4369 out_unlock:
4370         mutex_unlock(&osd->lock);
4371         up_read(&osdc->lock);
4372 }
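
/*
 * Installed backoffs are consulted in should_plug_request(): a request
 * whose target falls within a blocked range isn't sent until the
 * matching unblock arrives and resends it.
 */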
4373 
4374 /*
4375  * Process osd watch notifications
4376  */
4377 static void handle_watch_notify(struct ceph_osd_client *osdc,
4378                                 struct ceph_msg *msg)
4379 {
4380         void *p = msg->front.iov_base;
4381         void *const end = p + msg->front.iov_len;
4382         struct ceph_osd_linger_request *lreq;
4383         struct linger_work *lwork;
4384         u8 proto_ver, opcode;
4385         u64 cookie, notify_id;
4386         u64 notifier_id = 0;
4387         s32 return_code = 0;
4388         void *payload = NULL;
4389         u32 payload_len = 0;
4390 
4391         ceph_decode_8_safe(&p, end, proto_ver, bad);
4392         ceph_decode_8_safe(&p, end, opcode, bad);
4393         ceph_decode_64_safe(&p, end, cookie, bad);
4394         p += 8; /* skip ver */
4395         ceph_decode_64_safe(&p, end, notify_id, bad);
4396 
4397         if (proto_ver >= 1) {
4398                 ceph_decode_32_safe(&p, end, payload_len, bad);
4399                 ceph_decode_need(&p, end, payload_len, bad);
4400                 payload = p;
4401                 p += payload_len;
4402         }
4403 
4404         if (le16_to_cpu(msg->hdr.version) >= 2)
4405                 ceph_decode_32_safe(&p, end, return_code, bad);
4406 
4407         if (le16_to_cpu(msg->hdr.version) >= 3)
4408                 ceph_decode_64_safe(&p, end, notifier_id, bad);
4409 
4410         down_read(&osdc->lock);
4411         lreq = lookup_linger_osdc(&osdc->linger_requests, cookie);
4412         if (!lreq) {
4413                 dout("%s opcode %d cookie %llu dne\n", __func__, opcode,
4414                      cookie);
4415                 goto out_unlock_osdc;
4416         }
4417 
4418         mutex_lock(&lreq->lock);
4419         dout("%s opcode %d cookie %llu lreq %p is_watch %d\n", __func__,
4420              opcode, cookie, lreq, lreq->is_watch);
4421         if (opcode == CEPH_WATCH_EVENT_DISCONNECT) {
4422                 if (!lreq->last_error) {
4423                         lreq->last_error = -ENOTCONN;
4424                         queue_watch_error(lreq);
4425                 }
4426         } else if (!lreq->is_watch) {
4427                 /* CEPH_WATCH_EVENT_NOTIFY_COMPLETE */
4428                 if (lreq->notify_id && lreq->notify_id != notify_id) {
4429                         dout("lreq %p notify_id %llu != %llu, ignoring\n", lreq,
4430                              lreq->notify_id, notify_id);
4431                 } else if (!completion_done(&lreq->notify_finish_wait)) {
4432                         struct ceph_msg_data *data =
4433                             msg->num_data_items ? &msg->data[0] : NULL;
4434 
4435                         if (data) {
4436                                 if (lreq->preply_pages) {
4437                                         WARN_ON(data->type !=
4438                                                         CEPH_MSG_DATA_PAGES);
4439                                         *lreq->preply_pages = data->pages;
4440                                         *lreq->preply_len = data->length;
4441                                         data->own_pages = false;
4442                                 }
4443                         }
4444                         lreq->notify_finish_error = return_code;
4445                         complete_all(&lreq->notify_finish_wait);
4446                 }
4447         } else {
4448                 /* CEPH_WATCH_EVENT_NOTIFY */
4449                 lwork = lwork_alloc(lreq, do_watch_notify);
4450                 if (!lwork) {
4451                         pr_err("failed to allocate notify-lwork\n");
4452                         goto out_unlock_lreq;
4453                 }
4454 
4455                 lwork->notify.notify_id = notify_id;
4456                 lwork->notify.notifier_id = notifier_id;
4457                 lwork->notify.payload = payload;
4458                 lwork->notify.payload_len = payload_len;
4459                 lwork->notify.msg = ceph_msg_get(msg);
4460                 lwork_queue(lwork);
4461         }
4462 
4463 out_unlock_lreq:
4464         mutex_unlock(&lreq->lock);
4465 out_unlock_osdc:
4466         up_read(&osdc->lock);
4467         return;
4468 
4469 bad:
4470         pr_err("osdc handle_watch_notify corrupt msg\n");
4471 }
4472 
4473 /*
4474  * Register request, send initial attempt.  @nofail is currently unused.
4475  */
4476 int ceph_osdc_start_request(struct ceph_osd_client *osdc,
4477                             struct ceph_osd_request *req,
4478                             bool nofail)
4479 {
4480         down_read(&osdc->lock);
4481         submit_request(req, false);
4482         up_read(&osdc->lock);
4483 
4484         return 0;
4485 }
4486 EXPORT_SYMBOL(ceph_osdc_start_request);
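
/*
 * Illustrative sketch (not part of the original file): the canonical
 * submit/wait/put lifecycle, following the same pattern the helpers
 * below use.  example_sync_stat() and the STAT op choice are for
 * illustration only.
 */
static int example_sync_stat(struct ceph_osd_client *osdc,
                             struct ceph_object_id *oid,
                             struct ceph_object_locator *oloc)
{
        struct ceph_osd_request *req;
        int ret;

        req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
        if (!req)
                return -ENOMEM;

        ceph_oid_copy(&req->r_base_oid, oid);
        ceph_oloc_copy(&req->r_base_oloc, oloc);
        req->r_flags = CEPH_OSD_FLAG_READ;
        osd_req_op_init(req, 0, CEPH_OSD_OP_STAT, 0);

        ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
        if (ret)
                goto out_put_req;

        ceph_osdc_start_request(osdc, req, false);
        ret = ceph_osdc_wait_request(osdc, req); /* cancels if interrupted */

out_put_req:
        ceph_osdc_put_request(req);
        return ret;
}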
4487 
4488 /*
4489  * Unregister a registered request.  The request is not completed:
4490  * ->r_result isn't set and __complete_request() isn't called.
4491  */
4492 void ceph_osdc_cancel_request(struct ceph_osd_request *req)
4493 {
4494         struct ceph_osd_client *osdc = req->r_osdc;
4495 
4496         down_write(&osdc->lock);
4497         if (req->r_osd)
4498                 cancel_request(req);
4499         up_write(&osdc->lock);
4500 }
4501 EXPORT_SYMBOL(ceph_osdc_cancel_request);
4502 
4503 /*
4504  * @timeout: in jiffies, 0 means "wait forever"
4505  */
4506 static int wait_request_timeout(struct ceph_osd_request *req,
4507                                 unsigned long timeout)
4508 {
4509         long left;
4510 
4511         dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
4512         left = wait_for_completion_killable_timeout(&req->r_completion,
4513                                                 ceph_timeout_jiffies(timeout));
4514         if (left <= 0) {
4515                 left = left ?: -ETIMEDOUT;
4516                 ceph_osdc_cancel_request(req);
4517         } else {
4518                 left = req->r_result; /* completed */
4519         }
4520 
4521         return left;
4522 }
4523 
4524 /*
4525  * wait for a request to complete
4526  */
4527 int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
4528                            struct ceph_osd_request *req)
4529 {
4530         return wait_request_timeout(req, 0);
4531 }
4532 EXPORT_SYMBOL(ceph_osdc_wait_request);
4533 
4534 /*
4535  * sync - wait for all in-flight writes to flush.  Newer tids can't starve us.
4536  */
4537 void ceph_osdc_sync(struct ceph_osd_client *osdc)
4538 {
4539         struct rb_node *n, *p;
4540         u64 last_tid = atomic64_read(&osdc->last_tid);
4541 
4542 again:
4543         down_read(&osdc->lock);
4544         for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
4545                 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
4546 
4547                 mutex_lock(&osd->lock);
4548                 for (p = rb_first(&osd->o_requests); p; p = rb_next(p)) {
4549                         struct ceph_osd_request *req =
4550                             rb_entry(p, struct ceph_osd_request, r_node);
4551 
4552                         if (req->r_tid > last_tid)
4553                                 break;
4554 
4555                         if (!(req->r_flags & CEPH_OSD_FLAG_WRITE))
4556                                 continue;
4557 
4558                         ceph_osdc_get_request(req);
4559                         mutex_unlock(&osd->lock);
4560                         up_read(&osdc->lock);
4561                         dout("%s waiting on req %p tid %llu last_tid %llu\n",
4562                              __func__, req, req->r_tid, last_tid);
4563                         wait_for_completion(&req->r_completion);
4564                         ceph_osdc_put_request(req);
4565                         goto again;
4566                 }
4567 
4568                 mutex_unlock(&osd->lock);
4569         }
4570 
4571         up_read(&osdc->lock);
4572         dout("%s done last_tid %llu\n", __func__, last_tid);
4573 }
4574 EXPORT_SYMBOL(ceph_osdc_sync);
4575 
4576 static struct ceph_osd_request *
4577 alloc_linger_request(struct ceph_osd_linger_request *lreq)
4578 {
4579         struct ceph_osd_request *req;
4580 
4581         req = ceph_osdc_alloc_request(lreq->osdc, NULL, 1, false, GFP_NOIO);
4582         if (!req)
4583                 return NULL;
4584 
4585         ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
4586         ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
4587         return req;
4588 }
4589 
4590 static struct ceph_osd_request *
4591 alloc_watch_request(struct ceph_osd_linger_request *lreq, u8 watch_opcode)
4592 {
4593         struct ceph_osd_request *req;
4594 
4595         req = alloc_linger_request(lreq);
4596         if (!req)
4597                 return NULL;
4598 
4599         /*
4600          * Pass 0 for cookie because we don't know it yet, it will be
4601          * filled in by linger_submit().
4602          */
4603         osd_req_op_watch_init(req, 0, 0, watch_opcode);
4604 
4605         if (ceph_osdc_alloc_messages(req, GFP_NOIO)) {
4606                 ceph_osdc_put_request(req);
4607                 return NULL;
4608         }
4609 
4610         return req;
4611 }
4612 
4613 /*
4614  * Returns a handle, caller owns a ref.
4615  */
4616 struct ceph_osd_linger_request *
4617 ceph_osdc_watch(struct ceph_osd_client *osdc,
4618                 struct ceph_object_id *oid,
4619                 struct ceph_object_locator *oloc,
4620                 rados_watchcb2_t wcb,
4621                 rados_watcherrcb_t errcb,
4622                 void *data)
4623 {
4624         struct ceph_osd_linger_request *lreq;
4625         int ret;
4626 
4627         lreq = linger_alloc(osdc);
4628         if (!lreq)
4629                 return ERR_PTR(-ENOMEM);
4630 
4631         lreq->is_watch = true;
4632         lreq->wcb = wcb;
4633         lreq->errcb = errcb;
4634         lreq->data = data;
4635         lreq->watch_valid_thru = jiffies;
4636 
4637         ceph_oid_copy(&lreq->t.base_oid, oid);
4638         ceph_oloc_copy(&lreq->t.base_oloc, oloc);
4639         lreq->t.flags = CEPH_OSD_FLAG_WRITE;
4640         ktime_get_real_ts64(&lreq->mtime);
4641 
4642         lreq->reg_req = alloc_watch_request(lreq, CEPH_OSD_WATCH_OP_WATCH);
4643         if (!lreq->reg_req) {
4644                 ret = -ENOMEM;
4645                 goto err_put_lreq;
4646         }
4647 
4648         lreq->ping_req = alloc_watch_request(lreq, CEPH_OSD_WATCH_OP_PING);
4649         if (!lreq->ping_req) {
4650                 ret = -ENOMEM;
4651                 goto err_put_lreq;
4652         }
4653 
4654         linger_submit(lreq);
4655         ret = linger_reg_commit_wait(lreq);
4656         if (ret) {
4657                 linger_cancel(lreq);
4658                 goto err_put_lreq;
4659         }
4660 
4661         return lreq;
4662 
4663 err_put_lreq:
4664         linger_put(lreq);
4665         return ERR_PTR(ret);
4666 }
4667 EXPORT_SYMBOL(ceph_osdc_watch);
4668 
4669 /*
4670  * Releases a ref.
4671  *
4672  * Times out after mount_timeout to preserve rbd unmap behaviour
4673  * introduced in 2894e1d76974 ("rbd: timeout watch teardown on unmap
4674  * with mount_timeout").
4675  */
4676 int ceph_osdc_unwatch(struct ceph_osd_client *osdc,
4677                       struct ceph_osd_linger_request *lreq)
4678 {
4679         struct ceph_options *opts = osdc->client->options;
4680         struct ceph_osd_request *req;
4681         int ret;
4682 
4683         req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
4684         if (!req)
4685                 return -ENOMEM;
4686 
4687         ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
4688         ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
4689         req->r_flags = CEPH_OSD_FLAG_WRITE;
4690         ktime_get_real_ts64(&req->r_mtime);
4691         osd_req_op_watch_init(req, 0, lreq->linger_id,
4692                               CEPH_OSD_WATCH_OP_UNWATCH);
4693 
4694         ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
4695         if (ret)
4696                 goto out_put_req;
4697 
4698         ceph_osdc_start_request(osdc, req, false);
4699         linger_cancel(lreq);
4700         linger_put(lreq);
4701         ret = wait_request_timeout(req, opts->mount_timeout);
4702 
4703 out_put_req:
4704         ceph_osdc_put_request(req);
4705         return ret;
4706 }
4707 EXPORT_SYMBOL(ceph_osdc_unwatch);
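
/*
 * Illustrative sketch (not part of the original file): wiring up a
 * watch.  example_watch_cb()/example_watch_errcb() are hypothetical;
 * a real watcher would typically ack each notify from its callback
 * via ceph_osdc_notify_ack() (below) so the notifier isn't left
 * waiting for the full timeout.
 */
static void example_watch_cb(void *arg, u64 notify_id, u64 cookie,
                             u64 notifier_id, void *data, size_t data_len)
{
        /* runs from osdc->notify_wq; process payload, then ack */
}

static void example_watch_errcb(void *arg, u64 cookie, int err)
{
        /* watch is broken; unwatch and re-watch to recover */
}

static int example_watch(struct ceph_osd_client *osdc,
                         struct ceph_object_id *oid,
                         struct ceph_object_locator *oloc)
{
        struct ceph_osd_linger_request *lreq;

        lreq = ceph_osdc_watch(osdc, oid, oloc, example_watch_cb,
                               example_watch_errcb, NULL);
        if (IS_ERR(lreq))
                return PTR_ERR(lreq);

        /* ... watch established; later, tear it down: */
        return ceph_osdc_unwatch(osdc, lreq);
}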
4708 
4709 static int osd_req_op_notify_ack_init(struct ceph_osd_request *req, int which,
4710                                       u64 notify_id, u64 cookie, void *payload,
4711                                       u32 payload_len)
4712 {
4713         struct ceph_osd_req_op *op;
4714         struct ceph_pagelist *pl;
4715         int ret;
4716 
4717         op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY_ACK, 0);
4718 
4719         pl = ceph_pagelist_alloc(GFP_NOIO);
4720         if (!pl)
4721                 return -ENOMEM;
4722 
4723         ret = ceph_pagelist_encode_64(pl, notify_id);
4724         ret |= ceph_pagelist_encode_64(pl, cookie);
4725         if (payload) {
4726                 ret |= ceph_pagelist_encode_32(pl, payload_len);
4727                 ret |= ceph_pagelist_append(pl, payload, payload_len);
4728         } else {
4729                 ret |= ceph_pagelist_encode_32(pl, 0);
4730         }
4731         if (ret) {
4732                 ceph_pagelist_release(pl);
4733                 return -ENOMEM;
4734         }
4735 
4736         ceph_osd_data_pagelist_init(&op->notify_ack.request_data, pl);
4737         op->indata_len = pl->length;
4738         return 0;
4739 }
4740 
4741 int ceph_osdc_notify_ack(struct ceph_osd_client *osdc,
4742                          struct ceph_object_id *oid,
4743                          struct ceph_object_locator *oloc,
4744                          u64 notify_id,
4745                          u64 cookie,
4746                          void *payload,
4747                          u32 payload_len)
4748 {
4749         struct ceph_osd_request *req;
4750         int ret;
4751 
4752         req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
4753         if (!req)
4754                 return -ENOMEM;
4755 
4756         ceph_oid_copy(&req->r_base_oid, oid);
4757         ceph_oloc_copy(&req->r_base_oloc, oloc);
4758         req->r_flags = CEPH_OSD_FLAG_READ;
4759 
4760         ret = osd_req_op_notify_ack_init(req, 0, notify_id, cookie, payload,
4761                                          payload_len);
4762         if (ret)
4763                 goto out_put_req;
4764 
4765         ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
4766         if (ret)
4767                 goto out_put_req;
4768 
4769         ceph_osdc_start_request(osdc, req, false);
4770         ret = ceph_osdc_wait_request(osdc, req);
4771 
4772 out_put_req:
4773         ceph_osdc_put_request(req);
4774         return ret;
4775 }
4776 EXPORT_SYMBOL(ceph_osdc_notify_ack);
4777 
4778 static int osd_req_op_notify_init(struct ceph_osd_request *req, int which,
4779                                   u64 cookie, u32 prot_ver, u32 timeout,
4780                                   void *payload, u32 payload_len)
4781 {
4782         struct ceph_osd_req_op *op;
4783         struct ceph_pagelist *pl;
4784         int ret;
4785 
4786         op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY, 0);
4787         op->notify.cookie = cookie;
4788 
4789         pl = ceph_pagelist_alloc(GFP_NOIO);
4790         if (!pl)
4791                 return -ENOMEM;
4792 
4793         ret = ceph_pagelist_encode_32(pl, 1); /* prot_ver */
4794         ret |= ceph_pagelist_encode_32(pl, timeout);
4795         ret |= ceph_pagelist_encode_32(pl, payload_len);
4796         ret |= ceph_pagelist_append(pl, payload, payload_len);
4797         if (ret) {
4798                 ceph_pagelist_release(pl);
4799                 return -ENOMEM;
4800         }
4801 
4802         ceph_osd_data_pagelist_init(&op->notify.request_data, pl);
4803         op->indata_len = pl->length;
4804         return 0;
4805 }
4806 
4807 /*
4808  * @timeout: in seconds
4809  *
4810  * @preply_{pages,len} are initialized both on success and error.
4811  * The caller is responsible for:
4812  *
4813  *     ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len))
4814  */
4815 int ceph_osdc_notify(struct ceph_osd_client *osdc,
4816                      struct ceph_object_id *oid,
4817                      struct ceph_object_locator *oloc,
4818                      void *payload,
4819                      u32 payload_len,
4820                      u32 timeout,
4821                      struct page ***preply_pages,
4822                      size_t *preply_len)
4823 {
4824         struct ceph_osd_linger_request *lreq;
4825         struct page **pages;
4826         int ret;
4827 
4828         WARN_ON(!timeout);
4829         if (preply_pages) {
4830                 *preply_pages = NULL;
4831                 *preply_len = 0;
4832         }
4833 
4834         lreq = linger_alloc(osdc);
4835         if (!lreq)
4836                 return -ENOMEM;
4837 
4838         lreq->preply_pages = preply_pages;
4839         lreq->preply_len = preply_len;
4840 
4841         ceph_oid_copy(&lreq->t.base_oid, oid);
4842         ceph_oloc_copy(&lreq->t.base_oloc, oloc);
4843         lreq->t.flags = CEPH_OSD_FLAG_READ;
4844 
4845         lreq->reg_req = alloc_linger_request(lreq);
4846         if (!lreq->reg_req) {
4847                 ret = -ENOMEM;
4848                 goto out_put_lreq;
4849         }
4850 
4851         /*
4852          * Pass 0 for cookie because we don't know it yet, it will be
4853          * filled in by linger_submit().
4854          */
4855         ret = osd_req_op_notify_init(lreq->reg_req, 0, 0, 1, timeout,
4856                                      payload, payload_len);
4857         if (ret)
4858                 goto out_put_lreq;
4859 
4860         /* for notify_id */
4861         pages = ceph_alloc_page_vector(1, GFP_NOIO);
4862         if (IS_ERR(pages)) {
4863                 ret = PTR_ERR(pages);
4864                 goto out_put_lreq;
4865         }
4866         ceph_osd_data_pages_init(osd_req_op_data(lreq->reg_req, 0, notify,
4867                                                  response_data),
4868                                  pages, PAGE_SIZE, 0, false, true);
4869 
4870         ret = ceph_osdc_alloc_messages(lreq->reg_req, GFP_NOIO);
4871         if (ret)
4872                 goto out_put_lreq;
4873 
4874         linger_submit(lreq);
4875         ret = linger_reg_commit_wait(lreq);
4876         if (!ret)
4877                 ret = linger_notify_finish_wait(lreq);
4878         else
4879                 dout("lreq %p failed to initiate notify %d\n", lreq, ret);
4880 
4881         linger_cancel(lreq);
4882 out_put_lreq:
4883         linger_put(lreq);
4884         return ret;
4885 }
4886 EXPORT_SYMBOL(ceph_osdc_notify);
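
/*
 * Illustrative sketch (not part of the original file): sending a
 * notify and honouring the reply-pages contract documented above.
 * The payload and 10s timeout are made up for the example.
 */
static int example_notify(struct ceph_osd_client *osdc,
                          struct ceph_object_id *oid,
                          struct ceph_object_locator *oloc)
{
        struct page **reply_pages = NULL;
        size_t reply_len = 0;
        char payload[] = "ping";
        int ret;

        ret = ceph_osdc_notify(osdc, oid, oloc, payload, sizeof(payload),
                               10, &reply_pages, &reply_len);
        if (reply_pages)
                ceph_release_page_vector(reply_pages,
                                         calc_pages_for(0, reply_len));
        return ret;
}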
4887 
4888 /*
4889  * Return the number of milliseconds since the watch was last
4890  * confirmed, or an error.  If there is an error, the watch is no
4891  * longer valid, and should be destroyed with ceph_osdc_unwatch().
4892  */
4893 int ceph_osdc_watch_check(struct ceph_osd_client *osdc,
4894                           struct ceph_osd_linger_request *lreq)
4895 {
4896         unsigned long stamp, age;
4897         int ret;
4898 
4899         down_read(&osdc->lock);
4900         mutex_lock(&lreq->lock);
4901         stamp = lreq->watch_valid_thru;
4902         if (!list_empty(&lreq->pending_lworks)) {
4903                 struct linger_work *lwork =
4904                     list_first_entry(&lreq->pending_lworks,
4905                                      struct linger_work,
4906                                      pending_item);
4907 
4908                 if (time_before(lwork->queued_stamp, stamp))
4909                         stamp = lwork->queued_stamp;
4910         }
4911         age = jiffies - stamp;
4912         dout("%s lreq %p linger_id %llu age %lu last_error %d\n", __func__,
4913              lreq, lreq->linger_id, age, lreq->last_error);
4914         /* we are truncating to msecs, so return a safe upper bound */
4915         ret = lreq->last_error ?: 1 + jiffies_to_msecs(age);
4916 
4917         mutex_unlock(&lreq->lock);
4918         up_read(&osdc->lock);
4919         return ret;
4920 }
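
/*
 * Illustrative sketch (not part of the original file): a periodic
 * liveness check in the style of rbd's watch keepalive.  The 30s
 * threshold is arbitrary.
 */
static bool example_watch_is_fresh(struct ceph_osd_client *osdc,
                                   struct ceph_osd_linger_request *lreq)
{
        int age_ms = ceph_osdc_watch_check(osdc, lreq);

        return age_ms >= 0 && age_ms < 30 * MSEC_PER_SEC;
}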
4921 
4922 static int decode_watcher(void **p, void *end, struct ceph_watch_item *item)
4923 {
4924         u8 struct_v;
4925         u32 struct_len;
4926         int ret;
4927 
4928         ret = ceph_start_decoding(p, end, 2, "watch_item_t",
4929                                   &struct_v, &struct_len);
4930         if (ret)
4931                 goto bad;
4932 
4933         ret = -EINVAL;
4934         ceph_decode_copy_safe(p, end, &item->name, sizeof(item->name), bad);
4935         ceph_decode_64_safe(p, end, item->cookie, bad);
4936         ceph_decode_skip_32(p, end, bad); /* skip timeout seconds */
4937 
4938         if (struct_v >= 2) {
4939                 ret = ceph_decode_entity_addr(p, end, &item->addr);
4940                 if (ret)
4941                         goto bad;
4942         } else {
4943                 ret = 0;
4944         }
4945 
4946         dout("%s %s%llu cookie %llu addr %s\n", __func__,
4947              ENTITY_NAME(item->name), item->cookie,
4948              ceph_pr_addr(&item->addr));
4949 bad:
4950         return ret;
4951 }
4952 
4953 static int decode_watchers(void **p, void *end,
4954                            struct ceph_watch_item **watchers,
4955                            u32 *num_watchers)
4956 {
4957         u8 struct_v;
4958         u32 struct_len;
4959         int i;
4960         int ret;
4961 
4962         ret = ceph_start_decoding(p, end, 1, "obj_list_watch_response_t",
4963                                   &struct_v, &struct_len);
4964         if (ret)
4965                 return ret;
4966 
4967         *num_watchers = ceph_decode_32(p);
4968         *watchers = kcalloc(*num_watchers, sizeof(**watchers), GFP_NOIO);
4969         if (!*watchers)
4970                 return -ENOMEM;
4971 
4972         for (i = 0; i < *num_watchers; i++) {
4973                 ret = decode_watcher(p, end, *watchers + i);
4974                 if (ret) {
4975                         kfree(*watchers);
4976                         return ret;
4977                 }
4978         }
4979 
4980         return 0;
4981 }
4982 
4983 /*
4984  * On success, the caller is responsible for:
4985  *
4986  *     kfree(watchers);
4987  */
4988 int ceph_osdc_list_watchers(struct ceph_osd_client *osdc,
4989                             struct ceph_object_id *oid,
4990                             struct ceph_object_locator *oloc,
4991                             struct ceph_watch_item **watchers,
4992                             u32 *num_watchers)
4993 {
4994         struct ceph_osd_request *req;
4995         struct page **pages;
4996         int ret;
4997 
4998         req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
4999         if (!req)
5000                 return -ENOMEM;
5001 
5002         ceph_oid_copy(&req->r_base_oid, oid);
5003         ceph_oloc_copy(&req->r_base_oloc, oloc);
5004         req->r_flags = CEPH_OSD_FLAG_READ;
5005 
5006         pages = ceph_alloc_page_vector(1, GFP_NOIO);
5007         if (IS_ERR(pages)) {
5008                 ret = PTR_ERR(pages);
5009                 goto out_put_req;
5010         }
5011 
5012         osd_req_op_init(req, 0, CEPH_OSD_OP_LIST_WATCHERS, 0);
5013         ceph_osd_data_pages_init(osd_req_op_data(req, 0, list_watchers,
5014                                                  response_data),
5015                                  pages, PAGE_SIZE, 0, false, true);
5016 
5017         ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
5018         if (ret)
5019                 goto out_put_req;
5020 
5021         ceph_osdc_start_request(osdc, req, false);
5022         ret = ceph_osdc_wait_request(osdc, req);
5023         if (ret >= 0) {
5024                 void *p = page_address(pages[0]);
5025                 void *const end = p + req->r_ops[0].outdata_len;
5026 
5027                 ret = decode_watchers(&p, end, watchers, num_watchers);
5028         }
5029 
5030 out_put_req:
5031         ceph_osdc_put_request(req);
5032         return ret;
5033 }
5034 EXPORT_SYMBOL(ceph_osdc_list_watchers);
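
/*
 * Illustrative sketch (not part of the original file): dumping the
 * watcher list and honouring the kfree() contract above.
 */
static void example_dump_watchers(struct ceph_osd_client *osdc,
                                  struct ceph_object_id *oid,
                                  struct ceph_object_locator *oloc)
{
        struct ceph_watch_item *watchers;
        u32 num_watchers, i;
        int ret;

        ret = ceph_osdc_list_watchers(osdc, oid, oloc, &watchers,
                                      &num_watchers);
        if (ret)
                return;

        for (i = 0; i < num_watchers; i++)
                pr_info("watcher %s cookie %llu\n",
                        ceph_pr_addr(&watchers[i].addr),
                        watchers[i].cookie);
        kfree(watchers);
}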
5035 
5036 /*
5037  * Call all pending notify callbacks - for use after a watch is
5038  * unregistered, to make sure no more callbacks for it will be invoked.
5039  */
5040 void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc)
5041 {
5042         dout("%s osdc %p\n", __func__, osdc);
5043         flush_workqueue(osdc->notify_wq);
5044 }
5045 EXPORT_SYMBOL(ceph_osdc_flush_notifies);
5046 
5047 void ceph_osdc_maybe_request_map(struct ceph_osd_client *osdc)
5048 {
5049         down_read(&osdc->lock);
5050         maybe_request_map(osdc);
5051         up_read(&osdc->lock);
5052 }
5053 EXPORT_SYMBOL(ceph_osdc_maybe_request_map);
5054 
5055 /*
5056  * Execute an OSD class method on an object.
5057  *
5058  * @flags: CEPH_OSD_FLAG_*
5059  * @resp_len: in/out param for reply length
5060  */
5061 int ceph_osdc_call(struct ceph_osd_client *osdc,
5062                    struct ceph_object_id *oid,
5063                    struct ceph_object_locator *oloc,
5064                    const char *class, const char *method,
5065                    unsigned int flags,
5066                    struct page *req_page, size_t req_len,
5067                    struct page **resp_pages, size_t *resp_len)
5068 {
5069         struct ceph_osd_request *req;
5070         int ret;
5071 
5072         if (req_len > PAGE_SIZE)
5073                 return -E2BIG;
5074 
5075         req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
5076         if (!req)
5077                 return -ENOMEM;
5078 
5079         ceph_oid_copy(&req->r_base_oid, oid);
5080         ceph_oloc_copy(&req->r_base_oloc, oloc);
5081         req->r_flags = flags;
5082 
5083         ret = osd_req_op_cls_init(req, 0, class, method);
5084         if (ret)
5085                 goto out_put_req;
5086 
5087         if (req_page)
5088                 osd_req_op_cls_request_data_pages(req, 0, &req_page, req_len,
5089                                                   0, false, false);
5090         if (resp_pages)
5091                 osd_req_op_cls_response_data_pages(req, 0, resp_pages,
5092                                                    *resp_len, 0, false, false);
5093 
5094         ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
5095         if (ret)
5096                 goto out_put_req;
5097 
5098         ceph_osdc_start_request(osdc, req, false);
5099         ret = ceph_osdc_wait_request(osdc, req);
5100         if (ret >= 0) {
5101                 ret = req->r_ops[0].rval;
5102                 if (resp_pages)
5103                         *resp_len = req->r_ops[0].outdata_len;
5104         }
5105 
5106 out_put_req:
5107         ceph_osdc_put_request(req);
5108         return ret;
5109 }
5110 EXPORT_SYMBOL(ceph_osdc_call);
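
/*
 * Illustrative sketch (not part of the original file): invoking a
 * class method with single-page input and output, much like rbd's
 * method calls.  The class/method names and 8-byte input are made up.
 */
static int example_cls_call(struct ceph_osd_client *osdc,
                            struct ceph_object_id *oid,
                            struct ceph_object_locator *oloc)
{
        struct page *req_page, *reply_page;
        size_t reply_len = PAGE_SIZE;
        int ret;

        req_page = alloc_page(GFP_NOIO);
        reply_page = alloc_page(GFP_NOIO);
        if (!req_page || !reply_page) {
                ret = -ENOMEM;
                goto out;
        }

        /* fill page_address(req_page) with the method's input here */
        ret = ceph_osdc_call(osdc, oid, oloc, "example_cls",
                             "example_method", CEPH_OSD_FLAG_READ,
                             req_page, sizeof(u64), &reply_page, &reply_len);
        /* on success, ret is the method's rval, reply_len its output */
out:
        if (req_page)
                __free_page(req_page);
        if (reply_page)
                __free_page(reply_page);
        return ret;
}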
5111 
5112 /*
5113  * reset all osd connections
5114  */
5115 void ceph_osdc_reopen_osds(struct ceph_osd_client *osdc)
5116 {
5117         struct rb_node *n;
5118 
5119         down_write(&osdc->lock);
5120         for (n = rb_first(&osdc->osds); n; ) {
5121                 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
5122 
5123                 n = rb_next(n);
5124                 if (!reopen_osd(osd))
5125                         kick_osd_requests(osd);
5126         }
5127         up_write(&osdc->lock);
5128 }
5129 
5130 /*
5131  * init, shutdown
5132  */
5133 int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
5134 {
5135         int err;
5136 
5137         dout("init\n");
5138         osdc->client = client;
5139         init_rwsem(&osdc->lock);
5140         osdc->osds = RB_ROOT;
5141         INIT_LIST_HEAD(&osdc->osd_lru);
5142         spin_lock_init(&osdc->osd_lru_lock);
5143         osd_init(&osdc->homeless_osd);
5144         osdc->homeless_osd.o_osdc = osdc;
5145         osdc->homeless_osd.o_osd = CEPH_HOMELESS_OSD;
5146         osdc->last_linger_id = CEPH_LINGER_ID_START;
5147         osdc->linger_requests = RB_ROOT;
5148         osdc->map_checks = RB_ROOT;
5149         osdc->linger_map_checks = RB_ROOT;
5150         INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout);
5151         INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout);
5152 
5153         err = -ENOMEM;
5154         osdc->osdmap = ceph_osdmap_alloc();
5155         if (!osdc->osdmap)
5156                 goto out;
5157 
5158         osdc->req_mempool = mempool_create_slab_pool(10,
5159                                                      ceph_osd_request_cache);
5160         if (!osdc->req_mempool)
5161                 goto out_map;
5162 
5163         err = ceph_msgpool_init(&osdc->msgpool_op, CEPH_MSG_OSD_OP,
5164                                 PAGE_SIZE, CEPH_OSD_SLAB_OPS, 10, "osd_op");
5165         if (err < 0)
5166                 goto out_mempool;
5167         err = ceph_msgpool_init(&osdc->msgpool_op_reply, CEPH_MSG_OSD_OPREPLY,
5168                                 PAGE_SIZE, CEPH_OSD_SLAB_OPS, 10,
5169                                 "osd_op_reply");
5170         if (err < 0)
5171                 goto out_msgpool;
5172 
5173         err = -ENOMEM;
5174         osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify");
5175         if (!osdc->notify_wq)
5176                 goto out_msgpool_reply;
5177 
5178         osdc->completion_wq = create_singlethread_workqueue("ceph-completion");
5179         if (!osdc->completion_wq)
5180                 goto out_notify_wq;
5181 
5182         schedule_delayed_work(&osdc->timeout_work,
5183                               osdc->client->options->osd_keepalive_timeout);
5184         schedule_delayed_work(&osdc->osds_timeout_work,
5185             round_jiffies_relative(osdc->client->options->osd_idle_ttl));
5186 
5187         return 0;
5188 
5189 out_notify_wq:
5190         destroy_workqueue(osdc->notify_wq);
5191 out_msgpool_reply:
5192         ceph_msgpool_destroy(&osdc->msgpool_op_reply);
5193 out_msgpool:
5194         ceph_msgpool_destroy(&osdc->msgpool_op);
5195 out_mempool:
5196         mempool_destroy(osdc->req_mempool);
5197 out_map:
5198         ceph_osdmap_destroy(osdc->osdmap);
5199 out:
5200         return err;
5201 }
5202 
5203 void ceph_osdc_stop(struct ceph_osd_client *osdc)
5204 {
5205         destroy_workqueue(osdc->completion_wq);
5206         destroy_workqueue(osdc->notify_wq);
5207         cancel_delayed_work_sync(&osdc->timeout_work);
5208         cancel_delayed_work_sync(&osdc->osds_timeout_work);
5209 
5210         down_write(&osdc->lock);
5211         while (!RB_EMPTY_ROOT(&osdc->osds)) {
5212                 struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
5213                                                 struct ceph_osd, o_node);
5214                 close_osd(osd);
5215         }
5216         up_write(&osdc->lock);
5217         WARN_ON(refcount_read(&osdc->homeless_osd.o_ref) != 1);
5218         osd_cleanup(&osdc->homeless_osd);
5219 
5220         WARN_ON(!list_empty(&osdc->osd_lru));
5221         WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_requests));
5222         WARN_ON(!RB_EMPTY_ROOT(&osdc->map_checks));
5223         WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_map_checks));
5224         WARN_ON(atomic_read(&osdc->num_requests));
5225         WARN_ON(atomic_read(&osdc->num_homeless));
5226 
5227         ceph_osdmap_destroy(osdc->osdmap);
5228         mempool_destroy(osdc->req_mempool);
5229         ceph_msgpool_destroy(&osdc->msgpool_op);
5230         ceph_msgpool_destroy(&osdc->msgpool_op_reply);
5231 }
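
/*
 * Illustrative sketch (not part of the original file): the osdc is
 * embedded in struct ceph_client and normally brought up and torn
 * down by ceph_create_client()/ceph_destroy_client(); paired
 * directly, the calls look like this.
 */
static int example_osdc_lifetime(struct ceph_client *client)
{
        struct ceph_osd_client *osdc = &client->osdc;
        int ret;

        ret = ceph_osdc_init(osdc, client);
        if (ret)
                return ret;

        /* ... submit requests, watches, notifies ... */

        ceph_osdc_stop(osdc); /* all requests must have completed */
        return 0;
}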
5232 
5233 /*
5234  * Read some contiguous pages.  If we cross a stripe boundary, shorten
5235  * *plen.  Return number of bytes read, or error.
5236  */
5237 int ceph_osdc_readpages(struct ceph_osd_client *osdc,
5238                         struct ceph_vino vino, struct ceph_file_layout *layout,
5239                         u64 off, u64 *plen,
5240                         u32 truncate_seq, u64 truncate_size,
5241                         struct page **pages, int num_pages, int page_align)
5242 {
5243         struct ceph_osd_request *req;
5244         int rc = 0;
5245 
5246         dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino,
5247              vino.snap, off, *plen);
5248         req = ceph_osdc_new_request(osdc, layout, vino, off, plen, 0, 1,
5249                                     CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
5250                                     NULL, truncate_seq, truncate_size,
5251                                     false);
5252         if (IS_ERR(req))
5253                 return PTR_ERR(req);
5254 
5255         /* it may be a short read due to an object boundary */
5256         osd_req_op_extent_osd_data_pages(req, 0,
5257                                 pages, *plen, page_align, false, false);
5258 
5259         dout("readpages  final extent is %llu~%llu (%llu bytes align %d)\n",
5260              off, *plen, *plen, page_align);
5261 
5262         rc = ceph_osdc_start_request(osdc, req, false);
5263         if (!rc)
5264                 rc = ceph_osdc_wait_request(osdc, req);
5265 
5266         ceph_osdc_put_request(req);
5267         dout("readpages result %d\n", rc);
5268         return rc;
5269 }
5270 EXPORT_SYMBOL(ceph_osdc_readpages);
5271 
5272 /*
5273  * do a synchronous write on N pages
5274  */
5275 int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
5276                          struct ceph_file_layout *layout,
5277                          struct ceph_snap_context *snapc,
5278                          u64 off, u64 len,
5279                          u32 truncate_seq, u64 truncate_size,
5280                          struct timespec64 *mtime,
5281                          struct page **pages, int num_pages)
5282 {
5283         struct ceph_osd_request *req;
5284         int rc = 0;
5285         int page_align = off & ~PAGE_MASK;
5286 
5287         req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 0, 1,
5288                                     CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE,
5289                                     snapc, truncate_seq, truncate_size,
5290                                     true);
5291         if (IS_ERR(req))
5292                 return PTR_ERR(req);
5293 
5294         /* it may be a short write due to an object boundary */
5295         osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_align,
5296                                 false, false);
5297         dout("writepages %llu~%llu (%llu bytes)\n", off, len, len);
5298 
5299         req->r_mtime = *mtime;
5300         rc = ceph_osdc_start_request(osdc, req, true);
5301         if (!rc)
5302                 rc = ceph_osdc_wait_request(osdc, req);
5303 
5304         ceph_osdc_put_request(req);
5305         if (rc == 0)
5306                 rc = len;
5307         dout("writepages result %d\n", rc);
5308         return rc;
5309 }
5310 EXPORT_SYMBOL(ceph_osdc_writepages);
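
/*
 * Illustrative sketch (not part of the original file): a synchronous
 * one-page read via ceph_osdc_readpages().  Assumes @off is
 * page-aligned; vino/layout would come from an inode, as in fs/ceph.
 */
static int example_read_one_page(struct ceph_osd_client *osdc,
                                 struct ceph_vino vino,
                                 struct ceph_file_layout *layout,
                                 struct page *page, u64 off)
{
        u64 len = PAGE_SIZE;

        /* may read less than PAGE_SIZE at an object boundary */
        return ceph_osdc_readpages(osdc, vino, layout, off, &len,
                                   0, 0, &page, 1, 0);
}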
5311 
5312 static int osd_req_op_copy_from_init(struct ceph_osd_request *req,
5313                                      u64 src_snapid, u64 src_version,
5314                                      struct ceph_object_id *src_oid,
5315                                      struct ceph_object_locator *src_oloc,
5316                                      u32 src_fadvise_flags,
5317                                      u32 dst_fadvise_flags,
5318                                      u8 copy_from_flags)
5319 {
5320         struct ceph_osd_req_op *op;
5321         struct page **pages;
5322         void *p, *end;
5323 
5324         pages = ceph_alloc_page_vector(1, GFP_KERNEL);
5325         if (IS_ERR(pages))
5326                 return PTR_ERR(pages);
5327 
5328         op = _osd_req_op_init(req, 0, CEPH_OSD_OP_COPY_FROM, dst_fadvise_flags);
5329         op->copy_from.snapid = src_snapid;
5330         op->copy_from.src_version = src_version;
5331         op->copy_from.flags = copy_from_flags;
5332         op->copy_from.src_fadvise_flags = src_fadvise_flags;
5333 
5334         p = page_address(pages[0]);
5335         end = p + PAGE_SIZE;
5336         ceph_encode_string(&p, end, src_oid->name, src_oid->name_len);
5337         encode_oloc(&p, end, src_oloc);
5338         op->indata_len = PAGE_SIZE - (end - p);
5339 
5340         ceph_osd_data_pages_init(&op->copy_from.osd_data, pages,
5341                                  op->indata_len, 0, false, true);
5342         return 0;
5343 }
5344 
5345 int ceph_osdc_copy_from(struct ceph_osd_client *osdc,
5346                         u64 src_snapid, u64 src_version,
5347                         struct ceph_object_id *src_oid,
5348                         struct ceph_object_locator *src_oloc,
5349                         u32 src_fadvise_flags,
5350                         struct ceph_object_id *dst_oid,
5351                         struct ceph_object_locator *dst_oloc,
5352                         u32 dst_fadvise_flags,
5353                         u8 copy_from_flags)
5354 {
5355         struct ceph_osd_request *req;
5356         int ret;
5357 
5358         req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL);
5359         if (!req)
5360                 return -ENOMEM;
5361 
5362         req->r_flags = CEPH_OSD_FLAG_WRITE;
5363 
5364         ceph_oloc_copy(&req->r_t.base_oloc, dst_oloc);
5365         ceph_oid_copy(&req->r_t.base_oid, dst_oid);
5366 
5367         ret = osd_req_op_copy_from_init(req, src_snapid, src_version, src_oid,
5368                                         src_oloc, src_fadvise_flags,
5369                                         dst_fadvise_flags, copy_from_flags);
5370         if (ret)
5371                 goto out;
5372 
5373         ret = ceph_osdc_alloc_messages(req, GFP_KERNEL);
5374         if (ret)
5375                 goto out;
5376 
5377         ceph_osdc_start_request(osdc, req, false);
5378         ret = ceph_osdc_wait_request(osdc, req);
5379 
5380 out:
5381         ceph_osdc_put_request(req);
5382         return ret;
5383 }
5384 EXPORT_SYMBOL(ceph_osdc_copy_from);
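
/*
 * Illustrative sketch (not part of the original file): a whole-object
 * server-side copy, similar to what fs/ceph does for
 * copy_file_range().  Snapid, version and flag choices here are
 * illustrative only.
 */
static int example_copy_object(struct ceph_osd_client *osdc,
                               struct ceph_object_id *src_oid,
                               struct ceph_object_locator *src_oloc,
                               struct ceph_object_id *dst_oid,
                               struct ceph_object_locator *dst_oloc)
{
        return ceph_osdc_copy_from(osdc, CEPH_NOSNAP, 0 /* any version */,
                                   src_oid, src_oloc, 0,
                                   dst_oid, dst_oloc, 0,
                                   CEPH_OSD_COPY_FROM_FLAG_FLUSH);
}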
5385 
5386 int __init ceph_osdc_setup(void)
5387 {
5388         size_t size = sizeof(struct ceph_osd_request) +
5389             CEPH_OSD_SLAB_OPS * sizeof(struct ceph_osd_req_op);
5390 
5391         BUG_ON(ceph_osd_request_cache);
5392         ceph_osd_request_cache = kmem_cache_create("ceph_osd_request", size,
5393                                                    0, 0, NULL);
5394 
5395         return ceph_osd_request_cache ? 0 : -ENOMEM;
5396 }
5397 
5398 void ceph_osdc_cleanup(void)
5399 {
5400         BUG_ON(!ceph_osd_request_cache);
5401         kmem_cache_destroy(ceph_osd_request_cache);
5402         ceph_osd_request_cache = NULL;
5403 }
5404 
5405 /*
5406  * handle incoming message
5407  */
5408 static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
5409 {
5410         struct ceph_osd *osd = con->private;
5411         struct ceph_osd_client *osdc = osd->o_osdc;
5412         int type = le16_to_cpu(msg->hdr.type);
5413 
5414         switch (type) {
5415         case CEPH_MSG_OSD_MAP:
5416                 ceph_osdc_handle_map(osdc, msg);
5417                 break;
5418         case CEPH_MSG_OSD_OPREPLY:
5419                 handle_reply(osd, msg);
5420                 break;
5421         case CEPH_MSG_OSD_BACKOFF:
5422                 handle_backoff(osd, msg);
5423                 break;
5424         case CEPH_MSG_WATCH_NOTIFY:
5425                 handle_watch_notify(osdc, msg);
5426                 break;
5427 
5428         default:
5429                 pr_err("received unknown message type %d %s\n", type,
5430                        ceph_msg_type_name(type));
5431         }
5432 
5433         ceph_msg_put(msg);
5434 }
5435 
5436 /*
5437  * Look up and return the message for an incoming reply.  A data portion
5438  * larger than preallocated isn't handled at the moment - for now, just
5439  * skip the message.
5440  */
5441 static struct ceph_msg *get_reply(struct ceph_connection *con,
5442                                   struct ceph_msg_header *hdr,
5443                                   int *skip)
5444 {
5445         struct ceph_osd *osd = con->private;
5446         struct ceph_osd_client *osdc = osd->o_osdc;
5447         struct ceph_msg *m = NULL;
5448         struct ceph_osd_request *req;
5449         int front_len = le32_to_cpu(hdr->front_len);
5450         int data_len = le32_to_cpu(hdr->data_len);
5451         u64 tid = le64_to_cpu(hdr->tid);
5452 
5453         down_read(&osdc->lock);
5454         if (!osd_registered(osd)) {
5455                 dout("%s osd%d unknown, skipping\n", __func__, osd->o_osd);
5456                 *skip = 1;
5457                 goto out_unlock_osdc;
5458         }
5459         WARN_ON(osd->o_osd != le64_to_cpu(hdr->src.num));
5460 
5461         mutex_lock(&osd->lock);
5462         req = lookup_request(&osd->o_requests, tid);
5463         if (!req) {
5464                 dout("%s osd%d tid %llu unknown, skipping\n", __func__,
5465                      osd->o_osd, tid);
5466                 *skip = 1;
5467                 goto out_unlock_session;
5468         }
5469 
5470         ceph_msg_revoke_incoming(req->r_reply);
5471 
5472         if (front_len > req->r_reply->front_alloc_len) {
5473                 pr_warn("%s osd%d tid %llu front %d > preallocated %d\n",
5474                         __func__, osd->o_osd, req->r_tid, front_len,
5475                         req->r_reply->front_alloc_len);
5476                 m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front_len, GFP_NOFS,
5477                                  false);
5478                 if (!m)
5479                         goto out_unlock_session;
5480                 ceph_msg_put(req->r_reply);
5481                 req->r_reply = m;
5482         }
5483 
5484         if (data_len > req->r_reply->data_length) {
5485                 pr_warn("%s osd%d tid %llu data %d > preallocated %zu, skipping\n",
5486                         __func__, osd->o_osd, req->r_tid, data_len,
5487                         req->r_reply->data_length);
5488                 m = NULL;
5489                 *skip = 1;
5490                 goto out_unlock_session;
5491         }
5492 
5493         m = ceph_msg_get(req->r_reply);
5494         dout("get_reply tid %lld %p\n", tid, m);
5495 
5496 out_unlock_session:
5497         mutex_unlock(&osd->lock);
5498 out_unlock_osdc:
5499         up_read(&osdc->lock);
5500         return m;
5501 }
5502 
5503 static struct ceph_msg *alloc_msg_with_page_vector(struct ceph_msg_header *hdr)
5504 {
5505         struct ceph_msg *m;
5506         int type = le16_to_cpu(hdr->type);
5507         u32 front_len = le32_to_cpu(hdr->front_len);
5508         u32 data_len = le32_to_cpu(hdr->data_len);
5509 
5510         m = ceph_msg_new2(type, front_len, 1, GFP_NOIO, false);
5511         if (!m)
5512                 return NULL;
5513 
5514         if (data_len) {
5515                 struct page **pages;
5516 
5517                 pages = ceph_alloc_page_vector(calc_pages_for(0, data_len),
5518                                                GFP_NOIO);
5519                 if (IS_ERR(pages)) {
5520                         ceph_msg_put(m);
5521                         return NULL;
5522                 }
5523 
5524                 ceph_msg_data_add_pages(m, pages, data_len, 0, true);
5525         }
5526 
5527         return m;
5528 }
5529 
5530 static struct ceph_msg *alloc_msg(struct ceph_connection *con,
5531                                   struct ceph_msg_header *hdr,
5532                                   int *skip)
5533 {
5534         struct ceph_osd *osd = con->private;
5535         int type = le16_to_cpu(hdr->type);
5536 
5537         *skip = 0;
5538         switch (type) {
5539         case CEPH_MSG_OSD_MAP:
5540         case CEPH_MSG_OSD_BACKOFF:
5541         case CEPH_MSG_WATCH_NOTIFY:
5542                 return alloc_msg_with_page_vector(hdr);
5543         case CEPH_MSG_OSD_OPREPLY:
5544                 return get_reply(con, hdr, skip);
5545         default:
5546                 pr_warn("%s osd%d unknown msg type %d, skipping\n", __func__,
5547                         osd->o_osd, type);
5548                 *skip = 1;
5549                 return NULL;
5550         }
5551 }
5552 
5553 /*
5554  * Wrappers to take/drop a ref on the containing ceph_osd struct
5555  */
5556 static struct ceph_connection *get_osd_con(struct ceph_connection *con)
5557 {
5558         struct ceph_osd *osd = con->private;
5559         if (get_osd(osd))
5560                 return con;
5561         return NULL;
5562 }
5563 
5564 static void put_osd_con(struct ceph_connection *con)
5565 {
5566         struct ceph_osd *osd = con->private;
5567         put_osd(osd);
5568 }
5569 
5570 /*
5571  * authentication
5572  */
5573 /*
5574  * Note: returned pointer is the address of a structure that's
5575  * managed separately.  Caller must *not* attempt to free it.
5576  */
5577 static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
5578                                         int *proto, int force_new)
5579 {
5580         struct ceph_osd *o = con->private;
5581         struct ceph_osd_client *osdc = o->o_osdc;
5582         struct ceph_auth_client *ac = osdc->client->monc.auth;
5583         struct ceph_auth_handshake *auth = &o->o_auth;
5584 
5585         if (force_new && auth->authorizer) {
5586                 ceph_auth_destroy_authorizer(auth->authorizer);
5587                 auth->authorizer = NULL;
5588         }
5589         if (!auth->authorizer) {
5590                 int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
5591                                                       auth);
5592                 if (ret)
5593                         return ERR_PTR(ret);
5594         } else {
5595                 int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
5596                                                      auth);
5597                 if (ret)
5598                         return ERR_PTR(ret);
5599         }
5600         *proto = ac->protocol;
5601 
5602         return auth;
5603 }
5604 
5605 static int add_authorizer_challenge(struct ceph_connection *con,
5606                                     void *challenge_buf, int challenge_buf_len)
5607 {
5608         struct ceph_osd *o = con->private;
5609         struct ceph_osd_client *osdc = o->o_osdc;
5610         struct ceph_auth_client *ac = osdc->client->monc.auth;
5611 
5612         return ceph_auth_add_authorizer_challenge(ac, o->o_auth.authorizer,
5613                                             challenge_buf, challenge_buf_len);
5614 }
5615 
5616 static int verify_authorizer_reply(struct ceph_connection *con)
5617 {
5618         struct ceph_osd *o = con->private;
5619         struct ceph_osd_client *osdc = o->o_osdc;
5620         struct ceph_auth_client *ac = osdc->client->monc.auth;
5621 
5622         return ceph_auth_verify_authorizer_reply(ac, o->o_auth.authorizer);
5623 }
5624 
5625 static int invalidate_authorizer(struct ceph_connection *con)
5626 {
5627         struct ceph_osd *o = con->private;
5628         struct ceph_osd_client *osdc = o->o_osdc;
5629         struct ceph_auth_client *ac = osdc->client->monc.auth;
5630 
5631         ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD);
5632         return ceph_monc_validate_auth(&osdc->client->monc);
5633 }
5634 
5635 static void osd_reencode_message(struct ceph_msg *msg)
5636 {
5637         int type = le16_to_cpu(msg->hdr.type);
5638 
5639         if (type == CEPH_MSG_OSD_OP)
5640                 encode_request_finish(msg);
5641 }
5642 
5643 static int osd_sign_message(struct ceph_msg *msg)
5644 {
5645         struct ceph_osd *o = msg->con->private;
5646         struct ceph_auth_handshake *auth = &o->o_auth;
5647 
5648         return ceph_auth_sign_message(auth, msg);
5649 }
5650 
5651 static int osd_check_message_signature(struct ceph_msg *msg)
5652 {
5653         struct ceph_osd *o = msg->con->private;
5654         struct ceph_auth_handshake *auth = &o->o_auth;
5655 
5656         return ceph_auth_check_message_signature(auth, msg);
5657 }
5658 
5659 static const struct ceph_connection_operations osd_con_ops = {
5660         .get = get_osd_con,
5661         .put = put_osd_con,
5662         .dispatch = dispatch,
5663         .get_authorizer = get_authorizer,
5664         .add_authorizer_challenge = add_authorizer_challenge,
5665         .verify_authorizer_reply = verify_authorizer_reply,
5666         .invalidate_authorizer = invalidate_authorizer,
5667         .alloc_msg = alloc_msg,
5668         .reencode_message = osd_reencode_message,
5669         .sign_message = osd_sign_message,
5670         .check_message_signature = osd_check_message_signature,
5671         .fault = osd_fault,
5672 };
