Lines Matching refs:req  (struct ceph_mds_request usage in the Ceph MDS client request paths)

515 static void put_request_session(struct ceph_mds_request *req)  in put_request_session()  argument
517 if (req->r_session) { in put_request_session()
518 ceph_put_mds_session(req->r_session); in put_request_session()
519 req->r_session = NULL; in put_request_session()
525 struct ceph_mds_request *req = container_of(kref, in ceph_mdsc_release_request() local
528 destroy_reply_info(&req->r_reply_info); in ceph_mdsc_release_request()
529 if (req->r_request) in ceph_mdsc_release_request()
530 ceph_msg_put(req->r_request); in ceph_mdsc_release_request()
531 if (req->r_reply) in ceph_mdsc_release_request()
532 ceph_msg_put(req->r_reply); in ceph_mdsc_release_request()
533 if (req->r_inode) { in ceph_mdsc_release_request()
534 ceph_put_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN); in ceph_mdsc_release_request()
535 iput(req->r_inode); in ceph_mdsc_release_request()
537 if (req->r_locked_dir) in ceph_mdsc_release_request()
538 ceph_put_cap_refs(ceph_inode(req->r_locked_dir), CEPH_CAP_PIN); in ceph_mdsc_release_request()
539 iput(req->r_target_inode); in ceph_mdsc_release_request()
540 if (req->r_dentry) in ceph_mdsc_release_request()
541 dput(req->r_dentry); in ceph_mdsc_release_request()
542 if (req->r_old_dentry) in ceph_mdsc_release_request()
543 dput(req->r_old_dentry); in ceph_mdsc_release_request()
544 if (req->r_old_dentry_dir) { in ceph_mdsc_release_request()
551 ceph_put_cap_refs(ceph_inode(req->r_old_dentry_dir), in ceph_mdsc_release_request()
553 iput(req->r_old_dentry_dir); in ceph_mdsc_release_request()
555 kfree(req->r_path1); in ceph_mdsc_release_request()
556 kfree(req->r_path2); in ceph_mdsc_release_request()
557 if (req->r_pagelist) in ceph_mdsc_release_request()
558 ceph_pagelist_release(req->r_pagelist); in ceph_mdsc_release_request()
559 put_request_session(req); in ceph_mdsc_release_request()
560 ceph_unreserve_caps(req->r_mdsc, &req->r_caps_reservation); in ceph_mdsc_release_request()
561 kfree(req); in ceph_mdsc_release_request()
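
The release path above is the kref destructor (note the container_of(kref, ...) at 525); the get/put helpers matched throughout are presumably thin kref wrappers around r_kref. A minimal sketch of that pattern (the helper bodies are an assumption, only the field and function names come from the matches):

    #include <linux/kref.h>

    /* Sketch: refcounting as implied by kref_init(&req->r_kref) at 1674. */
    static inline void ceph_mdsc_get_request(struct ceph_mds_request *req)
    {
            kref_get(&req->r_kref);
    }

    static inline void ceph_mdsc_put_request(struct ceph_mds_request *req)
    {
            /* ceph_mdsc_release_request() (525) runs when the last ref drops */
            kref_put(&req->r_kref, ceph_mdsc_release_request);
    }
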
572 struct ceph_mds_request *req; in __lookup_request() local
576 req = rb_entry(n, struct ceph_mds_request, r_node); in __lookup_request()
577 if (tid < req->r_tid) in __lookup_request()
579 else if (tid > req->r_tid) in __lookup_request()
582 ceph_mdsc_get_request(req); in __lookup_request()
583 return req; in __lookup_request()
594 struct ceph_mds_request *req = NULL; in __insert_request() local
598 req = rb_entry(parent, struct ceph_mds_request, r_node); in __insert_request()
599 if (new->r_tid < req->r_tid) in __insert_request()
601 else if (new->r_tid > req->r_tid) in __insert_request()
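
Both __lookup_request() and __insert_request() walk the same tid-ordered rbtree rooted at mdsc->request_tree (see 647). A minimal sketch of the lookup side, reconstructed around the comparisons matched above (the rb_left/rb_right steps and the miss case are assumptions):

    #include <linux/rbtree.h>
    #include <linux/types.h>

    /* Sketch: find a request by tid; take a reference before returning it. */
    static struct ceph_mds_request *lookup_request_sketch(struct rb_root *root,
                                                          u64 tid)
    {
            struct rb_node *n = root->rb_node;

            while (n) {
                    struct ceph_mds_request *req =
                            rb_entry(n, struct ceph_mds_request, r_node);

                    if (tid < req->r_tid)
                            n = n->rb_left;
                    else if (tid > req->r_tid)
                            n = n->rb_right;
                    else {
                            ceph_mdsc_get_request(req);   /* as at 582 */
                            return req;
                    }
            }
            return NULL;   /* no such tid */
    }
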
618 struct ceph_mds_request *req, in __register_request() argument
621 req->r_tid = ++mdsc->last_tid; in __register_request()
622 if (req->r_num_caps) in __register_request()
623 ceph_reserve_caps(mdsc, &req->r_caps_reservation, in __register_request()
624 req->r_num_caps); in __register_request()
625 dout("__register_request %p tid %lld\n", req, req->r_tid); in __register_request()
626 ceph_mdsc_get_request(req); in __register_request()
627 __insert_request(mdsc, req); in __register_request()
629 req->r_uid = current_fsuid(); in __register_request()
630 req->r_gid = current_fsgid(); in __register_request()
637 req->r_unsafe_dir = dir; in __register_request()
638 list_add_tail(&req->r_unsafe_dir_item, &ci->i_unsafe_dirops); in __register_request()
644 struct ceph_mds_request *req) in __unregister_request() argument
646 dout("__unregister_request %p tid %lld\n", req, req->r_tid); in __unregister_request()
647 rb_erase(&req->r_node, &mdsc->request_tree); in __unregister_request()
648 RB_CLEAR_NODE(&req->r_node); in __unregister_request()
650 if (req->r_unsafe_dir) { in __unregister_request()
651 struct ceph_inode_info *ci = ceph_inode(req->r_unsafe_dir); in __unregister_request()
654 list_del_init(&req->r_unsafe_dir_item); in __unregister_request()
657 iput(req->r_unsafe_dir); in __unregister_request()
658 req->r_unsafe_dir = NULL; in __unregister_request()
661 complete_all(&req->r_safe_completion); in __unregister_request()
663 ceph_mdsc_put_request(req); in __unregister_request()
688 struct ceph_mds_request *req) in __choose_mds() argument
693 int mode = req->r_direct_mode; in __choose_mds()
695 u32 hash = req->r_direct_hash; in __choose_mds()
696 bool is_hash = req->r_direct_is_hash; in __choose_mds()
702 if (req->r_resend_mds >= 0 && in __choose_mds()
703 (__have_session(mdsc, req->r_resend_mds) || in __choose_mds()
704 ceph_mdsmap_get_state(mdsc->mdsmap, req->r_resend_mds) > 0)) { in __choose_mds()
706 req->r_resend_mds); in __choose_mds()
707 return req->r_resend_mds; in __choose_mds()
714 if (req->r_inode) { in __choose_mds()
715 inode = req->r_inode; in __choose_mds()
716 } else if (req->r_dentry) { in __choose_mds()
718 struct dentry *parent = req->r_dentry->d_parent; in __choose_mds()
723 inode = d_inode(req->r_dentry); in __choose_mds()
732 inode = d_inode(req->r_dentry); in __choose_mds()
736 hash = ceph_dentry_hash(dir, req->r_dentry); in __choose_mds()
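
A comment-only summary of the __choose_mds() matches (the glue between them is inferred, not shown by this search):

    /*
     * MDS selection, as far as the matches show:
     *   - an explicit r_resend_mds hint wins if that MDS has a session
     *     or is at least up in the mdsmap                        (702-707);
     *   - otherwise the target is derived from r_inode, or from
     *     r_dentry and its parent directory, with ceph_dentry_hash()
     *     supplying a name hash (presumably to pick a directory
     *     fragment)                                              (714-736);
     *   - r_direct_mode / r_direct_hash / r_direct_is_hash carry the
     *     caller's routing preference into this decision         (693-696).
     */
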
1027 struct ceph_mds_request *req; in cleanup_session_requests() local
1033 req = list_first_entry(&session->s_unsafe, in cleanup_session_requests()
1035 list_del_init(&req->r_unsafe_item); in cleanup_session_requests()
1036 pr_info(" dropping unsafe request %llu\n", req->r_tid); in cleanup_session_requests()
1037 __unregister_request(mdsc, req); in cleanup_session_requests()
1042 req = rb_entry(p, struct ceph_mds_request, r_node); in cleanup_session_requests()
1044 if (req->r_session && in cleanup_session_requests()
1045 req->r_session->s_mds == session->s_mds) in cleanup_session_requests()
1046 req->r_attempts = 0; in cleanup_session_requests()
1620 int ceph_alloc_readdir_reply_buffer(struct ceph_mds_request *req, in ceph_alloc_readdir_reply_buffer() argument
1624 struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info; in ceph_alloc_readdir_reply_buffer()
1625 struct ceph_mount_options *opt = req->r_mdsc->fsc->mount_options; in ceph_alloc_readdir_reply_buffer()
1651 req->r_num_caps = num_entries + 1; in ceph_alloc_readdir_reply_buffer()
1652 req->r_args.readdir.max_entries = cpu_to_le32(num_entries); in ceph_alloc_readdir_reply_buffer()
1653 req->r_args.readdir.max_bytes = cpu_to_le32(opt->max_readdir_bytes); in ceph_alloc_readdir_reply_buffer()
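
Only the final encode step of ceph_alloc_readdir_reply_buffer() shows up in this search; a short note on what it records:

    /*
     * One cap is reserved per expected readdir entry plus one
     * (presumably for the directory itself), and the negotiated
     * limits are copied into the request arguments:
     *   r_args.readdir.max_entries  <- num_entries
     *   r_args.readdir.max_bytes    <- opt->max_readdir_bytes
     * The buffer sizing that produces num_entries is not matched here.
     */
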
1663 struct ceph_mds_request *req = kzalloc(sizeof(*req), GFP_NOFS); in ceph_mdsc_create_request() local
1665 if (!req) in ceph_mdsc_create_request()
1668 mutex_init(&req->r_fill_mutex); in ceph_mdsc_create_request()
1669 req->r_mdsc = mdsc; in ceph_mdsc_create_request()
1670 req->r_started = jiffies; in ceph_mdsc_create_request()
1671 req->r_resend_mds = -1; in ceph_mdsc_create_request()
1672 INIT_LIST_HEAD(&req->r_unsafe_dir_item); in ceph_mdsc_create_request()
1673 req->r_fmode = -1; in ceph_mdsc_create_request()
1674 kref_init(&req->r_kref); in ceph_mdsc_create_request()
1675 INIT_LIST_HEAD(&req->r_wait); in ceph_mdsc_create_request()
1676 init_completion(&req->r_completion); in ceph_mdsc_create_request()
1677 init_completion(&req->r_safe_completion); in ceph_mdsc_create_request()
1678 INIT_LIST_HEAD(&req->r_unsafe_item); in ceph_mdsc_create_request()
1680 req->r_stamp = CURRENT_TIME; in ceph_mdsc_create_request()
1682 req->r_op = op; in ceph_mdsc_create_request()
1683 req->r_direct_mode = mode; in ceph_mdsc_create_request()
1684 return req; in ceph_mdsc_create_request()
1702 struct ceph_mds_request *req = __get_oldest_req(mdsc); in __get_oldest_tid() local
1704 if (req) in __get_oldest_tid()
1705 return req->r_tid; in __get_oldest_tid()
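
__get_oldest_req() does not appear among these matches; from its use here and in the wait paths below it presumably returns the leftmost (smallest-tid) node of the request tree. A sketch under that assumption:

    /* Sketch: oldest request == smallest tid == leftmost rbtree node. */
    static struct ceph_mds_request *get_oldest_req_sketch(struct ceph_mds_client *mdsc)
    {
            struct rb_node *n = rb_first(&mdsc->request_tree);

            return n ? rb_entry(n, struct ceph_mds_request, r_node) : NULL;
    }
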
1875 struct ceph_mds_request *req, in create_request_message() argument
1890 ret = set_request_path_attr(req->r_inode, req->r_dentry, in create_request_message()
1891 req->r_path1, req->r_ino1.ino, in create_request_message()
1898 ret = set_request_path_attr(NULL, req->r_old_dentry, in create_request_message()
1899 req->r_path2, req->r_ino2.ino, in create_request_message()
1912 (!!req->r_inode_drop + !!req->r_dentry_drop + in create_request_message()
1913 !!req->r_old_inode_drop + !!req->r_old_dentry_drop); in create_request_message()
1914 if (req->r_dentry_drop) in create_request_message()
1915 len += req->r_dentry->d_name.len; in create_request_message()
1916 if (req->r_old_dentry_drop) in create_request_message()
1917 len += req->r_old_dentry->d_name.len; in create_request_message()
1926 msg->hdr.tid = cpu_to_le64(req->r_tid); in create_request_message()
1933 head->op = cpu_to_le32(req->r_op); in create_request_message()
1934 head->caller_uid = cpu_to_le32(from_kuid(&init_user_ns, req->r_uid)); in create_request_message()
1935 head->caller_gid = cpu_to_le32(from_kgid(&init_user_ns, req->r_gid)); in create_request_message()
1936 head->args = req->r_args; in create_request_message()
1942 req->r_request_release_offset = p - msg->front.iov_base; in create_request_message()
1946 if (req->r_inode_drop) in create_request_message()
1948 req->r_inode ? req->r_inode : d_inode(req->r_dentry), in create_request_message()
1949 mds, req->r_inode_drop, req->r_inode_unless, 0); in create_request_message()
1950 if (req->r_dentry_drop) in create_request_message()
1951 releases += ceph_encode_dentry_release(&p, req->r_dentry, in create_request_message()
1952 mds, req->r_dentry_drop, req->r_dentry_unless); in create_request_message()
1953 if (req->r_old_dentry_drop) in create_request_message()
1954 releases += ceph_encode_dentry_release(&p, req->r_old_dentry, in create_request_message()
1955 mds, req->r_old_dentry_drop, req->r_old_dentry_unless); in create_request_message()
1956 if (req->r_old_inode_drop) in create_request_message()
1958 d_inode(req->r_old_dentry), in create_request_message()
1959 mds, req->r_old_inode_drop, req->r_old_inode_unless, 0); in create_request_message()
1963 p = msg->front.iov_base + req->r_request_release_offset; in create_request_message()
1971 ceph_encode_timespec(&ts, &req->r_stamp); in create_request_message()
1979 if (req->r_pagelist) { in create_request_message()
1980 struct ceph_pagelist *pagelist = req->r_pagelist; in create_request_message()
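
Taken together, the create_request_message() matches trace the layout of the outgoing MDS request; a comment-only summary (ordering inferred from the offsets and encode calls above):

    /*
     * Request message body, as suggested by the matches:
     *   - ceph_mds_request_head: op, caller_uid/gid, args        (1933-1936)
     *   - filepath 1 / filepath 2 via set_request_path_attr()    (1890-1899)
     *   - inode/dentry cap release records; their count feeds the
     *     front size, and their start is kept in
     *     r_request_release_offset for later rewriting     (1912-1917, 1942, 1963)
     *   - request timestamp encoded as a ceph timespec           (1971)
     *   - optional pagelist payload hung off the message         (1979-1980)
     */
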
2005 struct ceph_mds_request *req) in complete_request() argument
2007 if (req->r_callback) in complete_request()
2008 req->r_callback(mdsc, req); in complete_request()
2010 complete_all(&req->r_completion); in complete_request()
2017 struct ceph_mds_request *req, in __prepare_send_request() argument
2024 req->r_attempts++; in __prepare_send_request()
2025 if (req->r_inode) { in __prepare_send_request()
2027 ceph_get_cap_for_mds(ceph_inode(req->r_inode), mds); in __prepare_send_request()
2030 req->r_sent_on_mseq = cap->mseq; in __prepare_send_request()
2032 req->r_sent_on_mseq = -1; in __prepare_send_request()
2034 dout("prepare_send_request %p tid %lld %s (attempt %d)\n", req, in __prepare_send_request()
2035 req->r_tid, ceph_mds_op_name(req->r_op), req->r_attempts); in __prepare_send_request()
2037 if (req->r_got_unsafe) { in __prepare_send_request()
2045 msg = req->r_request; in __prepare_send_request()
2052 if (req->r_target_inode) in __prepare_send_request()
2053 rhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode)); in __prepare_send_request()
2055 rhead->num_retry = req->r_attempts - 1; in __prepare_send_request()
2061 p = msg->front.iov_base + req->r_request_release_offset; in __prepare_send_request()
2064 ceph_encode_timespec(&ts, &req->r_stamp); in __prepare_send_request()
2073 if (req->r_request) { in __prepare_send_request()
2074 ceph_msg_put(req->r_request); in __prepare_send_request()
2075 req->r_request = NULL; in __prepare_send_request()
2077 msg = create_request_message(mdsc, req, mds, drop_cap_releases); in __prepare_send_request()
2079 req->r_err = PTR_ERR(msg); in __prepare_send_request()
2080 complete_request(mdsc, req); in __prepare_send_request()
2083 req->r_request = msg; in __prepare_send_request()
2087 if (req->r_got_unsafe) in __prepare_send_request()
2089 if (req->r_locked_dir) in __prepare_send_request()
2092 rhead->num_fwd = req->r_num_fwd; in __prepare_send_request()
2093 rhead->num_retry = req->r_attempts - 1; in __prepare_send_request()
2096 dout(" r_locked_dir = %p\n", req->r_locked_dir); in __prepare_send_request()
2104 struct ceph_mds_request *req) in __do_request() argument
2110 if (req->r_err || req->r_got_result) { in __do_request()
2111 if (req->r_aborted) in __do_request()
2112 __unregister_request(mdsc, req); in __do_request()
2116 if (req->r_timeout && in __do_request()
2117 time_after_eq(jiffies, req->r_started + req->r_timeout)) { in __do_request()
2123 put_request_session(req); in __do_request()
2125 mds = __choose_mds(mdsc, req); in __do_request()
2129 list_add(&req->r_wait, &mdsc->waiting_for_map); in __do_request()
2142 req->r_session = get_session(session); in __do_request()
2151 list_add(&req->r_wait, &session->s_waiting); in __do_request()
2156 req->r_resend_mds = -1; /* forget any previous mds hint */ in __do_request()
2158 if (req->r_request_started == 0) /* note request start time */ in __do_request()
2159 req->r_request_started = jiffies; in __do_request()
2161 err = __prepare_send_request(mdsc, req, mds, false); in __do_request()
2163 ceph_msg_get(req->r_request); in __do_request()
2164 ceph_con_send(&session->s_con, req->r_request); in __do_request()
2173 req->r_err = err; in __do_request()
2174 complete_request(mdsc, req); in __do_request()
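
The __do_request() matches sketch the submission state machine; a comment-only outline (the pieces between the matched lines are inferred):

    /*
     * __do_request():
     *   1. requests that already carry an error or a result are finished;
     *      aborted ones are also unregistered                    (2110-2112);
     *   2. a request older than r_started + r_timeout is timed out
     *                                                            (2116-2117);
     *   3. __choose_mds() picks a target; with no usable target the
     *      request parks on mdsc->waiting_for_map                (2125-2129);
     *   4. the session is pinned via get_session(); if it is not yet
     *      open the request parks on session->s_waiting          (2142-2151);
     *   5. otherwise __prepare_send_request() builds or refreshes the
     *      message and ceph_con_send() ships it                  (2161-2164);
     *   6. on error, r_err is set and complete_request() wakes any
     *      waiter                                                (2173-2174).
     */
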
2184 struct ceph_mds_request *req; in __wake_requests() local
2190 req = list_entry(tmp_list.next, in __wake_requests()
2192 list_del_init(&req->r_wait); in __wake_requests()
2193 dout(" wake request %p tid %llu\n", req, req->r_tid); in __wake_requests()
2194 __do_request(mdsc, req); in __wake_requests()
2204 struct ceph_mds_request *req; in kick_requests() local
2209 req = rb_entry(p, struct ceph_mds_request, r_node); in kick_requests()
2211 if (req->r_got_unsafe) in kick_requests()
2213 if (req->r_attempts > 0) in kick_requests()
2215 if (req->r_session && in kick_requests()
2216 req->r_session->s_mds == mds) { in kick_requests()
2217 dout(" kicking tid %llu\n", req->r_tid); in kick_requests()
2218 list_del_init(&req->r_wait); in kick_requests()
2219 __do_request(mdsc, req); in kick_requests()
2225 struct ceph_mds_request *req) in ceph_mdsc_submit_request() argument
2227 dout("submit_request on %p\n", req); in ceph_mdsc_submit_request()
2229 __register_request(mdsc, req, NULL); in ceph_mdsc_submit_request()
2230 __do_request(mdsc, req); in ceph_mdsc_submit_request()
2240 struct ceph_mds_request *req) in ceph_mdsc_do_request() argument
2244 dout("do_request on %p\n", req); in ceph_mdsc_do_request()
2247 if (req->r_inode) in ceph_mdsc_do_request()
2248 ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN); in ceph_mdsc_do_request()
2249 if (req->r_locked_dir) in ceph_mdsc_do_request()
2250 ceph_get_cap_refs(ceph_inode(req->r_locked_dir), CEPH_CAP_PIN); in ceph_mdsc_do_request()
2251 if (req->r_old_dentry_dir) in ceph_mdsc_do_request()
2252 ceph_get_cap_refs(ceph_inode(req->r_old_dentry_dir), in ceph_mdsc_do_request()
2257 __register_request(mdsc, req, dir); in ceph_mdsc_do_request()
2258 __do_request(mdsc, req); in ceph_mdsc_do_request()
2260 if (req->r_err) { in ceph_mdsc_do_request()
2261 err = req->r_err; in ceph_mdsc_do_request()
2262 __unregister_request(mdsc, req); in ceph_mdsc_do_request()
2270 if (req->r_timeout) { in ceph_mdsc_do_request()
2272 &req->r_completion, req->r_timeout); in ceph_mdsc_do_request()
2275 } else if (req->r_wait_for_completion) { in ceph_mdsc_do_request()
2276 err = req->r_wait_for_completion(mdsc, req); in ceph_mdsc_do_request()
2278 err = wait_for_completion_killable(&req->r_completion); in ceph_mdsc_do_request()
2284 if (req->r_got_result) { in ceph_mdsc_do_request()
2285 err = le32_to_cpu(req->r_reply_info.head->result); in ceph_mdsc_do_request()
2287 dout("aborted request %lld with %d\n", req->r_tid, err); in ceph_mdsc_do_request()
2294 mutex_lock(&req->r_fill_mutex); in ceph_mdsc_do_request()
2295 req->r_err = err; in ceph_mdsc_do_request()
2296 req->r_aborted = true; in ceph_mdsc_do_request()
2297 mutex_unlock(&req->r_fill_mutex); in ceph_mdsc_do_request()
2299 if (req->r_locked_dir && in ceph_mdsc_do_request()
2300 (req->r_op & CEPH_MDS_OP_WRITE)) in ceph_mdsc_do_request()
2301 ceph_invalidate_dir_request(req); in ceph_mdsc_do_request()
2303 err = req->r_err; in ceph_mdsc_do_request()
2308 dout("do_request %p done, result %d\n", req, err); in ceph_mdsc_do_request()
2316 void ceph_invalidate_dir_request(struct ceph_mds_request *req) in ceph_invalidate_dir_request() argument
2318 struct inode *inode = req->r_locked_dir; in ceph_invalidate_dir_request()
2323 if (req->r_dentry) in ceph_invalidate_dir_request()
2324 ceph_invalidate_dentry_lease(req->r_dentry); in ceph_invalidate_dir_request()
2325 if (req->r_old_dentry) in ceph_invalidate_dir_request()
2326 ceph_invalidate_dentry_lease(req->r_old_dentry); in ceph_invalidate_dir_request()
2339 struct ceph_mds_request *req; in handle_reply() local
2356 req = __lookup_request(mdsc, tid); in handle_reply()
2357 if (!req) { in handle_reply()
2362 dout("handle_reply %p\n", req); in handle_reply()
2365 if (req->r_session != session) { in handle_reply()
2368 req->r_session ? req->r_session->s_mds : -1); in handle_reply()
2374 if ((req->r_got_unsafe && !head->safe) || in handle_reply()
2375 (req->r_got_safe && head->safe)) { in handle_reply()
2381 if (req->r_got_safe && !head->safe) { in handle_reply()
2398 dout("got ESTALE on request %llu", req->r_tid); in handle_reply()
2399 req->r_resend_mds = -1; in handle_reply()
2400 if (req->r_direct_mode != USE_AUTH_MDS) { in handle_reply()
2402 req->r_direct_mode = USE_AUTH_MDS; in handle_reply()
2403 __do_request(mdsc, req); in handle_reply()
2407 int mds = __choose_mds(mdsc, req); in handle_reply()
2408 if (mds >= 0 && mds != req->r_session->s_mds) { in handle_reply()
2410 __do_request(mdsc, req); in handle_reply()
2415 dout("have to return ESTALE on request %llu", req->r_tid); in handle_reply()
2420 req->r_got_safe = true; in handle_reply()
2421 __unregister_request(mdsc, req); in handle_reply()
2423 if (req->r_got_unsafe) { in handle_reply()
2432 list_del_init(&req->r_unsafe_item); in handle_reply()
2441 req->r_got_unsafe = true; in handle_reply()
2442 list_add_tail(&req->r_unsafe_item, &req->r_session->s_unsafe); in handle_reply()
2446 rinfo = &req->r_reply_info; in handle_reply()
2471 mutex_lock(&req->r_fill_mutex); in handle_reply()
2472 err = ceph_fill_trace(mdsc->fsc->sb, req, req->r_session); in handle_reply()
2474 if (result == 0 && (req->r_op == CEPH_MDS_OP_READDIR || in handle_reply()
2475 req->r_op == CEPH_MDS_OP_LSSNAP)) in handle_reply()
2476 ceph_readdir_prepopulate(req, req->r_session); in handle_reply()
2477 ceph_unreserve_caps(mdsc, &req->r_caps_reservation); in handle_reply()
2479 mutex_unlock(&req->r_fill_mutex); in handle_reply()
2486 if (!req->r_aborted) { in handle_reply()
2488 req->r_err = err; in handle_reply()
2490 req->r_reply = msg; in handle_reply()
2492 req->r_got_result = true; in handle_reply()
2499 ceph_add_cap_releases(mdsc, req->r_session); in handle_reply()
2503 complete_request(mdsc, req); in handle_reply()
2505 ceph_mdsc_put_request(req); in handle_reply()
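
The handle_reply() matches outline the safe/unsafe reply state machine; a comment-only summary (glue inferred):

    /*
     * handle_reply():
     *   - the reply must come from the session the request was sent on,
     *     and duplicate safe/unsafe combinations are rejected    (2365-2381);
     *   - -ESTALE triggers a retry, first by switching to USE_AUTH_MDS,
     *     then by re-choosing an MDS, before the error is returned
     *                                                            (2398-2415);
     *   - a safe reply sets r_got_safe, unregisters the request and, if
     *     an unsafe reply was seen earlier, drops it from the session's
     *     s_unsafe list                                          (2420-2432);
     *   - an unsafe reply sets r_got_unsafe and queues the request on
     *     r_session->s_unsafe until the safe reply arrives       (2441-2442);
     *   - the parsed reply is applied under r_fill_mutex via
     *     ceph_fill_trace() and, for readdir/lssnap, also
     *     ceph_readdir_prepopulate()                             (2471-2477);
     *   - finally r_err or r_reply/r_got_result is recorded and the
     *     waiter is woken via complete_request()                 (2486-2503).
     */
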
2518 struct ceph_mds_request *req; in handle_forward() local
2531 req = __lookup_request(mdsc, tid); in handle_forward()
2532 if (!req) { in handle_forward()
2537 if (req->r_aborted) { in handle_forward()
2539 __unregister_request(mdsc, req); in handle_forward()
2540 } else if (fwd_seq <= req->r_num_fwd) { in handle_forward()
2542 tid, next_mds, req->r_num_fwd, fwd_seq); in handle_forward()
2546 BUG_ON(req->r_err); in handle_forward()
2547 BUG_ON(req->r_got_result); in handle_forward()
2548 req->r_attempts = 0; in handle_forward()
2549 req->r_num_fwd = fwd_seq; in handle_forward()
2550 req->r_resend_mds = next_mds; in handle_forward()
2551 put_request_session(req); in handle_forward()
2552 __do_request(mdsc, req); in handle_forward()
2554 ceph_mdsc_put_request(req); in handle_forward()
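
A comment-only summary of the handle_forward() matches:

    /*
     * handle_forward(): when the MDS forwards a request,
     *   - an aborted request is simply unregistered              (2537-2539);
     *   - a forward with fwd_seq <= r_num_fwd is stale and ignored
     *                                                            (2540-2542);
     *   - otherwise the attempt counter is reset, r_num_fwd and
     *     r_resend_mds are updated, the old session reference is put
     *     and the request is resubmitted via __do_request()      (2548-2552).
     */
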
2680 struct ceph_mds_request *req, *nreq; in replay_unsafe_requests() local
2687 list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item) { in replay_unsafe_requests()
2688 err = __prepare_send_request(mdsc, req, session->s_mds, true); in replay_unsafe_requests()
2690 ceph_msg_get(req->r_request); in replay_unsafe_requests()
2691 ceph_con_send(&session->s_con, req->r_request); in replay_unsafe_requests()
2701 req = rb_entry(p, struct ceph_mds_request, r_node); in replay_unsafe_requests()
2703 if (req->r_got_unsafe) in replay_unsafe_requests()
2705 if (req->r_attempts == 0) in replay_unsafe_requests()
2707 if (req->r_session && in replay_unsafe_requests()
2708 req->r_session->s_mds == session->s_mds) { in replay_unsafe_requests()
2709 err = __prepare_send_request(mdsc, req, in replay_unsafe_requests()
2712 ceph_msg_get(req->r_request); in replay_unsafe_requests()
2713 ceph_con_send(&session->s_con, req->r_request); in replay_unsafe_requests()
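
A comment-only summary of the replay_unsafe_requests() matches:

    /*
     * replay_unsafe_requests(): on session recovery,
     *   - every request still on session->s_unsafe is re-prepared via
     *     __prepare_send_request() and re-sent on the session
     *     connection                                             (2687-2691);
     *   - the request tree is then walked and already-attempted,
     *     not-yet-unsafe requests bound to this session are re-sent the
     *     same way                                               (2701-2713).
     */
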
3426 struct ceph_mds_request *req; in wait_requests() local
3439 while ((req = __get_oldest_req(mdsc))) { in wait_requests()
3441 req->r_tid); in wait_requests()
3442 __unregister_request(mdsc, req); in wait_requests()
3474 struct ceph_mds_request *req = NULL, *nextreq; in wait_unsafe_requests() local
3480 req = __get_oldest_req(mdsc); in wait_unsafe_requests()
3481 while (req && req->r_tid <= want_tid) { in wait_unsafe_requests()
3483 n = rb_next(&req->r_node); in wait_unsafe_requests()
3488 if ((req->r_op & CEPH_MDS_OP_WRITE)) { in wait_unsafe_requests()
3490 ceph_mdsc_get_request(req); in wait_unsafe_requests()
3495 req->r_tid, want_tid); in wait_unsafe_requests()
3496 wait_for_completion(&req->r_safe_completion); in wait_unsafe_requests()
3498 ceph_mdsc_put_request(req); in wait_unsafe_requests()
3508 req = nextreq; in wait_unsafe_requests()
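
wait_unsafe_requests() is the flush side of the same machinery: walk the tid-ordered tree up to want_tid and block on each write request's r_safe_completion. A condensed sketch (locking and the reference taken on the next request are abridged or assumed):

    /* Sketch: wait for every write request with tid <= want_tid to go safe. */
    req = __get_oldest_req(mdsc);
    while (req && req->r_tid <= want_tid) {
            struct ceph_mds_request *next = NULL;
            struct rb_node *n = rb_next(&req->r_node);          /* as at 3483 */

            if (n)
                    next = rb_entry(n, struct ceph_mds_request, r_node);
            if (req->r_op & CEPH_MDS_OP_WRITE) {                /* modifying op (3488) */
                    ceph_mdsc_get_request(req);                 /* hold it across the wait */
                    wait_for_completion(&req->r_safe_completion);
                    ceph_mdsc_put_request(req);
            }
            req = next;
    }
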