Lines Matching refs: mdsc (references to the struct ceph_mds_client pointer in fs/ceph/mds_client.c)
53 static void __wake_requests(struct ceph_mds_client *mdsc,
390 struct ceph_mds_session *__ceph_lookup_mds_session(struct ceph_mds_client *mdsc, in __ceph_lookup_mds_session() argument
395 if (mds >= mdsc->max_sessions || mdsc->sessions[mds] == NULL) in __ceph_lookup_mds_session()
397 session = mdsc->sessions[mds]; in __ceph_lookup_mds_session()
404 static bool __have_session(struct ceph_mds_client *mdsc, int mds) in __have_session() argument
406 if (mds >= mdsc->max_sessions) in __have_session()
408 return mdsc->sessions[mds]; in __have_session()
411 static int __verify_registered_session(struct ceph_mds_client *mdsc, in __verify_registered_session() argument
414 if (s->s_mds >= mdsc->max_sessions || in __verify_registered_session()
415 mdsc->sessions[s->s_mds] != s) in __verify_registered_session()
424 static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc, in register_session() argument
429 if (mds >= mdsc->mdsmap->m_max_mds) in register_session()
435 s->s_mdsc = mdsc; in register_session()
442 ceph_con_init(&s->s_con, s, &mds_con_ops, &mdsc->fsc->client->msgr); in register_session()
466 if (mds >= mdsc->max_sessions) { in register_session()
474 if (mdsc->sessions) { in register_session()
475 memcpy(sa, mdsc->sessions, in register_session()
476 mdsc->max_sessions * sizeof(void *)); in register_session()
477 kfree(mdsc->sessions); in register_session()
479 mdsc->sessions = sa; in register_session()
480 mdsc->max_sessions = newmax; in register_session()
482 mdsc->sessions[mds] = s; in register_session()
483 atomic_inc(&mdsc->num_sessions); in register_session()
487 ceph_mdsmap_get_addr(mdsc->mdsmap, mds)); in register_session()
499 static void __unregister_session(struct ceph_mds_client *mdsc, in __unregister_session() argument
503 BUG_ON(mdsc->sessions[s->s_mds] != s); in __unregister_session()
504 mdsc->sessions[s->s_mds] = NULL; in __unregister_session()
507 atomic_dec(&mdsc->num_sessions); in __unregister_session()
569 static struct ceph_mds_request *__lookup_request(struct ceph_mds_client *mdsc, in __lookup_request() argument
573 struct rb_node *n = mdsc->request_tree.rb_node; in __lookup_request()
589 static void __insert_request(struct ceph_mds_client *mdsc, in __insert_request() argument
592 struct rb_node **p = &mdsc->request_tree.rb_node; in __insert_request()
608 rb_insert_color(&new->r_node, &mdsc->request_tree); in __insert_request()
617 static void __register_request(struct ceph_mds_client *mdsc, in __register_request() argument
621 req->r_tid = ++mdsc->last_tid; in __register_request()
623 ceph_reserve_caps(mdsc, &req->r_caps_reservation, in __register_request()
627 __insert_request(mdsc, req); in __register_request()
643 static void __unregister_request(struct ceph_mds_client *mdsc, in __unregister_request() argument
647 rb_erase(&req->r_node, &mdsc->request_tree); in __unregister_request()
687 static int __choose_mds(struct ceph_mds_client *mdsc, in __choose_mds() argument
703 (__have_session(mdsc, req->r_resend_mds) || in __choose_mds()
704 ceph_mdsmap_get_state(mdsc->mdsmap, req->r_resend_mds) > 0)) { in __choose_mds()
721 if (dir->i_sb != mdsc->fsc->sb) { in __choose_mds()
766 if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >= in __choose_mds()
781 if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >= in __choose_mds()
806 mds = ceph_mdsmap_get_random_mds(mdsc->mdsmap); in __choose_mds()
837 static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u64 seq) in create_session_open_msg() argument
844 struct ceph_options *opt = mdsc->fsc->client->options; in create_session_open_msg()
909 static int __open_session(struct ceph_mds_client *mdsc, in __open_session() argument
917 mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds); in __open_session()
924 msg = create_session_open_msg(mdsc, session->s_seq); in __open_session()
937 __open_export_target_session(struct ceph_mds_client *mdsc, int target) in __open_export_target_session() argument
941 session = __ceph_lookup_mds_session(mdsc, target); in __open_export_target_session()
943 session = register_session(mdsc, target); in __open_export_target_session()
949 __open_session(mdsc, session); in __open_export_target_session()
955 ceph_mdsc_open_export_target_session(struct ceph_mds_client *mdsc, int target) in ceph_mdsc_open_export_target_session() argument
961 mutex_lock(&mdsc->mutex); in ceph_mdsc_open_export_target_session()
962 session = __open_export_target_session(mdsc, target); in ceph_mdsc_open_export_target_session()
963 mutex_unlock(&mdsc->mutex); in ceph_mdsc_open_export_target_session()
968 static void __open_export_target_sessions(struct ceph_mds_client *mdsc, in __open_export_target_sessions() argument
975 if (mds >= mdsc->mdsmap->m_max_mds) in __open_export_target_sessions()
978 mi = &mdsc->mdsmap->m_info[mds]; in __open_export_target_sessions()
983 ts = __open_export_target_session(mdsc, mi->export_targets[i]); in __open_export_target_sessions()
989 void ceph_mdsc_open_export_target_sessions(struct ceph_mds_client *mdsc, in ceph_mdsc_open_export_target_sessions() argument
992 mutex_lock(&mdsc->mutex); in ceph_mdsc_open_export_target_sessions()
993 __open_export_target_sessions(mdsc, session); in ceph_mdsc_open_export_target_sessions()
994 mutex_unlock(&mdsc->mutex); in ceph_mdsc_open_export_target_sessions()
1024 static void cleanup_session_requests(struct ceph_mds_client *mdsc, in cleanup_session_requests() argument
1031 mutex_lock(&mdsc->mutex); in cleanup_session_requests()
1037 __unregister_request(mdsc, req); in cleanup_session_requests()
1040 p = rb_first(&mdsc->request_tree); in cleanup_session_requests()
1048 mutex_unlock(&mdsc->mutex); in cleanup_session_requests()
1129 struct ceph_mds_client *mdsc = in remove_session_caps_cb() local
1130 ceph_sb_to_client(inode->i_sb)->mdsc; in remove_session_caps_cb()
1132 spin_lock(&mdsc->cap_dirty_lock); in remove_session_caps_cb()
1147 mdsc->num_cap_flushing--; in remove_session_caps_cb()
1150 spin_unlock(&mdsc->cap_dirty_lock); in remove_session_caps_cb()
1236 static int send_renew_caps(struct ceph_mds_client *mdsc, in send_renew_caps() argument
1249 state = ceph_mdsmap_get_state(mdsc->mdsmap, session->s_mds); in send_renew_caps()
1266 static int send_flushmsg_ack(struct ceph_mds_client *mdsc, in send_flushmsg_ack() argument
1286 static void renewed_caps(struct ceph_mds_client *mdsc, in renewed_caps() argument
1296 mdsc->mdsmap->m_session_timeout*HZ; in renewed_caps()
1318 static int request_close_session(struct ceph_mds_client *mdsc, in request_close_session() argument
1336 static int __close_session(struct ceph_mds_client *mdsc, in __close_session() argument
1342 return request_close_session(mdsc, session); in __close_session()
1403 static int trim_caps(struct ceph_mds_client *mdsc, in trim_caps() argument
1420 ceph_add_cap_releases(mdsc, session); in trim_caps()
1421 ceph_send_cap_releases(mdsc, session); in trim_caps()
1432 int ceph_add_cap_releases(struct ceph_mds_client *mdsc, in ceph_add_cap_releases() argument
1438 int extra = mdsc->fsc->mount_options->cap_release_safety; in ceph_add_cap_releases()
1508 static void wait_caps_flush(struct ceph_mds_client *mdsc, u64 want_flush_seq) in wait_caps_flush() argument
1513 mutex_lock(&mdsc->mutex); in wait_caps_flush()
1514 for (mds = 0; mds < mdsc->max_sessions; mds++) { in wait_caps_flush()
1515 struct ceph_mds_session *session = mdsc->sessions[mds]; in wait_caps_flush()
1521 mutex_unlock(&mdsc->mutex); in wait_caps_flush()
1542 wait_event(mdsc->cap_flushing_wq, in wait_caps_flush()
1547 mutex_lock(&mdsc->mutex); in wait_caps_flush()
1550 mutex_unlock(&mdsc->mutex); in wait_caps_flush()
1557 void ceph_send_cap_releases(struct ceph_mds_client *mdsc, in ceph_send_cap_releases() argument
1577 static void discard_cap_releases(struct ceph_mds_client *mdsc, in discard_cap_releases() argument
1661 ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode) in ceph_mdsc_create_request() argument
1669 req->r_mdsc = mdsc; in ceph_mdsc_create_request()
1692 static struct ceph_mds_request *__get_oldest_req(struct ceph_mds_client *mdsc) in __get_oldest_req() argument
1694 if (RB_EMPTY_ROOT(&mdsc->request_tree)) in __get_oldest_req()
1696 return rb_entry(rb_first(&mdsc->request_tree), in __get_oldest_req()
1700 static u64 __get_oldest_tid(struct ceph_mds_client *mdsc) in __get_oldest_tid() argument
1702 struct ceph_mds_request *req = __get_oldest_req(mdsc); in __get_oldest_tid()
1874 static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc, in create_request_message() argument
1932 head->mdsmap_epoch = cpu_to_le32(mdsc->mdsmap->m_epoch); in create_request_message()
2004 static void complete_request(struct ceph_mds_client *mdsc, in complete_request() argument
2008 req->r_callback(mdsc, req); in complete_request()
2016 static int __prepare_send_request(struct ceph_mds_client *mdsc, in __prepare_send_request() argument
2077 msg = create_request_message(mdsc, req, mds, drop_cap_releases); in __prepare_send_request()
2080 complete_request(mdsc, req); in __prepare_send_request()
2086 rhead->oldest_client_tid = cpu_to_le64(__get_oldest_tid(mdsc)); in __prepare_send_request()
2103 static int __do_request(struct ceph_mds_client *mdsc, in __do_request() argument
2112 __unregister_request(mdsc, req); in __do_request()
2125 mds = __choose_mds(mdsc, req); in __do_request()
2127 ceph_mdsmap_get_state(mdsc->mdsmap, mds) < CEPH_MDS_STATE_ACTIVE) { in __do_request()
2129 list_add(&req->r_wait, &mdsc->waiting_for_map); in __do_request()
2134 session = __ceph_lookup_mds_session(mdsc, mds); in __do_request()
2136 session = register_session(mdsc, mds); in __do_request()
2150 __open_session(mdsc, session); in __do_request()
2161 err = __prepare_send_request(mdsc, req, mds, false); in __do_request()
2174 complete_request(mdsc, req); in __do_request()
2181 static void __wake_requests(struct ceph_mds_client *mdsc, in __wake_requests() argument
2194 __do_request(mdsc, req); in __wake_requests()
2202 static void kick_requests(struct ceph_mds_client *mdsc, int mds) in kick_requests() argument
2205 struct rb_node *p = rb_first(&mdsc->request_tree); in kick_requests()
2219 __do_request(mdsc, req); in kick_requests()
2224 void ceph_mdsc_submit_request(struct ceph_mds_client *mdsc, in ceph_mdsc_submit_request() argument
2228 mutex_lock(&mdsc->mutex); in ceph_mdsc_submit_request()
2229 __register_request(mdsc, req, NULL); in ceph_mdsc_submit_request()
2230 __do_request(mdsc, req); in ceph_mdsc_submit_request()
2231 mutex_unlock(&mdsc->mutex); in ceph_mdsc_submit_request()
2238 int ceph_mdsc_do_request(struct ceph_mds_client *mdsc, in ceph_mdsc_do_request() argument
2256 mutex_lock(&mdsc->mutex); in ceph_mdsc_do_request()
2257 __register_request(mdsc, req, dir); in ceph_mdsc_do_request()
2258 __do_request(mdsc, req); in ceph_mdsc_do_request()
2262 __unregister_request(mdsc, req); in ceph_mdsc_do_request()
2268 mutex_unlock(&mdsc->mutex); in ceph_mdsc_do_request()
2276 err = req->r_wait_for_completion(mdsc, req); in ceph_mdsc_do_request()
2281 mutex_lock(&mdsc->mutex); in ceph_mdsc_do_request()
2307 mutex_unlock(&mdsc->mutex); in ceph_mdsc_do_request()
2338 struct ceph_mds_client *mdsc = session->s_mdsc; in handle_reply() local
2355 mutex_lock(&mdsc->mutex); in handle_reply()
2356 req = __lookup_request(mdsc, tid); in handle_reply()
2359 mutex_unlock(&mdsc->mutex); in handle_reply()
2369 mutex_unlock(&mdsc->mutex); in handle_reply()
2378 mutex_unlock(&mdsc->mutex); in handle_reply()
2384 mutex_unlock(&mdsc->mutex); in handle_reply()
2403 __do_request(mdsc, req); in handle_reply()
2404 mutex_unlock(&mdsc->mutex); in handle_reply()
2407 int mds = __choose_mds(mdsc, req); in handle_reply()
2410 __do_request(mdsc, req); in handle_reply()
2411 mutex_unlock(&mdsc->mutex); in handle_reply()
2421 __unregister_request(mdsc, req); in handle_reply()
2435 if (mdsc->stopping && !__get_oldest_req(mdsc)) in handle_reply()
2436 complete_all(&mdsc->safe_umount_waiters); in handle_reply()
2437 mutex_unlock(&mdsc->mutex); in handle_reply()
2448 mutex_unlock(&mdsc->mutex); in handle_reply()
2460 down_write(&mdsc->snap_rwsem); in handle_reply()
2461 ceph_update_snap_trace(mdsc, rinfo->snapblob, in handle_reply()
2465 downgrade_write(&mdsc->snap_rwsem); in handle_reply()
2467 down_read(&mdsc->snap_rwsem); in handle_reply()
2472 err = ceph_fill_trace(mdsc->fsc->sb, req, req->r_session); in handle_reply()
2477 ceph_unreserve_caps(mdsc, &req->r_caps_reservation); in handle_reply()
2481 up_read(&mdsc->snap_rwsem); in handle_reply()
2483 ceph_put_snap_realm(mdsc, realm); in handle_reply()
2485 mutex_lock(&mdsc->mutex); in handle_reply()
2497 mutex_unlock(&mdsc->mutex); in handle_reply()
2499 ceph_add_cap_releases(mdsc, req->r_session); in handle_reply()
2503 complete_request(mdsc, req); in handle_reply()
2514 static void handle_forward(struct ceph_mds_client *mdsc, in handle_forward() argument
2530 mutex_lock(&mdsc->mutex); in handle_forward()
2531 req = __lookup_request(mdsc, tid); in handle_forward()
2539 __unregister_request(mdsc, req); in handle_forward()
2552 __do_request(mdsc, req); in handle_forward()
2556 mutex_unlock(&mdsc->mutex); in handle_forward()
2569 struct ceph_mds_client *mdsc = session->s_mdsc; in handle_session() local
2582 mutex_lock(&mdsc->mutex); in handle_session()
2584 __unregister_session(mdsc, session); in handle_session()
2586 session->s_ttl = jiffies + HZ*mdsc->mdsmap->m_session_autoclose; in handle_session()
2587 mutex_unlock(&mdsc->mutex); in handle_session()
2605 renewed_caps(mdsc, session, 0); in handle_session()
2607 if (mdsc->stopping) in handle_session()
2608 __close_session(mdsc, session); in handle_session()
2613 renewed_caps(mdsc, session, 1); in handle_session()
2619 cleanup_session_requests(mdsc, session); in handle_session()
2622 wake_up_all(&mdsc->session_close_wq); in handle_session()
2632 send_renew_caps(mdsc, session); in handle_session()
2636 trim_caps(mdsc, session, le32_to_cpu(h->max_caps)); in handle_session()
2640 send_flushmsg_ack(mdsc, session, seq); in handle_session()
2658 mutex_lock(&mdsc->mutex); in handle_session()
2659 __wake_requests(mdsc, &session->s_waiting); in handle_session()
2661 kick_requests(mdsc, mds); in handle_session()
2662 mutex_unlock(&mdsc->mutex); in handle_session()
2677 static void replay_unsafe_requests(struct ceph_mds_client *mdsc, in replay_unsafe_requests() argument
2686 mutex_lock(&mdsc->mutex); in replay_unsafe_requests()
2688 err = __prepare_send_request(mdsc, req, session->s_mds, true); in replay_unsafe_requests()
2699 p = rb_first(&mdsc->request_tree); in replay_unsafe_requests()
2709 err = __prepare_send_request(mdsc, req, in replay_unsafe_requests()
2717 mutex_unlock(&mdsc->mutex); in replay_unsafe_requests()
2848 static void send_mds_reconnect(struct ceph_mds_client *mdsc, in send_mds_reconnect() argument
2891 discard_cap_releases(mdsc, session); in send_mds_reconnect()
2895 if (mdsc->fsc->sb->s_root) in send_mds_reconnect()
2896 shrink_dcache_parent(mdsc->fsc->sb->s_root); in send_mds_reconnect()
2901 ceph_mdsmap_get_addr(mdsc->mdsmap, mds)); in send_mds_reconnect()
2904 replay_unsafe_requests(mdsc, session); in send_mds_reconnect()
2906 down_read(&mdsc->snap_rwsem); in send_mds_reconnect()
2930 for (p = rb_first(&mdsc->snap_realms); p; p = rb_next(p)) { in send_mds_reconnect()
2963 mutex_lock(&mdsc->mutex); in send_mds_reconnect()
2964 __wake_requests(mdsc, &session->s_waiting); in send_mds_reconnect()
2965 mutex_unlock(&mdsc->mutex); in send_mds_reconnect()
2967 up_read(&mdsc->snap_rwsem); in send_mds_reconnect()
2972 up_read(&mdsc->snap_rwsem); in send_mds_reconnect()
2988 static void check_new_map(struct ceph_mds_client *mdsc, in check_new_map() argument
2999 for (i = 0; i < oldmap->m_max_mds && i < mdsc->max_sessions; i++) { in check_new_map()
3000 if (mdsc->sessions[i] == NULL) in check_new_map()
3002 s = mdsc->sessions[i]; in check_new_map()
3020 __wake_requests(mdsc, &s->s_waiting); in check_new_map()
3021 __unregister_session(mdsc, s); in check_new_map()
3024 mutex_unlock(&mdsc->mutex); in check_new_map()
3026 mutex_lock(&mdsc->mutex); in check_new_map()
3040 mutex_unlock(&mdsc->mutex); in check_new_map()
3041 send_mds_reconnect(mdsc, s); in check_new_map()
3042 mutex_lock(&mdsc->mutex); in check_new_map()
3053 kick_requests(mdsc, i); in check_new_map()
3054 ceph_kick_flushing_caps(mdsc, s); in check_new_map()
3059 for (i = 0; i < newmap->m_max_mds && i < mdsc->max_sessions; i++) { in check_new_map()
3060 s = mdsc->sessions[i]; in check_new_map()
3070 __open_export_target_sessions(mdsc, s); in check_new_map()
3092 static void handle_lease(struct ceph_mds_client *mdsc, in handle_lease() argument
3096 struct super_block *sb = mdsc->fsc->sb; in handle_lease()
3237 void ceph_mdsc_lease_release(struct ceph_mds_client *mdsc, struct inode *inode, in ceph_mdsc_lease_release() argument
3277 static void drop_leases(struct ceph_mds_client *mdsc) in drop_leases() argument
3282 mutex_lock(&mdsc->mutex); in drop_leases()
3283 for (i = 0; i < mdsc->max_sessions; i++) { in drop_leases()
3284 struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i); in drop_leases()
3287 mutex_unlock(&mdsc->mutex); in drop_leases()
3291 mutex_lock(&mdsc->mutex); in drop_leases()
3293 mutex_unlock(&mdsc->mutex); in drop_leases()
3301 static void schedule_delayed(struct ceph_mds_client *mdsc) in schedule_delayed() argument
3305 schedule_delayed_work(&mdsc->delayed_work, hz); in schedule_delayed()
3311 struct ceph_mds_client *mdsc = in delayed_work() local
3317 ceph_check_delayed_caps(mdsc); in delayed_work()
3319 mutex_lock(&mdsc->mutex); in delayed_work()
3320 renew_interval = mdsc->mdsmap->m_session_timeout >> 2; in delayed_work()
3322 mdsc->last_renew_caps); in delayed_work()
3324 mdsc->last_renew_caps = jiffies; in delayed_work()
3326 for (i = 0; i < mdsc->max_sessions; i++) { in delayed_work()
3327 struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i); in delayed_work()
3333 request_close_session(mdsc, s); in delayed_work()
3348 mutex_unlock(&mdsc->mutex); in delayed_work()
3352 send_renew_caps(mdsc, s); in delayed_work()
3355 ceph_add_cap_releases(mdsc, s); in delayed_work()
3358 ceph_send_cap_releases(mdsc, s); in delayed_work()
3362 mutex_lock(&mdsc->mutex); in delayed_work()
3364 mutex_unlock(&mdsc->mutex); in delayed_work()
3366 schedule_delayed(mdsc); in delayed_work()
3372 struct ceph_mds_client *mdsc; in ceph_mdsc_init() local
3374 mdsc = kzalloc(sizeof(struct ceph_mds_client), GFP_NOFS); in ceph_mdsc_init()
3375 if (!mdsc) in ceph_mdsc_init()
3377 mdsc->fsc = fsc; in ceph_mdsc_init()
3378 fsc->mdsc = mdsc; in ceph_mdsc_init()
3379 mutex_init(&mdsc->mutex); in ceph_mdsc_init()
3380 mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS); in ceph_mdsc_init()
3381 if (mdsc->mdsmap == NULL) { in ceph_mdsc_init()
3382 kfree(mdsc); in ceph_mdsc_init()
3386 init_completion(&mdsc->safe_umount_waiters); in ceph_mdsc_init()
3387 init_waitqueue_head(&mdsc->session_close_wq); in ceph_mdsc_init()
3388 INIT_LIST_HEAD(&mdsc->waiting_for_map); in ceph_mdsc_init()
3389 mdsc->sessions = NULL; in ceph_mdsc_init()
3390 atomic_set(&mdsc->num_sessions, 0); in ceph_mdsc_init()
3391 mdsc->max_sessions = 0; in ceph_mdsc_init()
3392 mdsc->stopping = 0; in ceph_mdsc_init()
3393 init_rwsem(&mdsc->snap_rwsem); in ceph_mdsc_init()
3394 mdsc->snap_realms = RB_ROOT; in ceph_mdsc_init()
3395 INIT_LIST_HEAD(&mdsc->snap_empty); in ceph_mdsc_init()
3396 spin_lock_init(&mdsc->snap_empty_lock); in ceph_mdsc_init()
3397 mdsc->last_tid = 0; in ceph_mdsc_init()
3398 mdsc->request_tree = RB_ROOT; in ceph_mdsc_init()
3399 INIT_DELAYED_WORK(&mdsc->delayed_work, delayed_work); in ceph_mdsc_init()
3400 mdsc->last_renew_caps = jiffies; in ceph_mdsc_init()
3401 INIT_LIST_HEAD(&mdsc->cap_delay_list); in ceph_mdsc_init()
3402 spin_lock_init(&mdsc->cap_delay_lock); in ceph_mdsc_init()
3403 INIT_LIST_HEAD(&mdsc->snap_flush_list); in ceph_mdsc_init()
3404 spin_lock_init(&mdsc->snap_flush_lock); in ceph_mdsc_init()
3405 mdsc->cap_flush_seq = 0; in ceph_mdsc_init()
3406 INIT_LIST_HEAD(&mdsc->cap_dirty); in ceph_mdsc_init()
3407 INIT_LIST_HEAD(&mdsc->cap_dirty_migrating); in ceph_mdsc_init()
3408 mdsc->num_cap_flushing = 0; in ceph_mdsc_init()
3409 spin_lock_init(&mdsc->cap_dirty_lock); in ceph_mdsc_init()
3410 init_waitqueue_head(&mdsc->cap_flushing_wq); in ceph_mdsc_init()
3411 spin_lock_init(&mdsc->dentry_lru_lock); in ceph_mdsc_init()
3412 INIT_LIST_HEAD(&mdsc->dentry_lru); in ceph_mdsc_init()
3414 ceph_caps_init(mdsc); in ceph_mdsc_init()
3415 ceph_adjust_min_caps(mdsc, fsc->min_caps); in ceph_mdsc_init()
3424 static void wait_requests(struct ceph_mds_client *mdsc) in wait_requests() argument
3427 struct ceph_fs_client *fsc = mdsc->fsc; in wait_requests()
3429 mutex_lock(&mdsc->mutex); in wait_requests()
3430 if (__get_oldest_req(mdsc)) { in wait_requests()
3431 mutex_unlock(&mdsc->mutex); in wait_requests()
3434 wait_for_completion_timeout(&mdsc->safe_umount_waiters, in wait_requests()
3438 mutex_lock(&mdsc->mutex); in wait_requests()
3439 while ((req = __get_oldest_req(mdsc))) { in wait_requests()
3442 __unregister_request(mdsc, req); in wait_requests()
3445 mutex_unlock(&mdsc->mutex); in wait_requests()
3453 void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc) in ceph_mdsc_pre_umount() argument
3456 mdsc->stopping = 1; in ceph_mdsc_pre_umount()
3458 drop_leases(mdsc); in ceph_mdsc_pre_umount()
3459 ceph_flush_dirty_caps(mdsc); in ceph_mdsc_pre_umount()
3460 wait_requests(mdsc); in ceph_mdsc_pre_umount()
3472 static void wait_unsafe_requests(struct ceph_mds_client *mdsc, u64 want_tid) in wait_unsafe_requests() argument
3477 mutex_lock(&mdsc->mutex); in wait_unsafe_requests()
3480 req = __get_oldest_req(mdsc); in wait_unsafe_requests()
3493 mutex_unlock(&mdsc->mutex); in wait_unsafe_requests()
3497 mutex_lock(&mdsc->mutex); in wait_unsafe_requests()
3510 mutex_unlock(&mdsc->mutex); in wait_unsafe_requests()
3514 void ceph_mdsc_sync(struct ceph_mds_client *mdsc) in ceph_mdsc_sync() argument
3518 if (mdsc->fsc->mount_state == CEPH_MOUNT_SHUTDOWN) in ceph_mdsc_sync()
3522 mutex_lock(&mdsc->mutex); in ceph_mdsc_sync()
3523 want_tid = mdsc->last_tid; in ceph_mdsc_sync()
3524 mutex_unlock(&mdsc->mutex); in ceph_mdsc_sync()
3526 ceph_flush_dirty_caps(mdsc); in ceph_mdsc_sync()
3527 spin_lock(&mdsc->cap_dirty_lock); in ceph_mdsc_sync()
3528 want_flush = mdsc->cap_flush_seq; in ceph_mdsc_sync()
3529 spin_unlock(&mdsc->cap_dirty_lock); in ceph_mdsc_sync()
3533 wait_unsafe_requests(mdsc, want_tid); in ceph_mdsc_sync()
3534 wait_caps_flush(mdsc, want_flush); in ceph_mdsc_sync()
3540 static bool done_closing_sessions(struct ceph_mds_client *mdsc) in done_closing_sessions() argument
3542 if (mdsc->fsc->mount_state == CEPH_MOUNT_SHUTDOWN) in done_closing_sessions()
3544 return atomic_read(&mdsc->num_sessions) == 0; in done_closing_sessions()
3550 void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc) in ceph_mdsc_close_sessions() argument
3554 struct ceph_fs_client *fsc = mdsc->fsc; in ceph_mdsc_close_sessions()
3560 mutex_lock(&mdsc->mutex); in ceph_mdsc_close_sessions()
3561 for (i = 0; i < mdsc->max_sessions; i++) { in ceph_mdsc_close_sessions()
3562 session = __ceph_lookup_mds_session(mdsc, i); in ceph_mdsc_close_sessions()
3565 mutex_unlock(&mdsc->mutex); in ceph_mdsc_close_sessions()
3567 __close_session(mdsc, session); in ceph_mdsc_close_sessions()
3570 mutex_lock(&mdsc->mutex); in ceph_mdsc_close_sessions()
3572 mutex_unlock(&mdsc->mutex); in ceph_mdsc_close_sessions()
3575 wait_event_timeout(mdsc->session_close_wq, done_closing_sessions(mdsc), in ceph_mdsc_close_sessions()
3579 mutex_lock(&mdsc->mutex); in ceph_mdsc_close_sessions()
3580 for (i = 0; i < mdsc->max_sessions; i++) { in ceph_mdsc_close_sessions()
3581 if (mdsc->sessions[i]) { in ceph_mdsc_close_sessions()
3582 session = get_session(mdsc->sessions[i]); in ceph_mdsc_close_sessions()
3583 __unregister_session(mdsc, session); in ceph_mdsc_close_sessions()
3584 mutex_unlock(&mdsc->mutex); in ceph_mdsc_close_sessions()
3589 mutex_lock(&mdsc->mutex); in ceph_mdsc_close_sessions()
3592 WARN_ON(!list_empty(&mdsc->cap_delay_list)); in ceph_mdsc_close_sessions()
3593 mutex_unlock(&mdsc->mutex); in ceph_mdsc_close_sessions()
3595 ceph_cleanup_empty_realms(mdsc); in ceph_mdsc_close_sessions()
3597 cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */ in ceph_mdsc_close_sessions()
3602 static void ceph_mdsc_stop(struct ceph_mds_client *mdsc) in ceph_mdsc_stop() argument
3605 cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */ in ceph_mdsc_stop()
3606 if (mdsc->mdsmap) in ceph_mdsc_stop()
3607 ceph_mdsmap_destroy(mdsc->mdsmap); in ceph_mdsc_stop()
3608 kfree(mdsc->sessions); in ceph_mdsc_stop()
3609 ceph_caps_finalize(mdsc); in ceph_mdsc_stop()
3614 struct ceph_mds_client *mdsc = fsc->mdsc; in ceph_mdsc_destroy() local
3616 dout("mdsc_destroy %p\n", mdsc); in ceph_mdsc_destroy()
3617 ceph_mdsc_stop(mdsc); in ceph_mdsc_destroy()
3622 fsc->mdsc = NULL; in ceph_mdsc_destroy()
3623 kfree(mdsc); in ceph_mdsc_destroy()
3624 dout("mdsc_destroy %p done\n", mdsc); in ceph_mdsc_destroy()
3631 void ceph_mdsc_handle_map(struct ceph_mds_client *mdsc, struct ceph_msg *msg) in ceph_mdsc_handle_map() argument
3643 if (ceph_check_fsid(mdsc->fsc->client, &fsid) < 0) in ceph_mdsc_handle_map()
3650 ceph_monc_got_mdsmap(&mdsc->fsc->client->monc, epoch); in ceph_mdsc_handle_map()
3651 mutex_lock(&mdsc->mutex); in ceph_mdsc_handle_map()
3652 if (mdsc->mdsmap && epoch <= mdsc->mdsmap->m_epoch) { in ceph_mdsc_handle_map()
3654 epoch, mdsc->mdsmap->m_epoch); in ceph_mdsc_handle_map()
3655 mutex_unlock(&mdsc->mutex); in ceph_mdsc_handle_map()
3666 if (mdsc->mdsmap) { in ceph_mdsc_handle_map()
3667 oldmap = mdsc->mdsmap; in ceph_mdsc_handle_map()
3668 mdsc->mdsmap = newmap; in ceph_mdsc_handle_map()
3669 check_new_map(mdsc, newmap, oldmap); in ceph_mdsc_handle_map()
3672 mdsc->mdsmap = newmap; /* first mds map */ in ceph_mdsc_handle_map()
3674 mdsc->fsc->sb->s_maxbytes = mdsc->mdsmap->m_max_file_size; in ceph_mdsc_handle_map()
3676 __wake_requests(mdsc, &mdsc->waiting_for_map); in ceph_mdsc_handle_map()
3678 mutex_unlock(&mdsc->mutex); in ceph_mdsc_handle_map()
3679 schedule_delayed(mdsc); in ceph_mdsc_handle_map()
3683 mutex_unlock(&mdsc->mutex); in ceph_mdsc_handle_map()
3716 struct ceph_mds_client *mdsc = s->s_mdsc; in peer_reset() local
3719 send_mds_reconnect(mdsc, s); in peer_reset()
3725 struct ceph_mds_client *mdsc = s->s_mdsc; in dispatch() local
3728 mutex_lock(&mdsc->mutex); in dispatch()
3729 if (__verify_registered_session(mdsc, s) < 0) { in dispatch()
3730 mutex_unlock(&mdsc->mutex); in dispatch()
3733 mutex_unlock(&mdsc->mutex); in dispatch()
3737 ceph_mdsc_handle_map(mdsc, msg); in dispatch()
3746 handle_forward(mdsc, s, msg); in dispatch()
3752 ceph_handle_snap(mdsc, s, msg); in dispatch()
3755 handle_lease(mdsc, s, msg); in dispatch()
3778 struct ceph_mds_client *mdsc = s->s_mdsc; in get_authorizer() local
3779 struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth; in get_authorizer()
3806 struct ceph_mds_client *mdsc = s->s_mdsc; in verify_authorizer_reply() local
3807 struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth; in verify_authorizer_reply()
3815 struct ceph_mds_client *mdsc = s->s_mdsc; in invalidate_authorizer() local
3816 struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth; in invalidate_authorizer()
3820 return ceph_monc_validate_auth(&mdsc->fsc->client->monc); in invalidate_authorizer()