Lines matching refs: req, in drivers/block/drbd/drbd_req.c (Linux kernel)

37 static void _drbd_start_io_acct(struct drbd_device *device, struct drbd_request *req)  in _drbd_start_io_acct()  argument
39 generic_start_io_acct(bio_data_dir(req->master_bio), req->i.size >> 9, in _drbd_start_io_acct()
44 static void _drbd_end_io_acct(struct drbd_device *device, struct drbd_request *req) in _drbd_end_io_acct() argument
46 generic_end_io_acct(bio_data_dir(req->master_bio), in _drbd_end_io_acct()
47 &device->vdisk->part0, req->start_jif); in _drbd_end_io_acct()
53 struct drbd_request *req; in drbd_req_new() local
55 req = mempool_alloc(drbd_request_mempool, GFP_NOIO); in drbd_req_new()
56 if (!req) in drbd_req_new()
58 memset(req, 0, sizeof(*req)); in drbd_req_new()
60 drbd_req_make_private_bio(req, bio_src); in drbd_req_new()
61 req->rq_state = bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0; in drbd_req_new()
62 req->device = device; in drbd_req_new()
63 req->master_bio = bio_src; in drbd_req_new()
64 req->epoch = 0; in drbd_req_new()
66 drbd_clear_interval(&req->i); in drbd_req_new()
67 req->i.sector = bio_src->bi_iter.bi_sector; in drbd_req_new()
68 req->i.size = bio_src->bi_iter.bi_size; in drbd_req_new()
69 req->i.local = true; in drbd_req_new()
70 req->i.waiting = false; in drbd_req_new()
72 INIT_LIST_HEAD(&req->tl_requests); in drbd_req_new()
73 INIT_LIST_HEAD(&req->w.list); in drbd_req_new()
74 INIT_LIST_HEAD(&req->req_pending_master_completion); in drbd_req_new()
75 INIT_LIST_HEAD(&req->req_pending_local); in drbd_req_new()
78 atomic_set(&req->completion_ref, 1); in drbd_req_new()
80 kref_init(&req->kref); in drbd_req_new()
81 return req; in drbd_req_new()
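
Stitched together, the matched lines above cover almost all of drbd_req_new(). A sketch of the whole allocator assembled from those fragments; the NULL return on mempool failure and the comments are assumptions, everything else is taken from the lines shown:

    static struct drbd_request *drbd_req_new(struct drbd_device *device, struct bio *bio_src)
    {
        struct drbd_request *req;

        req = mempool_alloc(drbd_request_mempool, GFP_NOIO);
        if (!req)
            return NULL;                    /* assumed failure path */
        memset(req, 0, sizeof(*req));

        drbd_req_make_private_bio(req, bio_src);
        req->rq_state = bio_data_dir(bio_src) == WRITE ? RQ_WRITE : 0;
        req->device = device;
        req->master_bio = bio_src;
        req->epoch = 0;

        /* the interval mirrors the master bio so conflicting I/O can be detected */
        drbd_clear_interval(&req->i);
        req->i.sector = bio_src->bi_iter.bi_sector;
        req->i.size = bio_src->bi_iter.bi_size;
        req->i.local = true;
        req->i.waiting = false;

        INIT_LIST_HEAD(&req->tl_requests);
        INIT_LIST_HEAD(&req->w.list);
        INIT_LIST_HEAD(&req->req_pending_master_completion);
        INIT_LIST_HEAD(&req->req_pending_local);

        /* one completion_ref for the submitting context, plus the kref that
         * the completion_ref holder owns; both are dropped again through
         * drbd_req_put_completion_ref() and kref_put()/kref_sub() */
        atomic_set(&req->completion_ref, 1);
        kref_init(&req->kref);
        return req;
    }
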
85 struct drbd_request *req) in drbd_remove_request_interval() argument
87 struct drbd_device *device = req->device; in drbd_remove_request_interval()
88 struct drbd_interval *i = &req->i; in drbd_remove_request_interval()
99 struct drbd_request *req = container_of(kref, struct drbd_request, kref); in drbd_req_destroy() local
100 struct drbd_device *device = req->device; in drbd_req_destroy()
101 const unsigned s = req->rq_state; in drbd_req_destroy()
103 if ((req->master_bio && !(s & RQ_POSTPONED)) || in drbd_req_destroy()
104 atomic_read(&req->completion_ref) || in drbd_req_destroy()
108 s, atomic_read(&req->completion_ref)); in drbd_req_destroy()
120 list_del_init(&req->tl_requests); in drbd_req_destroy()
124 if (!drbd_interval_empty(&req->i)) { in drbd_req_destroy()
131 drbd_remove_request_interval(root, req); in drbd_req_destroy()
132 } else if (s & (RQ_NET_MASK & ~RQ_NET_DONE) && req->i.size != 0) in drbd_req_destroy()
134 s, (unsigned long long)req->i.sector, req->i.size); in drbd_req_destroy()
153 drbd_set_out_of_sync(device, req->i.sector, req->i.size); in drbd_req_destroy()
156 drbd_set_in_sync(device, req->i.sector, req->i.size); in drbd_req_destroy()
171 drbd_al_complete_io(device, &req->i); in drbd_req_destroy()
176 (unsigned long long) req->i.sector, req->i.size); in drbd_req_destroy()
181 mempool_free(req, drbd_request_mempool); in drbd_req_destroy()
216 void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m) in drbd_req_complete() argument
218 const unsigned s = req->rq_state; in drbd_req_complete()
219 struct drbd_device *device = req->device; in drbd_req_complete()
239 if (!req->master_bio) { in drbd_req_complete()
244 rw = bio_rw(req->master_bio); in drbd_req_complete()
260 error = PTR_ERR(req->private_bio); in drbd_req_complete()
270 req->epoch == atomic_read(&first_peer_device(device)->connection->current_tle_nr)) in drbd_req_complete()
274 _drbd_end_io_acct(device, req); in drbd_req_complete()
290 if (!ok && rw == READ && !list_empty(&req->tl_requests)) in drbd_req_complete()
291 req->rq_state |= RQ_POSTPONED; in drbd_req_complete()
293 if (!(req->rq_state & RQ_POSTPONED)) { in drbd_req_complete()
295 m->bio = req->master_bio; in drbd_req_complete()
296 req->master_bio = NULL; in drbd_req_complete()
301 req->i.completed = true; in drbd_req_complete()
304 if (req->i.waiting) in drbd_req_complete()
311 list_del_init(&req->req_pending_master_completion); in drbd_req_complete()
315 static int drbd_req_put_completion_ref(struct drbd_request *req, struct bio_and_error *m, int put) in drbd_req_put_completion_ref() argument
317 struct drbd_device *device = req->device; in drbd_req_put_completion_ref()
318 D_ASSERT(device, m || (req->rq_state & RQ_POSTPONED)); in drbd_req_put_completion_ref()
320 if (!atomic_sub_and_test(put, &req->completion_ref)) in drbd_req_put_completion_ref()
323 drbd_req_complete(req, m); in drbd_req_put_completion_ref()
325 if (req->rq_state & RQ_POSTPONED) { in drbd_req_put_completion_ref()
328 drbd_restart_request(req); in drbd_req_put_completion_ref()
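
drbd_req_put_completion_ref() appears to return nonzero once the last completion reference is gone and the request was not postponed; callers then drop the kref taken in drbd_req_new(), exactly as drbd_send_and_submit() does at lines 1309-1310 further down. A minimal sketch of that pairing, assuming the complete_master_bio() helper from the same file:

    /* still under the resource's req_lock */
    if (drbd_req_put_completion_ref(req, &m, 1))
        kref_put(&req->kref, drbd_req_destroy);
    spin_unlock_irq(&device->resource->req_lock);

    /* if drbd_req_complete() handed back the master bio, finish it outside the lock */
    if (m.bio)
        complete_master_bio(device, &m);
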
335 static void set_if_null_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req) in set_if_null_req_next() argument
341 connection->req_next = req; in set_if_null_req_next()
344 static void advance_conn_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req) in advance_conn_req_next() argument
349 if (connection->req_next != req) in advance_conn_req_next()
351 list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) { in advance_conn_req_next()
352 const unsigned s = req->rq_state; in advance_conn_req_next()
356 if (&req->tl_requests == &connection->transfer_log) in advance_conn_req_next()
357 req = NULL; in advance_conn_req_next()
358 connection->req_next = req; in advance_conn_req_next()
361 static void set_if_null_req_ack_pending(struct drbd_peer_device *peer_device, struct drbd_request *req) in set_if_null_req_ack_pending() argument
367 connection->req_ack_pending = req; in set_if_null_req_ack_pending()
370 static void advance_conn_req_ack_pending(struct drbd_peer_device *peer_device, struct drbd_request *req) in advance_conn_req_ack_pending() argument
375 if (connection->req_ack_pending != req) in advance_conn_req_ack_pending()
377 list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) { in advance_conn_req_ack_pending()
378 const unsigned s = req->rq_state; in advance_conn_req_ack_pending()
382 if (&req->tl_requests == &connection->transfer_log) in advance_conn_req_ack_pending()
383 req = NULL; in advance_conn_req_ack_pending()
384 connection->req_ack_pending = req; in advance_conn_req_ack_pending()
387 static void set_if_null_req_not_net_done(struct drbd_peer_device *peer_device, struct drbd_request *req) in set_if_null_req_not_net_done() argument
393 connection->req_not_net_done = req; in set_if_null_req_not_net_done()
396 static void advance_conn_req_not_net_done(struct drbd_peer_device *peer_device, struct drbd_request *req) in advance_conn_req_not_net_done() argument
401 if (connection->req_not_net_done != req) in advance_conn_req_not_net_done()
403 list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) { in advance_conn_req_not_net_done()
404 const unsigned s = req->rq_state; in advance_conn_req_not_net_done()
408 if (&req->tl_requests == &connection->transfer_log) in advance_conn_req_not_net_done()
409 req = NULL; in advance_conn_req_not_net_done()
410 connection->req_not_net_done = req; in advance_conn_req_not_net_done()
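
The six helpers above (the set_if_null_ and advance_conn_ pairs for req_next, req_ack_pending and req_not_net_done) all implement one pattern: each connection keeps a cursor into the transfer log that is set lazily to the first request of interest and advanced past req once req no longer qualifies. A sketch of the req_next pair; the NULL checks and the RQ_NET_QUEUED test are assumptions, the rest follows the matched lines:

    static void set_if_null_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req)
    {
        struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
        if (!connection)
            return;
        if (connection->req_next == NULL)
            connection->req_next = req;     /* only set an empty cursor */
    }

    static void advance_conn_req_next(struct drbd_peer_device *peer_device, struct drbd_request *req)
    {
        struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
        if (!connection)
            return;
        if (connection->req_next != req)
            return;                         /* not our cursor, nothing to do */
        /* walk the transfer log forward to the next request that is still queued */
        list_for_each_entry_continue(req, &connection->transfer_log, tl_requests) {
            const unsigned s = req->rq_state;
            if (s & RQ_NET_QUEUED)
                break;
        }
        if (&req->tl_requests == &connection->transfer_log)
            req = NULL;                     /* reached the list head: no candidate left */
        connection->req_next = req;
    }
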
415 static void mod_rq_state(struct drbd_request *req, struct bio_and_error *m, in mod_rq_state() argument
418 struct drbd_device *device = req->device; in mod_rq_state()
420 unsigned s = req->rq_state; in mod_rq_state()
429 req->rq_state &= ~clear; in mod_rq_state()
430 req->rq_state |= set; in mod_rq_state()
433 if (req->rq_state == s) in mod_rq_state()
439 atomic_inc(&req->completion_ref); in mod_rq_state()
443 atomic_inc(&req->completion_ref); in mod_rq_state()
447 atomic_inc(&req->completion_ref); in mod_rq_state()
448 set_if_null_req_next(peer_device, req); in mod_rq_state()
452 kref_get(&req->kref); /* wait for the DONE */ in mod_rq_state()
457 atomic_add(req->i.size >> 9, &device->ap_in_flight); in mod_rq_state()
458 set_if_null_req_not_net_done(peer_device, req); in mod_rq_state()
461 set_if_null_req_ack_pending(peer_device, req); in mod_rq_state()
465 atomic_inc(&req->completion_ref); in mod_rq_state()
473 D_ASSERT(device, req->rq_state & RQ_LOCAL_PENDING); in mod_rq_state()
476 kref_get(&req->kref); in mod_rq_state()
481 if (req->rq_state & RQ_LOCAL_ABORTED) in mod_rq_state()
485 list_del_init(&req->req_pending_local); in mod_rq_state()
491 req->acked_jif = jiffies; in mod_rq_state()
492 advance_conn_req_ack_pending(peer_device, req); in mod_rq_state()
497 advance_conn_req_next(peer_device, req); in mod_rq_state()
502 atomic_sub(req->i.size >> 9, &device->ap_in_flight); in mod_rq_state()
505 req->net_done_jif = jiffies; in mod_rq_state()
510 advance_conn_req_next(peer_device, req); in mod_rq_state()
511 advance_conn_req_ack_pending(peer_device, req); in mod_rq_state()
512 advance_conn_req_not_net_done(peer_device, req); in mod_rq_state()
521 int refcount = atomic_read(&req->kref.refcount); in mod_rq_state()
525 s, req->rq_state, refcount, at_least); in mod_rq_state()
529 if (req->i.waiting) in mod_rq_state()
533 k_put += drbd_req_put_completion_ref(req, m, c_put); in mod_rq_state()
535 kref_sub(&req->kref, k_put, drbd_req_destroy); in mod_rq_state()
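
mod_rq_state() ties the reference counting to the state bits: setting a bit that stands for outstanding work (RQ_LOCAL_PENDING, RQ_NET_PENDING, RQ_NET_QUEUED, RQ_COMPLETION_SUSP) takes a completion_ref, RQ_NET_SENT additionally pins a kref until RQ_NET_DONE, and clearing those bits queues the matching puts. All collected puts are applied once at the end; a condensed sketch of that tail (the wake_up target is an assumption, the rest follows lines 529-535):

    /* tail of mod_rq_state(): apply the collected puts in one batch */
    if (req->i.waiting)
        wake_up(&device->misc_wait);        /* let conflicting peer requests retry */

    if (c_put)
        k_put += drbd_req_put_completion_ref(req, m, c_put);
    if (k_put)
        kref_sub(&req->kref, k_put, drbd_req_destroy);
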
538 static void drbd_report_io_error(struct drbd_device *device, struct drbd_request *req) in drbd_report_io_error() argument
546 (req->rq_state & RQ_WRITE) ? "WRITE" : "READ", in drbd_report_io_error()
547 (unsigned long long)req->i.sector, in drbd_report_io_error()
548 req->i.size >> 9, in drbd_report_io_error()
558 static inline bool is_pending_write_protocol_A(struct drbd_request *req) in is_pending_write_protocol_A() argument
560 return (req->rq_state & in is_pending_write_protocol_A()
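
The return expression of is_pending_write_protocol_A() continues on lines that did not match "req", so the listing cuts it off. Reconstructed from the flag semantics (treat the exact mask as an assumption): a write that is still pending on the network but does not expect a write ack, i.e. a protocol A write in flight:

    static inline bool is_pending_write_protocol_A(struct drbd_request *req)
    {
        return (req->rq_state &
                (RQ_WRITE | RQ_NET_PENDING | RQ_EXP_WRITE_ACK))
                == (RQ_WRITE | RQ_NET_PENDING);
    }
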
577 int __req_mod(struct drbd_request *req, enum drbd_req_event what, in __req_mod() argument
580 struct drbd_device *const device = req->device; in __req_mod()
603 D_ASSERT(device, !(req->rq_state & RQ_NET_MASK)); in __req_mod()
608 req->rq_state |= in __req_mod()
611 mod_rq_state(req, m, 0, RQ_NET_PENDING); in __req_mod()
616 D_ASSERT(device, !(req->rq_state & RQ_LOCAL_MASK)); in __req_mod()
617 mod_rq_state(req, m, 0, RQ_LOCAL_PENDING); in __req_mod()
621 if (req->rq_state & RQ_WRITE) in __req_mod()
622 device->writ_cnt += req->i.size >> 9; in __req_mod()
624 device->read_cnt += req->i.size >> 9; in __req_mod()
626 mod_rq_state(req, m, RQ_LOCAL_PENDING, in __req_mod()
631 mod_rq_state(req, m, 0, RQ_LOCAL_ABORTED); in __req_mod()
635 drbd_report_io_error(device, req); in __req_mod()
637 mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED); in __req_mod()
641 drbd_set_out_of_sync(device, req->i.sector, req->i.size); in __req_mod()
642 drbd_report_io_error(device, req); in __req_mod()
647 mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED); in __req_mod()
654 mod_rq_state(req, m, RQ_LOCAL_PENDING, RQ_LOCAL_COMPLETED); in __req_mod()
668 D_ASSERT(device, drbd_interval_empty(&req->i)); in __req_mod()
669 drbd_insert_interval(&device->read_requests, &req->i); in __req_mod()
673 D_ASSERT(device, req->rq_state & RQ_NET_PENDING); in __req_mod()
674 D_ASSERT(device, (req->rq_state & RQ_LOCAL_MASK) == 0); in __req_mod()
675 mod_rq_state(req, m, 0, RQ_NET_QUEUED); in __req_mod()
676 req->w.cb = w_send_read_req; in __req_mod()
678 &req->w); in __req_mod()
687 D_ASSERT(device, drbd_interval_empty(&req->i)); in __req_mod()
688 drbd_insert_interval(&device->write_requests, &req->i); in __req_mod()
710 D_ASSERT(device, req->rq_state & RQ_NET_PENDING); in __req_mod()
711 mod_rq_state(req, m, 0, RQ_NET_QUEUED|RQ_EXP_BARR_ACK); in __req_mod()
712 req->w.cb = w_send_dblock; in __req_mod()
714 &req->w); in __req_mod()
727 mod_rq_state(req, m, 0, RQ_NET_QUEUED); in __req_mod()
728 req->w.cb = w_send_out_of_sync; in __req_mod()
730 &req->w); in __req_mod()
738 mod_rq_state(req, m, RQ_NET_QUEUED, 0); in __req_mod()
743 if (is_pending_write_protocol_A(req)) in __req_mod()
746 mod_rq_state(req, m, RQ_NET_QUEUED|RQ_NET_PENDING, in __req_mod()
749 mod_rq_state(req, m, RQ_NET_QUEUED, RQ_NET_SENT); in __req_mod()
758 mod_rq_state(req, m, RQ_NET_QUEUED, RQ_NET_DONE); in __req_mod()
763 mod_rq_state(req, m, in __req_mod()
776 D_ASSERT(device, req->rq_state & RQ_NET_PENDING); in __req_mod()
777 D_ASSERT(device, req->rq_state & RQ_EXP_WRITE_ACK); in __req_mod()
778 mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_DONE|RQ_NET_OK); in __req_mod()
782 req->rq_state |= RQ_NET_SIS; in __req_mod()
793 D_ASSERT(device, req->rq_state & RQ_EXP_RECEIVE_ACK); in __req_mod()
798 mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK); in __req_mod()
802 D_ASSERT(device, req->rq_state & RQ_EXP_WRITE_ACK); in __req_mod()
807 D_ASSERT(device, req->rq_state & RQ_NET_PENDING); in __req_mod()
808 req->rq_state |= RQ_POSTPONED; in __req_mod()
809 if (req->i.waiting) in __req_mod()
817 mod_rq_state(req, m, RQ_NET_OK|RQ_NET_PENDING, 0); in __req_mod()
821 if (!(req->rq_state & RQ_LOCAL_COMPLETED)) in __req_mod()
823 mod_rq_state(req, m, RQ_COMPLETION_SUSP, 0); in __req_mod()
827 if (!(req->rq_state & RQ_LOCAL_COMPLETED)) in __req_mod()
830 mod_rq_state(req, m, in __req_mod()
835 if (bio_data_dir(req->master_bio) == WRITE) in __req_mod()
839 req->w.cb = w_restart_disk_io; in __req_mod()
841 &req->w); in __req_mod()
846 if (!(req->rq_state & RQ_WRITE) && !req->w.cb) { in __req_mod()
847 mod_rq_state(req, m, RQ_COMPLETION_SUSP, 0); in __req_mod()
856 if (!(req->rq_state & RQ_NET_OK)) { in __req_mod()
860 mod_rq_state(req, m, RQ_COMPLETION_SUSP, RQ_NET_QUEUED|RQ_NET_PENDING); in __req_mod()
861 if (req->w.cb) { in __req_mod()
864 &req->w); in __req_mod()
865 rv = req->rq_state & RQ_WRITE ? MR_WRITE : MR_READ; in __req_mod()
873 if (!(req->rq_state & RQ_WRITE)) in __req_mod()
876 if (req->rq_state & RQ_NET_PENDING) { in __req_mod()
886 mod_rq_state(req, m, RQ_COMPLETION_SUSP, in __req_mod()
887 (req->rq_state & RQ_NET_MASK) ? RQ_NET_DONE : 0); in __req_mod()
891 D_ASSERT(device, req->rq_state & RQ_NET_PENDING); in __req_mod()
892 mod_rq_state(req, m, RQ_NET_PENDING, RQ_NET_OK|RQ_NET_DONE); in __req_mod()
897 mod_rq_state(req, m, 0, RQ_NET_OK|RQ_NET_DONE); in __req_mod()
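
__req_mod() is normally reached through thin wrappers that supply the struct bio_and_error and complete the master bio after the state change; roughly as follows, quoted from memory of drbd_req.h rather than verbatim:

    static inline int _req_mod(struct drbd_request *req, enum drbd_req_event what)
    {
        struct drbd_device *device = req->device;
        struct bio_and_error m;
        int rv;

        /* __req_mod() may drop the last reference; do not touch req afterwards */
        rv = __req_mod(req, what, &m);
        if (m.bio)
            complete_master_bio(device, &m);

        return rv;
    }

    /* req_mod() is the same, but takes and releases the resource's req_lock. */
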
971 static void complete_conflicting_writes(struct drbd_request *req) in complete_conflicting_writes() argument
974 struct drbd_device *device = req->device; in complete_conflicting_writes()
976 sector_t sector = req->i.sector; in complete_conflicting_writes()
977 int size = req->i.size; in complete_conflicting_writes()
1055 static bool do_remote_read(struct drbd_request *req) in do_remote_read() argument
1057 struct drbd_device *device = req->device; in do_remote_read()
1060 if (req->private_bio) { in do_remote_read()
1062 req->i.sector, req->i.size)) { in do_remote_read()
1063 bio_put(req->private_bio); in do_remote_read()
1064 req->private_bio = NULL; in do_remote_read()
1072 if (req->private_bio == NULL) in do_remote_read()
1082 if (rbm == RB_PREFER_LOCAL && req->private_bio) in do_remote_read()
1085 if (remote_due_to_read_balancing(device, req->i.sector, rbm)) { in do_remote_read()
1086 if (req->private_bio) { in do_remote_read()
1087 bio_put(req->private_bio); in do_remote_read()
1088 req->private_bio = NULL; in do_remote_read()
1100 static int drbd_process_write_request(struct drbd_request *req) in drbd_process_write_request() argument
1102 struct drbd_device *device = req->device; in drbd_process_write_request()
1114 if (unlikely(req->i.size == 0)) { in drbd_process_write_request()
1116 D_ASSERT(device, req->master_bio->bi_rw & REQ_FLUSH); in drbd_process_write_request()
1118 _req_mod(req, QUEUE_AS_DRBD_BARRIER); in drbd_process_write_request()
1128 _req_mod(req, TO_BE_SENT); in drbd_process_write_request()
1129 _req_mod(req, QUEUE_FOR_NET_WRITE); in drbd_process_write_request()
1130 } else if (drbd_set_out_of_sync(device, req->i.sector, req->i.size)) in drbd_process_write_request()
1131 _req_mod(req, QUEUE_FOR_SEND_OOS); in drbd_process_write_request()
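
Pieced together, drbd_process_write_request() decides the replication fate of a write: an empty flush becomes a DRBD barrier; a normal write is either sent to the peer (TO_BE_SENT plus QUEUE_FOR_NET_WRITE) or, if the peer only needs to be marked out of date, queued as an out-of-sync notification. A sketch; the remote/send_oos helpers and the early return are assumptions from memory:

    static int drbd_process_write_request(struct drbd_request *req)
    {
        struct drbd_device *device = req->device;
        int remote, send_oos;

        /* assumed helpers: should the peer get the data, or just an OOS notice? */
        remote = drbd_should_do_remote(device->state);
        send_oos = drbd_should_send_out_of_sync(device->state);

        if (unlikely(req->i.size == 0)) {
            /* empty flushes map to a P_BARRIER in the replication stream */
            D_ASSERT(device, req->master_bio->bi_rw & REQ_FLUSH);
            if (remote)
                _req_mod(req, QUEUE_AS_DRBD_BARRIER);
            return remote;
        }

        if (!remote && !send_oos)
            return 0;

        if (remote) {
            _req_mod(req, TO_BE_SENT);
            _req_mod(req, QUEUE_FOR_NET_WRITE);
        } else if (drbd_set_out_of_sync(device, req->i.sector, req->i.size))
            _req_mod(req, QUEUE_FOR_SEND_OOS);

        return remote;
    }
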
1137 drbd_submit_req_private_bio(struct drbd_request *req) in drbd_submit_req_private_bio() argument
1139 struct drbd_device *device = req->device; in drbd_submit_req_private_bio()
1140 struct bio *bio = req->private_bio; in drbd_submit_req_private_bio()
1151 req->pre_submit_jif = jiffies; in drbd_submit_req_private_bio()
1164 static void drbd_queue_write(struct drbd_device *device, struct drbd_request *req) in drbd_queue_write() argument
1167 list_add_tail(&req->tl_requests, &device->submit.writes); in drbd_queue_write()
1168 list_add_tail(&req->req_pending_master_completion, in drbd_queue_write()
1185 struct drbd_request *req; in drbd_request_prepare() local
1188 req = drbd_req_new(device, bio); in drbd_request_prepare()
1189 if (!req) { in drbd_request_prepare()
1197 req->start_jif = start_jif; in drbd_request_prepare()
1200 bio_put(req->private_bio); in drbd_request_prepare()
1201 req->private_bio = NULL; in drbd_request_prepare()
1205 _drbd_start_io_acct(device, req); in drbd_request_prepare()
1207 if (rw == WRITE && req->private_bio && req->i.size in drbd_request_prepare()
1209 if (!drbd_al_begin_io_fastpath(device, &req->i)) { in drbd_request_prepare()
1211 drbd_queue_write(device, req); in drbd_request_prepare()
1214 req->rq_state |= RQ_IN_ACT_LOG; in drbd_request_prepare()
1215 req->in_actlog_jif = jiffies; in drbd_request_prepare()
1218 return req; in drbd_request_prepare()
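
The fragments above show the activity-log fast path taken while preparing a write: with a usable local bio, drbd_al_begin_io_fastpath() is tried first; if the extent is not already hot, the request is handed to the submitter worker via drbd_queue_write() instead of blocking here, otherwise it is marked as in the activity log right away. A condensed sketch of that tail; the AL_SUSPENDED test and the ap_actlog_cnt accounting are assumptions:

    /* tail of drbd_request_prepare(), condensed */
    _drbd_start_io_acct(device, req);

    if (rw == WRITE && req->private_bio && req->i.size
    && !test_bit(AL_SUSPENDED, &device->flags)) {
        if (!drbd_al_begin_io_fastpath(device, &req->i)) {
            /* extent not hot: let the submitter batch an AL transaction */
            atomic_inc(&device->ap_actlog_cnt);
            drbd_queue_write(device, req);
            return NULL;
        }
        req->rq_state |= RQ_IN_ACT_LOG;
        req->in_actlog_jif = jiffies;
    }

    return req;
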
1221 static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request *req) in drbd_send_and_submit() argument
1224 const int rw = bio_rw(req->master_bio); in drbd_send_and_submit()
1234 complete_conflicting_writes(req); in drbd_send_and_submit()
1245 req->rq_state |= RQ_POSTPONED; in drbd_send_and_submit()
1246 if (req->private_bio) { in drbd_send_and_submit()
1247 bio_put(req->private_bio); in drbd_send_and_submit()
1248 req->private_bio = NULL; in drbd_send_and_submit()
1258 if (!do_remote_read(req) && !req->private_bio) in drbd_send_and_submit()
1263 req->epoch = atomic_read(&first_peer_device(device)->connection->current_tle_nr); in drbd_send_and_submit()
1267 if (likely(req->i.size!=0)) { in drbd_send_and_submit()
1271 list_add_tail(&req->tl_requests, &first_peer_device(device)->connection->transfer_log); in drbd_send_and_submit()
1275 if (!drbd_process_write_request(req)) in drbd_send_and_submit()
1280 if (req->private_bio == NULL) { in drbd_send_and_submit()
1281 _req_mod(req, TO_BE_SENT); in drbd_send_and_submit()
1282 _req_mod(req, QUEUE_FOR_NET_READ); in drbd_send_and_submit()
1289 if (list_empty(&req->req_pending_master_completion)) in drbd_send_and_submit()
1290 list_add_tail(&req->req_pending_master_completion, in drbd_send_and_submit()
1292 if (req->private_bio) { in drbd_send_and_submit()
1294 list_add_tail(&req->req_pending_local, in drbd_send_and_submit()
1296 _req_mod(req, TO_BE_SUBMITTED); in drbd_send_and_submit()
1303 (unsigned long long)req->i.sector, req->i.size >> 9); in drbd_send_and_submit()
1309 if (drbd_req_put_completion_ref(req, &m, 1)) in drbd_send_and_submit()
1310 kref_put(&req->kref, drbd_req_destroy); in drbd_send_and_submit()
1320 drbd_submit_req_private_bio(req); in drbd_send_and_submit()
1327 struct drbd_request *req = drbd_request_prepare(device, bio, start_jif); in __drbd_make_request() local
1328 if (IS_ERR_OR_NULL(req)) in __drbd_make_request()
1330 drbd_send_and_submit(device, req); in __drbd_make_request()
1335 struct drbd_request *req, *tmp; in submit_fast_path() local
1336 list_for_each_entry_safe(req, tmp, incoming, tl_requests) { in submit_fast_path()
1337 const int rw = bio_data_dir(req->master_bio); in submit_fast_path()
1340 && req->private_bio && req->i.size in submit_fast_path()
1342 if (!drbd_al_begin_io_fastpath(device, &req->i)) in submit_fast_path()
1345 req->rq_state |= RQ_IN_ACT_LOG; in submit_fast_path()
1346 req->in_actlog_jif = jiffies; in submit_fast_path()
1350 list_del_init(&req->tl_requests); in submit_fast_path()
1351 drbd_send_and_submit(device, req); in submit_fast_path()
1360 struct drbd_request *req, *tmp; in prepare_al_transaction_nonblock() local
1365 list_for_each_entry_safe(req, tmp, incoming, tl_requests) { in prepare_al_transaction_nonblock()
1366 err = drbd_al_begin_io_nonblock(device, &req->i); in prepare_al_transaction_nonblock()
1372 list_move_tail(&req->tl_requests, later); in prepare_al_transaction_nonblock()
1374 list_move_tail(&req->tl_requests, pending); in prepare_al_transaction_nonblock()
1384 struct drbd_request *req, *tmp; in send_and_submit_pending() local
1386 list_for_each_entry_safe(req, tmp, pending, tl_requests) { in send_and_submit_pending()
1387 req->rq_state |= RQ_IN_ACT_LOG; in send_and_submit_pending()
1388 req->in_actlog_jif = jiffies; in send_and_submit_pending()
1390 list_del_init(&req->tl_requests); in send_and_submit_pending()
1391 drbd_send_and_submit(device, req); in send_and_submit_pending()
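
The last three functions form the deferred-submit path run by the submitter worker: submit_fast_path() drains requests whose activity-log extent is already hot, prepare_al_transaction_nonblock() sorts the rest into a "pending" list (an AL slot could be grabbed without blocking) and a "later" list to retry, and send_and_submit_pending() then marks and submits the pending batch. A sketch of that last step; the ap_actlog_cnt decrement is an assumption, the rest follows the matched lines:

    static void send_and_submit_pending(struct drbd_device *device, struct list_head *pending)
    {
        struct drbd_request *req, *tmp;

        list_for_each_entry_safe(req, tmp, pending, tl_requests) {
            /* the AL transaction just written covers this extent */
            req->rq_state |= RQ_IN_ACT_LOG;
            req->in_actlog_jif = jiffies;
            atomic_dec(&device->ap_actlog_cnt);   /* assumed counterpart of the prepare step */
            list_del_init(&req->tl_requests);
            drbd_send_and_submit(device, req);
        }
    }
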