Lines matching refs: device (DRBD request handling, drivers/block/drbd/drbd_req.c). Each entry gives the source line number, the matching code, and the enclosing function; "argument" and "local" mark whether the reference is a parameter or a local variable.

34 static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector, int size);
37 static void _drbd_start_io_acct(struct drbd_device *device, struct drbd_request *req) in _drbd_start_io_acct() argument
40 &device->vdisk->part0); in _drbd_start_io_acct()
44 static void _drbd_end_io_acct(struct drbd_device *device, struct drbd_request *req) in _drbd_end_io_acct() argument
47 &device->vdisk->part0, req->start_jif); in _drbd_end_io_acct()
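The two accounting helpers above bracket each request: _drbd_start_io_acct charges it against the whole-disk partition (device->vdisk->part0) when it enters, and _drbd_end_io_acct credits the time elapsed since req->start_jif when it completes. A minimal userspace sketch of the same start/end bracketing, with CLOCK_MONOTONIC standing in for jiffies (every name in the sketch is invented):

#include <stdio.h>
#include <time.h>

struct io_acct { unsigned long ios; double total_ms; };

/* counterpart of _drbd_start_io_acct(): record the start time */
static struct timespec io_acct_start(struct io_acct *a)
{
	struct timespec start;

	clock_gettime(CLOCK_MONOTONIC, &start);	/* plays the role of req->start_jif */
	a->ios++;
	return start;
}

/* counterpart of _drbd_end_io_acct(): credit the elapsed time */
static void io_acct_end(struct io_acct *a, struct timespec start)
{
	struct timespec end;

	clock_gettime(CLOCK_MONOTONIC, &end);
	a->total_ms += (end.tv_sec - start.tv_sec) * 1e3
		     + (end.tv_nsec - start.tv_nsec) / 1e6;
}

int main(void)
{
	struct io_acct acct = { 0 };
	struct timespec t = io_acct_start(&acct);

	/* the request would be built, submitted and completed here */
	io_acct_end(&acct, t);
	printf("%lu ios, %.3f ms accounted\n", acct.ios, acct.total_ms);
	return 0;
}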
50 static struct drbd_request *drbd_req_new(struct drbd_device *device, in drbd_req_new() argument
62 req->device = device; in drbd_req_new()
87 struct drbd_device *device = req->device; in drbd_remove_request_interval() local
94 wake_up(&device->misc_wait); in drbd_remove_request_interval()
100 struct drbd_device *device = req->device; in drbd_req_destroy() local
107 drbd_err(device, "drbd_req_destroy: Logic BUG rq_state = 0x%x, completion_ref = %d\n", in drbd_req_destroy()
128 root = &device->write_requests; in drbd_req_destroy()
130 root = &device->read_requests; in drbd_req_destroy()
133 drbd_err(device, "drbd_req_destroy: Logic BUG: interval empty, but: rq_state=0x%x, sect=%llu, size=%u\n", in drbd_req_destroy()
153 drbd_set_out_of_sync(device, req->i.sector, req->i.size); in drbd_req_destroy()
156 drbd_set_in_sync(device, req->i.sector, req->i.size); in drbd_req_destroy()
170 if (get_ldev_if_state(device, D_FAILED)) { in drbd_req_destroy()
171 drbd_al_complete_io(device, &req->i); in drbd_req_destroy()
172 put_ldev(device); in drbd_req_destroy()
174 drbd_warn(device, "Should have called drbd_al_complete_io(, %llu, %u), " in drbd_req_destroy()
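drbd_req_destroy() only touches the backing device through a conditional reference: get_ldev_if_state(device, D_FAILED) succeeds while the disk is still at least D_FAILED, and a successful get is always paired with put_ldev(). A simplified userspace model of that guard follows; note that the real kernel helper orders the reference and the state check more carefully to close races, which this sketch glosses over:

#include <stdatomic.h>
#include <stdbool.h>

enum disk_state { D_DISKLESS, D_FAILED, D_INCONSISTENT, D_UP_TO_DATE };

struct ldev {
	atomic_int local_cnt;	/* outstanding references */
	enum disk_state disk;
};

/* take a reference only while the disk state is at least `min` */
static bool get_ldev_if_state_min(struct ldev *l, enum disk_state min)
{
	if (l->disk < min)
		return false;
	atomic_fetch_add(&l->local_cnt, 1);
	return true;
}

static void put_ldev_ref(struct ldev *l)
{
	atomic_fetch_sub(&l->local_cnt, 1);
}

/* usage mirroring the activity-log completion in drbd_req_destroy() */
static void complete_al_io(struct ldev *l)
{
	if (get_ldev_if_state_min(l, D_FAILED)) {
		/* drbd_al_complete_io(device, &req->i) would run here */
		put_ldev_ref(l);
	}
	/* else: the "Should have called drbd_al_complete_io" warning path */
}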
201 void complete_master_bio(struct drbd_device *device, in complete_master_bio() argument
205 dec_ap_bio(device); in complete_master_bio()
219 struct drbd_device *device = req->device; in drbd_req_complete() local
235 drbd_err(device, "drbd_req_complete: Logic BUG rq_state = 0x%x\n", s); in drbd_req_complete()
240 drbd_err(device, "drbd_req_complete: Logic BUG, master_bio == NULL!\n"); in drbd_req_complete()
270 req->epoch == atomic_read(&first_peer_device(device)->connection->current_tle_nr)) in drbd_req_complete()
271 start_new_tl_epoch(first_peer_device(device)->connection); in drbd_req_complete()
274 _drbd_end_io_acct(device, req); in drbd_req_complete()
305 wake_up(&device->misc_wait); in drbd_req_complete()
317 struct drbd_device *device = req->device; in drbd_req_put_completion_ref() local
318 D_ASSERT(device, m || (req->rq_state & RQ_POSTPONED)); in drbd_req_put_completion_ref()
418 struct drbd_device *device = req->device; in mod_rq_state() local
419 struct drbd_peer_device *peer_device = first_peer_device(device); in mod_rq_state()
424 if (drbd_suspended(device) && !((s | clear) & RQ_COMPLETION_SUSP)) in mod_rq_state()
442 inc_ap_pending(device); in mod_rq_state()
457 atomic_add(req->i.size >> 9, &device->ap_in_flight); in mod_rq_state()
473 D_ASSERT(device, req->rq_state & RQ_LOCAL_PENDING); in mod_rq_state()
489 dec_ap_pending(device); in mod_rq_state()
502 atomic_sub(req->i.size >> 9, &device->ap_in_flight); in mod_rq_state()
523 drbd_err(device, in mod_rq_state()
530 wake_up(&device->misc_wait); in mod_rq_state()
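mod_rq_state() takes a set mask and a clear mask, applies both to req->rq_state, and adjusts counters such as ap_pending and ap_in_flight only for bits that actually flipped. The core arithmetic, reduced to a compilable toy (the bit names are borrowed from the listing; the counter handling is heavily simplified):

#include <stdio.h>

#define RQ_NET_PENDING	(1u << 0)
#define RQ_NET_QUEUED	(1u << 1)

/* core of mod_rq_state(): apply set/clear, count real transitions only */
static unsigned int mod_state(unsigned int old, unsigned int set,
			      unsigned int clear, int *ap_pending)
{
	unsigned int new = (old & ~clear) | set;

	if (!(old & RQ_NET_PENDING) && (new & RQ_NET_PENDING))
		(*ap_pending)++;	/* inc_ap_pending(device) */
	if ((old & RQ_NET_PENDING) && !(new & RQ_NET_PENDING))
		(*ap_pending)--;	/* dec_ap_pending(device) */
	return new;
}

int main(void)
{
	int ap_pending = 0;
	unsigned int s = 0;

	s = mod_state(s, RQ_NET_PENDING | RQ_NET_QUEUED, 0, &ap_pending);
	s = mod_state(s, 0, RQ_NET_PENDING, &ap_pending);
	printf("rq_state=0x%x ap_pending=%d\n", s, ap_pending);	/* 0x2, 0 */
	return 0;
}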
538 static void drbd_report_io_error(struct drbd_device *device, struct drbd_request *req) in drbd_report_io_error() argument
545 drbd_warn(device, "local %s IO error sector %llu+%u on %s\n", in drbd_report_io_error()
549 bdevname(device->ldev->backing_bdev, b)); in drbd_report_io_error()
580 struct drbd_device *const device = req->device; in __req_mod() local
581 struct drbd_peer_device *const peer_device = first_peer_device(device); in __req_mod()
591 drbd_err(device, "LOGIC BUG in %s:%u\n", __FILE__ , __LINE__); in __req_mod()
603 D_ASSERT(device, !(req->rq_state & RQ_NET_MASK)); in __req_mod()
616 D_ASSERT(device, !(req->rq_state & RQ_LOCAL_MASK)); in __req_mod()
622 device->writ_cnt += req->i.size >> 9; in __req_mod()
624 device->read_cnt += req->i.size >> 9; in __req_mod()
635 drbd_report_io_error(device, req); in __req_mod()
636 __drbd_chk_io_error(device, DRBD_WRITE_ERROR); in __req_mod()
641 drbd_set_out_of_sync(device, req->i.sector, req->i.size); in __req_mod()
642 drbd_report_io_error(device, req); in __req_mod()
643 __drbd_chk_io_error(device, DRBD_READ_ERROR); in __req_mod()
668 D_ASSERT(device, drbd_interval_empty(&req->i)); in __req_mod()
669 drbd_insert_interval(&device->read_requests, &req->i); in __req_mod()
671 set_bit(UNPLUG_REMOTE, &device->flags); in __req_mod()
673 D_ASSERT(device, req->rq_state & RQ_NET_PENDING); in __req_mod()
674 D_ASSERT(device, (req->rq_state & RQ_LOCAL_MASK) == 0); in __req_mod()
687 D_ASSERT(device, drbd_interval_empty(&req->i)); in __req_mod()
688 drbd_insert_interval(&device->write_requests, &req->i); in __req_mod()
707 set_bit(UNPLUG_REMOTE, &device->flags); in __req_mod()
710 D_ASSERT(device, req->rq_state & RQ_NET_PENDING); in __req_mod()
776 D_ASSERT(device, req->rq_state & RQ_NET_PENDING); in __req_mod()
777 D_ASSERT(device, req->rq_state & RQ_EXP_WRITE_ACK); in __req_mod()
793 D_ASSERT(device, req->rq_state & RQ_EXP_RECEIVE_ACK); in __req_mod()
802 D_ASSERT(device, req->rq_state & RQ_EXP_WRITE_ACK); in __req_mod()
807 D_ASSERT(device, req->rq_state & RQ_NET_PENDING); in __req_mod()
810 wake_up(&device->misc_wait); in __req_mod()
838 get_ldev(device); /* always succeeds in this call path */ in __req_mod()
880 drbd_err(device, "FIXME (BARRIER_ACKED but pending)\n"); in __req_mod()
891 D_ASSERT(device, req->rq_state & RQ_NET_PENDING); in __req_mod()
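__req_mod() is the request state machine's dispatcher: each "what happened" event is translated into a set/clear pair for mod_rq_state(), guarded by the D_ASSERT sanity checks visible above. A compressed model of that dispatch; the three events are real DRBD event names, but the mask mappings here are simplified illustrations of the pattern, not the driver's exact transitions:

#include <stdio.h>

#define RQ_LOCAL_PENDING	(1u << 0)
#define RQ_LOCAL_COMPLETED	(1u << 1)
#define RQ_NET_PENDING		(1u << 2)
#define RQ_NET_QUEUED		(1u << 3)

enum what { TO_BE_SUBMITTED, COMPLETED_OK, QUEUE_FOR_NET_WRITE };

/* one event in, one set/clear pair out */
static unsigned int req_mod(unsigned int state, enum what what)
{
	unsigned int set = 0, clear = 0;

	switch (what) {
	case TO_BE_SUBMITTED:		/* about to hit the local disk */
		set = RQ_LOCAL_PENDING;
		break;
	case COMPLETED_OK:		/* local bio came back fine */
		set = RQ_LOCAL_COMPLETED;
		clear = RQ_LOCAL_PENDING;
		break;
	case QUEUE_FOR_NET_WRITE:	/* hand a mirror copy to the peer */
		set = RQ_NET_PENDING | RQ_NET_QUEUED;
		break;
	}
	return (state & ~clear) | set;
}

int main(void)
{
	unsigned int s = req_mod(0, TO_BE_SUBMITTED);

	s = req_mod(s, QUEUE_FOR_NET_WRITE);
	s = req_mod(s, COMPLETED_OK);
	printf("rq_state=0x%x\n", s);	/* 0xe: done locally, on the net */
	return 0;
}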
911 static bool drbd_may_do_local_read(struct drbd_device *device, sector_t sector, int size) in drbd_may_do_local_read() argument
916 if (device->state.disk == D_UP_TO_DATE) in drbd_may_do_local_read()
918 if (device->state.disk != D_INCONSISTENT) in drbd_may_do_local_read()
921 nr_sectors = drbd_get_capacity(device->this_bdev); in drbd_may_do_local_read()
922 D_ASSERT(device, sector < nr_sectors); in drbd_may_do_local_read()
923 D_ASSERT(device, esector < nr_sectors); in drbd_may_do_local_read()
928 return drbd_bm_count_bits(device, sbnr, ebnr) == 0; in drbd_may_do_local_read()
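drbd_may_do_local_read() decides whether a read can be served from a local disk that is D_INCONSISTENT: the request's sector range is converted to bitmap bits, and the read stays local only if drbd_bm_count_bits() reports all of them clean. Assuming DRBD's 4 KiB bitmap granularity (BM_BLOCK_SHIFT of 12), the sector-to-bit conversion works out as:

#include <assert.h>
#include <stdio.h>

/* assumed granularity: one bitmap bit covers 4 KiB, i.e. eight
 * 512-byte sectors (DRBD's BM_BLOCK_SHIFT is 12) */
#define BM_BLOCK_SHIFT	12
#define SECTOR_SHIFT	9
#define SECT_TO_BIT(s)	((s) >> (BM_BLOCK_SHIFT - SECTOR_SHIFT))

/* which bitmap bits does a request of `size` bytes at `sector` touch? */
static void request_to_bits(unsigned long long sector, int size,
			    unsigned long *sbnr, unsigned long *ebnr)
{
	unsigned long long esector = sector + (size >> SECTOR_SHIFT) - 1;

	*sbnr = SECT_TO_BIT(sector);
	*ebnr = SECT_TO_BIT(esector);
}

int main(void)
{
	unsigned long s, e;

	request_to_bits(8, 4096, &s, &e);	/* sectors 8..15 */
	printf("bits %lu..%lu\n", s, e);
	assert(s == 1 && e == 1);	/* one clean bit decides this read */
	return 0;
}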
931 static bool remote_due_to_read_balancing(struct drbd_device *device, sector_t sector, in remote_due_to_read_balancing() argument
939 bdi = &device->ldev->backing_bdev->bd_disk->queue->backing_dev_info; in remote_due_to_read_balancing()
942 return atomic_read(&device->local_cnt) > in remote_due_to_read_balancing()
943 atomic_read(&device->ap_pending_cnt) + atomic_read(&device->rs_pending_cnt); in remote_due_to_read_balancing()
953 return test_and_change_bit(READ_BALANCE_RR, &device->flags); in remote_due_to_read_balancing()
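remote_due_to_read_balancing() implements the configured read_balancing policy: one variant consults the backing device's congestion state (the bdi line above), least-pending balancing compares local in-flight reads against what already waits on the peer, and round-robin alternates via test_and_change_bit(READ_BALANCE_RR, &device->flags). A userspace model of the latter two (the enum and struct are invented for illustration):

#include <stdbool.h>
#include <stdio.h>

enum rb_policy { LEAST_PENDING, ROUND_ROBIN };	/* illustrative subset */

struct rb_counters {
	int local_cnt;		/* reads in flight to the local disk */
	int ap_pending_cnt;	/* application requests waiting on the peer */
	int rs_pending_cnt;	/* resync requests waiting on the peer */
	bool rr_toggle;		/* the READ_BALANCE_RR flip-flop */
};

static bool send_read_to_peer(struct rb_counters *c, enum rb_policy p)
{
	switch (p) {
	case LEAST_PENDING:
		/* go remote when the local disk is the busier side */
		return c->local_cnt >
		       c->ap_pending_cnt + c->rs_pending_cnt;
	case ROUND_ROBIN:
		/* strict alternation, like test_and_change_bit() */
		c->rr_toggle = !c->rr_toggle;
		return c->rr_toggle;
	}
	return false;
}

int main(void)
{
	struct rb_counters c = { .local_cnt = 5, .ap_pending_cnt = 1,
				 .rs_pending_cnt = 1 };

	printf("remote? %d\n", send_read_to_peer(&c, LEAST_PENDING)); /* 1 */
	return 0;
}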
974 struct drbd_device *device = req->device; in complete_conflicting_writes() local
979 i = drbd_find_overlap(&device->write_requests, sector, size); in complete_conflicting_writes()
984 prepare_to_wait(&device->misc_wait, &wait, TASK_UNINTERRUPTIBLE); in complete_conflicting_writes()
985 i = drbd_find_overlap(&device->write_requests, sector, size); in complete_conflicting_writes()
990 spin_unlock_irq(&device->resource->req_lock); in complete_conflicting_writes()
992 spin_lock_irq(&device->resource->req_lock); in complete_conflicting_writes()
994 finish_wait(&device->misc_wait, &wait); in complete_conflicting_writes()
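complete_conflicting_writes() is a re-checked wait loop: look up an overlapping interval in device->write_requests; if one exists, drop req_lock, sleep on misc_wait, re-take the lock and search again, because a wakeup only means "some request completed", not "your conflict is gone". The same shape with pthreads, where a fixed array stands in for the interval tree:

#include <pthread.h>
#include <stdbool.h>

struct interval { unsigned long long sector; int size; bool busy; };

struct conflict_table {
	pthread_mutex_t lock;		/* device->resource->req_lock */
	pthread_cond_t misc_wait;	/* counterpart of device->misc_wait */
	struct interval slots[16];	/* toy stand-in for the interval tree */
};

/* does [sector, sector + size) overlap any busy in-flight write? */
static bool has_overlap(struct conflict_table *t,
			unsigned long long sector, int size)
{
	unsigned long long end = sector + (size >> 9);	/* size in bytes */

	for (int i = 0; i < 16; i++) {
		struct interval *iv = &t->slots[i];

		if (iv->busy && iv->sector < end &&
		    sector < iv->sector + (iv->size >> 9))
			return true;
	}
	return false;
}

static void wait_for_conflicts(struct conflict_table *t,
			       unsigned long long sector, int size)
{
	pthread_mutex_lock(&t->lock);
	while (has_overlap(t, sector, size))
		pthread_cond_wait(&t->misc_wait, &t->lock);
	pthread_mutex_unlock(&t->lock);
}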
998 static void maybe_pull_ahead(struct drbd_device *device) in maybe_pull_ahead() argument
1000 struct drbd_connection *connection = first_peer_device(device)->connection; in maybe_pull_ahead()
1013 if (on_congestion == OC_PULL_AHEAD && device->state.conn == C_AHEAD) in maybe_pull_ahead()
1020 if (!get_ldev_if_state(device, D_UP_TO_DATE)) in maybe_pull_ahead()
1024 atomic_read(&device->ap_in_flight) >= nc->cong_fill) { in maybe_pull_ahead()
1025 drbd_info(device, "Congestion-fill threshold reached\n"); in maybe_pull_ahead()
1029 if (device->act_log->used >= nc->cong_extents) { in maybe_pull_ahead()
1030 drbd_info(device, "Congestion-extents threshold reached\n"); in maybe_pull_ahead()
1036 start_new_tl_epoch(first_peer_device(device)->connection); in maybe_pull_ahead()
1039 _drbd_set_state(_NS(device, conn, C_AHEAD), 0, NULL); in maybe_pull_ahead()
1041 _drbd_set_state(_NS(device, conn, C_DISCONNECTING), 0, NULL); in maybe_pull_ahead()
1043 put_ldev(device); in maybe_pull_ahead()
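maybe_pull_ahead() compares two running counters against net_conf thresholds: ap_in_flight (data on the wire, in sectors) against cong_fill, and the activity log's used extents against cong_extents; crossing either one triggers the configured on-congestion action (go C_AHEAD or disconnect). The threshold test in isolation, with an invented struct for the two settings:

#include <stdbool.h>

struct congestion_conf {
	int cong_fill;		/* in-flight sector threshold, 0 = disabled */
	int cong_extents;	/* activity-log extent threshold */
};

static bool congested(const struct congestion_conf *nc,
		      int ap_in_flight, int al_used)
{
	if (nc->cong_fill && ap_in_flight >= nc->cong_fill)
		return true;	/* "Congestion-fill threshold reached" */
	if (al_used >= nc->cong_extents)
		return true;	/* "Congestion-extents threshold reached" */
	return false;
}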
1057 struct drbd_device *device = req->device; in do_remote_read() local
1061 if (!drbd_may_do_local_read(device, in do_remote_read()
1065 put_ldev(device); in do_remote_read()
1069 if (device->state.pdsk != D_UP_TO_DATE) in do_remote_read()
1079 rbm = rcu_dereference(device->ldev->disk_conf)->read_balancing; in do_remote_read()
1085 if (remote_due_to_read_balancing(device, req->i.sector, rbm)) { in do_remote_read()
1089 put_ldev(device); in do_remote_read()
1102 struct drbd_device *device = req->device; in drbd_process_write_request() local
1105 remote = drbd_should_do_remote(device->state); in drbd_process_write_request()
1106 send_oos = drbd_should_send_out_of_sync(device->state); in drbd_process_write_request()
1116 D_ASSERT(device, req->master_bio->bi_rw & REQ_FLUSH); in drbd_process_write_request()
1125 D_ASSERT(device, !(remote && send_oos)); in drbd_process_write_request()
1130 } else if (drbd_set_out_of_sync(device, req->i.sector, req->i.size)) in drbd_process_write_request()
1139 struct drbd_device *device = req->device; in drbd_submit_req_private_bio() local
1143 bio->bi_bdev = device->ldev->backing_bdev; in drbd_submit_req_private_bio()
1150 if (get_ldev(device)) { in drbd_submit_req_private_bio()
1152 if (drbd_insert_fault(device, in drbd_submit_req_private_bio()
1159 put_ldev(device); in drbd_submit_req_private_bio()
1164 static void drbd_queue_write(struct drbd_device *device, struct drbd_request *req) in drbd_queue_write() argument
1166 spin_lock_irq(&device->resource->req_lock); in drbd_queue_write()
1167 list_add_tail(&req->tl_requests, &device->submit.writes); in drbd_queue_write()
1169 &device->pending_master_completion[1 /* WRITE */]); in drbd_queue_write()
1170 spin_unlock_irq(&device->resource->req_lock); in drbd_queue_write()
1171 queue_work(device->submit.wq, &device->submit.worker); in drbd_queue_write()
1173 wake_up(&device->al_wait); in drbd_queue_write()
1182 drbd_request_prepare(struct drbd_device *device, struct bio *bio, unsigned long start_jif) in drbd_request_prepare() argument
1188 req = drbd_req_new(device, bio); in drbd_request_prepare()
1190 dec_ap_bio(device); in drbd_request_prepare()
1193 drbd_err(device, "could not kmalloc() req\n"); in drbd_request_prepare()
1199 if (!get_ldev(device)) { in drbd_request_prepare()
1205 _drbd_start_io_acct(device, req); in drbd_request_prepare()
1208 && !test_bit(AL_SUSPENDED, &device->flags)) { in drbd_request_prepare()
1209 if (!drbd_al_begin_io_fastpath(device, &req->i)) { in drbd_request_prepare()
1210 atomic_inc(&device->ap_actlog_cnt); in drbd_request_prepare()
1211 drbd_queue_write(device, req); in drbd_request_prepare()
1221 static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request *req) in drbd_send_and_submit() argument
1223 struct drbd_resource *resource = device->resource; in drbd_send_and_submit()
1239 maybe_pull_ahead(device); in drbd_send_and_submit()
1243 if (drbd_suspended(device)) { in drbd_send_and_submit()
1249 put_ldev(device); in drbd_send_and_submit()
1263 req->epoch = atomic_read(&first_peer_device(device)->connection->current_tle_nr); in drbd_send_and_submit()
1269 first_peer_device(device)->connection->current_tle_writes++; in drbd_send_and_submit()
1271 list_add_tail(&req->tl_requests, &first_peer_device(device)->connection->transfer_log); in drbd_send_and_submit()
1291 &device->pending_master_completion[rw == WRITE]); in drbd_send_and_submit()
1295 &device->pending_completion[rw == WRITE]); in drbd_send_and_submit()
1302 drbd_err(device, "IO ERROR: neither local nor remote data, sector %llu+%u\n", in drbd_send_and_submit()
1322 complete_master_bio(device, &m); in drbd_send_and_submit()
1325 void __drbd_make_request(struct drbd_device *device, struct bio *bio, unsigned long start_jif) in __drbd_make_request() argument
1327 struct drbd_request *req = drbd_request_prepare(device, bio, start_jif); in __drbd_make_request()
1330 drbd_send_and_submit(device, req); in __drbd_make_request()
1333 static void submit_fast_path(struct drbd_device *device, struct list_head *incoming) in submit_fast_path() argument
1341 && !test_bit(AL_SUSPENDED, &device->flags)) { in submit_fast_path()
1342 if (!drbd_al_begin_io_fastpath(device, &req->i)) in submit_fast_path()
1347 atomic_dec(&device->ap_actlog_cnt); in submit_fast_path()
1351 drbd_send_and_submit(device, req); in submit_fast_path()
1355 static bool prepare_al_transaction_nonblock(struct drbd_device *device, in prepare_al_transaction_nonblock() argument
1364 spin_lock_irq(&device->al_lock); in prepare_al_transaction_nonblock()
1366 err = drbd_al_begin_io_nonblock(device, &req->i); in prepare_al_transaction_nonblock()
1376 spin_unlock_irq(&device->al_lock); in prepare_al_transaction_nonblock()
1378 wake_up(&device->al_wait); in prepare_al_transaction_nonblock()
1382 void send_and_submit_pending(struct drbd_device *device, struct list_head *pending) in send_and_submit_pending() argument
1389 atomic_dec(&device->ap_actlog_cnt); in send_and_submit_pending()
1391 drbd_send_and_submit(device, req); in send_and_submit_pending()
1397 struct drbd_device *device = container_of(ws, struct drbd_device, submit.worker); in do_submit() local
1403 spin_lock_irq(&device->resource->req_lock); in do_submit()
1404 list_splice_tail_init(&device->submit.writes, &incoming); in do_submit()
1405 spin_unlock_irq(&device->resource->req_lock); in do_submit()
1412 submit_fast_path(device, &incoming); in do_submit()
1417 prepare_to_wait(&device->al_wait, &wait, TASK_UNINTERRUPTIBLE); in do_submit()
1420 prepare_al_transaction_nonblock(device, &incoming, &pending, &busy); in do_submit()
1443 spin_lock_irq(&device->resource->req_lock); in do_submit()
1444 list_splice_tail_init(&device->submit.writes, &incoming); in do_submit()
1445 spin_unlock_irq(&device->resource->req_lock); in do_submit()
1447 finish_wait(&device->al_wait, &wait); in do_submit()
1472 if (list_empty(&device->submit.writes)) in do_submit()
1475 spin_lock_irq(&device->resource->req_lock); in do_submit()
1476 list_splice_tail_init(&device->submit.writes, &more_incoming); in do_submit()
1477 spin_unlock_irq(&device->resource->req_lock); in do_submit()
1482 made_progress = prepare_al_transaction_nonblock(device, &more_incoming, &more_pending, &busy); in do_submit()
1490 drbd_al_begin_io_commit(device); in do_submit()
1491 send_and_submit_pending(device, &pending); in do_submit()
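do_submit() drains writes in batches: it splices everything on device->submit.writes onto a private incoming list in O(1) while holding req_lock, then works through the private copy unlocked, retrying the AL fast path and collecting the rest into one activity-log transaction (drbd_al_begin_io_commit). The intake step, sketched with the semantics of the kernel's list_splice_tail_init() on sentinel-headed circular lists:

#include <pthread.h>

struct node { struct node *next, *prev; };

static void list_init(struct node *h) { h->next = h->prev = h; }
static int list_empty(const struct node *h) { return h->next == h; }

/* list_splice_tail_init() semantics: move all of `from` to the tail of
 * `to` in O(1), leaving `from` empty */
static void splice_tail_init(struct node *from, struct node *to)
{
	if (list_empty(from))
		return;
	from->next->prev = to->prev;
	to->prev->next = from->next;
	from->prev->next = to;
	to->prev = from->prev;
	list_init(from);
}

static pthread_mutex_t req_lock = PTHREAD_MUTEX_INITIALIZER;

/* the intake in do_submit(): steal the shared list while locked, then
 * process the private copy with the lock dropped */
static void intake(struct node *shared_writes, struct node *incoming)
{
	pthread_mutex_lock(&req_lock);		/* resource->req_lock */
	splice_tail_init(shared_writes, incoming);
	pthread_mutex_unlock(&req_lock);
	/* `incoming` is now private to this worker thread */
}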
1497 struct drbd_device *device = (struct drbd_device *) q->queuedata; in drbd_make_request() local
1505 D_ASSERT(device, IS_ALIGNED(bio->bi_iter.bi_size, 512)); in drbd_make_request()
1507 inc_ap_bio(device); in drbd_make_request()
1508 __drbd_make_request(device, bio, start_jif); in drbd_make_request()
1525 struct drbd_device *device = (struct drbd_device *) q->queuedata; in drbd_merge_bvec() local
1530 if (bio_size && get_ldev(device)) { in drbd_merge_bvec()
1533 device->ldev->backing_bdev->bd_disk->queue; in drbd_merge_bvec()
1535 bvm->bi_bdev = device->ldev->backing_bdev; in drbd_merge_bvec()
1539 put_ldev(device); in drbd_merge_bvec()
1548 struct drbd_device *device = (struct drbd_device *) data; in request_timer_fn() local
1549 struct drbd_connection *connection = first_peer_device(device)->connection; in request_timer_fn()
1558 if (nc && device->state.conn >= C_WF_REPORT_PARAMS) in request_timer_fn()
1561 if (get_ldev(device)) { /* implicit state.disk >= D_INCONSISTENT */ in request_timer_fn()
1562 dt = rcu_dereference(device->ldev->disk_conf)->disk_timeout * HZ / 10; in request_timer_fn()
1563 put_ldev(device); in request_timer_fn()
1575 spin_lock_irq(&device->resource->req_lock); in request_timer_fn()
1576 req_read = list_first_entry_or_null(&device->pending_completion[0], struct drbd_request, req_pending_local); in request_timer_fn()
1577 req_write = list_first_entry_or_null(&device->pending_completion[1], struct drbd_request, req_pending_local); in request_timer_fn()
1585 if (req_peer && req_peer->device != device) in request_timer_fn()
1618 drbd_warn(device, "Remote failed to finish a request within ko-count * timeout\n"); in request_timer_fn()
1623 !time_in_range(now, device->last_reattach_jif, device->last_reattach_jif + dt)) { in request_timer_fn()
1624 drbd_warn(device, "Local backing device failed to meet the disk-timeout\n"); in request_timer_fn()
1625 __drbd_chk_io_error(device, DRBD_FORCE_DETACH); in request_timer_fn()
1636 spin_unlock_irq(&device->resource->req_lock); in request_timer_fn()
1637 mod_timer(&device->request_timer, nt); in request_timer_fn()
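request_timer_fn() wakes periodically, looks at the oldest entry on each pending_completion list, warns when the peer (ko-count times timeout) or the local disk (disk-timeout) has sat on a request too long, and re-arms itself with mod_timer(). The same shape in plain C, using seconds instead of jiffies and an invented struct for the oldest request:

#include <stdio.h>
#include <time.h>

struct pending_req { time_t start_jif; const char *what; };

/* returns the delay until the next check, mimicking mod_timer(..., nt) */
static time_t check_oldest(const struct pending_req *oldest, time_t timeout)
{
	time_t now = time(NULL);

	if (oldest && now - oldest->start_jif >= timeout) {
		fprintf(stderr, "%s failed to finish within timeout\n",
			oldest->what);
		return timeout;		/* warned; start a fresh period */
	}
	/* otherwise wake up again when the oldest request would expire */
	return oldest ? oldest->start_jif + timeout - now : timeout;
}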