Lines matching refs:connection (drivers/block/drbd/drbd_worker.c)

112 	drbd_queue_work(&peer_device->connection->sender_work, &peer_req->w);  in drbd_endio_read_sec_final()
166 wake_asender(peer_device->connection); in drbd_endio_write_sec_final()
335 digest_size = crypto_hash_digestsize(peer_device->connection->csums_tfm); in w_e_send_csum()
340 drbd_csum_ee(peer_device->connection->csums_tfm, peer_req, digest); in w_e_send_csum()
429 &first_peer_device(device)->connection->sender_work, in resync_timer_fn()
567 struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL; in make_resync_request() local
602 mutex_lock(&connection->data.mutex); in make_resync_request()
603 if (connection->data.socket) { in make_resync_request()
604 struct sock *sk = connection->data.socket->sk; in make_resync_request()
614 mutex_unlock(&connection->data.mutex); in make_resync_request()
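
The lock/check/unlock sequence above is how make_resync_request() peeks at the data socket before issuing further resync requests. A minimal sketch of that throttling pattern, assuming the surrounding requeue bookkeeping; the `requeue` flag, the sk_wmem_queued/sk_sndbuf comparison and the SOCK_NOSPACE hint are reconstructed here and do not appear in the listing:

	mutex_lock(&connection->data.mutex);
	if (connection->data.socket) {
		struct sock *sk = connection->data.socket->sk;
		int queued = sk->sk_wmem_queued;
		int sndbuf = sk->sk_sndbuf;

		/* Send buffer already half full: stop generating resync
		 * requests for now and let the resync timer requeue us. */
		if (queued > sndbuf / 2) {
			requeue = 1;
			if (sk->sk_socket)
				set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
		}
	} else
		requeue = 1;
	mutex_unlock(&connection->data.mutex);
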
811 struct drbd_connection *connection = first_peer_device(device)->connection; in ping_peer() local
813 clear_bit(GOT_PING_ACK, &connection->flags); in ping_peer()
814 request_ping(connection); in ping_peer()
815 wait_event(connection->ping_wait, in ping_peer()
816 test_bit(GOT_PING_ACK, &connection->flags) || device->state.conn < C_CONNECTED); in ping_peer()
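
Taken together, the five references above are essentially the whole of ping_peer(): clear the ack flag, ask the peer for a ping, then sleep until the ack arrives or the connection drops. A sketch reassembled from those lines:

	static void ping_peer(struct drbd_device *device)
	{
		struct drbd_connection *connection = first_peer_device(device)->connection;

		clear_bit(GOT_PING_ACK, &connection->flags);
		request_ping(connection);
		wait_event(connection->ping_wait,
			   test_bit(GOT_PING_ACK, &connection->flags) ||
			   device->state.conn < C_CONNECTED);
	}
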
842 drbd_queue_work(&first_peer_device(device)->connection->sender_work, in drbd_resync_finished()
1110 if (peer_device->connection->csums_tfm) { in w_e_end_csum_rs_req()
1111 digest_size = crypto_hash_digestsize(peer_device->connection->csums_tfm); in w_e_end_csum_rs_req()
1116 drbd_csum_ee(peer_device->connection->csums_tfm, peer_req, digest); in w_e_end_csum_rs_req()
1161 digest_size = crypto_hash_digestsize(peer_device->connection->verify_tfm); in w_e_end_ov_req()
1169 drbd_csum_ee(peer_device->connection->verify_tfm, peer_req, digest); in w_e_end_ov_req()
1233 digest_size = crypto_hash_digestsize(peer_device->connection->verify_tfm); in w_e_end_ov_reply()
1236 drbd_csum_ee(peer_device->connection->verify_tfm, peer_req, digest); in w_e_end_ov_reply()
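
w_e_end_csum_rs_req(), w_e_end_ov_req() and w_e_end_ov_reply() all share the same digest pattern: size the buffer from the configured transform, then hash the peer request into it. A sketch of that pattern, assuming a GFP_NOIO allocation; the kmalloc and the use of the resulting digest are reconstructed, not shown in the listing:

	digest_size = crypto_hash_digestsize(peer_device->connection->csums_tfm);
	digest = kmalloc(digest_size, GFP_NOIO);
	if (digest) {
		drbd_csum_ee(peer_device->connection->csums_tfm, peer_req, digest);
		/* ...then either compare against the digest received from the
		 * peer, or send it back, depending on the handler... */
	}
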
1282 static int drbd_send_barrier(struct drbd_connection *connection) in drbd_send_barrier() argument
1287 sock = &connection->data; in drbd_send_barrier()
1288 p = conn_prepare_command(connection, sock); in drbd_send_barrier()
1291 p->barrier = connection->send.current_epoch_nr; in drbd_send_barrier()
1293 connection->send.current_epoch_writes = 0; in drbd_send_barrier()
1295 return conn_send_command(connection, sock, P_BARRIER, sizeof(*p), NULL, 0); in drbd_send_barrier()
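
Lines 1287-1295 are most of drbd_send_barrier(); a sketch of the complete function, assuming the usual NULL check on conn_prepare_command() (the -EIO return and the p->pad clearing are assumptions not visible in the listing):

	static int drbd_send_barrier(struct drbd_connection *connection)
	{
		struct p_barrier *p;
		struct drbd_socket *sock;

		sock = &connection->data;
		p = conn_prepare_command(connection, sock);
		if (!p)
			return -EIO;
		p->barrier = connection->send.current_epoch_nr;
		p->pad = 0;
		connection->send.current_epoch_writes = 0;

		return conn_send_command(connection, sock, P_BARRIER, sizeof(*p), NULL, 0);
	}
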
1306 sock = &first_peer_device(device)->connection->data; in w_send_write_hint()
1312 static void re_init_if_first_write(struct drbd_connection *connection, unsigned int epoch) in re_init_if_first_write() argument
1314 if (!connection->send.seen_any_write_yet) { in re_init_if_first_write()
1315 connection->send.seen_any_write_yet = true; in re_init_if_first_write()
1316 connection->send.current_epoch_nr = epoch; in re_init_if_first_write()
1317 connection->send.current_epoch_writes = 0; in re_init_if_first_write()
1321 static void maybe_send_barrier(struct drbd_connection *connection, unsigned int epoch) in maybe_send_barrier() argument
1324 if (!connection->send.seen_any_write_yet) in maybe_send_barrier()
1326 if (connection->send.current_epoch_nr != epoch) { in maybe_send_barrier()
1327 if (connection->send.current_epoch_writes) in maybe_send_barrier()
1328 drbd_send_barrier(connection); in maybe_send_barrier()
1329 connection->send.current_epoch_nr = epoch; in maybe_send_barrier()
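
These two helpers carry the sender-side write-epoch bookkeeping: the first write after a (re)connect seeds the current epoch, and a request from a newer epoch forces a P_BARRIER for the old one if that epoch actually contained writes. Reassembled from the lines above (the early return in maybe_send_barrier mirrors line 1324):

	static void re_init_if_first_write(struct drbd_connection *connection, unsigned int epoch)
	{
		if (!connection->send.seen_any_write_yet) {
			connection->send.seen_any_write_yet = true;
			connection->send.current_epoch_nr = epoch;
			connection->send.current_epoch_writes = 0;
		}
	}

	static void maybe_send_barrier(struct drbd_connection *connection, unsigned int epoch)
	{
		/* nothing to close yet if we have not seen a write */
		if (!connection->send.seen_any_write_yet)
			return;
		if (connection->send.current_epoch_nr != epoch) {
			if (connection->send.current_epoch_writes)
				drbd_send_barrier(connection);
			connection->send.current_epoch_nr = epoch;
		}
	}
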
1338 struct drbd_connection *const connection = peer_device->connection; in w_send_out_of_sync() local
1351 maybe_send_barrier(connection, req->epoch); in w_send_out_of_sync()
1369 struct drbd_connection *connection = peer_device->connection; in w_send_dblock() local
1378 re_init_if_first_write(connection, req->epoch); in w_send_dblock()
1379 maybe_send_barrier(connection, req->epoch); in w_send_dblock()
1380 connection->send.current_epoch_writes++; in w_send_dblock()
1398 struct drbd_connection *connection = peer_device->connection; in w_send_read_req() local
1409 maybe_send_barrier(connection, req->epoch); in w_send_read_req()
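
w_send_out_of_sync(), w_send_dblock() and w_send_read_req() all pass through maybe_send_barrier() before touching the wire, but only the data-block path counts as a write of the current epoch. A sketch of the epoch handling in w_send_dblock(), with the actual send abbreviated; the drbd_send_dblock() call and its error handling are assumptions filled in around the listed lines:

	struct drbd_device *device = req->device;
	struct drbd_peer_device *const peer_device = first_peer_device(device);
	struct drbd_connection *connection = peer_device->connection;
	int err;

	re_init_if_first_write(connection, req->epoch);
	maybe_send_barrier(connection, req->epoch);
	connection->send.current_epoch_writes++;

	err = drbd_send_dblock(peer_device, req);
	/* ...request completion / error handling as in the surrounding code... */
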
1613 static bool use_checksum_based_resync(struct drbd_connection *connection, struct drbd_device *device) in use_checksum_based_resync() argument
1617 csums_after_crash_only = rcu_dereference(connection->net_conf)->csums_after_crash_only; in use_checksum_based_resync()
1619 return connection->agreed_pro_version >= 89 && /* supported? */ in use_checksum_based_resync()
1620 connection->csums_tfm && /* configured? */ in use_checksum_based_resync()
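
use_checksum_based_resync() bundles the conditions hinted at by the trailing comments above. A sketch of the whole predicate, assuming the usual rcu_read_lock()/rcu_read_unlock() pair around the net_conf dereference and the CRASHED_PRIMARY device flag (both reconstructed, not in the listing):

	static bool use_checksum_based_resync(struct drbd_connection *connection, struct drbd_device *device)
	{
		bool csums_after_crash_only;

		rcu_read_lock();
		csums_after_crash_only = rcu_dereference(connection->net_conf)->csums_after_crash_only;
		rcu_read_unlock();

		return connection->agreed_pro_version >= 89 &&		/* supported? */
			connection->csums_tfm &&			/* configured? */
			(csums_after_crash_only == false		/* use for every resync? */
			 || test_bit(CRASHED_PRIMARY, &device->flags));	/* or only after a Primary crash? */
	}
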
1636 struct drbd_connection *connection = peer_device ? peer_device->connection : NULL; in drbd_start_resync() local
1655 conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_HARD); in drbd_start_resync()
1668 conn_request_state(connection, in drbd_start_resync()
1676 if (current == connection->worker.task) { in drbd_start_resync()
1760 device->use_csums = use_checksum_based_resync(connection, device); in drbd_start_resync()
1772 if (side == C_SYNC_SOURCE && connection->agreed_pro_version < 96) in drbd_start_resync()
1775 if (connection->agreed_pro_version < 95 && device->rs_total == 0) { in drbd_start_resync()
1791 nc = rcu_dereference(connection->net_conf); in drbd_start_resync()
1949 static void do_unqueued_work(struct drbd_connection *connection) in do_unqueued_work() argument
1955 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) { in do_unqueued_work()
1978 static void wait_for_work(struct drbd_connection *connection, struct list_head *work_list) in wait_for_work() argument
1984 dequeue_work_batch(&connection->sender_work, work_list); in wait_for_work()
1995 nc = rcu_dereference(connection->net_conf); in wait_for_work()
1999 mutex_lock(&connection->data.mutex); in wait_for_work()
2000 if (connection->data.socket) in wait_for_work()
2001 drbd_tcp_uncork(connection->data.socket); in wait_for_work()
2002 mutex_unlock(&connection->data.mutex); in wait_for_work()
2007 prepare_to_wait(&connection->sender_work.q_wait, &wait, TASK_INTERRUPTIBLE); in wait_for_work()
2008 spin_lock_irq(&connection->resource->req_lock); in wait_for_work()
2009 spin_lock(&connection->sender_work.q_lock); /* FIXME get rid of this one? */ in wait_for_work()
2010 if (!list_empty(&connection->sender_work.q)) in wait_for_work()
2011 list_splice_tail_init(&connection->sender_work.q, work_list); in wait_for_work()
2012 spin_unlock(&connection->sender_work.q_lock); /* FIXME get rid of this one? */ in wait_for_work()
2014 spin_unlock_irq(&connection->resource->req_lock); in wait_for_work()
2026 atomic_read(&connection->current_tle_nr) != in wait_for_work()
2027 connection->send.current_epoch_nr; in wait_for_work()
2028 spin_unlock_irq(&connection->resource->req_lock); in wait_for_work()
2031 maybe_send_barrier(connection, in wait_for_work()
2032 connection->send.current_epoch_nr + 1); in wait_for_work()
2034 if (test_bit(DEVICE_WORK_PENDING, &connection->flags)) in wait_for_work()
2038 if (get_t_state(&connection->worker) != RUNNING) in wait_for_work()
2046 finish_wait(&connection->sender_work.q_wait, &wait); in wait_for_work()
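
The prepare_to_wait/finish_wait references above form the worker's idle loop: drain sender_work under both the resource request lock and the queue lock, send a barrier for an epoch that closed while the queue was empty, and otherwise sleep. A condensed sketch of that fragment of wait_for_work(); the send_barrier condition and the break-out checks are reconstructed around lines 2007-2046:

	DEFINE_WAIT(wait);

	for (;;) {
		int send_barrier;

		prepare_to_wait(&connection->sender_work.q_wait, &wait,
				TASK_INTERRUPTIBLE);
		spin_lock_irq(&connection->resource->req_lock);
		spin_lock(&connection->sender_work.q_lock);
		if (!list_empty(&connection->sender_work.q))
			list_splice_tail_init(&connection->sender_work.q, work_list);
		spin_unlock(&connection->sender_work.q_lock);
		if (!list_empty(work_list) || signal_pending(current)) {
			spin_unlock_irq(&connection->resource->req_lock);
			break;
		}

		/* An epoch may have closed while we had nothing to send;
		 * flush its barrier before going to sleep. */
		send_barrier = atomic_read(&connection->current_tle_nr) !=
			connection->send.current_epoch_nr;
		spin_unlock_irq(&connection->resource->req_lock);
		if (send_barrier)
			maybe_send_barrier(connection,
					connection->send.current_epoch_nr + 1);

		if (test_bit(DEVICE_WORK_PENDING, &connection->flags))
			break;
		if (get_t_state(&connection->worker) != RUNNING)
			break;
		schedule();
	}
	finish_wait(&connection->sender_work.q_wait, &wait);
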
2050 nc = rcu_dereference(connection->net_conf); in wait_for_work()
2053 mutex_lock(&connection->data.mutex); in wait_for_work()
2054 if (connection->data.socket) { in wait_for_work()
2056 drbd_tcp_cork(connection->data.socket); in wait_for_work()
2058 drbd_tcp_uncork(connection->data.socket); in wait_for_work()
2060 mutex_unlock(&connection->data.mutex); in wait_for_work()
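
Around the idle loop, wait_for_work() also adjusts TCP corking on the data socket under data.mutex, driven by the tcp_cork setting in net_conf. A simplified sketch of the re-cork step at the end of the function; the `cork` local and the rcu_read_lock()/rcu_read_unlock() pair are assumptions, and the interaction with the earlier uncork step is omitted:

	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);
	cork = nc ? nc->tcp_cork : 0;
	rcu_read_unlock();

	mutex_lock(&connection->data.mutex);
	if (connection->data.socket) {
		if (cork)
			drbd_tcp_cork(connection->data.socket);
		else
			drbd_tcp_uncork(connection->data.socket);
	}
	mutex_unlock(&connection->data.mutex);
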
2065 struct drbd_connection *connection = thi->connection; in drbd_worker() local
2075 update_worker_timing_details(connection, wait_for_work); in drbd_worker()
2076 wait_for_work(connection, &work_list); in drbd_worker()
2079 if (test_and_clear_bit(DEVICE_WORK_PENDING, &connection->flags)) { in drbd_worker()
2080 update_worker_timing_details(connection, do_unqueued_work); in drbd_worker()
2081 do_unqueued_work(connection); in drbd_worker()
2087 drbd_warn(connection, "Worker got an unexpected signal\n"); in drbd_worker()
2099 update_worker_timing_details(connection, w->cb); in drbd_worker()
2100 if (w->cb(w, connection->cstate < C_WF_REPORT_PARAMS) == 0) in drbd_worker()
2102 if (connection->cstate >= C_WF_REPORT_PARAMS) in drbd_worker()
2103 conn_request_state(connection, NS(conn, C_NETWORK_FAILURE), CS_HARD); in drbd_worker()
2108 if (test_and_clear_bit(DEVICE_WORK_PENDING, &connection->flags)) { in drbd_worker()
2109 update_worker_timing_details(connection, do_unqueued_work); in drbd_worker()
2110 do_unqueued_work(connection); in drbd_worker()
2115 update_worker_timing_details(connection, w->cb); in drbd_worker()
2118 dequeue_work_batch(&connection->sender_work, &work_list); in drbd_worker()
2119 } while (!list_empty(&work_list) || test_bit(DEVICE_WORK_PENDING, &connection->flags)); in drbd_worker()
2122 idr_for_each_entry(&connection->peer_devices, peer_device, vnr) { in drbd_worker()
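
The references from line 2065 onward trace the worker's main loop: wait for a batch of work, run any per-device "unqueued" work first, then run each queued callback, treating a callback failure while connected as a network failure. A skeleton of that loop, reconstructed from the lines above; the work-list handling and signal flushing are abbreviated, and drbd_thread_current_set_cpu() is an assumption:

	struct drbd_work *w = NULL;
	LIST_HEAD(work_list);

	while (get_t_state(thi) == RUNNING) {
		drbd_thread_current_set_cpu(thi);

		if (list_empty(&work_list)) {
			update_worker_timing_details(connection, wait_for_work);
			wait_for_work(connection, &work_list);
		}

		/* per-device work (bitmap IO, resync finishing, ...) first */
		if (test_and_clear_bit(DEVICE_WORK_PENDING, &connection->flags)) {
			update_worker_timing_details(connection, do_unqueued_work);
			do_unqueued_work(connection);
		}

		if (signal_pending(current)) {
			flush_signals(current);
			if (get_t_state(thi) == RUNNING) {
				drbd_warn(connection, "Worker got an unexpected signal\n");
				continue;
			}
			break;
		}

		if (get_t_state(thi) != RUNNING)
			break;

		while (!list_empty(&work_list)) {
			w = list_first_entry(&work_list, struct drbd_work, list);
			list_del_init(&w->list);
			update_worker_timing_details(connection, w->cb);
			if (w->cb(w, connection->cstate < C_WF_REPORT_PARAMS) == 0)
				continue;
			/* a failed callback while "connected" means the link is gone */
			if (connection->cstate >= C_WF_REPORT_PARAMS)
				conn_request_state(connection,
						NS(conn, C_NETWORK_FAILURE), CS_HARD);
		}
	}
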