Lines Matching refs:device (cross-reference hits for the identifier "device" in the kernel's DRBD block driver)

166 int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins)  in _get_ldev_if_state()  argument
170 atomic_inc(&device->local_cnt); in _get_ldev_if_state()
171 io_allowed = (device->state.disk >= mins); in _get_ldev_if_state()
173 if (atomic_dec_and_test(&device->local_cnt)) in _get_ldev_if_state()
174 wake_up(&device->misc_wait); in _get_ldev_if_state()
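
Note: _get_ldev_if_state() is DRBD's guarded accessor for the local backing device: take a reference first, then check the disk state, and undo the reference (waking anyone waiting for local_cnt to drain) if the check fails. A minimal sketch of that pattern, with illustrative type and helper names rather than the in-tree definitions:

    #include <linux/atomic.h>
    #include <linux/wait.h>

    struct dev_like {
        atomic_t local_cnt;          /* references to the local disk */
        wait_queue_head_t misc_wait; /* woken when local_cnt reaches 0 */
        int disk_state;
    };

    /* On success the caller holds a reference and must drop it later. */
    static int get_ref_if_state(struct dev_like *d, int min_state)
    {
        int io_allowed;

        atomic_inc(&d->local_cnt);
        io_allowed = (d->disk_state >= min_state);
        if (!io_allowed && atomic_dec_and_test(&d->local_cnt))
            wake_up(&d->misc_wait);
        return io_allowed;
    }
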
307 void tl_abort_disk_io(struct drbd_device *device) in tl_abort_disk_io() argument
309 struct drbd_connection *connection = first_peer_device(device)->connection; in tl_abort_disk_io()
316 if (req->device != device) in tl_abort_disk_io()
494 minor = device_to_minor(peer_device->device); in conn_lowest_minor()
698 err = __send_command(peer_device->connection, peer_device->device->vnr, in drbd_send_command()
753 if (get_ldev(peer_device->device)) { in drbd_send_sync_param()
754 dc = rcu_dereference(peer_device->device->ldev->disk_conf); in drbd_send_sync_param()
760 put_ldev(peer_device->device); in drbd_send_sync_param()
836 struct drbd_device *device = peer_device->device; in _drbd_send_uuids() local
841 if (!get_ldev_if_state(device, D_NEGOTIATING)) in _drbd_send_uuids()
847 put_ldev(device); in _drbd_send_uuids()
850 spin_lock_irq(&device->ldev->md.uuid_lock); in _drbd_send_uuids()
852 p->uuid[i] = cpu_to_be64(device->ldev->md.uuid[i]); in _drbd_send_uuids()
853 spin_unlock_irq(&device->ldev->md.uuid_lock); in _drbd_send_uuids()
855 device->comm_bm_set = drbd_bm_total_weight(device); in _drbd_send_uuids()
856 p->uuid[UI_SIZE] = cpu_to_be64(device->comm_bm_set); in _drbd_send_uuids()
860 uuid_flags |= test_bit(CRASHED_PRIMARY, &device->flags) ? 2 : 0; in _drbd_send_uuids()
861 uuid_flags |= device->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0; in _drbd_send_uuids()
864 put_ldev(device); in _drbd_send_uuids()
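
The uuid_flags word built in _drbd_send_uuids() is a small bitmask sent alongside the UUIDs; the two bits visible in lines 860-861 above can be read as follows (values from the excerpt, names illustrative):

    #define UUID_FLAG_CRASHED_PRIMARY 2 /* CRASHED_PRIMARY was set on this node */
    #define UUID_FLAG_INCONSISTENT    4 /* disk negotiating as D_INCONSISTENT  */
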
878 void drbd_print_uuids(struct drbd_device *device, const char *text) in drbd_print_uuids() argument
880 if (get_ldev_if_state(device, D_NEGOTIATING)) { in drbd_print_uuids()
881 u64 *uuid = device->ldev->md.uuid; in drbd_print_uuids()
882 drbd_info(device, "%s %016llX:%016llX:%016llX:%016llX\n", in drbd_print_uuids()
888 put_ldev(device); in drbd_print_uuids()
890 drbd_info(device, "%s effective data uuid: %016llX\n", in drbd_print_uuids()
892 (unsigned long long)device->ed_uuid); in drbd_print_uuids()
898 struct drbd_device *device = peer_device->device; in drbd_gen_and_send_sync_uuid() local
903 D_ASSERT(device, device->state.disk == D_UP_TO_DATE); in drbd_gen_and_send_sync_uuid()
905 uuid = device->ldev->md.uuid[UI_BITMAP]; in drbd_gen_and_send_sync_uuid()
910 drbd_uuid_set(device, UI_BITMAP, uuid); in drbd_gen_and_send_sync_uuid()
911 drbd_print_uuids(device, "updated sync UUID"); in drbd_gen_and_send_sync_uuid()
912 drbd_md_sync(device); in drbd_gen_and_send_sync_uuid()
924 struct drbd_device *device = peer_device->device; in drbd_send_sizes() local
931 if (get_ldev_if_state(device, D_NEGOTIATING)) { in drbd_send_sizes()
932 D_ASSERT(device, device->ldev->backing_bdev); in drbd_send_sizes()
933 d_size = drbd_get_max_capacity(device->ldev); in drbd_send_sizes()
935 u_size = rcu_dereference(device->ldev->disk_conf)->disk_size; in drbd_send_sizes()
937 q_order_type = drbd_queue_order_type(device); in drbd_send_sizes()
938 max_bio_size = queue_max_hw_sectors(device->ldev->backing_bdev->bd_disk->queue) << 9; in drbd_send_sizes()
940 put_ldev(device); in drbd_send_sizes()
960 p->c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(device->this_bdev)); in drbd_send_sizes()
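
The << 9 in drbd_send_sizes() is the block layer's sector convention, worth spelling out once since the inverse shift appears later in the send accounting:

    /* Block layer convention: 1 sector == 512 bytes, so
     *     bytes   = sectors << 9;
     *     sectors = bytes   >> 9;
     * hence the << 9 on max_bio_size above and the >> 9 on the
     * send_cnt updates further down in this listing. */
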
980 p->state = cpu_to_be32(peer_device->device->state.i); /* Within the send mutex */ in drbd_send_current_state()
1081 static int fill_bitmap_rle_bits(struct drbd_device *device, in fill_bitmap_rle_bits() argument
1096 use_rle = rcu_dereference(first_peer_device(device)->connection->net_conf)->use_rle; in fill_bitmap_rle_bits()
1098 if (!use_rle || first_peer_device(device)->connection->agreed_pro_version < 90) in fill_bitmap_rle_bits()
1118 tmp = (toggle == 0) ? _drbd_bm_find_next_zero(device, c->bit_offset) in fill_bitmap_rle_bits()
1119 : _drbd_bm_find_next(device, c->bit_offset); in fill_bitmap_rle_bits()
1139 drbd_err(device, "unexpected zero runlength while encoding bitmap " in fill_bitmap_rle_bits()
1148 drbd_err(device, "error while encoding bitmap: %d\n", bits); in fill_bitmap_rle_bits()
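
fill_bitmap_rle_bits() compresses the bitmap by alternating between "find next zero bit" and "find next set bit" and emitting the distance between hits as a run length, which is why an unexpected zero run length is reported as an encoding error above. A condensed sketch of that walk; emit_runlength() is a hypothetical output hook, not a DRBD function:

    #include <linux/bitops.h>

    static void emit_runlength(unsigned long rl); /* hypothetical encoder hook */

    static void rle_walk(const unsigned long *bm, unsigned long nbits)
    {
        unsigned long offset = 0;
        int toggle = 0; /* alternate between zero-run and one-run scans */

        while (offset < nbits) {
            unsigned long next = (toggle == 0) ?
                find_next_zero_bit(bm, nbits, offset) :
                find_next_bit(bm, nbits, offset);

            /* a zero run length is only legal for the very first run */
            emit_runlength(next - offset);
            offset = next;
            toggle = !toggle;
        }
    }
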
1185 send_bitmap_rle_or_plain(struct drbd_device *device, struct bm_xfer_ctx *c) in send_bitmap_rle_or_plain() argument
1187 struct drbd_socket *sock = &first_peer_device(device)->connection->data; in send_bitmap_rle_or_plain()
1188 unsigned int header_size = drbd_header_size(first_peer_device(device)->connection); in send_bitmap_rle_or_plain()
1192 len = fill_bitmap_rle_bits(device, p, in send_bitmap_rle_or_plain()
1199 err = __send_command(first_peer_device(device)->connection, device->vnr, sock, in send_bitmap_rle_or_plain()
1219 drbd_bm_get_lel(device, c->word_offset, num_words, p); in send_bitmap_rle_or_plain()
1220 …err = __send_command(first_peer_device(device)->connection, device->vnr, sock, P_BITMAP, len, NULL… in send_bitmap_rle_or_plain()
1232 INFO_bm_xfer_stats(device, "send", c); in send_bitmap_rle_or_plain()
1241 static int _drbd_send_bitmap(struct drbd_device *device) in _drbd_send_bitmap() argument
1246 if (!expect(device->bitmap)) in _drbd_send_bitmap()
1249 if (get_ldev(device)) { in _drbd_send_bitmap()
1250 if (drbd_md_test_flag(device->ldev, MDF_FULL_SYNC)) { in _drbd_send_bitmap()
1251 drbd_info(device, "Writing the whole bitmap, MDF_FullSync was set.\n"); in _drbd_send_bitmap()
1252 drbd_bm_set_all(device); in _drbd_send_bitmap()
1253 if (drbd_bm_write(device)) { in _drbd_send_bitmap()
1257 drbd_err(device, "Failed to write bitmap to disk!\n"); in _drbd_send_bitmap()
1259 drbd_md_clear_flag(device, MDF_FULL_SYNC); in _drbd_send_bitmap()
1260 drbd_md_sync(device); in _drbd_send_bitmap()
1263 put_ldev(device); in _drbd_send_bitmap()
1267 .bm_bits = drbd_bm_bits(device), in _drbd_send_bitmap()
1268 .bm_words = drbd_bm_words(device), in _drbd_send_bitmap()
1272 err = send_bitmap_rle_or_plain(device, &c); in _drbd_send_bitmap()
1278 int drbd_send_bitmap(struct drbd_device *device) in drbd_send_bitmap() argument
1280 struct drbd_socket *sock = &first_peer_device(device)->connection->data; in drbd_send_bitmap()
1285 err = !_drbd_send_bitmap(device); in drbd_send_bitmap()
1321 if (peer_device->device->state.conn < C_CONNECTED) in _drbd_send_ack()
1331 p->seq_num = cpu_to_be32(atomic_inc_return(&peer_device->device->packet_seq)); in _drbd_send_ack()
1495 peer_device->device->send_cnt += size >> 9; in _drbd_no_send_page()
1529 drbd_warn(peer_device->device, "%s: size=%d len=%d sent=%d\n", in _drbd_send_page()
1543 peer_device->device->send_cnt += size >> 9; in _drbd_send_page()
1621 struct drbd_device *device = peer_device->device; in drbd_send_dblock() local
1637 p->seq_num = cpu_to_be32(atomic_inc_return(&device->packet_seq)); in drbd_send_dblock()
1639 if (device->state.conn >= C_SYNC_SOURCE && in drbd_send_dblock()
1640 device->state.conn <= C_PAUSED_SYNC_T) in drbd_send_dblock()
1656 err = __send_command(peer_device->connection, device->vnr, sock, P_TRIM, sizeof(*t), NULL, 0); in drbd_send_dblock()
1664 …err = __send_command(peer_device->connection, device->vnr, sock, P_DATA, sizeof(*p) + digest_size,… in drbd_send_dblock()
1689 drbd_warn(device, in drbd_send_dblock()
1710 struct drbd_device *device = peer_device->device; in drbd_send_block() local
1730 …err = __send_command(peer_device->connection, device->vnr, sock, cmd, sizeof(*p) + digest_size, NU… in drbd_send_block()
1859 struct drbd_device *device = bdev->bd_disk->private_data; in drbd_open() local
1864 spin_lock_irqsave(&device->resource->req_lock, flags); in drbd_open()
1868 if (device->state.role != R_PRIMARY) { in drbd_open()
1876 device->open_cnt++; in drbd_open()
1877 spin_unlock_irqrestore(&device->resource->req_lock, flags); in drbd_open()
1885 struct drbd_device *device = gd->private_data; in drbd_release() local
1887 device->open_cnt--; in drbd_release()
1891 static void drbd_set_defaults(struct drbd_device *device) in drbd_set_defaults() argument
1895 device->state = (union drbd_dev_state) { in drbd_set_defaults()
1904 void drbd_init_set_defaults(struct drbd_device *device) in drbd_init_set_defaults() argument
1909 drbd_set_defaults(device); in drbd_init_set_defaults()
1911 atomic_set(&device->ap_bio_cnt, 0); in drbd_init_set_defaults()
1912 atomic_set(&device->ap_actlog_cnt, 0); in drbd_init_set_defaults()
1913 atomic_set(&device->ap_pending_cnt, 0); in drbd_init_set_defaults()
1914 atomic_set(&device->rs_pending_cnt, 0); in drbd_init_set_defaults()
1915 atomic_set(&device->unacked_cnt, 0); in drbd_init_set_defaults()
1916 atomic_set(&device->local_cnt, 0); in drbd_init_set_defaults()
1917 atomic_set(&device->pp_in_use_by_net, 0); in drbd_init_set_defaults()
1918 atomic_set(&device->rs_sect_in, 0); in drbd_init_set_defaults()
1919 atomic_set(&device->rs_sect_ev, 0); in drbd_init_set_defaults()
1920 atomic_set(&device->ap_in_flight, 0); in drbd_init_set_defaults()
1921 atomic_set(&device->md_io.in_use, 0); in drbd_init_set_defaults()
1923 mutex_init(&device->own_state_mutex); in drbd_init_set_defaults()
1924 device->state_mutex = &device->own_state_mutex; in drbd_init_set_defaults()
1926 spin_lock_init(&device->al_lock); in drbd_init_set_defaults()
1927 spin_lock_init(&device->peer_seq_lock); in drbd_init_set_defaults()
1929 INIT_LIST_HEAD(&device->active_ee); in drbd_init_set_defaults()
1930 INIT_LIST_HEAD(&device->sync_ee); in drbd_init_set_defaults()
1931 INIT_LIST_HEAD(&device->done_ee); in drbd_init_set_defaults()
1932 INIT_LIST_HEAD(&device->read_ee); in drbd_init_set_defaults()
1933 INIT_LIST_HEAD(&device->net_ee); in drbd_init_set_defaults()
1934 INIT_LIST_HEAD(&device->resync_reads); in drbd_init_set_defaults()
1935 INIT_LIST_HEAD(&device->resync_work.list); in drbd_init_set_defaults()
1936 INIT_LIST_HEAD(&device->unplug_work.list); in drbd_init_set_defaults()
1937 INIT_LIST_HEAD(&device->bm_io_work.w.list); in drbd_init_set_defaults()
1938 INIT_LIST_HEAD(&device->pending_master_completion[0]); in drbd_init_set_defaults()
1939 INIT_LIST_HEAD(&device->pending_master_completion[1]); in drbd_init_set_defaults()
1940 INIT_LIST_HEAD(&device->pending_completion[0]); in drbd_init_set_defaults()
1941 INIT_LIST_HEAD(&device->pending_completion[1]); in drbd_init_set_defaults()
1943 device->resync_work.cb = w_resync_timer; in drbd_init_set_defaults()
1944 device->unplug_work.cb = w_send_write_hint; in drbd_init_set_defaults()
1945 device->bm_io_work.w.cb = w_bitmap_io; in drbd_init_set_defaults()
1947 init_timer(&device->resync_timer); in drbd_init_set_defaults()
1948 init_timer(&device->md_sync_timer); in drbd_init_set_defaults()
1949 init_timer(&device->start_resync_timer); in drbd_init_set_defaults()
1950 init_timer(&device->request_timer); in drbd_init_set_defaults()
1951 device->resync_timer.function = resync_timer_fn; in drbd_init_set_defaults()
1952 device->resync_timer.data = (unsigned long) device; in drbd_init_set_defaults()
1953 device->md_sync_timer.function = md_sync_timer_fn; in drbd_init_set_defaults()
1954 device->md_sync_timer.data = (unsigned long) device; in drbd_init_set_defaults()
1955 device->start_resync_timer.function = start_resync_timer_fn; in drbd_init_set_defaults()
1956 device->start_resync_timer.data = (unsigned long) device; in drbd_init_set_defaults()
1957 device->request_timer.function = request_timer_fn; in drbd_init_set_defaults()
1958 device->request_timer.data = (unsigned long) device; in drbd_init_set_defaults()
1960 init_waitqueue_head(&device->misc_wait); in drbd_init_set_defaults()
1961 init_waitqueue_head(&device->state_wait); in drbd_init_set_defaults()
1962 init_waitqueue_head(&device->ee_wait); in drbd_init_set_defaults()
1963 init_waitqueue_head(&device->al_wait); in drbd_init_set_defaults()
1964 init_waitqueue_head(&device->seq_wait); in drbd_init_set_defaults()
1966 device->resync_wenr = LC_FREE; in drbd_init_set_defaults()
1967 device->peer_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE; in drbd_init_set_defaults()
1968 device->local_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE; in drbd_init_set_defaults()
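
The init_timer()/.function/.data wiring in drbd_init_set_defaults() is the pre-v4.15 kernel timer API. For reference, the same setup under the current API would look roughly like this (a sketch of the converted form, not this tree's code):

    #include <linux/timer.h>

    static void resync_timer_fn(struct timer_list *t)
    {
        struct drbd_device *device = from_timer(device, t, resync_timer);
        /* ... kick the resync work for this device ... */
    }

    /* in drbd_init_set_defaults(): */
    timer_setup(&device->resync_timer, resync_timer_fn, 0);
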
1971 void drbd_device_cleanup(struct drbd_device *device) in drbd_device_cleanup() argument
1974 if (first_peer_device(device)->connection->receiver.t_state != NONE) in drbd_device_cleanup()
1975 drbd_err(device, "ASSERT FAILED: receiver t_state == %d expected 0.\n", in drbd_device_cleanup()
1976 first_peer_device(device)->connection->receiver.t_state); in drbd_device_cleanup()
1978 device->al_writ_cnt = in drbd_device_cleanup()
1979 device->bm_writ_cnt = in drbd_device_cleanup()
1980 device->read_cnt = in drbd_device_cleanup()
1981 device->recv_cnt = in drbd_device_cleanup()
1982 device->send_cnt = in drbd_device_cleanup()
1983 device->writ_cnt = in drbd_device_cleanup()
1984 device->p_size = in drbd_device_cleanup()
1985 device->rs_start = in drbd_device_cleanup()
1986 device->rs_total = in drbd_device_cleanup()
1987 device->rs_failed = 0; in drbd_device_cleanup()
1988 device->rs_last_events = 0; in drbd_device_cleanup()
1989 device->rs_last_sect_ev = 0; in drbd_device_cleanup()
1991 device->rs_mark_left[i] = 0; in drbd_device_cleanup()
1992 device->rs_mark_time[i] = 0; in drbd_device_cleanup()
1994 D_ASSERT(device, first_peer_device(device)->connection->net_conf == NULL); in drbd_device_cleanup()
1996 drbd_set_my_capacity(device, 0); in drbd_device_cleanup()
1997 if (device->bitmap) { in drbd_device_cleanup()
1999 drbd_bm_resize(device, 0, 1); in drbd_device_cleanup()
2000 drbd_bm_cleanup(device); in drbd_device_cleanup()
2003 drbd_free_ldev(device->ldev); in drbd_device_cleanup()
2004 device->ldev = NULL; in drbd_device_cleanup()
2006 clear_bit(AL_SUSPENDED, &device->flags); in drbd_device_cleanup()
2008 D_ASSERT(device, list_empty(&device->active_ee)); in drbd_device_cleanup()
2009 D_ASSERT(device, list_empty(&device->sync_ee)); in drbd_device_cleanup()
2010 D_ASSERT(device, list_empty(&device->done_ee)); in drbd_device_cleanup()
2011 D_ASSERT(device, list_empty(&device->read_ee)); in drbd_device_cleanup()
2012 D_ASSERT(device, list_empty(&device->net_ee)); in drbd_device_cleanup()
2013 D_ASSERT(device, list_empty(&device->resync_reads)); in drbd_device_cleanup()
2014 D_ASSERT(device, list_empty(&first_peer_device(device)->connection->sender_work.q)); in drbd_device_cleanup()
2015 D_ASSERT(device, list_empty(&device->resync_work.list)); in drbd_device_cleanup()
2016 D_ASSERT(device, list_empty(&device->unplug_work.list)); in drbd_device_cleanup()
2018 drbd_set_defaults(device); in drbd_device_cleanup()
2138 static void drbd_release_all_peer_reqs(struct drbd_device *device) in drbd_release_all_peer_reqs() argument
2142 rr = drbd_free_peer_reqs(device, &device->active_ee); in drbd_release_all_peer_reqs()
2144 drbd_err(device, "%d EEs in active list found!\n", rr); in drbd_release_all_peer_reqs()
2146 rr = drbd_free_peer_reqs(device, &device->sync_ee); in drbd_release_all_peer_reqs()
2148 drbd_err(device, "%d EEs in sync list found!\n", rr); in drbd_release_all_peer_reqs()
2150 rr = drbd_free_peer_reqs(device, &device->read_ee); in drbd_release_all_peer_reqs()
2152 drbd_err(device, "%d EEs in read list found!\n", rr); in drbd_release_all_peer_reqs()
2154 rr = drbd_free_peer_reqs(device, &device->done_ee); in drbd_release_all_peer_reqs()
2156 drbd_err(device, "%d EEs in done list found!\n", rr); in drbd_release_all_peer_reqs()
2158 rr = drbd_free_peer_reqs(device, &device->net_ee); in drbd_release_all_peer_reqs()
2160 drbd_err(device, "%d EEs in net list found!\n", rr); in drbd_release_all_peer_reqs()
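
drbd_release_all_peer_reqs() repeats one check per list. Purely as a design note, the same could be written table-driven (an illustrative refactor, not the in-tree code):

    struct { struct list_head *head; const char *name; } lists[] = {
        { &device->active_ee, "active" },
        { &device->sync_ee,   "sync"   },
        { &device->read_ee,   "read"   },
        { &device->done_ee,   "done"   },
        { &device->net_ee,    "net"    },
    };
    int i, rr;

    for (i = 0; i < ARRAY_SIZE(lists); i++) {
        rr = drbd_free_peer_reqs(device, lists[i].head);
        if (rr)
            drbd_err(device, "%d EEs in %s list found!\n",
                     rr, lists[i].name);
    }
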
2166 struct drbd_device *device = container_of(kref, struct drbd_device, kref); in drbd_destroy_device() local
2167 struct drbd_resource *resource = device->resource; in drbd_destroy_device()
2170 del_timer_sync(&device->request_timer); in drbd_destroy_device()
2173 D_ASSERT(device, device->open_cnt == 0); in drbd_destroy_device()
2179 if (device->this_bdev) in drbd_destroy_device()
2180 bdput(device->this_bdev); in drbd_destroy_device()
2182 drbd_free_ldev(device->ldev); in drbd_destroy_device()
2183 device->ldev = NULL; in drbd_destroy_device()
2185 drbd_release_all_peer_reqs(device); in drbd_destroy_device()
2187 lc_destroy(device->act_log); in drbd_destroy_device()
2188 lc_destroy(device->resync); in drbd_destroy_device()
2190 kfree(device->p_uuid); in drbd_destroy_device()
2193 if (device->bitmap) /* should no longer be there. */ in drbd_destroy_device()
2194 drbd_bm_cleanup(device); in drbd_destroy_device()
2195 __free_page(device->md_io.page); in drbd_destroy_device()
2196 put_disk(device->vdisk); in drbd_destroy_device()
2197 blk_cleanup_queue(device->rq_queue); in drbd_destroy_device()
2198 kfree(device->rs_plan_s); in drbd_destroy_device()
2203 for_each_peer_device_safe(peer_device, tmp_peer_device, device) { in drbd_destroy_device()
2207 memset(device, 0xfd, sizeof(*device)); in drbd_destroy_device()
2208 kfree(device); in drbd_destroy_device()
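
drbd_destroy_device() is the kref release callback, and the memset(device, 0xfd, ...) just before kfree() poisons the structure so stale pointers fail loudly instead of silently. The surrounding lifecycle, condensed to a sketch:

    #include <linux/kref.h>

    kref_init(&device->kref);                     /* created: refcount 1      */
    kref_get(&device->kref);                      /* e.g. one ref per idr use */
    /* ... */
    kref_put(&device->kref, drbd_destroy_device); /* drop a holder's ref      */
    /* the put that drops the last reference runs drbd_destroy_device() */
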
2234 struct drbd_device *device = req->device; in do_retry() local
2246 drbd_err(device, "req=%p completion_ref=%d rq_state=%x\n", in do_retry()
2270 inc_ap_bio(device); in do_retry()
2271 __drbd_make_request(device, bio, start_jif); in do_retry()
2287 dec_ap_bio(req->device); in drbd_restart_request()
2320 struct drbd_device *device; in drbd_cleanup() local
2340 idr_for_each_entry(&drbd_devices, device, i) in drbd_cleanup()
2341 drbd_delete_device(device); in drbd_cleanup()
2366 struct drbd_device *device = congested_data; in drbd_congested() local
2371 if (!may_inc_ap_bio(device)) { in drbd_congested()
2378 if (test_bit(CALLBACK_PENDING, &first_peer_device(device)->connection->flags)) { in drbd_congested()
2385 if (!get_ldev_if_state(device, D_UP_TO_DATE)) in drbd_congested()
2388 put_ldev(device); in drbd_congested()
2394 if (get_ldev(device)) { in drbd_congested()
2395 q = bdev_get_queue(device->ldev->backing_bdev); in drbd_congested()
2397 put_ldev(device); in drbd_congested()
2403 test_bit(NET_CONGESTED, &first_peer_device(device)->connection->flags)) { in drbd_congested()
2409 device->congestion_reason = reason; in drbd_congested()
2701 static int init_submitter(struct drbd_device *device) in init_submitter() argument
2705 device->submit.wq = alloc_workqueue("drbd%u_submit", in init_submitter()
2706 WQ_UNBOUND | WQ_MEM_RECLAIM, 1, device->minor); in init_submitter()
2707 if (!device->submit.wq) in init_submitter()
2710 INIT_WORK(&device->submit.worker, do_submit); in init_submitter()
2711 INIT_LIST_HEAD(&device->submit.writes); in init_submitter()
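
init_submitter() creates one unbound, single-threaded (max_active = 1) workqueue per device; WQ_MEM_RECLAIM matters because a block driver's submit path must keep making forward progress under memory pressure. A usage sketch for the pieces set up above:

    /* hand queued writes to the worker; do_submit() runs them */
    queue_work(device->submit.wq, &device->submit.worker);

    /* on teardown */
    destroy_workqueue(device->submit.wq);
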
2719 struct drbd_device *device; in drbd_create_device() local
2727 device = minor_to_device(minor); in drbd_create_device()
2728 if (device) in drbd_create_device()
2732 device = kzalloc(sizeof(struct drbd_device), GFP_KERNEL); in drbd_create_device()
2733 if (!device) in drbd_create_device()
2735 kref_init(&device->kref); in drbd_create_device()
2738 device->resource = resource; in drbd_create_device()
2739 device->minor = minor; in drbd_create_device()
2740 device->vnr = vnr; in drbd_create_device()
2742 drbd_init_set_defaults(device); in drbd_create_device()
2747 device->rq_queue = q; in drbd_create_device()
2748 q->queuedata = device; in drbd_create_device()
2753 device->vdisk = disk; in drbd_create_device()
2762 disk->private_data = device; in drbd_create_device()
2764 device->this_bdev = bdget(MKDEV(DRBD_MAJOR, minor)); in drbd_create_device()
2766 device->this_bdev->bd_contains = device->this_bdev; in drbd_create_device()
2769 q->backing_dev_info.congested_data = device; in drbd_create_device()
2780 device->md_io.page = alloc_page(GFP_KERNEL); in drbd_create_device()
2781 if (!device->md_io.page) in drbd_create_device()
2784 if (drbd_bm_init(device)) in drbd_create_device()
2786 device->read_requests = RB_ROOT; in drbd_create_device()
2787 device->write_requests = RB_ROOT; in drbd_create_device()
2789 id = idr_alloc(&drbd_devices, device, minor, minor + 1, GFP_KERNEL); in drbd_create_device()
2795 kref_get(&device->kref); in drbd_create_device()
2797 id = idr_alloc(&resource->devices, device, vnr, vnr + 1, GFP_KERNEL); in drbd_create_device()
2803 kref_get(&device->kref); in drbd_create_device()
2805 INIT_LIST_HEAD(&device->peer_devices); in drbd_create_device()
2806 INIT_LIST_HEAD(&device->pending_bitmap_io); in drbd_create_device()
2812 peer_device->device = device; in drbd_create_device()
2814 list_add(&peer_device->peer_devices, &device->peer_devices); in drbd_create_device()
2815 kref_get(&device->kref); in drbd_create_device()
2826 if (init_submitter(device)) { in drbd_create_device()
2834 device->state.conn = first_connection(resource)->cstate; in drbd_create_device()
2835 if (device->state.conn == C_WF_REPORT_PARAMS) { in drbd_create_device()
2836 for_each_peer_device(peer_device, device) in drbd_create_device()
2840 for_each_peer_device(peer_device, device) in drbd_create_device()
2842 drbd_debugfs_device_add(device); in drbd_create_device()
2855 for_each_peer_device_safe(peer_device, tmp_peer_device, device) { in drbd_create_device()
2864 drbd_bm_cleanup(device); in drbd_create_device()
2866 __free_page(device->md_io.page); in drbd_create_device()
2873 kfree(device); in drbd_create_device()
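
The idr_alloc() calls in drbd_create_device() use the range [minor, minor + 1), which allocates exactly that id or fails, so the allocation doubles as a "minor already taken?" check. The claim/lookup/release cycle, condensed (assuming the standard idr semantics):

    id = idr_alloc(&drbd_devices, device, minor, minor + 1, GFP_KERNEL);
    if (id < 0)
        goto out;                            /* -ENOSPC if minor is taken */
    /* ... */
    device = idr_find(&drbd_devices, minor); /* lookup by minor           */
    /* ... */
    idr_remove(&drbd_devices, minor);        /* in drbd_delete_device()   */
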
2877 void drbd_delete_device(struct drbd_device *device) in drbd_delete_device() argument
2879 struct drbd_resource *resource = device->resource; in drbd_delete_device()
2885 for_each_peer_device(peer_device, device) in drbd_delete_device()
2887 drbd_debugfs_device_cleanup(device); in drbd_delete_device()
2889 idr_remove(&connection->peer_devices, device->vnr); in drbd_delete_device()
2892 idr_remove(&resource->devices, device->vnr); in drbd_delete_device()
2893 idr_remove(&drbd_devices, device_to_minor(device)); in drbd_delete_device()
2894 del_gendisk(device->vdisk); in drbd_delete_device()
2896 kref_sub(&device->kref, refs, drbd_destroy_device); in drbd_delete_device()
3019 struct drbd_device *device = peer_device->device; in conn_md_sync() local
3021 kref_get(&device->kref); in conn_md_sync()
3023 drbd_md_sync(device); in conn_md_sync()
3024 kref_put(&device->kref, drbd_destroy_device); in conn_md_sync()
3055 void drbd_md_write(struct drbd_device *device, void *b) in drbd_md_write() argument
3063 buffer->la_size_sect = cpu_to_be64(drbd_get_capacity(device->this_bdev)); in drbd_md_write()
3065 buffer->uuid[i] = cpu_to_be64(device->ldev->md.uuid[i]); in drbd_md_write()
3066 buffer->flags = cpu_to_be32(device->ldev->md.flags); in drbd_md_write()
3069 buffer->md_size_sect = cpu_to_be32(device->ldev->md.md_size_sect); in drbd_md_write()
3070 buffer->al_offset = cpu_to_be32(device->ldev->md.al_offset); in drbd_md_write()
3071 buffer->al_nr_extents = cpu_to_be32(device->act_log->nr_elements); in drbd_md_write()
3073 buffer->device_uuid = cpu_to_be64(device->ldev->md.device_uuid); in drbd_md_write()
3075 buffer->bm_offset = cpu_to_be32(device->ldev->md.bm_offset); in drbd_md_write()
3076 buffer->la_peer_max_bio_size = cpu_to_be32(device->peer_max_bio_size); in drbd_md_write()
3078 buffer->al_stripes = cpu_to_be32(device->ldev->md.al_stripes); in drbd_md_write()
3079 buffer->al_stripe_size_4k = cpu_to_be32(device->ldev->md.al_stripe_size_4k); in drbd_md_write()
3081 D_ASSERT(device, drbd_md_ss(device->ldev) == device->ldev->md.md_offset); in drbd_md_write()
3082 sector = device->ldev->md.md_offset; in drbd_md_write()
3084 if (drbd_md_sync_page_io(device, device->ldev, sector, WRITE)) { in drbd_md_write()
3086 drbd_err(device, "meta data update failed!\n"); in drbd_md_write()
3087 drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR); in drbd_md_write()
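
Every field written by drbd_md_write() goes through cpu_to_be32()/cpu_to_be64(), so the on-disk metadata layout is independent of host byte order; drbd_md_read() applies the inverse conversions. The round trip in miniature:

    __be64 on_disk = cpu_to_be64(device->ldev->md.uuid[UI_CURRENT]);
    u64 in_core    = be64_to_cpu(on_disk); /* the read-path direction */
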
3095 void drbd_md_sync(struct drbd_device *device) in drbd_md_sync() argument
3103 del_timer(&device->md_sync_timer); in drbd_md_sync()
3105 if (!test_and_clear_bit(MD_DIRTY, &device->flags)) in drbd_md_sync()
3110 if (!get_ldev_if_state(device, D_FAILED)) in drbd_md_sync()
3113 buffer = drbd_md_get_buffer(device, __func__); in drbd_md_sync()
3117 drbd_md_write(device, buffer); in drbd_md_sync()
3121 device->ldev->md.la_size_sect = drbd_get_capacity(device->this_bdev); in drbd_md_sync()
3123 drbd_md_put_buffer(device); in drbd_md_sync()
3125 put_ldev(device); in drbd_md_sync()
3128 static int check_activity_log_stripe_size(struct drbd_device *device, in check_activity_log_stripe_size() argument
3168 drbd_err(device, "invalid activity log striping: al_stripes=%u, al_stripe_size_4k=%u\n", in check_activity_log_stripe_size()
3173 static int check_offsets_and_sizes(struct drbd_device *device, struct drbd_backing_dev *bdev) in check_offsets_and_sizes() argument
3240 drbd_err(device, "meta data offsets don't make sense: idx=%d " in check_offsets_and_sizes()
3264 int drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev) in drbd_md_read() argument
3270 if (device->state.disk != D_DISKLESS) in drbd_md_read()
3273 buffer = drbd_md_get_buffer(device, __func__); in drbd_md_read()
3282 if (drbd_md_sync_page_io(device, bdev, bdev->md.md_offset, READ)) { in drbd_md_read()
3285 drbd_err(device, "Error while reading metadata.\n"); in drbd_md_read()
3295 drbd_err(device, "Found unclean meta data. Did you \"drbdadm apply-al\"?\n"); in drbd_md_read()
3303 drbd_err(device, "Found old (0.7) meta data magic. Did you \"drbdadm create-md\"?\n"); in drbd_md_read()
3305 drbd_err(device, "Meta data magic not found. Did you \"drbdadm create-md\"?\n"); in drbd_md_read()
3310 drbd_err(device, "unexpected bm_bytes_per_bit: %u (expected %u)\n", in drbd_md_read()
3327 if (check_activity_log_stripe_size(device, buffer, &bdev->md)) in drbd_md_read()
3329 if (check_offsets_and_sizes(device, bdev)) in drbd_md_read()
3333 drbd_err(device, "unexpected bm_offset: %d (expected %d)\n", in drbd_md_read()
3338 drbd_err(device, "unexpected md_size: %u (expected %u)\n", in drbd_md_read()
3345 spin_lock_irq(&device->resource->req_lock); in drbd_md_read()
3346 if (device->state.conn < C_CONNECTED) { in drbd_md_read()
3350 device->peer_max_bio_size = peer; in drbd_md_read()
3352 spin_unlock_irq(&device->resource->req_lock); in drbd_md_read()
3355 drbd_md_put_buffer(device); in drbd_md_read()
3369 void drbd_md_mark_dirty_(struct drbd_device *device, unsigned int line, const char *func) in drbd_md_mark_dirty_() argument
3371 if (!test_and_set_bit(MD_DIRTY, &device->flags)) { in drbd_md_mark_dirty_()
3372 mod_timer(&device->md_sync_timer, jiffies + HZ); in drbd_md_mark_dirty_()
3373 device->last_md_mark_dirty.line = line; in drbd_md_mark_dirty_()
3374 device->last_md_mark_dirty.func = func; in drbd_md_mark_dirty_()
3378 void drbd_md_mark_dirty(struct drbd_device *device) in drbd_md_mark_dirty() argument
3380 if (!test_and_set_bit(MD_DIRTY, &device->flags)) in drbd_md_mark_dirty()
3381 mod_timer(&device->md_sync_timer, jiffies + 5*HZ); in drbd_md_mark_dirty()
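
drbd_md_mark_dirty() arms a lazy flush: set MD_DIRTY once and start (or push back) md_sync_timer, whose callback eventually leads to drbd_md_sync(). There, test_and_clear_bit() keeps an explicit sync and the timer-driven one from writing twice. The pattern in brief:

    /* producer side: note the change, defer the write */
    if (!test_and_set_bit(MD_DIRTY, &device->flags))
        mod_timer(&device->md_sync_timer, jiffies + 5*HZ);

    /* consumer side (drbd_md_sync): only one writer wins */
    del_timer(&device->md_sync_timer);
    if (!test_and_clear_bit(MD_DIRTY, &device->flags))
        return;
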
3385 void drbd_uuid_move_history(struct drbd_device *device) __must_hold(local) in drbd_uuid_move_history() argument
3390 device->ldev->md.uuid[i+1] = device->ldev->md.uuid[i]; in drbd_uuid_move_history()
3393 void __drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local) in __drbd_uuid_set() argument
3396 if (device->state.role == R_PRIMARY) in __drbd_uuid_set()
3401 drbd_set_ed_uuid(device, val); in __drbd_uuid_set()
3404 device->ldev->md.uuid[idx] = val; in __drbd_uuid_set()
3405 drbd_md_mark_dirty(device); in __drbd_uuid_set()
3408 void _drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local) in _drbd_uuid_set() argument
3411 spin_lock_irqsave(&device->ldev->md.uuid_lock, flags); in _drbd_uuid_set()
3412 __drbd_uuid_set(device, idx, val); in _drbd_uuid_set()
3413 spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags); in _drbd_uuid_set()
3416 void drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local) in drbd_uuid_set() argument
3419 spin_lock_irqsave(&device->ldev->md.uuid_lock, flags); in drbd_uuid_set()
3420 if (device->ldev->md.uuid[idx]) { in drbd_uuid_set()
3421 drbd_uuid_move_history(device); in drbd_uuid_set()
3422 device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[idx]; in drbd_uuid_set()
3424 __drbd_uuid_set(device, idx, val); in drbd_uuid_set()
3425 spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags); in drbd_uuid_set()
3435 void drbd_uuid_new_current(struct drbd_device *device) __must_hold(local) in drbd_uuid_new_current() argument
3442 spin_lock_irq(&device->ldev->md.uuid_lock); in drbd_uuid_new_current()
3443 bm_uuid = device->ldev->md.uuid[UI_BITMAP]; in drbd_uuid_new_current()
3446 drbd_warn(device, "bm UUID was already set: %llX\n", bm_uuid); in drbd_uuid_new_current()
3448 device->ldev->md.uuid[UI_BITMAP] = device->ldev->md.uuid[UI_CURRENT]; in drbd_uuid_new_current()
3449 __drbd_uuid_set(device, UI_CURRENT, val); in drbd_uuid_new_current()
3450 spin_unlock_irq(&device->ldev->md.uuid_lock); in drbd_uuid_new_current()
3452 drbd_print_uuids(device, "new current UUID"); in drbd_uuid_new_current()
3454 drbd_md_sync(device); in drbd_uuid_new_current()
3457 void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local) in drbd_uuid_set_bm() argument
3460 if (device->ldev->md.uuid[UI_BITMAP] == 0 && val == 0) in drbd_uuid_set_bm()
3463 spin_lock_irqsave(&device->ldev->md.uuid_lock, flags); in drbd_uuid_set_bm()
3465 drbd_uuid_move_history(device); in drbd_uuid_set_bm()
3466 device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[UI_BITMAP]; in drbd_uuid_set_bm()
3467 device->ldev->md.uuid[UI_BITMAP] = 0; in drbd_uuid_set_bm()
3469 unsigned long long bm_uuid = device->ldev->md.uuid[UI_BITMAP]; in drbd_uuid_set_bm()
3471 drbd_warn(device, "bm UUID was already set: %llX\n", bm_uuid); in drbd_uuid_set_bm()
3473 device->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1); in drbd_uuid_set_bm()
3475 spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags); in drbd_uuid_set_bm()
3477 drbd_md_mark_dirty(device); in drbd_uuid_set_bm()
3486 int drbd_bmio_set_n_write(struct drbd_device *device) __must_hold(local) in drbd_bmio_set_n_write() argument
3490 drbd_md_set_flag(device, MDF_FULL_SYNC); in drbd_bmio_set_n_write()
3491 drbd_md_sync(device); in drbd_bmio_set_n_write()
3492 drbd_bm_set_all(device); in drbd_bmio_set_n_write()
3494 rv = drbd_bm_write(device); in drbd_bmio_set_n_write()
3497 drbd_md_clear_flag(device, MDF_FULL_SYNC); in drbd_bmio_set_n_write()
3498 drbd_md_sync(device); in drbd_bmio_set_n_write()
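
The ordering in drbd_bmio_set_n_write() is deliberate and crash-safe, as this reading of the excerpt suggests:

    /* 1. set MDF_FULL_SYNC and drbd_md_sync()  -- persist the intent
     * 2. drbd_bm_set_all() and drbd_bm_write() -- write the bitmap
     * 3. only if the write succeeded: clear MDF_FULL_SYNC, sync again
     *
     * A crash between steps 1 and 3 leaves the flag set, so the next
     * attach forces a full sync rather than trusting a half-written
     * bitmap. */
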
3510 int drbd_bmio_clear_n_write(struct drbd_device *device) __must_hold(local) in drbd_bmio_clear_n_write() argument
3512 drbd_resume_al(device); in drbd_bmio_clear_n_write()
3513 drbd_bm_clear_all(device); in drbd_bmio_clear_n_write()
3514 return drbd_bm_write(device); in drbd_bmio_clear_n_write()
3519 struct drbd_device *device = in w_bitmap_io() local
3521 struct bm_io_work *work = &device->bm_io_work; in w_bitmap_io()
3524 D_ASSERT(device, atomic_read(&device->ap_bio_cnt) == 0); in w_bitmap_io()
3526 if (get_ldev(device)) { in w_bitmap_io()
3527 drbd_bm_lock(device, work->why, work->flags); in w_bitmap_io()
3528 rv = work->io_fn(device); in w_bitmap_io()
3529 drbd_bm_unlock(device); in w_bitmap_io()
3530 put_ldev(device); in w_bitmap_io()
3533 clear_bit_unlock(BITMAP_IO, &device->flags); in w_bitmap_io()
3534 wake_up(&device->misc_wait); in w_bitmap_io()
3537 work->done(device, rv); in w_bitmap_io()
3539 clear_bit(BITMAP_IO_QUEUED, &device->flags); in w_bitmap_io()
3561 void drbd_queue_bitmap_io(struct drbd_device *device, in drbd_queue_bitmap_io() argument
3566 D_ASSERT(device, current == first_peer_device(device)->connection->worker.task); in drbd_queue_bitmap_io()
3568 D_ASSERT(device, !test_bit(BITMAP_IO_QUEUED, &device->flags)); in drbd_queue_bitmap_io()
3569 D_ASSERT(device, !test_bit(BITMAP_IO, &device->flags)); in drbd_queue_bitmap_io()
3570 D_ASSERT(device, list_empty(&device->bm_io_work.w.list)); in drbd_queue_bitmap_io()
3571 if (device->bm_io_work.why) in drbd_queue_bitmap_io()
3572 drbd_err(device, "FIXME going to queue '%s' but '%s' still pending?\n", in drbd_queue_bitmap_io()
3573 why, device->bm_io_work.why); in drbd_queue_bitmap_io()
3575 device->bm_io_work.io_fn = io_fn; in drbd_queue_bitmap_io()
3576 device->bm_io_work.done = done; in drbd_queue_bitmap_io()
3577 device->bm_io_work.why = why; in drbd_queue_bitmap_io()
3578 device->bm_io_work.flags = flags; in drbd_queue_bitmap_io()
3580 spin_lock_irq(&device->resource->req_lock); in drbd_queue_bitmap_io()
3581 set_bit(BITMAP_IO, &device->flags); in drbd_queue_bitmap_io()
3582 if (atomic_read(&device->ap_bio_cnt) == 0) { in drbd_queue_bitmap_io()
3583 if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags)) in drbd_queue_bitmap_io()
3584 drbd_queue_work(&first_peer_device(device)->connection->sender_work, in drbd_queue_bitmap_io()
3585 &device->bm_io_work.w); in drbd_queue_bitmap_io()
3587 spin_unlock_irq(&device->resource->req_lock); in drbd_queue_bitmap_io()
3599 int drbd_bitmap_io(struct drbd_device *device, int (*io_fn)(struct drbd_device *), in drbd_bitmap_io() argument
3604 D_ASSERT(device, current != first_peer_device(device)->connection->worker.task); in drbd_bitmap_io()
3607 drbd_suspend_io(device); in drbd_bitmap_io()
3609 drbd_bm_lock(device, why, flags); in drbd_bitmap_io()
3610 rv = io_fn(device); in drbd_bitmap_io()
3611 drbd_bm_unlock(device); in drbd_bitmap_io()
3614 drbd_resume_io(device); in drbd_bitmap_io()
3619 void drbd_md_set_flag(struct drbd_device *device, int flag) __must_hold(local) in drbd_md_set_flag() argument
3621 if ((device->ldev->md.flags & flag) != flag) { in drbd_md_set_flag()
3622 drbd_md_mark_dirty(device); in drbd_md_set_flag()
3623 device->ldev->md.flags |= flag; in drbd_md_set_flag()
3627 void drbd_md_clear_flag(struct drbd_device *device, int flag) __must_hold(local) in drbd_md_clear_flag() argument
3629 if ((device->ldev->md.flags & flag) != 0) { in drbd_md_clear_flag()
3630 drbd_md_mark_dirty(device); in drbd_md_clear_flag()
3631 device->ldev->md.flags &= ~flag; in drbd_md_clear_flag()
3641 struct drbd_device *device = (struct drbd_device *) data; in md_sync_timer_fn() local
3642 drbd_device_post_work(device, MD_SYNC); in md_sync_timer_fn()
3721 int drbd_wait_misc(struct drbd_device *device, struct drbd_interval *i) in drbd_wait_misc() argument
3728 nc = rcu_dereference(first_peer_device(device)->connection->net_conf); in drbd_wait_misc()
3738 prepare_to_wait(&device->misc_wait, &wait, TASK_INTERRUPTIBLE); in drbd_wait_misc()
3739 spin_unlock_irq(&device->resource->req_lock); in drbd_wait_misc()
3741 finish_wait(&device->misc_wait, &wait); in drbd_wait_misc()
3742 spin_lock_irq(&device->resource->req_lock); in drbd_wait_misc()
3743 if (!timeout || device->state.conn < C_CONNECTED) in drbd_wait_misc()
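
drbd_wait_misc() uses the classic open-coded wait pattern: register on the waitqueue while still holding req_lock, drop the lock, sleep, then re-take the lock and re-check the condition. A skeleton of that sequence, with the sleep step assumed to be a schedule_timeout() between the unlock and finish_wait():

    DEFINE_WAIT(wait);

    prepare_to_wait(&device->misc_wait, &wait, TASK_INTERRUPTIBLE);
    spin_unlock_irq(&device->resource->req_lock);
    timeout = schedule_timeout(timeout); /* assumed sleep step */
    finish_wait(&device->misc_wait, &wait);
    spin_lock_irq(&device->resource->req_lock);
    if (!timeout || device->state.conn < C_CONNECTED)
        return -ETIMEDOUT; /* illustrative error path */
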
3799 _drbd_insert_fault(struct drbd_device *device, unsigned int type) in _drbd_insert_fault() argument
3805 ((1 << device_to_minor(device)) & fault_devs) != 0) && in _drbd_insert_fault()
3812 drbd_warn(device, "***Simulating %s failure\n", in _drbd_insert_fault()
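
_drbd_insert_fault() gates simulated failures per device: fault_devs is a bitmask of minors (a module parameter), so a device qualifies when the mask is zero (all devices) or when its minor's bit is set. The check restated as a sketch consistent with line 3805 above:

    bool device_selected = fault_devs == 0 ||
        ((1 << device_to_minor(device)) & fault_devs) != 0;
    /* combined with a fault_rate percentage roll before the warning fires */
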