This source file includes the following definitions:
- drbd_insert_fault
- bm_xfer_ctx_bit_to_word_offset
- get_t_state
- has_net_conf
- minor_to_device
- first_peer_device
- conn_peer_device
- device_to_minor
- drbd_determine_dev_size
- drbd_tcp_cork
- drbd_tcp_uncork
- drbd_tcp_nodelay
- drbd_tcp_quickack
- drbd_generic_make_request
- page_chain_next
- drbd_peer_req_has_active_page
- drbd_read_state
- __drbd_chk_io_error_
- drbd_chk_io_error_
- drbd_md_first_sector
- drbd_md_last_sector
- drbd_get_capacity
- drbd_get_max_capacity
- drbd_md_ss
- drbd_queue_work
- drbd_queue_work_if_unqueued
- drbd_device_post_work
- wake_ack_receiver
- request_ping
- drbd_thread_stop
- drbd_thread_stop_nowait
- drbd_thread_restart_nowait
- inc_ap_pending
- _dec_ap_pending
- inc_rs_pending
- _dec_rs_pending
- inc_unacked
- _dec_unacked
- _sub_unacked
- is_sync_target_state
- is_sync_source_state
- is_sync_state
- put_ldev
- _get_ldev_if_state
- drbd_get_max_buffers
- drbd_state_is_stable
- drbd_suspended
- may_inc_ap_bio
- inc_ap_bio_cond
- inc_ap_bio
- dec_ap_bio
- verify_can_do_stop_sector
- drbd_set_ed_uuid
- drbd_queue_order_type
- first_connection
1
2
3
4
5
6
7
8
9
10
11
12
13
14 #ifndef _DRBD_INT_H
15 #define _DRBD_INT_H
16
17 #include <crypto/hash.h>
18 #include <linux/compiler.h>
19 #include <linux/types.h>
20 #include <linux/list.h>
21 #include <linux/sched/signal.h>
22 #include <linux/bitops.h>
23 #include <linux/slab.h>
24 #include <linux/ratelimit.h>
25 #include <linux/tcp.h>
26 #include <linux/mutex.h>
27 #include <linux/major.h>
28 #include <linux/blkdev.h>
29 #include <linux/backing-dev.h>
30 #include <linux/genhd.h>
31 #include <linux/idr.h>
32 #include <linux/dynamic_debug.h>
33 #include <net/tcp.h>
34 #include <linux/lru_cache.h>
35 #include <linux/prefetch.h>
36 #include <linux/drbd_genl_api.h>
37 #include <linux/drbd.h>
38 #include "drbd_strings.h"
39 #include "drbd_state.h"
40 #include "drbd_protocol.h"
41
42 #ifdef __CHECKER__
43 # define __protected_by(x) __attribute__((require_context(x,1,999,"rdwr")))
44 # define __protected_read_by(x) __attribute__((require_context(x,1,999,"read")))
45 # define __protected_write_by(x) __attribute__((require_context(x,1,999,"write")))
46 #else
47 # define __protected_by(x)
48 # define __protected_read_by(x)
49 # define __protected_write_by(x)
50 #endif
51
52
53 #ifdef CONFIG_DRBD_FAULT_INJECTION
54 extern int drbd_enable_faults;
55 extern int drbd_fault_rate;
56 #endif
57
58 extern unsigned int drbd_minor_count;
59 extern char drbd_usermode_helper[];
60 extern int drbd_proc_details;
61
62
63
64
65
66
67
68 #define DRBD_SIGKILL SIGHUP
69
70 #define ID_IN_SYNC (4711ULL)
71 #define ID_OUT_OF_SYNC (4712ULL)
72 #define ID_SYNCER (-1ULL)
73
74 #define UUID_NEW_BM_OFFSET ((u64)0x0001000000000000ULL)
75
76 struct drbd_device;
77 struct drbd_connection;
78
79 #define __drbd_printk_device(level, device, fmt, args...) \
80 dev_printk(level, disk_to_dev((device)->vdisk), fmt, ## args)
81 #define __drbd_printk_peer_device(level, peer_device, fmt, args...) \
82 dev_printk(level, disk_to_dev((peer_device)->device->vdisk), fmt, ## args)
83 #define __drbd_printk_resource(level, resource, fmt, args...) \
84 printk(level "drbd %s: " fmt, (resource)->name, ## args)
85 #define __drbd_printk_connection(level, connection, fmt, args...) \
86 printk(level "drbd %s: " fmt, (connection)->resource->name, ## args)
87
88 void drbd_printk_with_wrong_object_type(void);
89
90 #define __drbd_printk_if_same_type(obj, type, func, level, fmt, args...) \
91 (__builtin_types_compatible_p(typeof(obj), type) || \
92 __builtin_types_compatible_p(typeof(obj), const type)), \
93 func(level, (const type)(obj), fmt, ## args)
94
95 #define drbd_printk(level, obj, fmt, args...) \
96 __builtin_choose_expr( \
97 __drbd_printk_if_same_type(obj, struct drbd_device *, \
98 __drbd_printk_device, level, fmt, ## args), \
99 __builtin_choose_expr( \
100 __drbd_printk_if_same_type(obj, struct drbd_resource *, \
101 __drbd_printk_resource, level, fmt, ## args), \
102 __builtin_choose_expr( \
103 __drbd_printk_if_same_type(obj, struct drbd_connection *, \
104 __drbd_printk_connection, level, fmt, ## args), \
105 __builtin_choose_expr( \
106 __drbd_printk_if_same_type(obj, struct drbd_peer_device *, \
107 __drbd_printk_peer_device, level, fmt, ## args), \
108 drbd_printk_with_wrong_object_type()))))
109
110 #define drbd_dbg(obj, fmt, args...) \
111 drbd_printk(KERN_DEBUG, obj, fmt, ## args)
112 #define drbd_alert(obj, fmt, args...) \
113 drbd_printk(KERN_ALERT, obj, fmt, ## args)
114 #define drbd_err(obj, fmt, args...) \
115 drbd_printk(KERN_ERR, obj, fmt, ## args)
116 #define drbd_warn(obj, fmt, args...) \
117 drbd_printk(KERN_WARNING, obj, fmt, ## args)
118 #define drbd_info(obj, fmt, args...) \
119 drbd_printk(KERN_INFO, obj, fmt, ## args)
120 #define drbd_emerg(obj, fmt, args...) \
121 drbd_printk(KERN_EMERG, obj, fmt, ## args)
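/*
 * Illustrative usage (not from the original file): because drbd_printk()
 * dispatches on the object type at compile time, the same wrappers work
 * for devices, resources, connections and peer devices alike.
 * "old_size" and "new_size" below are assumed local variables:
 *
 *	drbd_warn(device, "capacity changed from %llu to %llu sectors\n",
 *		  (unsigned long long)old_size, (unsigned long long)new_size);
 *	drbd_info(connection, "agreed protocol version %d\n",
 *		  connection->agreed_pro_version);
 */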
122
123 #define dynamic_drbd_dbg(device, fmt, args...) \
124 dynamic_dev_dbg(disk_to_dev(device->vdisk), fmt, ## args)
125
126 #define D_ASSERT(device, exp) do { \
127 if (!(exp)) \
128 drbd_err(device, "ASSERT( " #exp " ) in %s:%d\n", __FILE__, __LINE__); \
129 } while (0)
130
131
132
133
134
135
136 #define expect(exp) ({ \
137 bool _bool = (exp); \
138 if (!_bool) \
139 drbd_err(device, "ASSERTION %s FAILED in %s\n", \
140 #exp, __func__); \
141 _bool; \
142 })
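/*
 * Illustrative sketch (assumed, not from the original file): expect()
 * evaluates to the tested condition, so it can log the failure and
 * guard a branch at the same time.  Note that the macro references a
 * "device" variable in scope:
 *
 *	if (!expect(size <= DRBD_MAX_BIO_SIZE))
 *		return -EINVAL;
 */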
143
144
145 enum {
146 DRBD_FAULT_MD_WR = 0,
147 DRBD_FAULT_MD_RD = 1,
148 DRBD_FAULT_RS_WR = 2,
149 DRBD_FAULT_RS_RD = 3,
150 DRBD_FAULT_DT_WR = 4,
151 DRBD_FAULT_DT_RD = 5,
152 DRBD_FAULT_DT_RA = 6,
153 DRBD_FAULT_BM_ALLOC = 7,
154 DRBD_FAULT_AL_EE = 8,
155 DRBD_FAULT_RECEIVE = 9,
156
157 DRBD_FAULT_MAX,
158 };
159
160 extern unsigned int
161 _drbd_insert_fault(struct drbd_device *device, unsigned int type);
162
163 static inline int
164 drbd_insert_fault(struct drbd_device *device, unsigned int type) {
165 #ifdef CONFIG_DRBD_FAULT_INJECTION
166 return drbd_fault_rate &&
167 (drbd_enable_faults & (1<<type)) &&
168 _drbd_insert_fault(device, type);
169 #else
170 return 0;
171 #endif
172 }
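/*
 * Illustrative sketch (not from the original file): callers pick the
 * fault type matching the I/O about to be issued, e.g. a data write:
 *
 *	if (drbd_insert_fault(device, DRBD_FAULT_DT_WR))
 *		bio_io_error(bio);
 *	else
 *		generic_make_request(bio);
 *
 * drbd_generic_make_request() further down follows this exact pattern.
 */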
173
174
175 #define div_ceil(A, B) ((A)/(B) + ((A)%(B) ? 1 : 0))
176
177 #define div_floor(A, B) ((A)/(B))
178
179 extern struct ratelimit_state drbd_ratelimit_state;
180 extern struct idr drbd_devices;
181 extern struct list_head drbd_resources;
182
183 extern const char *cmdname(enum drbd_packet cmd);
184
185
186
187 struct bm_xfer_ctx {
188
189
190
191
192 unsigned long bm_bits;
193 unsigned long bm_words;
194
195 unsigned long bit_offset;
196 unsigned long word_offset;
197
198
199 unsigned packets[2];
200 unsigned bytes[2];
201 };
202
203 extern void INFO_bm_xfer_stats(struct drbd_device *device,
204 const char *direction, struct bm_xfer_ctx *c);
205
206 static inline void bm_xfer_ctx_bit_to_word_offset(struct bm_xfer_ctx *c)
207 {
208
209
210
211
212
213
214
215 #if BITS_PER_LONG == 64
216 c->word_offset = c->bit_offset >> 6;
217 #elif BITS_PER_LONG == 32
218 c->word_offset = c->bit_offset >> 5;
219 c->word_offset &= ~(1UL);
220 #else
221 # error "unsupported BITS_PER_LONG"
222 #endif
223 }
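/*
 * Worked example (illustrative): the bitmap is shipped in 64-bit chunks,
 * so for bit_offset = 12345 a 64-bit host gets word_offset = 12345 >> 6
 * = 192, while a 32-bit host gets 12345 >> 5 = 385, rounded down to the
 * even word 384 so that it stays aligned to the same 64-bit boundary.
 */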
224
225 extern unsigned int drbd_header_size(struct drbd_connection *connection);
226
227
228 enum drbd_thread_state {
229 NONE,
230 RUNNING,
231 EXITING,
232 RESTARTING
233 };
234
235 struct drbd_thread {
236 spinlock_t t_lock;
237 struct task_struct *task;
238 struct completion stop;
239 enum drbd_thread_state t_state;
240 int (*function) (struct drbd_thread *);
241 struct drbd_resource *resource;
242 struct drbd_connection *connection;
243 int reset_cpu_mask;
244 const char *name;
245 };
246
247 static inline enum drbd_thread_state get_t_state(struct drbd_thread *thi)
248 {
249
250
251
252
253 smp_rmb();
254 return thi->t_state;
255 }
256
257 struct drbd_work {
258 struct list_head list;
259 int (*cb)(struct drbd_work *, int cancel);
260 };
261
262 struct drbd_device_work {
263 struct drbd_work w;
264 struct drbd_device *device;
265 };
266
267 #include "drbd_interval.h"
268
269 extern int drbd_wait_misc(struct drbd_device *, struct drbd_interval *);
270
271 extern void lock_all_resources(void);
272 extern void unlock_all_resources(void);
273
274 struct drbd_request {
275 struct drbd_work w;
276 struct drbd_device *device;
277
278
279
280
281
282 struct bio *private_bio;
283
284 struct drbd_interval i;
285
286
287
288
289
290
291
292
293
294 unsigned int epoch;
295
296 struct list_head tl_requests;
297 struct bio *master_bio;
298
299
300 struct list_head req_pending_master_completion;
301 struct list_head req_pending_local;
302
303
304 unsigned long start_jif;
305
306
307
308
309
310
311
312
313
314 unsigned long in_actlog_jif;
315
316
317 unsigned long pre_submit_jif;
318
319
320 unsigned long pre_send_jif;
321 unsigned long acked_jif;
322 unsigned long net_done_jif;
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357 atomic_t completion_ref;
358
359 struct kref kref;
360
361 unsigned rq_state;
362 };
363
364 struct drbd_epoch {
365 struct drbd_connection *connection;
366 struct list_head list;
367 unsigned int barrier_nr;
368 atomic_t epoch_size;
369 atomic_t active;
370 unsigned long flags;
371 };
372
373
374 int drbdd_init(struct drbd_thread *);
375 int drbd_asender(struct drbd_thread *);
376
377
378 enum {
379 DE_HAVE_BARRIER_NUMBER,
380 };
381
382 enum epoch_event {
383 EV_PUT,
384 EV_GOT_BARRIER_NR,
385 EV_BECAME_LAST,
386 EV_CLEANUP = 32,
387 };
388
389 struct digest_info {
390 int digest_size;
391 void *digest;
392 };
393
394 struct drbd_peer_request {
395 struct drbd_work w;
396 struct drbd_peer_device *peer_device;
397 struct drbd_epoch *epoch;
398 struct page *pages;
399 atomic_t pending_bios;
400 struct drbd_interval i;
401
402 unsigned long flags;
403 unsigned long submit_jif;
404 union {
405 u64 block_id;
406 struct digest_info *digest;
407 };
408 };
409
410
411
412
413
414
415
416 enum {
417 __EE_CALL_AL_COMPLETE_IO,
418 __EE_MAY_SET_IN_SYNC,
419
420
421 __EE_TRIM,
422
423
424
425 __EE_ZEROOUT,
426
427
428
429 __EE_RESUBMITTED,
430
431
432
433
434 __EE_WAS_ERROR,
435
436
437 __EE_HAS_DIGEST,
438
439
440 __EE_RESTART_REQUESTS,
441
442
443 __EE_SEND_WRITE_ACK,
444
445
446 __EE_IN_INTERVAL_TREE,
447
448
449
450 __EE_SUBMITTED,
451
452
453 __EE_WRITE,
454
455
456 __EE_WRITE_SAME,
457
458
459
460 __EE_APPLICATION,
461
462
463 __EE_RS_THIN_REQ,
464 };
465 #define EE_CALL_AL_COMPLETE_IO (1<<__EE_CALL_AL_COMPLETE_IO)
466 #define EE_MAY_SET_IN_SYNC (1<<__EE_MAY_SET_IN_SYNC)
467 #define EE_TRIM (1<<__EE_TRIM)
468 #define EE_ZEROOUT (1<<__EE_ZEROOUT)
469 #define EE_RESUBMITTED (1<<__EE_RESUBMITTED)
470 #define EE_WAS_ERROR (1<<__EE_WAS_ERROR)
471 #define EE_HAS_DIGEST (1<<__EE_HAS_DIGEST)
472 #define EE_RESTART_REQUESTS (1<<__EE_RESTART_REQUESTS)
473 #define EE_SEND_WRITE_ACK (1<<__EE_SEND_WRITE_ACK)
474 #define EE_IN_INTERVAL_TREE (1<<__EE_IN_INTERVAL_TREE)
475 #define EE_SUBMITTED (1<<__EE_SUBMITTED)
476 #define EE_WRITE (1<<__EE_WRITE)
477 #define EE_WRITE_SAME (1<<__EE_WRITE_SAME)
478 #define EE_APPLICATION (1<<__EE_APPLICATION)
479 #define EE_RS_THIN_REQ (1<<__EE_RS_THIN_REQ)
480
481
482 enum {
483 UNPLUG_REMOTE,
484 MD_DIRTY,
485 USE_DEGR_WFC_T,
486 CL_ST_CHG_SUCCESS,
487 CL_ST_CHG_FAIL,
488 CRASHED_PRIMARY,
489
490
491 CONSIDER_RESYNC,
492
493 MD_NO_FUA,
494
495 BITMAP_IO,
496
497 BITMAP_IO_QUEUED,
498 WAS_IO_ERROR,
499 WAS_READ_ERROR,
500 FORCE_DETACH,
501 RESYNC_AFTER_NEG,
502 RESIZE_PENDING,
503
504 NEW_CUR_UUID,
505 AL_SUSPENDED,
506 AHEAD_TO_SYNC_SOURCE,
507 B_RS_H_DONE,
508 DISCARD_MY_DATA,
509 READ_BALANCE_RR,
510
511 FLUSH_PENDING,
512
513
514
515 GOING_DISKLESS,
516
517
518 GO_DISKLESS,
519 DESTROY_DISK,
520 MD_SYNC,
521 RS_START,
522 RS_PROGRESS,
523 RS_DONE,
524 };
525
526 struct drbd_bitmap;
527
528
529
530 enum bm_flag {
531
532 BM_LOCKED_MASK = 0xf,
533
534
535 BM_DONT_CLEAR = 0x1,
536 BM_DONT_SET = 0x2,
537 BM_DONT_TEST = 0x4,
538
539
540
541 BM_IS_LOCKED = 0x8,
542
543
544 BM_LOCKED_TEST_ALLOWED = BM_DONT_CLEAR | BM_DONT_SET | BM_IS_LOCKED,
545
546
547
548
549 BM_LOCKED_SET_ALLOWED = BM_DONT_CLEAR | BM_IS_LOCKED,
550
551
552
553 BM_LOCKED_CHANGE_ALLOWED = BM_IS_LOCKED,
554 };
555
556 struct drbd_work_queue {
557 struct list_head q;
558 spinlock_t q_lock;
559 wait_queue_head_t q_wait;
560 };
561
562 struct drbd_socket {
563 struct mutex mutex;
564 struct socket *socket;
565
566
567 void *sbuf;
568 void *rbuf;
569 };
570
571 struct drbd_md {
572 u64 md_offset;
573
574 u64 la_size_sect;
575 spinlock_t uuid_lock;
576 u64 uuid[UI_SIZE];
577 u64 device_uuid;
578 u32 flags;
579 u32 md_size_sect;
580
581 s32 al_offset;
582 s32 bm_offset;
583
584
585 s32 meta_dev_idx;
586
587
588 u32 al_stripes;
589 u32 al_stripe_size_4k;
590 u32 al_size_4k;
591 };
592
593 struct drbd_backing_dev {
594 struct block_device *backing_bdev;
595 struct block_device *md_bdev;
596 struct drbd_md md;
597 struct disk_conf *disk_conf;
598 sector_t known_size;
599 };
600
601 struct drbd_md_io {
602 struct page *page;
603 unsigned long start_jif;
604 unsigned long submit_jif;
605 const char *current_use;
606 atomic_t in_use;
607 unsigned int done;
608 int error;
609 };
610
611 struct bm_io_work {
612 struct drbd_work w;
613 char *why;
614 enum bm_flag flags;
615 int (*io_fn)(struct drbd_device *device);
616 void (*done)(struct drbd_device *device, int rv);
617 };
618
619 struct fifo_buffer {
620 unsigned int head_index;
621 unsigned int size;
622 int total;
623 int values[];
624 };
625 extern struct fifo_buffer *fifo_alloc(int fifo_size);
626
627
628 enum {
629 NET_CONGESTED,
630 RESOLVE_CONFLICTS,
631 SEND_PING,
632 GOT_PING_ACK,
633 CONN_WD_ST_CHG_REQ,
634 CONN_WD_ST_CHG_OKAY,
635 CONN_WD_ST_CHG_FAIL,
636 CONN_DRY_RUN,
637 CREATE_BARRIER,
638 STATE_SENT,
639 CALLBACK_PENDING,
640
641
642
643
644
645 DISCONNECT_SENT,
646
647 DEVICE_WORK_PENDING,
648 };
649
650 enum which_state { NOW, OLD = NOW, NEW };
651
652 struct drbd_resource {
653 char *name;
654 #ifdef CONFIG_DEBUG_FS
655 struct dentry *debugfs_res;
656 struct dentry *debugfs_res_volumes;
657 struct dentry *debugfs_res_connections;
658 struct dentry *debugfs_res_in_flight_summary;
659 #endif
660 struct kref kref;
661 struct idr devices;
662 struct list_head connections;
663 struct list_head resources;
664 struct res_opts res_opts;
665 struct mutex conf_update;
666 struct mutex adm_mutex;
667 spinlock_t req_lock;
668
669 unsigned susp:1;
670 unsigned susp_nod:1;
671 unsigned susp_fen:1;
672
673 enum write_ordering_e write_ordering;
674
675 cpumask_var_t cpu_mask;
676 };
677
678 struct drbd_thread_timing_details
679 {
680 unsigned long start_jif;
681 void *cb_addr;
682 const char *caller_fn;
683 unsigned int line;
684 unsigned int cb_nr;
685 };
686
687 struct drbd_connection {
688 struct list_head connections;
689 struct drbd_resource *resource;
690 #ifdef CONFIG_DEBUG_FS
691 struct dentry *debugfs_conn;
692 struct dentry *debugfs_conn_callback_history;
693 struct dentry *debugfs_conn_oldest_requests;
694 #endif
695 struct kref kref;
696 struct idr peer_devices;
697 enum drbd_conns cstate;
698 struct mutex cstate_mutex;
699 unsigned int connect_cnt;
700
701 unsigned long flags;
702 struct net_conf *net_conf;
703 wait_queue_head_t ping_wait;
704
705 struct sockaddr_storage my_addr;
706 int my_addr_len;
707 struct sockaddr_storage peer_addr;
708 int peer_addr_len;
709
710 struct drbd_socket data;
711 struct drbd_socket meta;
712 int agreed_pro_version;
713 u32 agreed_features;
714 unsigned long last_received;
715 unsigned int ko_count;
716
717 struct list_head transfer_log;
718
719 struct crypto_shash *cram_hmac_tfm;
720 struct crypto_shash *integrity_tfm;
721 struct crypto_shash *peer_integrity_tfm;
722 struct crypto_shash *csums_tfm;
723 struct crypto_shash *verify_tfm;
724 void *int_dig_in;
725 void *int_dig_vv;
726
727
728 struct drbd_epoch *current_epoch;
729 spinlock_t epoch_lock;
730 unsigned int epochs;
731 atomic_t current_tle_nr;
732 unsigned current_tle_writes;
733
734 unsigned long last_reconnect_jif;
735
736 struct blk_plug receiver_plug;
737 struct drbd_thread receiver;
738 struct drbd_thread worker;
739 struct drbd_thread ack_receiver;
740 struct workqueue_struct *ack_sender;
741
742
743
744
745 struct drbd_request *req_next;
746 struct drbd_request *req_ack_pending;
747 struct drbd_request *req_not_net_done;
748
749
750 struct drbd_work_queue sender_work;
751
752 #define DRBD_THREAD_DETAILS_HIST 16
753 unsigned int w_cb_nr;
754 unsigned int r_cb_nr;
755 struct drbd_thread_timing_details w_timing_details[DRBD_THREAD_DETAILS_HIST];
756 struct drbd_thread_timing_details r_timing_details[DRBD_THREAD_DETAILS_HIST];
757
758 struct {
759 unsigned long last_sent_barrier_jif;
760
761
762
763 bool seen_any_write_yet;
764
765
766 int current_epoch_nr;
767
768
769
770
771 unsigned current_epoch_writes;
772 } send;
773 };
774
775 static inline bool has_net_conf(struct drbd_connection *connection)
776 {
777 bool has_net_conf;
778
779 rcu_read_lock();
780 has_net_conf = rcu_dereference(connection->net_conf);
781 rcu_read_unlock();
782
783 return has_net_conf;
784 }
785
786 void __update_timing_details(
787 struct drbd_thread_timing_details *tdp,
788 unsigned int *cb_nr,
789 void *cb,
790 const char *fn, const unsigned int line);
791
792 #define update_worker_timing_details(c, cb) \
793 __update_timing_details(c->w_timing_details, &c->w_cb_nr, cb, __func__ , __LINE__ )
794 #define update_receiver_timing_details(c, cb) \
795 __update_timing_details(c->r_timing_details, &c->r_cb_nr, cb, __func__ , __LINE__ )
796
797 struct submit_worker {
798 struct workqueue_struct *wq;
799 struct work_struct worker;
800
801
802 struct list_head writes;
803 };
804
805 struct drbd_peer_device {
806 struct list_head peer_devices;
807 struct drbd_device *device;
808 struct drbd_connection *connection;
809 struct work_struct send_acks_work;
810 #ifdef CONFIG_DEBUG_FS
811 struct dentry *debugfs_peer_dev;
812 #endif
813 };
814
815 struct drbd_device {
816 struct drbd_resource *resource;
817 struct list_head peer_devices;
818 struct list_head pending_bitmap_io;
819
820 unsigned long flush_jif;
821 #ifdef CONFIG_DEBUG_FS
822 struct dentry *debugfs_minor;
823 struct dentry *debugfs_vol;
824 struct dentry *debugfs_vol_oldest_requests;
825 struct dentry *debugfs_vol_act_log_extents;
826 struct dentry *debugfs_vol_resync_extents;
827 struct dentry *debugfs_vol_data_gen_id;
828 struct dentry *debugfs_vol_ed_gen_id;
829 #endif
830
831 unsigned int vnr;
832 unsigned int minor;
833
834 struct kref kref;
835
836
837 unsigned long flags;
838
839
840 struct drbd_backing_dev *ldev __protected_by(local);
841
842 sector_t p_size;
843 struct request_queue *rq_queue;
844 struct block_device *this_bdev;
845 struct gendisk *vdisk;
846
847 unsigned long last_reattach_jif;
848 struct drbd_work resync_work;
849 struct drbd_work unplug_work;
850 struct timer_list resync_timer;
851 struct timer_list md_sync_timer;
852 struct timer_list start_resync_timer;
853 struct timer_list request_timer;
854
855
856 union drbd_state new_state_tmp;
857
858 union drbd_dev_state state;
859 wait_queue_head_t misc_wait;
860 wait_queue_head_t state_wait;
861 unsigned int send_cnt;
862 unsigned int recv_cnt;
863 unsigned int read_cnt;
864 unsigned int writ_cnt;
865 unsigned int al_writ_cnt;
866 unsigned int bm_writ_cnt;
867 atomic_t ap_bio_cnt;
868 atomic_t ap_actlog_cnt;
869 atomic_t ap_pending_cnt;
870 atomic_t rs_pending_cnt;
871 atomic_t unacked_cnt;
872 atomic_t local_cnt;
873 atomic_t suspend_cnt;
874
875
876 struct rb_root read_requests;
877 struct rb_root write_requests;
878
879
880
881 struct list_head pending_master_completion[2];
882 struct list_head pending_completion[2];
883
884
885 bool use_csums;
886
887 unsigned long rs_total;
888
889 unsigned long rs_failed;
890
891 unsigned long rs_start;
892
893 unsigned long rs_paused;
894
895 unsigned long rs_same_csum;
896 #define DRBD_SYNC_MARKS 8
897 #define DRBD_SYNC_MARK_STEP (3*HZ)
898
899 unsigned long rs_mark_left[DRBD_SYNC_MARKS];
900
901 unsigned long rs_mark_time[DRBD_SYNC_MARKS];
902
903 int rs_last_mark;
904 unsigned long rs_last_bcast;
905
906
907 sector_t ov_start_sector;
908 sector_t ov_stop_sector;
909
910 sector_t ov_position;
911
912 sector_t ov_last_oos_start;
913
914 sector_t ov_last_oos_size;
915 unsigned long ov_left;
916
917 struct drbd_bitmap *bitmap;
918 unsigned long bm_resync_fo;
919
920
921 struct lru_cache *resync;
922
923 unsigned int resync_locked;
924
925 unsigned int resync_wenr;
926
927 int open_cnt;
928 u64 *p_uuid;
929
930 struct list_head active_ee;
931 struct list_head sync_ee;
932 struct list_head done_ee;
933 struct list_head read_ee;
934 struct list_head net_ee;
935
936 int next_barrier_nr;
937 struct list_head resync_reads;
938 atomic_t pp_in_use;
939 atomic_t pp_in_use_by_net;
940 wait_queue_head_t ee_wait;
941 struct drbd_md_io md_io;
942 spinlock_t al_lock;
943 wait_queue_head_t al_wait;
944 struct lru_cache *act_log;
945 unsigned int al_tr_number;
946 int al_tr_cycle;
947 wait_queue_head_t seq_wait;
948 atomic_t packet_seq;
949 unsigned int peer_seq;
950 spinlock_t peer_seq_lock;
951 unsigned long comm_bm_set;
952 struct bm_io_work bm_io_work;
953 u64 ed_uuid;
954 struct mutex own_state_mutex;
955 struct mutex *state_mutex;
956 char congestion_reason;
957 atomic_t rs_sect_in;
958 atomic_t rs_sect_ev;
959 int rs_last_sect_ev;
960 int rs_last_events;
961
962 int c_sync_rate;
963 struct fifo_buffer *rs_plan_s;
964 int rs_in_flight;
965 atomic_t ap_in_flight;
966 unsigned int peer_max_bio_size;
967 unsigned int local_max_bio_size;
968
969
970
971 struct submit_worker submit;
972 };
973
974 struct drbd_bm_aio_ctx {
975 struct drbd_device *device;
976 struct list_head list;
977 unsigned long start_jif;
978 atomic_t in_flight;
979 unsigned int done;
980 unsigned flags;
981 #define BM_AIO_COPY_PAGES 1
982 #define BM_AIO_WRITE_HINTED 2
983 #define BM_AIO_WRITE_ALL_PAGES 4
984 #define BM_AIO_READ 8
985 int error;
986 struct kref kref;
987 };
988
989 struct drbd_config_context {
990
991 unsigned int minor;
992
993 unsigned int volume;
994 #define VOLUME_UNSPECIFIED (-1U)
995
996
997 char *resource_name;
998 struct nlattr *my_addr;
999 struct nlattr *peer_addr;
1000
1001
1002 struct sk_buff *reply_skb;
1003
1004 struct drbd_genlmsghdr *reply_dh;
1005
1006 struct drbd_device *device;
1007 struct drbd_resource *resource;
1008 struct drbd_connection *connection;
1009 };
1010
1011 static inline struct drbd_device *minor_to_device(unsigned int minor)
1012 {
1013 return (struct drbd_device *)idr_find(&drbd_devices, minor);
1014 }
1015
1016 static inline struct drbd_peer_device *first_peer_device(struct drbd_device *device)
1017 {
1018 return list_first_entry_or_null(&device->peer_devices, struct drbd_peer_device, peer_devices);
1019 }
1020
1021 static inline struct drbd_peer_device *
1022 conn_peer_device(struct drbd_connection *connection, int volume_number)
1023 {
1024 return idr_find(&connection->peer_devices, volume_number);
1025 }
1026
1027 #define for_each_resource(resource, _resources) \
1028 list_for_each_entry(resource, _resources, resources)
1029
1030 #define for_each_resource_rcu(resource, _resources) \
1031 list_for_each_entry_rcu(resource, _resources, resources)
1032
1033 #define for_each_resource_safe(resource, tmp, _resources) \
1034 list_for_each_entry_safe(resource, tmp, _resources, resources)
1035
1036 #define for_each_connection(connection, resource) \
1037 list_for_each_entry(connection, &resource->connections, connections)
1038
1039 #define for_each_connection_rcu(connection, resource) \
1040 list_for_each_entry_rcu(connection, &resource->connections, connections)
1041
1042 #define for_each_connection_safe(connection, tmp, resource) \
1043 list_for_each_entry_safe(connection, tmp, &resource->connections, connections)
1044
1045 #define for_each_peer_device(peer_device, device) \
1046 list_for_each_entry(peer_device, &device->peer_devices, peer_devices)
1047
1048 #define for_each_peer_device_rcu(peer_device, device) \
1049 list_for_each_entry_rcu(peer_device, &device->peer_devices, peer_devices)
1050
1051 #define for_each_peer_device_safe(peer_device, tmp, device) \
1052 list_for_each_entry_safe(peer_device, tmp, &device->peer_devices, peer_devices)
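/*
 * Illustrative sketch (assumed, not from the original file): the _rcu
 * variants must run under rcu_read_lock(); the plain variant is fine
 * when the list cannot change underneath us:
 *
 *	struct drbd_peer_device *peer_device;
 *
 *	for_each_peer_device(peer_device, device)
 *		drbd_info(peer_device, "volume %u has a peer device\n",
 *			  device->vnr);
 */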
1053
1054 static inline unsigned int device_to_minor(struct drbd_device *device)
1055 {
1056 return device->minor;
1057 }
1058
1059
1060
1061
1062
1063
1064
1065 enum dds_flags {
1066 DDSF_FORCED = 1,
1067 DDSF_NO_RESYNC = 2,
1068 };
1069
1070 extern void drbd_init_set_defaults(struct drbd_device *device);
1071 extern int drbd_thread_start(struct drbd_thread *thi);
1072 extern void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait);
1073 #ifdef CONFIG_SMP
1074 extern void drbd_thread_current_set_cpu(struct drbd_thread *thi);
1075 #else
1076 #define drbd_thread_current_set_cpu(A) ({})
1077 #endif
1078 extern void tl_release(struct drbd_connection *, unsigned int barrier_nr,
1079 unsigned int set_size);
1080 extern void tl_clear(struct drbd_connection *);
1081 extern void drbd_free_sock(struct drbd_connection *connection);
1082 extern int drbd_send(struct drbd_connection *connection, struct socket *sock,
1083 void *buf, size_t size, unsigned msg_flags);
1084 extern int drbd_send_all(struct drbd_connection *, struct socket *, void *, size_t,
1085 unsigned);
1086
1087 extern int __drbd_send_protocol(struct drbd_connection *connection, enum drbd_packet cmd);
1088 extern int drbd_send_protocol(struct drbd_connection *connection);
1089 extern int drbd_send_uuids(struct drbd_peer_device *);
1090 extern int drbd_send_uuids_skip_initial_sync(struct drbd_peer_device *);
1091 extern void drbd_gen_and_send_sync_uuid(struct drbd_peer_device *);
1092 extern int drbd_send_sizes(struct drbd_peer_device *, int trigger_reply, enum dds_flags flags);
1093 extern int drbd_send_state(struct drbd_peer_device *, union drbd_state s);
1094 extern int drbd_send_current_state(struct drbd_peer_device *);
1095 extern int drbd_send_sync_param(struct drbd_peer_device *);
1096 extern void drbd_send_b_ack(struct drbd_connection *connection, u32 barrier_nr,
1097 u32 set_size);
1098 extern int drbd_send_ack(struct drbd_peer_device *, enum drbd_packet,
1099 struct drbd_peer_request *);
1100 extern void drbd_send_ack_rp(struct drbd_peer_device *, enum drbd_packet,
1101 struct p_block_req *rp);
1102 extern void drbd_send_ack_dp(struct drbd_peer_device *, enum drbd_packet,
1103 struct p_data *dp, int data_size);
1104 extern int drbd_send_ack_ex(struct drbd_peer_device *, enum drbd_packet,
1105 sector_t sector, int blksize, u64 block_id);
1106 extern int drbd_send_out_of_sync(struct drbd_peer_device *, struct drbd_request *);
1107 extern int drbd_send_block(struct drbd_peer_device *, enum drbd_packet,
1108 struct drbd_peer_request *);
1109 extern int drbd_send_dblock(struct drbd_peer_device *, struct drbd_request *req);
1110 extern int drbd_send_drequest(struct drbd_peer_device *, int cmd,
1111 sector_t sector, int size, u64 block_id);
1112 extern int drbd_send_drequest_csum(struct drbd_peer_device *, sector_t sector,
1113 int size, void *digest, int digest_size,
1114 enum drbd_packet cmd);
1115 extern int drbd_send_ov_request(struct drbd_peer_device *, sector_t sector, int size);
1116
1117 extern int drbd_send_bitmap(struct drbd_device *device);
1118 extern void drbd_send_sr_reply(struct drbd_peer_device *, enum drbd_state_rv retcode);
1119 extern void conn_send_sr_reply(struct drbd_connection *connection, enum drbd_state_rv retcode);
1120 extern int drbd_send_rs_deallocated(struct drbd_peer_device *, struct drbd_peer_request *);
1121 extern void drbd_backing_dev_free(struct drbd_device *device, struct drbd_backing_dev *ldev);
1122 extern void drbd_device_cleanup(struct drbd_device *device);
1123 extern void drbd_print_uuids(struct drbd_device *device, const char *text);
1124 extern void drbd_queue_unplug(struct drbd_device *device);
1125
1126 extern void conn_md_sync(struct drbd_connection *connection);
1127 extern void drbd_md_write(struct drbd_device *device, void *buffer);
1128 extern void drbd_md_sync(struct drbd_device *device);
1129 extern int drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev);
1130 extern void drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
1131 extern void _drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
1132 extern void drbd_uuid_new_current(struct drbd_device *device) __must_hold(local);
1133 extern void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local);
1134 extern void drbd_uuid_move_history(struct drbd_device *device) __must_hold(local);
1135 extern void __drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local);
1136 extern void drbd_md_set_flag(struct drbd_device *device, int flags) __must_hold(local);
1137 extern void drbd_md_clear_flag(struct drbd_device *device, int flags) __must_hold(local);
1138 extern int drbd_md_test_flag(struct drbd_backing_dev *, int);
1139 extern void drbd_md_mark_dirty(struct drbd_device *device);
1140 extern void drbd_queue_bitmap_io(struct drbd_device *device,
1141 int (*io_fn)(struct drbd_device *),
1142 void (*done)(struct drbd_device *, int),
1143 char *why, enum bm_flag flags);
1144 extern int drbd_bitmap_io(struct drbd_device *device,
1145 int (*io_fn)(struct drbd_device *),
1146 char *why, enum bm_flag flags);
1147 extern int drbd_bitmap_io_from_worker(struct drbd_device *device,
1148 int (*io_fn)(struct drbd_device *),
1149 char *why, enum bm_flag flags);
1150 extern int drbd_bmio_set_n_write(struct drbd_device *device) __must_hold(local);
1151 extern int drbd_bmio_clear_n_write(struct drbd_device *device) __must_hold(local);
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186 #define MD_128MB_SECT (128LLU << 11)
1187 #define MD_4kB_SECT 8
1188 #define MD_32kB_SECT 64
1189
1190
1191 #define AL_EXTENT_SHIFT 22
1192 #define AL_EXTENT_SIZE (1<<AL_EXTENT_SHIFT)
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208 #define AL_UPDATES_PER_TRANSACTION 64
1209 #define AL_CONTEXT_PER_TRANSACTION 919
1210
1211 #if BITS_PER_LONG == 32
1212 #define LN2_BPL 5
1213 #define cpu_to_lel(A) cpu_to_le32(A)
1214 #define lel_to_cpu(A) le32_to_cpu(A)
1215 #elif BITS_PER_LONG == 64
1216 #define LN2_BPL 6
1217 #define cpu_to_lel(A) cpu_to_le64(A)
1218 #define lel_to_cpu(A) le64_to_cpu(A)
1219 #else
1220 #error "LN2 of BITS_PER_LONG unknown!"
1221 #endif
1222
1223
1224
1225 struct bm_extent {
1226 int rs_left;
1227 int rs_failed;
1228 unsigned long flags;
1229 struct lc_element lce;
1230 };
1231
1232 #define BME_NO_WRITES 0
1233 #define BME_LOCKED 1
1234 #define BME_PRIORITY 2
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244 #define SLEEP_TIME (HZ/10)
1245
1246
1247
1248 #define BM_BLOCK_SHIFT 12
1249 #define BM_BLOCK_SIZE (1<<BM_BLOCK_SHIFT)
1250
1251
1252
1253 #define BM_EXT_SHIFT 24
1254 #define BM_EXT_SIZE (1<<BM_EXT_SHIFT)
1255
1256 #if (BM_EXT_SHIFT != 24) || (BM_BLOCK_SHIFT != 12)
1257 #error "HAVE YOU FIXED drbdmeta AS WELL??"
1258 #endif
1259
1260
1261 #define BM_SECT_TO_BIT(x) ((x)>>(BM_BLOCK_SHIFT-9))
1262 #define BM_BIT_TO_SECT(x) ((sector_t)(x)<<(BM_BLOCK_SHIFT-9))
1263 #define BM_SECT_PER_BIT BM_BIT_TO_SECT(1)
1264
1265
1266 #define Bit2KB(bits) ((bits)<<(BM_BLOCK_SHIFT-10))
1267
1268
1269
1270 #define BM_SECT_TO_EXT(x) ((x)>>(BM_EXT_SHIFT-9))
1271 #define BM_BIT_TO_EXT(x) ((x) >> (BM_EXT_SHIFT - BM_BLOCK_SHIFT))
1272
1273
1274 #define BM_EXT_TO_SECT(x) ((sector_t)(x) << (BM_EXT_SHIFT-9))
1275
1276 #define BM_SECT_PER_EXT BM_EXT_TO_SECT(1)
1277
1278 #define BM_BITS_PER_EXT (1UL << (BM_EXT_SHIFT - BM_BLOCK_SHIFT))
1279
1280 #define BM_BLOCKS_PER_BM_EXT_MASK (BM_BITS_PER_EXT - 1)
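/*
 * Worked example (illustrative): one bit covers BM_BLOCK_SIZE = 4 KiB =
 * 8 sectors, so BM_SECT_TO_BIT(2048) = 256 and BM_BIT_TO_SECT(256) =
 * 2048; Bit2KB(256) = 1024 KiB.  One bitmap extent covers BM_EXT_SIZE =
 * 16 MiB, i.e. BM_BITS_PER_EXT = 4096 bits or 32768 sectors.
 */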
1281
1282
1283
1284 #define AL_EXT_PER_BM_SECT (1 << (BM_EXT_SHIFT - AL_EXTENT_SHIFT))
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300 #define DRBD_MAX_SECTORS_32 (0xffffffffLU)
1301
1302
1303
1304
1305
1306 #define DRBD_MAX_SECTORS_FIXED_BM \
1307 ((MD_128MB_SECT - MD_32kB_SECT - MD_4kB_SECT) * (1LL<<(BM_EXT_SHIFT-9)))
1308 #define DRBD_MAX_SECTORS DRBD_MAX_SECTORS_FIXED_BM
1309
1310 #if BITS_PER_LONG == 32
1311
1312
1313
1314 #define DRBD_MAX_SECTORS_FLEX BM_BIT_TO_SECT(0xffff7fff)
1315 #else
1316
1317 #define DRBD_MAX_SECTORS_FLEX (1UL << 51)
1318
1319 #endif
1320
1321
1322
1323
1324
1325
1326
1327 #define DRBD_MAX_BIO_SIZE (1U << 20)
1328 #if DRBD_MAX_BIO_SIZE > (BIO_MAX_PAGES << PAGE_SHIFT)
1329 #error Architecture not supported: DRBD_MAX_BIO_SIZE > BIO_MAX_SIZE
1330 #endif
1331 #define DRBD_MAX_BIO_SIZE_SAFE (1U << 12)
1332
1333 #define DRBD_MAX_SIZE_H80_PACKET (1U << 15)
1334 #define DRBD_MAX_BIO_SIZE_P95 (1U << 17)
1335
1336
1337
1338
1339 #define DRBD_MAX_BATCH_BIO_SIZE (AL_UPDATES_PER_TRANSACTION/2*AL_EXTENT_SIZE)
1340 #define DRBD_MAX_BBIO_SECTORS (DRBD_MAX_BATCH_BIO_SIZE >> 9)
1341
1342 extern int drbd_bm_init(struct drbd_device *device);
1343 extern int drbd_bm_resize(struct drbd_device *device, sector_t sectors, int set_new_bits);
1344 extern void drbd_bm_cleanup(struct drbd_device *device);
1345 extern void drbd_bm_set_all(struct drbd_device *device);
1346 extern void drbd_bm_clear_all(struct drbd_device *device);
1347
1348 extern int drbd_bm_set_bits(
1349 struct drbd_device *device, unsigned long s, unsigned long e);
1350 extern int drbd_bm_clear_bits(
1351 struct drbd_device *device, unsigned long s, unsigned long e);
1352 extern int drbd_bm_count_bits(
1353 struct drbd_device *device, const unsigned long s, const unsigned long e);
1354
1355
1356 extern void _drbd_bm_set_bits(struct drbd_device *device,
1357 const unsigned long s, const unsigned long e);
1358 extern int drbd_bm_test_bit(struct drbd_device *device, unsigned long bitnr);
1359 extern int drbd_bm_e_weight(struct drbd_device *device, unsigned long enr);
1360 extern int drbd_bm_read(struct drbd_device *device) __must_hold(local);
1361 extern void drbd_bm_mark_for_writeout(struct drbd_device *device, int page_nr);
1362 extern int drbd_bm_write(struct drbd_device *device) __must_hold(local);
1363 extern void drbd_bm_reset_al_hints(struct drbd_device *device) __must_hold(local);
1364 extern int drbd_bm_write_hinted(struct drbd_device *device) __must_hold(local);
1365 extern int drbd_bm_write_lazy(struct drbd_device *device, unsigned upper_idx) __must_hold(local);
1366 extern int drbd_bm_write_all(struct drbd_device *device) __must_hold(local);
1367 extern int drbd_bm_write_copy_pages(struct drbd_device *device) __must_hold(local);
1368 extern size_t drbd_bm_words(struct drbd_device *device);
1369 extern unsigned long drbd_bm_bits(struct drbd_device *device);
1370 extern sector_t drbd_bm_capacity(struct drbd_device *device);
1371
1372 #define DRBD_END_OF_BITMAP (~(unsigned long)0)
1373 extern unsigned long drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo);
1374
1375 extern unsigned long _drbd_bm_find_next(struct drbd_device *device, unsigned long bm_fo);
1376 extern unsigned long _drbd_bm_find_next_zero(struct drbd_device *device, unsigned long bm_fo);
1377 extern unsigned long _drbd_bm_total_weight(struct drbd_device *device);
1378 extern unsigned long drbd_bm_total_weight(struct drbd_device *device);
1379
1380 extern void drbd_bm_merge_lel(struct drbd_device *device, size_t offset,
1381 size_t number, unsigned long *buffer);
1382
1383 extern void drbd_bm_get_lel(struct drbd_device *device, size_t offset,
1384 size_t number, unsigned long *buffer);
1385
1386 extern void drbd_bm_lock(struct drbd_device *device, char *why, enum bm_flag flags);
1387 extern void drbd_bm_unlock(struct drbd_device *device);
1388
1389
1390 extern struct kmem_cache *drbd_request_cache;
1391 extern struct kmem_cache *drbd_ee_cache;
1392 extern struct kmem_cache *drbd_bm_ext_cache;
1393 extern struct kmem_cache *drbd_al_ext_cache;
1394 extern mempool_t drbd_request_mempool;
1395 extern mempool_t drbd_ee_mempool;
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410 extern struct page *drbd_pp_pool;
1411 extern spinlock_t drbd_pp_lock;
1412 extern int drbd_pp_vacant;
1413 extern wait_queue_head_t drbd_pp_wait;
1414
1415
1416
1417
1418
1419
1420 #define DRBD_MIN_POOL_PAGES 128
1421 extern mempool_t drbd_md_io_page_pool;
1422
1423
1424
1425 extern struct bio_set drbd_md_io_bio_set;
1426
1427 extern struct bio *bio_alloc_drbd(gfp_t gfp_mask);
1428
1429
1430 extern struct bio_set drbd_io_bio_set;
1431
1432 extern struct mutex resources_mutex;
1433
1434 extern int conn_lowest_minor(struct drbd_connection *connection);
1435 extern enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsigned int minor);
1436 extern void drbd_destroy_device(struct kref *kref);
1437 extern void drbd_delete_device(struct drbd_device *device);
1438
1439 extern struct drbd_resource *drbd_create_resource(const char *name);
1440 extern void drbd_free_resource(struct drbd_resource *resource);
1441
1442 extern int set_resource_options(struct drbd_resource *resource, struct res_opts *res_opts);
1443 extern struct drbd_connection *conn_create(const char *name, struct res_opts *res_opts);
1444 extern void drbd_destroy_connection(struct kref *kref);
1445 extern struct drbd_connection *conn_get_by_addrs(void *my_addr, int my_addr_len,
1446 void *peer_addr, int peer_addr_len);
1447 extern struct drbd_resource *drbd_find_resource(const char *name);
1448 extern void drbd_destroy_resource(struct kref *kref);
1449 extern void conn_free_crypto(struct drbd_connection *connection);
1450
1451
1452 extern void do_submit(struct work_struct *ws);
1453 extern void __drbd_make_request(struct drbd_device *, struct bio *, unsigned long);
1454 extern blk_qc_t drbd_make_request(struct request_queue *q, struct bio *bio);
1455 extern int drbd_read_remote(struct drbd_device *device, struct drbd_request *req);
1456 extern int is_valid_ar_handle(struct drbd_request *, sector_t);
1457
1458
1459
1460
1461 extern struct mutex notification_mutex;
1462
1463 extern void drbd_suspend_io(struct drbd_device *device);
1464 extern void drbd_resume_io(struct drbd_device *device);
1465 extern char *ppsize(char *buf, unsigned long long size);
1466 extern sector_t drbd_new_dev_size(struct drbd_device *, struct drbd_backing_dev *, sector_t, int);
1467 enum determine_dev_size {
1468 DS_ERROR_SHRINK = -3,
1469 DS_ERROR_SPACE_MD = -2,
1470 DS_ERROR = -1,
1471 DS_UNCHANGED = 0,
1472 DS_SHRUNK = 1,
1473 DS_GREW = 2,
1474 DS_GREW_FROM_ZERO = 3,
1475 };
1476 extern enum determine_dev_size
1477 drbd_determine_dev_size(struct drbd_device *, enum dds_flags, struct resize_parms *) __must_hold(local);
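/*
 * Illustrative call pattern (assumed, not from the original file):
 * negative enum values are errors, positive ones describe the change:
 *
 *	enum determine_dev_size dd;
 *
 *	dd = drbd_determine_dev_size(device, 0, NULL);
 *	if (dd < DS_UNCHANGED)
 *		goto fail;	// one of the DS_ERROR* cases
 */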
1478 extern void resync_after_online_grow(struct drbd_device *);
1479 extern void drbd_reconsider_queue_parameters(struct drbd_device *device,
1480 struct drbd_backing_dev *bdev, struct o_qlim *o);
1481 extern enum drbd_state_rv drbd_set_role(struct drbd_device *device,
1482 enum drbd_role new_role,
1483 int force);
1484 extern bool conn_try_outdate_peer(struct drbd_connection *connection);
1485 extern void conn_try_outdate_peer_async(struct drbd_connection *connection);
1486 extern enum drbd_peer_state conn_khelper(struct drbd_connection *connection, char *cmd);
1487 extern int drbd_khelper(struct drbd_device *device, char *cmd);
1488
1489
1490
1491 extern void drbd_md_endio(struct bio *bio);
1492 extern void drbd_peer_request_endio(struct bio *bio);
1493 extern void drbd_request_endio(struct bio *bio);
1494 extern int drbd_worker(struct drbd_thread *thi);
1495 enum drbd_ret_code drbd_resync_after_valid(struct drbd_device *device, int o_minor);
1496 void drbd_resync_after_changed(struct drbd_device *device);
1497 extern void drbd_start_resync(struct drbd_device *device, enum drbd_conns side);
1498 extern void resume_next_sg(struct drbd_device *device);
1499 extern void suspend_other_sg(struct drbd_device *device);
1500 extern int drbd_resync_finished(struct drbd_device *device);
1501
1502 extern void *drbd_md_get_buffer(struct drbd_device *device, const char *intent);
1503 extern void drbd_md_put_buffer(struct drbd_device *device);
1504 extern int drbd_md_sync_page_io(struct drbd_device *device,
1505 struct drbd_backing_dev *bdev, sector_t sector, int op);
1506 extern void drbd_ov_out_of_sync_found(struct drbd_device *, sector_t, int);
1507 extern void wait_until_done_or_force_detached(struct drbd_device *device,
1508 struct drbd_backing_dev *bdev, unsigned int *done);
1509 extern void drbd_rs_controller_reset(struct drbd_device *device);
1510
1511 static inline void ov_out_of_sync_print(struct drbd_device *device)
1512 {
1513 if (device->ov_last_oos_size) {
1514 drbd_err(device, "Out of sync: start=%llu, size=%lu (sectors)\n",
1515 (unsigned long long)device->ov_last_oos_start,
1516 (unsigned long)device->ov_last_oos_size);
1517 }
1518 device->ov_last_oos_size = 0;
1519 }
1520
1521
1522 extern void drbd_csum_bio(struct crypto_shash *, struct bio *, void *);
1523 extern void drbd_csum_ee(struct crypto_shash *, struct drbd_peer_request *,
1524 void *);
1525
1526 extern int w_e_end_data_req(struct drbd_work *, int);
1527 extern int w_e_end_rsdata_req(struct drbd_work *, int);
1528 extern int w_e_end_csum_rs_req(struct drbd_work *, int);
1529 extern int w_e_end_ov_reply(struct drbd_work *, int);
1530 extern int w_e_end_ov_req(struct drbd_work *, int);
1531 extern int w_ov_finished(struct drbd_work *, int);
1532 extern int w_resync_timer(struct drbd_work *, int);
1533 extern int w_send_write_hint(struct drbd_work *, int);
1534 extern int w_send_dblock(struct drbd_work *, int);
1535 extern int w_send_read_req(struct drbd_work *, int);
1536 extern int w_e_reissue(struct drbd_work *, int);
1537 extern int w_restart_disk_io(struct drbd_work *, int);
1538 extern int w_send_out_of_sync(struct drbd_work *, int);
1539 extern int w_start_resync(struct drbd_work *, int);
1540
1541 extern void resync_timer_fn(struct timer_list *t);
1542 extern void start_resync_timer_fn(struct timer_list *t);
1543
1544 extern void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req);
1545
1546
1547 extern int drbd_issue_discard_or_zero_out(struct drbd_device *device,
1548 sector_t start, unsigned int nr_sectors, int flags);
1549 extern int drbd_receiver(struct drbd_thread *thi);
1550 extern int drbd_ack_receiver(struct drbd_thread *thi);
1551 extern void drbd_send_ping_wf(struct work_struct *ws);
1552 extern void drbd_send_acks_wf(struct work_struct *ws);
1553 extern bool drbd_rs_c_min_rate_throttle(struct drbd_device *device);
1554 extern bool drbd_rs_should_slow_down(struct drbd_device *device, sector_t sector,
1555 bool throttle_if_app_is_waiting);
1556 extern int drbd_submit_peer_request(struct drbd_device *,
1557 struct drbd_peer_request *, const unsigned,
1558 const unsigned, const int);
1559 extern int drbd_free_peer_reqs(struct drbd_device *, struct list_head *);
1560 extern struct drbd_peer_request *drbd_alloc_peer_req(struct drbd_peer_device *, u64,
1561 sector_t, unsigned int,
1562 unsigned int,
1563 gfp_t) __must_hold(local);
1564 extern void __drbd_free_peer_req(struct drbd_device *, struct drbd_peer_request *,
1565 int);
1566 #define drbd_free_peer_req(m,e) __drbd_free_peer_req(m, e, 0)
1567 #define drbd_free_net_peer_req(m,e) __drbd_free_peer_req(m, e, 1)
1568 extern struct page *drbd_alloc_pages(struct drbd_peer_device *, unsigned int, bool);
1569 extern void drbd_set_recv_tcq(struct drbd_device *device, int tcq_enabled);
1570 extern void _drbd_clear_done_ee(struct drbd_device *device, struct list_head *to_be_freed);
1571 extern int drbd_connected(struct drbd_peer_device *);
1572
1573 static inline void drbd_tcp_cork(struct socket *sock)
1574 {
1575 int val = 1;
1576 (void) kernel_setsockopt(sock, SOL_TCP, TCP_CORK,
1577 (char*)&val, sizeof(val));
1578 }
1579
1580 static inline void drbd_tcp_uncork(struct socket *sock)
1581 {
1582 int val = 0;
1583 (void) kernel_setsockopt(sock, SOL_TCP, TCP_CORK,
1584 (char*)&val, sizeof(val));
1585 }
1586
1587 static inline void drbd_tcp_nodelay(struct socket *sock)
1588 {
1589 int val = 1;
1590 (void) kernel_setsockopt(sock, SOL_TCP, TCP_NODELAY,
1591 (char*)&val, sizeof(val));
1592 }
1593
1594 static inline void drbd_tcp_quickack(struct socket *sock)
1595 {
1596 int val = 2;
1597 (void) kernel_setsockopt(sock, SOL_TCP, TCP_QUICKACK,
1598 (char*)&val, sizeof(val));
1599 }
1600
1601
1602 void drbd_set_my_capacity(struct drbd_device *device, sector_t size);
1603
1604
1605
1606
1607 static inline void drbd_generic_make_request(struct drbd_device *device,
1608 int fault_type, struct bio *bio)
1609 {
1610 __release(local);
1611 if (!bio->bi_disk) {
1612 drbd_err(device, "drbd_generic_make_request: bio->bi_disk == NULL\n");
1613 bio->bi_status = BLK_STS_IOERR;
1614 bio_endio(bio);
1615 return;
1616 }
1617
1618 if (drbd_insert_fault(device, fault_type))
1619 bio_io_error(bio);
1620 else
1621 generic_make_request(bio);
1622 }
1623
1624 void drbd_bump_write_ordering(struct drbd_resource *resource, struct drbd_backing_dev *bdev,
1625 enum write_ordering_e wo);
1626
1627
1628 extern struct proc_dir_entry *drbd_proc;
1629 int drbd_seq_show(struct seq_file *seq, void *v);
1630
1631
1632 extern bool drbd_al_begin_io_prepare(struct drbd_device *device, struct drbd_interval *i);
1633 extern int drbd_al_begin_io_nonblock(struct drbd_device *device, struct drbd_interval *i);
1634 extern void drbd_al_begin_io_commit(struct drbd_device *device);
1635 extern bool drbd_al_begin_io_fastpath(struct drbd_device *device, struct drbd_interval *i);
1636 extern void drbd_al_begin_io(struct drbd_device *device, struct drbd_interval *i);
1637 extern void drbd_al_complete_io(struct drbd_device *device, struct drbd_interval *i);
1638 extern void drbd_rs_complete_io(struct drbd_device *device, sector_t sector);
1639 extern int drbd_rs_begin_io(struct drbd_device *device, sector_t sector);
1640 extern int drbd_try_rs_begin_io(struct drbd_device *device, sector_t sector);
1641 extern void drbd_rs_cancel_all(struct drbd_device *device);
1642 extern int drbd_rs_del_all(struct drbd_device *device);
1643 extern void drbd_rs_failed_io(struct drbd_device *device,
1644 sector_t sector, int size);
1645 extern void drbd_advance_rs_marks(struct drbd_device *device, unsigned long still_to_go);
1646
1647 enum update_sync_bits_mode { RECORD_RS_FAILED, SET_OUT_OF_SYNC, SET_IN_SYNC };
1648 extern int __drbd_change_sync(struct drbd_device *device, sector_t sector, int size,
1649 enum update_sync_bits_mode mode);
1650 #define drbd_set_in_sync(device, sector, size) \
1651 __drbd_change_sync(device, sector, size, SET_IN_SYNC)
1652 #define drbd_set_out_of_sync(device, sector, size) \
1653 __drbd_change_sync(device, sector, size, SET_OUT_OF_SYNC)
1654 #define drbd_rs_failed_io(device, sector, size) \
1655 __drbd_change_sync(device, sector, size, RECORD_RS_FAILED)
1656 extern void drbd_al_shrink(struct drbd_device *device);
1657 extern int drbd_al_initialize(struct drbd_device *, void *);
1658
1659
1660
1661 struct sib_info {
1662 enum drbd_state_info_bcast_reason sib_reason;
1663 union {
1664 struct {
1665 char *helper_name;
1666 unsigned helper_exit_code;
1667 };
1668 struct {
1669 union drbd_state os;
1670 union drbd_state ns;
1671 };
1672 };
1673 };
1674 void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib);
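/*
 * Illustrative sketch (assumed, not from the original file): a state
 * change broadcast fills the os/ns members of the anonymous union:
 *
 *	struct sib_info sib = {
 *		.sib_reason = SIB_STATE_CHANGE,
 *		.os = os,
 *		.ns = ns,
 *	};
 *	drbd_bcast_event(device, &sib);
 */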
1675
1676 extern void notify_resource_state(struct sk_buff *,
1677 unsigned int,
1678 struct drbd_resource *,
1679 struct resource_info *,
1680 enum drbd_notification_type);
1681 extern void notify_device_state(struct sk_buff *,
1682 unsigned int,
1683 struct drbd_device *,
1684 struct device_info *,
1685 enum drbd_notification_type);
1686 extern void notify_connection_state(struct sk_buff *,
1687 unsigned int,
1688 struct drbd_connection *,
1689 struct connection_info *,
1690 enum drbd_notification_type);
1691 extern void notify_peer_device_state(struct sk_buff *,
1692 unsigned int,
1693 struct drbd_peer_device *,
1694 struct peer_device_info *,
1695 enum drbd_notification_type);
1696 extern void notify_helper(enum drbd_notification_type, struct drbd_device *,
1697 struct drbd_connection *, const char *, int);
1698
1699
1700
1701
1702
1703
1704 static inline struct page *page_chain_next(struct page *page)
1705 {
1706 return (struct page *)page_private(page);
1707 }
1708 #define page_chain_for_each(page) \
1709 for (; page && ({ prefetch(page_chain_next(page)); 1; }); \
1710 page = page_chain_next(page))
1711 #define page_chain_for_each_safe(page, n) \
1712 for (; page && ({ n = page_chain_next(page); 1; }); page = n)
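/*
 * Illustrative sketch (assumed, not from the original file): the safe
 * variant allows releasing pages while walking the chain:
 *
 *	struct page *page = peer_req->pages, *tmp;
 *
 *	page_chain_for_each_safe(page, tmp)
 *		put_page(page);
 */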
1713
1714
1715 static inline int drbd_peer_req_has_active_page(struct drbd_peer_request *peer_req)
1716 {
1717 struct page *page = peer_req->pages;
1718 page_chain_for_each(page) {
1719 if (page_count(page) > 1)
1720 return 1;
1721 }
1722 return 0;
1723 }
1724
1725 static inline union drbd_state drbd_read_state(struct drbd_device *device)
1726 {
1727 struct drbd_resource *resource = device->resource;
1728 union drbd_state rv;
1729
1730 rv.i = device->state.i;
1731 rv.susp = resource->susp;
1732 rv.susp_nod = resource->susp_nod;
1733 rv.susp_fen = resource->susp_fen;
1734
1735 return rv;
1736 }
1737
1738 enum drbd_force_detach_flags {
1739 DRBD_READ_ERROR,
1740 DRBD_WRITE_ERROR,
1741 DRBD_META_IO_ERROR,
1742 DRBD_FORCE_DETACH,
1743 };
1744
1745 #define __drbd_chk_io_error(m,f) __drbd_chk_io_error_(m,f, __func__)
1746 static inline void __drbd_chk_io_error_(struct drbd_device *device,
1747 enum drbd_force_detach_flags df,
1748 const char *where)
1749 {
1750 enum drbd_io_error_p ep;
1751
1752 rcu_read_lock();
1753 ep = rcu_dereference(device->ldev->disk_conf)->on_io_error;
1754 rcu_read_unlock();
1755 switch (ep) {
1756 case EP_PASS_ON:
1757 if (df == DRBD_READ_ERROR || df == DRBD_WRITE_ERROR) {
1758 if (__ratelimit(&drbd_ratelimit_state))
1759 drbd_err(device, "Local IO failed in %s.\n", where);
1760 if (device->state.disk > D_INCONSISTENT)
1761 _drbd_set_state(_NS(device, disk, D_INCONSISTENT), CS_HARD, NULL);
1762 break;
1763 }
1764 /* fall through for DRBD_META_IO_ERROR or DRBD_FORCE_DETACH */
1765 case EP_DETACH:
1766 case EP_CALL_HELPER:
1767
1768
1769
1770
1771
1772
1773
1774
1775
1776
1777
1778
1779
1780
1781
1782
1783
1784
1785
1786
1787 set_bit(WAS_IO_ERROR, &device->flags);
1788 if (df == DRBD_READ_ERROR)
1789 set_bit(WAS_READ_ERROR, &device->flags);
1790 if (df == DRBD_FORCE_DETACH)
1791 set_bit(FORCE_DETACH, &device->flags);
1792 if (device->state.disk > D_FAILED) {
1793 _drbd_set_state(_NS(device, disk, D_FAILED), CS_HARD, NULL);
1794 drbd_err(device,
1795 "Local IO failed in %s. Detaching...\n", where);
1796 }
1797 break;
1798 }
1799 }
1800
1801
1802
1803
1804
1805
1806
1807
1808
1809 #define drbd_chk_io_error(m,e,f) drbd_chk_io_error_(m,e,f, __func__)
1810 static inline void drbd_chk_io_error_(struct drbd_device *device,
1811 int error, enum drbd_force_detach_flags forcedetach, const char *where)
1812 {
1813 if (error) {
1814 unsigned long flags;
1815 spin_lock_irqsave(&device->resource->req_lock, flags);
1816 __drbd_chk_io_error_(device, forcedetach, where);
1817 spin_unlock_irqrestore(&device->resource->req_lock, flags);
1818 }
1819 }
1820
1821
1822
1823
1824
1825
1826
1827
1828
1829 static inline sector_t drbd_md_first_sector(struct drbd_backing_dev *bdev)
1830 {
1831 switch (bdev->md.meta_dev_idx) {
1832 case DRBD_MD_INDEX_INTERNAL:
1833 case DRBD_MD_INDEX_FLEX_INT:
1834 return bdev->md.md_offset + bdev->md.bm_offset;
1835 case DRBD_MD_INDEX_FLEX_EXT:
1836 default:
1837 return bdev->md.md_offset;
1838 }
1839 }
1840
1841
1842
1843
1844
1845 static inline sector_t drbd_md_last_sector(struct drbd_backing_dev *bdev)
1846 {
1847 switch (bdev->md.meta_dev_idx) {
1848 case DRBD_MD_INDEX_INTERNAL:
1849 case DRBD_MD_INDEX_FLEX_INT:
1850 return bdev->md.md_offset + MD_4kB_SECT -1;
1851 case DRBD_MD_INDEX_FLEX_EXT:
1852 default:
1853 return bdev->md.md_offset + bdev->md.md_size_sect -1;
1854 }
1855 }
1856
1857
1858 static inline sector_t drbd_get_capacity(struct block_device *bdev)
1859 {
1860
1861 return bdev ? i_size_read(bdev->bd_inode) >> 9 : 0;
1862 }
1863
1864
1865
1866
1867
1868
1869
1870
1871
1872 static inline sector_t drbd_get_max_capacity(struct drbd_backing_dev *bdev)
1873 {
1874 sector_t s;
1875
1876 switch (bdev->md.meta_dev_idx) {
1877 case DRBD_MD_INDEX_INTERNAL:
1878 case DRBD_MD_INDEX_FLEX_INT:
1879 s = drbd_get_capacity(bdev->backing_bdev)
1880 ? min_t(sector_t, DRBD_MAX_SECTORS_FLEX,
1881 drbd_md_first_sector(bdev))
1882 : 0;
1883 break;
1884 case DRBD_MD_INDEX_FLEX_EXT:
1885 s = min_t(sector_t, DRBD_MAX_SECTORS_FLEX,
1886 drbd_get_capacity(bdev->backing_bdev));
1887
1888 s = min_t(sector_t, s,
1889 BM_EXT_TO_SECT(bdev->md.md_size_sect
1890 - bdev->md.bm_offset));
1891 break;
1892 default:
1893 s = min_t(sector_t, DRBD_MAX_SECTORS,
1894 drbd_get_capacity(bdev->backing_bdev));
1895 }
1896 return s;
1897 }
1898
1899
1900
1901
1902
1903 static inline sector_t drbd_md_ss(struct drbd_backing_dev *bdev)
1904 {
1905 const int meta_dev_idx = bdev->md.meta_dev_idx;
1906
1907 if (meta_dev_idx == DRBD_MD_INDEX_FLEX_EXT)
1908 return 0;
1909
1910
1911
1912 if (meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
1913 meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)
1914 return (drbd_get_capacity(bdev->backing_bdev) & ~7ULL) - 8;
1915
1916
1917 return MD_128MB_SECT * bdev->md.meta_dev_idx;
1918 }
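/*
 * Worked example (illustrative): with internal meta data on a backing
 * device of 1000000 sectors, the superblock starts at
 * (1000000 & ~7ULL) - 8 = 999992, i.e. in the last 4 KiB aligned block
 * of the device.
 */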
1919
1920 static inline void
1921 drbd_queue_work(struct drbd_work_queue *q, struct drbd_work *w)
1922 {
1923 unsigned long flags;
1924 spin_lock_irqsave(&q->q_lock, flags);
1925 list_add_tail(&w->list, &q->q);
1926 spin_unlock_irqrestore(&q->q_lock, flags);
1927 wake_up(&q->q_wait);
1928 }
1929
1930 static inline void
1931 drbd_queue_work_if_unqueued(struct drbd_work_queue *q, struct drbd_work *w)
1932 {
1933 unsigned long flags;
1934 spin_lock_irqsave(&q->q_lock, flags);
1935 if (list_empty_careful(&w->list))
1936 list_add_tail(&w->list, &q->q);
1937 spin_unlock_irqrestore(&q->q_lock, flags);
1938 wake_up(&q->q_wait);
1939 }
1940
1941 static inline void
1942 drbd_device_post_work(struct drbd_device *device, int work_bit)
1943 {
1944 if (!test_and_set_bit(work_bit, &device->flags)) {
1945 struct drbd_connection *connection =
1946 first_peer_device(device)->connection;
1947 struct drbd_work_queue *q = &connection->sender_work;
1948 if (!test_and_set_bit(DEVICE_WORK_PENDING, &connection->flags))
1949 wake_up(&q->q_wait);
1950 }
1951 }
1952
1953 extern void drbd_flush_workqueue(struct drbd_work_queue *work_queue);
1954
1955
1956
1957
1958
1959 static inline void wake_ack_receiver(struct drbd_connection *connection)
1960 {
1961 struct task_struct *task = connection->ack_receiver.task;
1962 if (task && get_t_state(&connection->ack_receiver) == RUNNING)
1963 send_sig(SIGXCPU, task, 1);
1964 }
1965
1966 static inline void request_ping(struct drbd_connection *connection)
1967 {
1968 set_bit(SEND_PING, &connection->flags);
1969 wake_ack_receiver(connection);
1970 }
1971
1972 extern void *conn_prepare_command(struct drbd_connection *, struct drbd_socket *);
1973 extern void *drbd_prepare_command(struct drbd_peer_device *, struct drbd_socket *);
1974 extern int conn_send_command(struct drbd_connection *, struct drbd_socket *,
1975 enum drbd_packet, unsigned int, void *,
1976 unsigned int);
1977 extern int drbd_send_command(struct drbd_peer_device *, struct drbd_socket *,
1978 enum drbd_packet, unsigned int, void *,
1979 unsigned int);
1980
1981 extern int drbd_send_ping(struct drbd_connection *connection);
1982 extern int drbd_send_ping_ack(struct drbd_connection *connection);
1983 extern int drbd_send_state_req(struct drbd_peer_device *, union drbd_state, union drbd_state);
1984 extern int conn_send_state_req(struct drbd_connection *, union drbd_state, union drbd_state);
1985
1986 static inline void drbd_thread_stop(struct drbd_thread *thi)
1987 {
1988 _drbd_thread_stop(thi, false, true);
1989 }
1990
1991 static inline void drbd_thread_stop_nowait(struct drbd_thread *thi)
1992 {
1993 _drbd_thread_stop(thi, false, false);
1994 }
1995
1996 static inline void drbd_thread_restart_nowait(struct drbd_thread *thi)
1997 {
1998 _drbd_thread_stop(thi, true, false);
1999 }
2000
2001
2002
2003
2004
2005
2006
2007
2008
2009
2010
2011
2012
2013
2014
2015
2016
2017
2018
2019
2020
2021
2022
2023 static inline void inc_ap_pending(struct drbd_device *device)
2024 {
2025 atomic_inc(&device->ap_pending_cnt);
2026 }
2027
2028 #define ERR_IF_CNT_IS_NEGATIVE(which, func, line) \
2029 if (atomic_read(&device->which) < 0) \
2030 drbd_err(device, "in %s:%d: " #which " = %d < 0 !\n", \
2031 func, line, \
2032 atomic_read(&device->which))
2033
2034 #define dec_ap_pending(device) _dec_ap_pending(device, __func__, __LINE__)
2035 static inline void _dec_ap_pending(struct drbd_device *device, const char *func, int line)
2036 {
2037 if (atomic_dec_and_test(&device->ap_pending_cnt))
2038 wake_up(&device->misc_wait);
2039 ERR_IF_CNT_IS_NEGATIVE(ap_pending_cnt, func, line);
2040 }
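
A sketch of the intended pairing (simplified; in the driver the accounting is driven by the request state machine): each queued application request that expects a peer answer bumps the counter, the matching ack drops it, and misc_wait is woken at zero:

    static void example_queue_for_net(struct drbd_device *device)
    {
            inc_ap_pending(device);    /* an answer packet is now owed to us */
            /* ... hand the request to the sender ... */
    }

    static void example_got_block_ack(struct drbd_device *device)
    {
            dec_ap_pending(device);    /* wakes misc_wait on reaching zero */
    }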
2041
2042 /* rs_pending_cnt counts how many resync-related answers we still
2043  * expect from the peer: a SyncTarget increases it when sending
2044  * P_RS_DATA_REQUEST, a SyncSource when sending P_RS_DATA_REPLY;
2045  * it drops when the matching reply or (neg-)ack comes in. */
2046
2048 static inline void inc_rs_pending(struct drbd_device *device)
2049 {
2050 atomic_inc(&device->rs_pending_cnt);
2051 }
2052
2053 #define dec_rs_pending(device) _dec_rs_pending(device, __func__, __LINE__)
2054 static inline void _dec_rs_pending(struct drbd_device *device, const char *func, int line)
2055 {
2056 atomic_dec(&device->rs_pending_cnt);
2057 ERR_IF_CNT_IS_NEGATIVE(rs_pending_cnt, func, line);
2058 }
2059
2060 /* unacked_cnt counts how many answers *we* still have to send to
2061  * the peer: increased when receiving data that requires a
2062  * P_RECV_ACK (protocol B) or P_WRITE_ACK (protocol C), when
2063  * receiving resync data, or when a barrier requires a
2064  * P_BARRIER_ACK; decreased once the answer has been sent. */
2065
2069 static inline void inc_unacked(struct drbd_device *device)
2070 {
2071 atomic_inc(&device->unacked_cnt);
2072 }
2073
2074 #define dec_unacked(device) _dec_unacked(device, __func__, __LINE__)
2075 static inline void _dec_unacked(struct drbd_device *device, const char *func, int line)
2076 {
2077 atomic_dec(&device->unacked_cnt);
2078 ERR_IF_CNT_IS_NEGATIVE(unacked_cnt, func, line);
2079 }
2080
2081 #define sub_unacked(device, n) _sub_unacked(device, n, __func__, __LINE__)
2082 static inline void _sub_unacked(struct drbd_device *device, int n, const char *func, int line)
2083 {
2084 atomic_sub(n, &device->unacked_cnt);
2085 ERR_IF_CNT_IS_NEGATIVE(unacked_cnt, func, line);
2086 }
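
unacked_cnt is the mirror image of ap_pending_cnt: it tracks the answers we owe the peer. A protocol-C sketch with hypothetical function names:

    static void example_got_peer_write(struct drbd_device *device)
    {
            inc_unacked(device);    /* we now owe the peer a P_WRITE_ACK */
            /* ... submit the write to the local disk ... */
    }

    static void example_write_ack_sent(struct drbd_device *device)
    {
            dec_unacked(device);    /* the ack went out */
    }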
2087
2088 static inline bool is_sync_target_state(enum drbd_conns connection_state)
2089 {
2090 return connection_state == C_SYNC_TARGET ||
2091 connection_state == C_PAUSED_SYNC_T;
2092 }
2093
2094 static inline bool is_sync_source_state(enum drbd_conns connection_state)
2095 {
2096 return connection_state == C_SYNC_SOURCE ||
2097 connection_state == C_PAUSED_SYNC_S;
2098 }
2099
2100 static inline bool is_sync_state(enum drbd_conns connection_state)
2101 {
2102 return is_sync_source_state(connection_state) ||
2103 is_sync_target_state(connection_state);
2104 }
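
Callers pass the current connection state, typically device->state.conn; for instance (consider_resync_throttling() is hypothetical):

    /* only throttle resync traffic while a resync towards this peer
     * is actually running or paused */
    if (is_sync_state(device->state.conn))
            consider_resync_throttling(device);    /* hypothetical */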
2105
2106 /*
2107  * get_ldev_if_state() / get_ldev() increment the reference count on
2108  * the local backing device (device->ldev), but only if the disk
2109  * state is at least _min_state; they return false (and take no
2110  * reference) otherwise.  Each successful call must be paired with
2111  * put_ldev().  The __acquire()/__release() are sparse-only markers.
2112  */
2113 #define get_ldev_if_state(_device, _min_state) \
2114 (_get_ldev_if_state((_device), (_min_state)) ? \
2115 ({ __acquire(x); true; }) : false)
2116 #define get_ldev(_device) get_ldev_if_state(_device, D_INCONSISTENT)
2117
2118 static inline void put_ldev(struct drbd_device *device)
2119 {
2120 enum drbd_disk_state disk_state = device->state.disk;
2121 /* we must read the disk state before the decrement below becomes
2122  * visible; otherwise we could race with a concurrent state change
2123  * and act on a stale view when the count reaches zero */
2124
2125 int i = atomic_dec_return(&device->local_cnt);
2126 /* may be called from endio context, so it must not sleep */
2127
2128 /* the sparse-only __release() balances the __acquire() in the
2129  * get_ldev_if_state() macro above */
2130 __release(local);
2131 D_ASSERT(device, i >= 0);
2132 if (i == 0) {
2133 if (disk_state == D_DISKLESS)
2134 /* even internal references are gone, safe to destroy */
2135 drbd_device_post_work(device, DESTROY_DISK);
2136 if (disk_state == D_FAILED)
2137 /* all application IO references are gone */
2138 if (!test_and_set_bit(GOING_DISKLESS, &device->flags))
2139 drbd_device_post_work(device, GO_DISKLESS);
2140 wake_up(&device->misc_wait);
2141 }
2142 }
2143
2144 #ifndef __CHECKER__
2145 static inline int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins)
2146 {
2147 int io_allowed;
2148
2149 /* never take a new reference while the disk is D_DISKLESS */
2150 if (device->state.disk == D_DISKLESS)
2151 return 0;
2152
2153 atomic_inc(&device->local_cnt);
2154 io_allowed = (device->state.disk >= mins);
2155 if (!io_allowed)
2156 put_ldev(device);
2157 return io_allowed;
2158 }
2159 #else
2160 extern int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins);
2161 #endif
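
The canonical access pattern, sketched here with drbd_get_capacity() from earlier in this header (and assuming the usual backing_bdev member of device->ldev):

    if (get_ldev(device)) {
            /* device->ldev is pinned until the matching put_ldev() */
            sector_t capacity = drbd_get_capacity(device->ldev->backing_bdev);

            /* ... use capacity and device->ldev ... */
            put_ldev(device);
    }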
2162
2163 /* max_buffers limits how many application requests may be in
2164  * flight at a time; it lives in the connection's net_conf, hence
2165  * the RCU-protected access, with an arbitrary high fallback. */
2166 static inline int drbd_get_max_buffers(struct drbd_device *device)
2167 {
2168 struct net_conf *nc;
2169 int mxb;
2170
2171 rcu_read_lock();
2172 nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
2173 mxb = nc ? nc->max_buffers : 1000000;
2174 rcu_read_unlock();
2175
2176 return mxb;
2177 }
2178
2179 static inline int drbd_state_is_stable(struct drbd_device *device)
2180 {
2181 union drbd_dev_state s = device->state;
2182
2183 /* DO NOT add a default clause: we want the compiler to warn us
2184  * about any newly introduced state missing from these switches. */
2185
2186 switch ((enum drbd_conns)s.conn) {
2187 /* new I/O is only accepted while there is no connection, ... */
2188 case C_STANDALONE:
2189 case C_WF_CONNECTION:
2190 /* ... or while the connection is well established. */
2191 case C_CONNECTED:
2192 case C_SYNC_SOURCE:
2193 case C_SYNC_TARGET:
2194 case C_VERIFY_S:
2195 case C_VERIFY_T:
2196 case C_PAUSED_SYNC_S:
2197 case C_PAUSED_SYNC_T:
2198 case C_AHEAD:
2199 case C_BEHIND:
2200 /* transitional states through which I/O is still allowed */
2201 case C_DISCONNECTING:
2202 case C_UNCONNECTED:
2203 case C_TIMEOUT:
2204 case C_BROKEN_PIPE:
2205 case C_NETWORK_FAILURE:
2206 case C_PROTOCOL_ERROR:
2207 case C_TEAR_DOWN:
2208 case C_WF_REPORT_PARAMS:
2209 case C_STARTING_SYNC_S:
2210 case C_STARTING_SYNC_T:
2211 break;
2212
2213 /* allow I/O in bitmap-exchange states with newer protocols */
2214 case C_WF_BITMAP_S:
2215 if (first_peer_device(device)->connection->agreed_pro_version < 96)
2216 return 0;
2217 break;
2218
2219 /* no new I/O accepted in these states */
2220 case C_WF_BITMAP_T:
2221 case C_WF_SYNC_UUID:
2222 case C_MASK:
2223 /* not "stable" */
2224 return 0;
2225 }
2226
2227 switch ((enum drbd_disk_state)s.disk) {
2228 case D_DISKLESS:
2229 case D_INCONSISTENT:
2230 case D_OUTDATED:
2231 case D_CONSISTENT:
2232 case D_UP_TO_DATE:
2233 case D_FAILED:
2234 /* the disk state is stable in all of these */
2235 break;
2236
2237 /* no new I/O accepted during these transitional states */
2238 case D_ATTACHING:
2239 case D_NEGOTIATING:
2240 case D_UNKNOWN:
2241 case D_MASK:
2242 /* not "stable" */
2243 return 0;
2244 }
2245
2246 return 1;
2247 }
2248
2249 static inline int drbd_suspended(struct drbd_device *device)
2250 {
2251 struct drbd_resource *resource = device->resource;
2252
2253 return resource->susp || resource->susp_fen || resource->susp_nod;
2254 }
2255
2256 static inline bool may_inc_ap_bio(struct drbd_device *device)
2257 {
2258 int mxb = drbd_get_max_buffers(device);
2259
2260 if (drbd_suspended(device))
2261 return false;
2262 if (atomic_read(&device->suspend_cnt))
2263 return false;
2264 /* to avoid potential deadlock or bitmap corruption, in various
2265  * places we only allow new application I/O to start during
2266  * "stable" states. */
2267
2268 /* in particular, no new I/O is accepted while attaching or
2269  * detaching the disk: */
2270 if (!drbd_state_is_stable(device))
2271 return false;
2272
2273 /* throttle: keep at most max_buffers application requests
2274  * in flight */
2275 if (atomic_read(&device->ap_bio_cnt) > mxb)
2276 return false;
2277 if (test_bit(BITMAP_IO, &device->flags))
2278 return false;
2279 return true;
2280 }
2281
2282 static inline bool inc_ap_bio_cond(struct drbd_device *device)
2283 {
2284 bool rv = false;
2285
2286 spin_lock_irq(&device->resource->req_lock);
2287 rv = may_inc_ap_bio(device);
2288 if (rv)
2289 atomic_inc(&device->ap_bio_cnt);
2290 spin_unlock_irq(&device->resource->req_lock);
2291
2292 return rv;
2293 }
2294
2295 static inline void inc_ap_bio(struct drbd_device *device)
2296 {
2297 /* we wait here
2298  *    as long as the device is suspended,
2299  *    until the bitmap is no longer on the fly during connection
2300  *    handshake, and
2301  *    as long as we would exceed the max_buffers limit.
2302  *
2303  * to avoid races with the reconnect code, the check and the
2304  * atomic_inc happen together under the req_lock (see above). */
2305 wait_event(device->misc_wait, inc_ap_bio_cond(device));
2306 }
2307
2308 static inline void dec_ap_bio(struct drbd_device *device)
2309 {
2310 int mxb = drbd_get_max_buffers(device);
2311 int ap_bio = atomic_dec_return(&device->ap_bio_cnt);
2312
2313 D_ASSERT(device, ap_bio >= 0);
2314
2315 if (ap_bio == 0 && test_bit(BITMAP_IO, &device->flags)) {
2316 if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags))
2317 drbd_queue_work(&first_peer_device(device)->
2318 connection->sender_work,
2319 &device->bm_io_work.w);
2320 }
2321
2322 /* this currently does a wake_up on every dec_ap_bio below the
2323  * limit; some hysteresis (e.g. only at mxb/2 or at 0) might be
2324  * preferable */
2325 if (ap_bio < mxb)
2326 wake_up(&device->misc_wait);
2327 }
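
Taken together, inc_ap_bio()/dec_ap_bio() form the application-I/O gate. A simplified sketch of how the submission and completion paths pair up (function names hypothetical):

    static void example_make_request(struct drbd_device *device)
    {
            /* may sleep: suspended device, unstable state, max_buffers
             * exceeded, or bitmap I/O pending */
            inc_ap_bio(device);
            /* ... set up and queue the request ... */
    }

    static void example_request_done(struct drbd_device *device)
    {
            dec_ap_bio(device);    /* may queue bitmap I/O, wakes waiters */
    }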
2328
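/* the online-verify stop sector requires agreed_pro_version >= 97;
 * version 100 is explicitly excluded by the check below */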
2329 static inline bool verify_can_do_stop_sector(struct drbd_device *device)
2330 {
2331 return first_peer_device(device)->connection->agreed_pro_version >= 97 &&
2332 first_peer_device(device)->connection->agreed_pro_version != 100;
2333 }
2334
2335 static inline int drbd_set_ed_uuid(struct drbd_device *device, u64 val)
2336 {
2337 int changed = device->ed_uuid != val;
2338 device->ed_uuid = val;
2339 return changed;
2340 }
2341
2342 static inline int drbd_queue_order_type(struct drbd_device *device)
2343 {
2344 /* sorry, we currently have no working implementation
2345  * of distributed TCQ stuff */
2346 #ifndef QUEUE_ORDERED_NONE
2347 #define QUEUE_ORDERED_NONE 0
2348 #endif
2349 return QUEUE_ORDERED_NONE;
2350 }
2351
2352 static inline struct drbd_connection *first_connection(struct drbd_resource *resource)
2353 {
2354 return list_first_entry_or_null(&resource->connections,
2355 struct drbd_connection, connections);
2356 }
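
Because list_first_entry_or_null() yields NULL for an empty list, callers must check the result before dereferencing it:

    struct drbd_connection *connection = first_connection(resource);

    if (connection)
            request_ping(connection);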
2357
2358 #endif