root/drivers/md/dm-core.h

DEFINITIONS

This source file includes the following definitions:
  1. dm_get_completion_from_kobject
  2. dm_message_test_buffer_overflow

/*
 * Internal header file _only_ for device mapper core
 *
 * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef DM_CORE_INTERNAL_H
#define DM_CORE_INTERNAL_H

#include <linux/kthread.h>
#include <linux/ktime.h>
#include <linux/blk-mq.h>

#include <trace/events/block.h>

#include "dm.h"

#define DM_RESERVED_MAX_IOS             1024

struct dm_kobject_holder {
        struct kobject kobj;
        struct completion completion;
};

/*
 * DM core internal structure that is used directly by dm.c and dm-rq.c.
 * DM targets must _not_ dereference a mapped_device to directly access its members!
 */
struct mapped_device {
        struct mutex suspend_lock;

        struct mutex table_devices_lock;
        struct list_head table_devices;

        /*
         * The current mapping (struct dm_table *).
         * Use dm_get_live_table{_fast} or take suspend_lock for
         * dereference.
         */
        void __rcu *map;
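
        /*
         * Illustrative access pattern (a sketch, not part of this header;
         * dm_get_live_table() and dm_put_live_table() are declared in the
         * device-mapper headers and pair the lookup with an SRCU read
         * lock on io_barrier):
         *
         *      int srcu_idx;
         *      struct dm_table *t = dm_get_live_table(md, &srcu_idx);
         *
         *      ... t stays valid until the SRCU lock is dropped ...
         *      dm_put_live_table(md, srcu_idx);
         */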

        unsigned long flags;

        /* Protect queue and type against concurrent access. */
        struct mutex type_lock;
        enum dm_queue_mode type;

        int numa_node_id;
        struct request_queue *queue;

        atomic_t holders;
        atomic_t open_count;

        struct dm_target *immutable_target;
        struct target_type *immutable_target_type;

        char name[16];
        struct gendisk *disk;
        struct dax_device *dax_dev;

        /*
         * A list of ios that arrived while we were suspended.
         */
        struct work_struct work;
        wait_queue_head_t wait;
        spinlock_t deferred_lock;
        struct bio_list deferred;

        void *interface_ptr;

        /*
         * Event handling.
         */
        wait_queue_head_t eventq;
        atomic_t event_nr;
        atomic_t uevent_seq;
        struct list_head uevent_list;
        spinlock_t uevent_lock; /* Protect access to uevent_list */

        /* the number of internal suspends */
        unsigned internal_suspend_count;

        /*
         * io objects are allocated from here.
         */
        struct bio_set io_bs;
        struct bio_set bs;

        /*
         * Processing queue (flush)
         */
        struct workqueue_struct *wq;

        /*
         * freeze/thaw support requires holding onto a super block
         */
        struct super_block *frozen_sb;

        /* forced geometry settings */
        struct hd_geometry geometry;

        /* kobject and completion */
        struct dm_kobject_holder kobj_holder;

        struct block_device *bdev;

        struct dm_stats stats;

        /* for blk-mq request-based DM support */
        struct blk_mq_tag_set *tag_set;
        bool init_tio_pdu:1;

        struct srcu_struct io_barrier;
};

void disable_discard(struct mapped_device *md);
void disable_write_same(struct mapped_device *md);
void disable_write_zeroes(struct mapped_device *md);

static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj)
{
        return &container_of(kobj, struct dm_kobject_holder, kobj)->completion;
}
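
/*
 * Illustrative use (a sketch, not part of this header): a sysfs ->release
 * callback can signal that the embedded kobject is gone, letting device
 * teardown wait on the completion. The callback name below is assumed:
 *
 *      static void dm_kobject_release(struct kobject *kobj)
 *      {
 *              complete(dm_get_completion_from_kobject(kobj));
 *      }
 */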

unsigned __dm_get_module_param(unsigned *module_param, unsigned def, unsigned max);
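
/*
 * Illustrative call (a sketch): clamp a user-settable module parameter to
 * a sane range before each use. reserved_bio_based_ios and
 * RESERVED_BIO_BASED_IOS below are assumed to be defined by the caller;
 * DM_RESERVED_MAX_IOS comes from this header:
 *
 *      unsigned ios = __dm_get_module_param(&reserved_bio_based_ios,
 *                                           RESERVED_BIO_BASED_IOS,
 *                                           DM_RESERVED_MAX_IOS);
 */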

static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen)
{
        return !maxlen || strlen(result) + 1 >= maxlen;
}
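
/*
 * Illustrative use (a sketch): a target's message/status handler appends
 * to the caller-supplied result buffer, then checks for truncation. The
 * names sz and value are assumptions, not from this header:
 *
 *      unsigned sz = 0;
 *
 *      sz += scnprintf(result + sz, maxlen - sz, "key=%u", value);
 *      if (dm_message_test_buffer_overflow(result, maxlen))
 *              return 1;
 */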

extern atomic_t dm_global_event_nr;
extern wait_queue_head_t dm_global_eventq;
void dm_issue_global_event(void);
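
/*
 * dm_issue_global_event() is implemented in dm.c; conceptually it bumps
 * the global event counter and wakes any pollers waiting on the global
 * event queue, roughly (a sketch, not a verbatim copy):
 *
 *      atomic_inc(&dm_global_event_nr);
 *      wake_up(&dm_global_eventq);
 */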

#endif