This source file includes the following definitions:
- rsxx_blkdev_ioctl
- rsxx_getgeo
- disk_stats_start
- disk_stats_complete
- bio_dma_done_cb
- rsxx_make_request
- rsxx_discard_supported
- rsxx_attach_dev
- rsxx_detach_dev
- rsxx_setup_dev
- rsxx_destroy_dev
- rsxx_dev_init
- rsxx_dev_cleanup
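
/*
 * Block device interface for the rsxx PCIe flash card driver: gendisk
 * setup and teardown, BIO submission through a custom make_request
 * function, register-access ioctls, and the module parameters that
 * control the block layer interface.
 */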

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/slab.h>

#include <linux/hdreg.h>
#include <linux/genhd.h>
#include <linux/blkdev.h>
#include <linux/bio.h>

#include <linux/fs.h>

#include "rsxx_priv.h"

static unsigned int blkdev_minors = 64;
module_param(blkdev_minors, uint, 0444);
MODULE_PARM_DESC(blkdev_minors, "Number of minors (partitions)");

/*
 * Tweakable in case any applications hit this limit. A "bio too big"
 * error in the log means this value needs to be raised.
 */
static unsigned int blkdev_max_hw_sectors = 1024;
module_param(blkdev_max_hw_sectors, uint, 0444);
MODULE_PARM_DESC(blkdev_max_hw_sectors, "Max hw sectors for a single BIO");

static unsigned int enable_blkdev = 1;
module_param(enable_blkdev, uint, 0444);
MODULE_PARM_DESC(enable_blkdev, "Enable block device interfaces");

/* Per-BIO bookkeeping: one allocation per submitted BIO. */
struct rsxx_bio_meta {
        struct bio      *bio;
        atomic_t        pending_dmas;   /* DMAs still outstanding for this BIO */
        atomic_t        error;          /* set if any constituent DMA failed */
        unsigned long   start_time;     /* jiffies at submission, for IO accounting */
};

static struct kmem_cache *bio_meta_pool;

static int rsxx_blkdev_ioctl(struct block_device *bdev,
                             fmode_t mode,
                             unsigned int cmd,
                             unsigned long arg)
{
        struct rsxx_cardinfo *card = bdev->bd_disk->private_data;

        switch (cmd) {
        case RSXX_GETREG:
                return rsxx_reg_access(card, (void __user *)arg, 1);
        case RSXX_SETREG:
                return rsxx_reg_access(card, (void __user *)arg, 0);
        }

        return -ENOTTY;
}
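
/*
 * Hypothetical userspace sketch (illustration only, not part of this
 * file): the register ioctls above would be driven along the lines of
 *
 *      struct rsxx_reg_access reg = { .addr = ..., .cnt = ... };
 *      if (ioctl(fd, RSXX_GETREG, &reg) == 0)
 *              ...
 *
 * assuming the struct rsxx_reg_access layout and ioctl numbers exported
 * by the driver's UAPI header.
 */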

static int rsxx_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
        struct rsxx_cardinfo *card = bdev->bd_disk->private_data;
        u64 blocks = card->size8 >> 9;

        /*
         * The hardware has no real CHS geometry; fake one from the
         * card's capacity (size8 is the size in bytes).
         */
        if (card->size8) {
                geo->heads = 64;
                geo->sectors = 16;
                do_div(blocks, (geo->heads * geo->sectors));
                geo->cylinders = blocks;
        } else {
                geo->heads = 0;
                geo->sectors = 0;
                geo->cylinders = 0;
        }
        return 0;
}
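
/*
 * Worked example: a card with size8 = 8 GiB has 2^33 >> 9 = 16,777,216
 * sectors; with 64 heads * 16 sectors = 1024 sectors per cylinder, that
 * yields 16,384 cylinders.
 */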

static const struct block_device_operations rsxx_fops = {
        .owner          = THIS_MODULE,
        .getgeo         = rsxx_getgeo,
        .ioctl          = rsxx_blkdev_ioctl,
};

static void disk_stats_start(struct rsxx_cardinfo *card, struct bio *bio)
{
        generic_start_io_acct(card->queue, bio_op(bio), bio_sectors(bio),
                              &card->gendisk->part0);
}

static void disk_stats_complete(struct rsxx_cardinfo *card,
                                struct bio *bio,
                                unsigned long start_time)
{
        generic_end_io_acct(card->queue, bio_op(bio),
                            &card->gendisk->part0, start_time);
}

/*
 * Completion callback, invoked once per DMA issued on behalf of a BIO.
 * The BIO is completed (and its accounting finished) only when the last
 * outstanding DMA signals completion.
 */
static void bio_dma_done_cb(struct rsxx_cardinfo *card,
                            void *cb_data,
                            unsigned int error)
{
        struct rsxx_bio_meta *meta = cb_data;

        if (error)
                atomic_set(&meta->error, 1);

        if (atomic_dec_and_test(&meta->pending_dmas)) {
                if (!card->eeh_state && card->gendisk)
                        disk_stats_complete(card, meta->bio, meta->start_time);

                if (atomic_read(&meta->error))
                        bio_io_error(meta->bio);
                else
                        bio_endio(meta->bio);
                kmem_cache_free(bio_meta_pool, meta);
        }
}
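
/*
 * Submission path, in outline: the BIO is split to the queue limits,
 * sanity-checked, then handed to rsxx_dma_queue_bio(), which is expected
 * to bump pending_dmas once per DMA it issues and to invoke
 * bio_dma_done_cb() for each as it completes. A failure return from it
 * means no DMAs were queued, so the meta can be freed immediately.
 */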

static blk_qc_t rsxx_make_request(struct request_queue *q, struct bio *bio)
{
        struct rsxx_cardinfo *card = q->queuedata;
        struct rsxx_bio_meta *bio_meta;
        blk_status_t st = BLK_STS_IOERR;

        blk_queue_split(q, &bio);

        might_sleep();

        if (!card)
                goto req_err;

        if (bio_end_sector(bio) > get_capacity(card->gendisk))
                goto req_err;

        if (unlikely(card->halt))
                goto req_err;

        if (unlikely(card->dma_fault))
                goto req_err;

        if (bio->bi_iter.bi_size == 0) {
                dev_err(CARD_TO_DEV(card), "size zero BIO!\n");
                goto req_err;
        }

        bio_meta = kmem_cache_alloc(bio_meta_pool, GFP_KERNEL);
        if (!bio_meta) {
                st = BLK_STS_RESOURCE;
                goto req_err;
        }

        bio_meta->bio = bio;
        atomic_set(&bio_meta->error, 0);
        atomic_set(&bio_meta->pending_dmas, 0);
        bio_meta->start_time = jiffies;

        if (likely(!card->halt))
                disk_stats_start(card, bio);

        dev_dbg(CARD_TO_DEV(card), "BIO[%c]: meta: %p addr8: 0x%llx size: %d\n",
                bio_data_dir(bio) ? 'W' : 'R', bio_meta,
                (u64)bio->bi_iter.bi_sector << 9, bio->bi_iter.bi_size);

        st = rsxx_dma_queue_bio(card, bio, &bio_meta->pending_dmas,
                                bio_dma_done_cb, bio_meta);
        if (st)
                goto queue_err;

        return BLK_QC_T_NONE;

queue_err:
        kmem_cache_free(bio_meta_pool, bio_meta);
req_err:
        if (st)
                bio->bi_status = st;
        bio_endio(bio);
        return BLK_QC_T_NONE;
}

static bool rsxx_discard_supported(struct rsxx_cardinfo *card)
{
        unsigned char pci_rev;

        pci_read_config_byte(card->dev, PCI_REVISION_ID, &pci_rev);

        return (pci_rev >= RSXX_DISCARD_SUPPORT);
}

int rsxx_attach_dev(struct rsxx_cardinfo *card)
{
        mutex_lock(&card->dev_lock);

        /* Capacity comes from the card config when valid; otherwise 0. */
        if (enable_blkdev) {
                if (card->config_valid)
                        set_capacity(card->gendisk, card->size8 >> 9);
                else
                        set_capacity(card->gendisk, 0);
                device_add_disk(CARD_TO_DEV(card), card->gendisk, NULL);
                card->bdev_attached = 1;
        }

        mutex_unlock(&card->dev_lock);

        return 0;
}

void rsxx_detach_dev(struct rsxx_cardinfo *card)
{
        mutex_lock(&card->dev_lock);

        if (card->bdev_attached) {
                del_gendisk(card->gendisk);
                card->bdev_attached = 0;
        }

        mutex_unlock(&card->dev_lock);
}

int rsxx_setup_dev(struct rsxx_cardinfo *card)
{
        unsigned short blk_size;

        mutex_init(&card->dev_lock);

        if (!enable_blkdev)
                return 0;

        card->major = register_blkdev(0, DRIVER_NAME);
        if (card->major < 0) {
                dev_err(CARD_TO_DEV(card), "Failed to get major number\n");
                return card->major;
        }

        card->queue = blk_alloc_queue(GFP_KERNEL);
        if (!card->queue) {
                dev_err(CARD_TO_DEV(card), "Failed queue alloc\n");
                unregister_blkdev(card->major, DRIVER_NAME);
                return -ENOMEM;
        }

        card->gendisk = alloc_disk(blkdev_minors);
        if (!card->gendisk) {
                dev_err(CARD_TO_DEV(card), "Failed disk alloc\n");
                blk_cleanup_queue(card->queue);
                unregister_blkdev(card->major, DRIVER_NAME);
                return -ENOMEM;
        }

        if (card->config_valid) {
                blk_size = card->config.data.block_size;
                blk_queue_dma_alignment(card->queue, blk_size - 1);
                blk_queue_logical_block_size(card->queue, blk_size);
        }

        blk_queue_make_request(card->queue, rsxx_make_request);
        blk_queue_max_hw_sectors(card->queue, blkdev_max_hw_sectors);
        blk_queue_physical_block_size(card->queue, RSXX_HW_BLK_SIZE);

        blk_queue_flag_set(QUEUE_FLAG_NONROT, card->queue);
        blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, card->queue);
        if (rsxx_discard_supported(card)) {
                blk_queue_flag_set(QUEUE_FLAG_DISCARD, card->queue);
                blk_queue_max_discard_sectors(card->queue,
                                              RSXX_HW_BLK_SIZE >> 9);
                card->queue->limits.discard_granularity = RSXX_HW_BLK_SIZE;
                card->queue->limits.discard_alignment   = RSXX_HW_BLK_SIZE;
        }

        card->queue->queuedata = card;

        snprintf(card->gendisk->disk_name, sizeof(card->gendisk->disk_name),
                 "rsxx%d", card->disk_id);
        card->gendisk->major = card->major;
        card->gendisk->first_minor = 0;
        card->gendisk->fops = &rsxx_fops;
        card->gendisk->private_data = card;
        card->gendisk->queue = card->queue;

        return 0;
}

void rsxx_destroy_dev(struct rsxx_cardinfo *card)
{
        if (!enable_blkdev)
                return;

        put_disk(card->gendisk);
        card->gendisk = NULL;

        /* Clear queuedata before cleanup; the final queue reference may
         * be dropped inside blk_cleanup_queue(). */
        card->queue->queuedata = NULL;
        blk_cleanup_queue(card->queue);
        unregister_blkdev(card->major, DRIVER_NAME);
}
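
/*
 * Module-lifetime helpers: rsxx_dev_init()/rsxx_dev_cleanup() create and
 * destroy the bio_meta slab cache, and are presumably called once from
 * the driver's module init/exit path rather than per card.
 */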

int rsxx_dev_init(void)
{
        bio_meta_pool = KMEM_CACHE(rsxx_bio_meta, SLAB_HWCACHE_ALIGN);
        if (!bio_meta_pool)
                return -ENOMEM;

        return 0;
}

void rsxx_dev_cleanup(void)
{
        kmem_cache_destroy(bio_meta_pool);
}