This source file includes following definitions.
- blk_flush_integrity
- bio_integrity_alloc
- bio_integrity_free
- bio_integrity_add_page
- bio_integrity_process
- bio_integrity_prep
- bio_integrity_verify_fn
- __bio_integrity_endio
- bio_integrity_advance
- bio_integrity_trim
- bio_integrity_clone
- bioset_integrity_create
- bioset_integrity_free
- bio_integrity_init
1
2
3
4
5
6
7
8
9 #include <linux/blkdev.h>
10 #include <linux/mempool.h>
11 #include <linux/export.h>
12 #include <linux/bio.h>
13 #include <linux/workqueue.h>
14 #include <linux/slab.h>
15 #include "blk.h"
16
17 #define BIP_INLINE_VECS 4
18
19 static struct kmem_cache *bip_slab;
20 static struct workqueue_struct *kintegrityd_wq;
21
/**
 * blk_flush_integrity - wait for all pending integrity work to finish
 *
 * Flushes kintegrityd_wq so that any deferred READ verification work
 * (bio_integrity_verify_fn) queued by __bio_integrity_endio() has
 * completed before this returns.
 */
void blk_flush_integrity(void)
{
	flush_workqueue(kintegrityd_wq);
}
26
27
28
29
30
31
32
33
34
35
36
/**
 * bio_integrity_alloc - Allocate integrity payload and attach it to bio
 * @bio:	bio to attach integrity metadata to
 * @gfp_mask:	Memory allocation mask
 * @nr_vecs:	Number of integrity metadata scatter-gather elements
 *
 * Description: This function prepares a bio for attaching integrity
 * metadata.  nr_vecs specifies the maximum number of pages containing
 * integrity metadata that can be attached.
 *
 * Return: the new payload, or ERR_PTR(-ENOMEM) on allocation failure.
 */
struct bio_integrity_payload *bio_integrity_alloc(struct bio *bio,
						  gfp_t gfp_mask,
						  unsigned int nr_vecs)
{
	struct bio_integrity_payload *bip;
	struct bio_set *bs = bio->bi_pool;
	unsigned inline_vecs;

	/*
	 * No bio_set (or its integrity pool is not set up): fall back to
	 * kmalloc and size the inline vector array to hold all nr_vecs.
	 */
	if (!bs || !mempool_initialized(&bs->bio_integrity_pool)) {
		bip = kmalloc(struct_size(bip, bip_inline_vecs, nr_vecs), gfp_mask);
		inline_vecs = nr_vecs;
	} else {
		bip = mempool_alloc(&bs->bio_integrity_pool, gfp_mask);
		inline_vecs = BIP_INLINE_VECS;
	}

	if (unlikely(!bip))
		return ERR_PTR(-ENOMEM);

	memset(bip, 0, sizeof(*bip));

	if (nr_vecs > inline_vecs) {
		unsigned long idx = 0;

		/*
		 * Only reachable in the mempool case: in the kmalloc path
		 * above inline_vecs == nr_vecs, so bs is valid here (and in
		 * the err: path below).
		 */
		bip->bip_vec = bvec_alloc(gfp_mask, nr_vecs, &idx,
					  &bs->bvec_integrity_pool);
		if (!bip->bip_vec)
			goto err;
		bip->bip_max_vcnt = bvec_nr_vecs(idx);
		bip->bip_slab = idx;	/* remember slab for bvec_free() */
	} else {
		bip->bip_vec = bip->bip_inline_vecs;
		bip->bip_max_vcnt = inline_vecs;
	}

	/* Attach payload to the bio and mark it as carrying integrity data. */
	bip->bip_bio = bio;
	bio->bi_integrity = bip;
	bio->bi_opf |= REQ_INTEGRITY;

	return bip;
err:
	mempool_free(bip, &bs->bio_integrity_pool);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(bio_integrity_alloc);
82
83
84
85
86
87
88
89
/**
 * bio_integrity_free - Free bio integrity payload
 * @bio:	bio containing bip to be freed
 *
 * Description: Releases the integrity payload attached to @bio and clears
 * REQ_INTEGRITY.  If the protection buffer was allocated by the block
 * layer itself (BIP_BLOCK_INTEGRITY, see bio_integrity_prep()), it is
 * kfree()d here as well.  Must mirror bio_integrity_alloc() exactly.
 */
void bio_integrity_free(struct bio *bio)
{
	struct bio_integrity_payload *bip = bio_integrity(bio);
	struct bio_set *bs = bio->bi_pool;

	/*
	 * The buffer pointer passed to kmalloc() in bio_integrity_prep()
	 * is reconstructed from the first bvec (page + offset).
	 */
	if (bip->bip_flags & BIP_BLOCK_INTEGRITY)
		kfree(page_address(bip->bip_vec->bv_page) +
		      bip->bip_vec->bv_offset);

	/* Return payload/bvecs to whichever allocator they came from. */
	if (bs && mempool_initialized(&bs->bio_integrity_pool)) {
		bvec_free(&bs->bvec_integrity_pool, bip->bip_vec, bip->bip_slab);

		mempool_free(bip, &bs->bio_integrity_pool);
	} else {
		kfree(bip);
	}

	bio->bi_integrity = NULL;
	bio->bi_opf &= ~REQ_INTEGRITY;
}
110
111
112
113
114
115
116
117
118
119
120 int bio_integrity_add_page(struct bio *bio, struct page *page,
121 unsigned int len, unsigned int offset)
122 {
123 struct bio_integrity_payload *bip = bio_integrity(bio);
124 struct bio_vec *iv;
125
126 if (bip->bip_vcnt >= bip->bip_max_vcnt) {
127 printk(KERN_ERR "%s: bip_vec full\n", __func__);
128 return 0;
129 }
130
131 iv = bip->bip_vec + bip->bip_vcnt;
132
133 if (bip->bip_vcnt &&
134 bvec_gap_to_prev(bio->bi_disk->queue,
135 &bip->bip_vec[bip->bip_vcnt - 1], offset))
136 return 0;
137
138 iv->bv_page = page;
139 iv->bv_len = len;
140 iv->bv_offset = offset;
141 bip->bip_vcnt++;
142
143 return len;
144 }
145 EXPORT_SYMBOL(bio_integrity_add_page);
146
147
148
149
150
151
152
153 static blk_status_t bio_integrity_process(struct bio *bio,
154 struct bvec_iter *proc_iter, integrity_processing_fn *proc_fn)
155 {
156 struct blk_integrity *bi = blk_get_integrity(bio->bi_disk);
157 struct blk_integrity_iter iter;
158 struct bvec_iter bviter;
159 struct bio_vec bv;
160 struct bio_integrity_payload *bip = bio_integrity(bio);
161 blk_status_t ret = BLK_STS_OK;
162 void *prot_buf = page_address(bip->bip_vec->bv_page) +
163 bip->bip_vec->bv_offset;
164
165 iter.disk_name = bio->bi_disk->disk_name;
166 iter.interval = 1 << bi->interval_exp;
167 iter.seed = proc_iter->bi_sector;
168 iter.prot_buf = prot_buf;
169
170 __bio_for_each_segment(bv, bio, bviter, *proc_iter) {
171 void *kaddr = kmap_atomic(bv.bv_page);
172
173 iter.data_buf = kaddr + bv.bv_offset;
174 iter.data_size = bv.bv_len;
175
176 ret = proc_fn(&iter);
177 if (ret) {
178 kunmap_atomic(kaddr);
179 return ret;
180 }
181
182 kunmap_atomic(kaddr);
183 }
184 return ret;
185 }
186
187
188
189
190
191
192
193
194
195
196
197
198
199 bool bio_integrity_prep(struct bio *bio)
200 {
201 struct bio_integrity_payload *bip;
202 struct blk_integrity *bi = blk_get_integrity(bio->bi_disk);
203 struct request_queue *q = bio->bi_disk->queue;
204 void *buf;
205 unsigned long start, end;
206 unsigned int len, nr_pages;
207 unsigned int bytes, offset, i;
208 unsigned int intervals;
209 blk_status_t status;
210
211 if (!bi)
212 return true;
213
214 if (bio_op(bio) != REQ_OP_READ && bio_op(bio) != REQ_OP_WRITE)
215 return true;
216
217 if (!bio_sectors(bio))
218 return true;
219
220
221 if (bio_integrity(bio))
222 return true;
223
224 if (bio_data_dir(bio) == READ) {
225 if (!bi->profile->verify_fn ||
226 !(bi->flags & BLK_INTEGRITY_VERIFY))
227 return true;
228 } else {
229 if (!bi->profile->generate_fn ||
230 !(bi->flags & BLK_INTEGRITY_GENERATE))
231 return true;
232 }
233 intervals = bio_integrity_intervals(bi, bio_sectors(bio));
234
235
236 len = intervals * bi->tuple_size;
237 buf = kmalloc(len, GFP_NOIO | q->bounce_gfp);
238 status = BLK_STS_RESOURCE;
239 if (unlikely(buf == NULL)) {
240 printk(KERN_ERR "could not allocate integrity buffer\n");
241 goto err_end_io;
242 }
243
244 end = (((unsigned long) buf) + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
245 start = ((unsigned long) buf) >> PAGE_SHIFT;
246 nr_pages = end - start;
247
248
249 bip = bio_integrity_alloc(bio, GFP_NOIO, nr_pages);
250 if (IS_ERR(bip)) {
251 printk(KERN_ERR "could not allocate data integrity bioset\n");
252 kfree(buf);
253 status = BLK_STS_RESOURCE;
254 goto err_end_io;
255 }
256
257 bip->bip_flags |= BIP_BLOCK_INTEGRITY;
258 bip->bip_iter.bi_size = len;
259 bip_set_seed(bip, bio->bi_iter.bi_sector);
260
261 if (bi->flags & BLK_INTEGRITY_IP_CHECKSUM)
262 bip->bip_flags |= BIP_IP_CHECKSUM;
263
264
265 offset = offset_in_page(buf);
266 for (i = 0 ; i < nr_pages ; i++) {
267 int ret;
268 bytes = PAGE_SIZE - offset;
269
270 if (len <= 0)
271 break;
272
273 if (bytes > len)
274 bytes = len;
275
276 ret = bio_integrity_add_page(bio, virt_to_page(buf),
277 bytes, offset);
278
279 if (ret == 0) {
280 printk(KERN_ERR "could not attach integrity payload\n");
281 kfree(buf);
282 status = BLK_STS_RESOURCE;
283 goto err_end_io;
284 }
285
286 if (ret < bytes)
287 break;
288
289 buf += bytes;
290 len -= bytes;
291 offset = 0;
292 }
293
294
295 if (bio_data_dir(bio) == WRITE) {
296 bio_integrity_process(bio, &bio->bi_iter,
297 bi->profile->generate_fn);
298 } else {
299 bip->bio_iter = bio->bi_iter;
300 }
301 return true;
302
303 err_end_io:
304 bio->bi_status = status;
305 bio_endio(bio);
306 return false;
307
308 }
309 EXPORT_SYMBOL(bio_integrity_prep);
310
311
312
313
314
315
316
317
318
/**
 * bio_integrity_verify_fn - Integrity I/O completion worker
 * @work:	Work struct stored in bio to be verified
 *
 * Description: This workqueue function is called to complete a READ
 * request.  The function verifies the transferred integrity metadata
 * and then completes the original bio.
 */
static void bio_integrity_verify_fn(struct work_struct *work)
{
	struct bio_integrity_payload *bip =
		container_of(work, struct bio_integrity_payload, bip_work);
	struct bio *bio = bip->bip_bio;
	struct blk_integrity *bi = blk_get_integrity(bio->bi_disk);

	/*
	 * bio->bi_iter may have been advanced by splitting/completion, so
	 * verify against the saved bip->bio_iter (captured unadvanced in
	 * bio_integrity_prep()) rather than the live iterator.
	 */
	bio->bi_status = bio_integrity_process(bio, &bip->bio_iter,
					       bi->profile->verify_fn);
	/* Free the payload before re-entering bio_endio() so the
	 * REQ_INTEGRITY path is not taken a second time. */
	bio_integrity_free(bio);
	bio_endio(bio);
}
336
337
338
339
340
341
342
343
344
345
346
347
348 bool __bio_integrity_endio(struct bio *bio)
349 {
350 struct blk_integrity *bi = blk_get_integrity(bio->bi_disk);
351 struct bio_integrity_payload *bip = bio_integrity(bio);
352
353 if (bio_op(bio) == REQ_OP_READ && !bio->bi_status &&
354 (bip->bip_flags & BIP_BLOCK_INTEGRITY) && bi->profile->verify_fn) {
355 INIT_WORK(&bip->bip_work, bio_integrity_verify_fn);
356 queue_work(kintegrityd_wq, &bip->bip_work);
357 return false;
358 }
359
360 bio_integrity_free(bio);
361 return true;
362 }
363
364
365
366
367
368
369
370
371
372
373 void bio_integrity_advance(struct bio *bio, unsigned int bytes_done)
374 {
375 struct bio_integrity_payload *bip = bio_integrity(bio);
376 struct blk_integrity *bi = blk_get_integrity(bio->bi_disk);
377 unsigned bytes = bio_integrity_bytes(bi, bytes_done >> 9);
378
379 bip->bip_iter.bi_sector += bytes_done >> 9;
380 bvec_iter_advance(bip->bip_vec, &bip->bip_iter, bytes);
381 }
382
383
384
385
386
387
388
389 void bio_integrity_trim(struct bio *bio)
390 {
391 struct bio_integrity_payload *bip = bio_integrity(bio);
392 struct blk_integrity *bi = blk_get_integrity(bio->bi_disk);
393
394 bip->bip_iter.bi_size = bio_integrity_bytes(bi, bio_sectors(bio));
395 }
396 EXPORT_SYMBOL(bio_integrity_trim);
397
398
399
400
401
402
403
404
405
406 int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
407 gfp_t gfp_mask)
408 {
409 struct bio_integrity_payload *bip_src = bio_integrity(bio_src);
410 struct bio_integrity_payload *bip;
411
412 BUG_ON(bip_src == NULL);
413
414 bip = bio_integrity_alloc(bio, gfp_mask, bip_src->bip_vcnt);
415 if (IS_ERR(bip))
416 return PTR_ERR(bip);
417
418 memcpy(bip->bip_vec, bip_src->bip_vec,
419 bip_src->bip_vcnt * sizeof(struct bio_vec));
420
421 bip->bip_vcnt = bip_src->bip_vcnt;
422 bip->bip_iter = bip_src->bip_iter;
423
424 return 0;
425 }
426 EXPORT_SYMBOL(bio_integrity_clone);
427
428 int bioset_integrity_create(struct bio_set *bs, int pool_size)
429 {
430 if (mempool_initialized(&bs->bio_integrity_pool))
431 return 0;
432
433 if (mempool_init_slab_pool(&bs->bio_integrity_pool,
434 pool_size, bip_slab))
435 return -1;
436
437 if (biovec_init_pool(&bs->bvec_integrity_pool, pool_size)) {
438 mempool_exit(&bs->bio_integrity_pool);
439 return -1;
440 }
441
442 return 0;
443 }
444 EXPORT_SYMBOL(bioset_integrity_create);
445
446 void bioset_integrity_free(struct bio_set *bs)
447 {
448 mempool_exit(&bs->bio_integrity_pool);
449 mempool_exit(&bs->bvec_integrity_pool);
450 }
451
/*
 * bio_integrity_init - boot-time setup for the integrity subsystem.
 * Creates the verification workqueue and the payload slab; failure of
 * either is fatal (panic / SLAB_PANIC) since integrity support cannot
 * be disabled afterwards.
 */
void __init bio_integrity_init(void)
{
	/*
	 * kintegrityd rarely blocks but can burn a lot of CPU: hence
	 * WQ_HIGHPRI | WQ_CPU_INTENSIVE with max_active limited to 1.
	 * WQ_MEM_RECLAIM because I/O completion may be needed to make
	 * forward progress under memory pressure.
	 */
	kintegrityd_wq = alloc_workqueue("kintegrityd", WQ_MEM_RECLAIM |
					WQ_HIGHPRI | WQ_CPU_INTENSIVE, 1);
	if (!kintegrityd_wq)
		panic("Failed to create kintegrityd\n");

	/* Slab objects carry BIP_INLINE_VECS inline bvecs after the payload. */
	bip_slab = kmem_cache_create("bio_integrity_payload",
				     sizeof(struct bio_integrity_payload) +
				     sizeof(struct bio_vec) * BIP_INLINE_VECS,
				     0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
}