This source file includes the following definitions:
- sg_next
- sg_nents
- sg_nents_for_len
- sg_last
- sg_init_table
- sg_init_one
- sg_kmalloc
- sg_kfree
- __sg_free_table
- sg_free_table
- __sg_alloc_table
- sg_alloc_table
- __sg_alloc_table_from_pages
- sg_alloc_table_from_pages
- sgl_alloc_order
- sgl_alloc
- sgl_free_n_order
- sgl_free_order
- sgl_free
- __sg_page_iter_start
- sg_page_count
- __sg_page_iter_next
- sg_dma_page_count
- __sg_page_iter_dma_next
- sg_miter_start
- sg_miter_get_next_page
- sg_miter_skip
- sg_miter_next
- sg_miter_stop
- sg_copy_buffer
- sg_copy_from_buffer
- sg_copy_to_buffer
- sg_pcopy_from_buffer
- sg_pcopy_to_buffer
- sg_zero_buffer

#include <linux/export.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/kmemleak.h>

/**
 * sg_next - return the next scatterlist entry in a list
 * @sg:		The current sg entry
 *
 * Usually the next entry is @sg + 1, but if @sg + 1 is a chain entry this
 * jumps to the start of the chained scatterlist array.  Returns NULL if
 * @sg is the last entry.
 */
struct scatterlist *sg_next(struct scatterlist *sg)
{
	if (sg_is_last(sg))
		return NULL;

	sg++;
	if (unlikely(sg_is_chain(sg)))
		sg = sg_chain_ptr(sg);

	return sg;
}
EXPORT_SYMBOL(sg_next);

/**
 * sg_nents - return total count of entries in scatterlist
 * @sg:		The scatterlist
 *
 * Walks the list with sg_next() and counts entries until the end is
 * reached, so chained lists are handled correctly.
 */
int sg_nents(struct scatterlist *sg)
{
	int nents;
	for (nents = 0; sg; sg = sg_next(sg))
		nents++;
	return nents;
}
EXPORT_SYMBOL(sg_nents);
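
/*
 * Illustrative sketch (not part of the original file): for_each_sg() from
 * <linux/scatterlist.h> walks a possibly chained list via sg_next(); the
 * hypothetical helper below sums the entry lengths much like
 * sg_nents_for_len() below accumulates them.
 */
static u64 example_total_sg_length(struct scatterlist *sgl, unsigned int nents)
{
	struct scatterlist *sg;
	unsigned int i;
	u64 total = 0;

	for_each_sg(sgl, sg, nents, i)
		total += sg->length;

	return total;
}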

/**
 * sg_nents_for_len - return count of entries needed to cover a length
 * @sg:		The scatterlist
 * @len:	The total required length
 *
 * Returns the number of leading entries whose combined length reaches at
 * least @len, or -EINVAL if the scatterlist is too short.
 */
int sg_nents_for_len(struct scatterlist *sg, u64 len)
{
	int nents;
	u64 total;

	if (!len)
		return 0;

	for (nents = 0, total = 0; sg; sg = sg_next(sg)) {
		nents++;
		total += sg->length;
		if (total >= len)
			return nents;
	}

	return -EINVAL;
}
EXPORT_SYMBOL(sg_nents_for_len);
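
/*
 * Illustrative sketch (not part of the original file): clamp the entry
 * count to what a transfer of @len bytes actually needs before mapping,
 * as a block or crypto driver might.  Names are hypothetical.
 */
static int example_nents_for_transfer(struct scatterlist *sgl, u64 len)
{
	int nents = sg_nents_for_len(sgl, len);

	if (nents < 0)		/* list shorter than len: -EINVAL */
		return nents;

	/* map/use only the first nents entries here */
	return nents;
}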

/**
 * sg_last - return the last entry in a scatterlist
 * @sgl:	First entry in the scatterlist
 * @nents:	Number of entries in the scatterlist
 *
 * Note: iterates over the whole list to find the last entry, so it should
 * not be used in hot paths.
 */
struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
{
	struct scatterlist *sg, *ret = NULL;
	unsigned int i;

	for_each_sg(sgl, sg, nents, i)
		ret = sg;

	BUG_ON(!sg_is_last(ret));
	return ret;
}
EXPORT_SYMBOL(sg_last);

/**
 * sg_init_table - initialize a scatterlist array
 * @sgl:	The array to initialize
 * @nents:	Number of entries in the array
 *
 * Zeroes the entries and marks the last one as the end of the list.
 */
void sg_init_table(struct scatterlist *sgl, unsigned int nents)
{
	memset(sgl, 0, sizeof(*sgl) * nents);
	sg_init_marker(sgl, nents);
}
EXPORT_SYMBOL(sg_init_table);

/**
 * sg_init_one - initialize a single-entry sg list around a buffer
 * @sg:		The sg entry
 * @buf:	Virtual address of the buffer
 * @buflen:	Buffer length
 */
void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
	sg_init_table(sg, 1);
	sg_set_buf(sg, buf, buflen);
}
EXPORT_SYMBOL(sg_init_one);
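
/*
 * Illustrative sketch (not part of the original file): wrap a single
 * kmalloc'ed buffer in a one-entry scatterlist, e.g. to feed an API that
 * only takes sg lists.  Function and parameter names are hypothetical.
 * @buf must be a linearly-mapped kernel address (not stack or vmalloc
 * memory), since sg_set_buf() resolves it with virt_to_page().
 */
static int example_single_entry_sg(void *buf, unsigned int buflen)
{
	struct scatterlist sg;

	sg_init_one(&sg, buf, buflen);
	/* pass &sg with nents == 1 to the consumer here */
	return 0;
}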

/*
 * Default allocation and free helpers used by sg_alloc_table() and
 * sg_free_table().  Chunks of SG_MAX_SINGLE_ALLOC entries use a whole
 * page instead of kmalloc.
 */
static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		/*
		 * Full-page chunks come from __get_free_page(), which
		 * kmemleak does not track.  Report the allocation so that
		 * kmalloc'ed chunks chained off this page are not flagged
		 * as leaked.
		 */
		void *ptr = (void *) __get_free_page(gfp_mask);
		kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask);
		return ptr;
	} else
		return kmalloc_array(nents, sizeof(struct scatterlist),
				     gfp_mask);
}

static void sg_kfree(struct scatterlist *sg, unsigned int nents)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		kmemleak_free(sg);
		free_page((unsigned long) sg);
	} else
		kfree(sg);
}

/**
 * __sg_free_table - free the memory allocated for an sg table
 * @table:	The sg table header to use
 * @max_ents:	The maximum number of entries per single scatterlist
 * @nents_first_chunk: Number of entries in the (caller-provided) first
 *		scatterlist chunk, 0 if none
 * @free_fn:	Free function
 *
 * Walks the chained chunks of @table and releases each one with
 * @free_fn, except for a caller-provided first chunk.
 */
void __sg_free_table(struct sg_table *table, unsigned int max_ents,
		     unsigned int nents_first_chunk, sg_free_fn *free_fn)
{
	struct scatterlist *sgl, *next;
	unsigned curr_max_ents = nents_first_chunk ?: max_ents;

	if (unlikely(!table->sgl))
		return;

	sgl = table->sgl;
	while (table->orig_nents) {
		unsigned int alloc_size = table->orig_nents;
		unsigned int sg_size;

		/*
		 * If more than curr_max_ents entries are left, this chunk
		 * ends in a chain pointer: point 'next' at the chained
		 * chunk and count one entry less as payload.
		 */
		if (alloc_size > curr_max_ents) {
			next = sg_chain_ptr(&sgl[curr_max_ents - 1]);
			alloc_size = curr_max_ents;
			sg_size = alloc_size - 1;
		} else {
			sg_size = alloc_size;
			next = NULL;
		}

		table->orig_nents -= sg_size;
		if (nents_first_chunk)
			nents_first_chunk = 0;
		else
			free_fn(sgl, alloc_size);
		sgl = next;
		curr_max_ents = max_ents;
	}

	table->sgl = NULL;
}
EXPORT_SYMBOL(__sg_free_table);

/**
 * sg_free_table - free the memory allocated for an sg table
 * @table:	The table to free
 */
void sg_free_table(struct sg_table *table)
{
	__sg_free_table(table, SG_MAX_SINGLE_ALLOC, 0, sg_kfree);
}
EXPORT_SYMBOL(sg_free_table);

/**
 * __sg_alloc_table - allocate and initialize an sg table with given allocator
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @max_ents:	The maximum number of entries the allocator returns per call
 * @first_chunk: first SGL, if any, to use instead of allocating a new one
 * @nents_first_chunk: Number of entries in the @first_chunk, 0 if none
 * @gfp_mask:	GFP allocation mask
 * @alloc_fn:	Allocator to use
 *
 * Allocates the scatterlist in chunks of at most @max_ents entries and
 * chains the chunks together.  If an allocation fails part way through,
 * the partially built table is left for the caller to release with
 * __sg_free_table().
 */
int __sg_alloc_table(struct sg_table *table, unsigned int nents,
		     unsigned int max_ents, struct scatterlist *first_chunk,
		     unsigned int nents_first_chunk, gfp_t gfp_mask,
		     sg_alloc_fn *alloc_fn)
{
	struct scatterlist *sg, *prv;
	unsigned int left;
	unsigned curr_max_ents = nents_first_chunk ?: max_ents;
	unsigned prv_max_ents;

	memset(table, 0, sizeof(*table));

	if (nents == 0)
		return -EINVAL;
#ifdef CONFIG_ARCH_NO_SG_CHAIN
	if (WARN_ON_ONCE(nents > max_ents))
		return -EINVAL;
#endif

	left = nents;
	prv = NULL;
	do {
		unsigned int sg_size, alloc_size = left;

		if (alloc_size > curr_max_ents) {
			alloc_size = curr_max_ents;
			sg_size = alloc_size - 1;
		} else
			sg_size = alloc_size;

		left -= sg_size;

		if (first_chunk) {
			sg = first_chunk;
			first_chunk = NULL;
		} else {
			sg = alloc_fn(alloc_size, gfp_mask);
		}
		if (unlikely(!sg)) {
			/*
			 * Adjust entry count to reflect that the last
			 * entry of the previous chunk will not be used as
			 * a chain pointer, so that the cleanup path frees
			 * the right number of entries.
			 */
			if (prv)
				table->nents = ++table->orig_nents;

			return -ENOMEM;
		}

		sg_init_table(sg, alloc_size);
		table->nents = table->orig_nents += sg_size;

		/*
		 * If this is the first chunk, assign the sg table header.
		 * Otherwise chain it to the previous chunk.
		 */
		if (prv)
			sg_chain(prv, prv_max_ents, sg);
		else
			table->sgl = sg;

		/*
		 * If no more entries follow, mark the end.
		 */
		if (!left)
			sg_mark_end(&sg[sg_size - 1]);

		prv = sg;
		prv_max_ents = curr_max_ents;
		curr_max_ents = max_ents;
	} while (left);

	return 0;
}
EXPORT_SYMBOL(__sg_alloc_table);

/**
 * sg_alloc_table - allocate and initialize an sg table
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @gfp_mask:	GFP allocation mask
 *
 * Allocates and initializes an sg table using the default sg_kmalloc()
 * allocator.  On failure any partial allocation is released before the
 * error is returned.
 */
int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
	int ret;

	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
			       NULL, 0, gfp_mask, sg_kmalloc);
	if (unlikely(ret))
		__sg_free_table(table, SG_MAX_SINGLE_ALLOC, 0, sg_kfree);

	return ret;
}
EXPORT_SYMBOL(sg_alloc_table);
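
/*
 * Illustrative sketch (not part of the original file): allocate a table
 * for a hypothetical number of entries, hand it to a consumer, then free
 * it.  The helper name and the "populate" step are assumptions.
 */
static int example_build_table(unsigned int nents)
{
	struct sg_table table;
	int ret;

	ret = sg_alloc_table(&table, nents, GFP_KERNEL);
	if (ret)
		return ret;

	/* populate table.sgl, e.g. with sg_set_page() per entry */

	sg_free_table(&table);
	return 0;
}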

/**
 * __sg_alloc_table_from_pages - allocate and initialize an sg table from
 *				 an array of pages
 * @sgt:	The sg table header to use
 * @pages:	Pointer to an array of page pointers
 * @n_pages:	Number of pages in the pages array
 * @offset:	Offset from start of the first page to the start of a buffer
 * @size:	Number of valid bytes in the buffer (after offset)
 * @max_segment: Maximum size of a scatterlist element in bytes
 * @gfp_mask:	GFP allocation mask
 *
 * Merges physically contiguous pages into single scatterlist entries,
 * limiting each entry to at most @max_segment bytes.
 */
int __sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
				unsigned int n_pages, unsigned int offset,
				unsigned long size, unsigned int max_segment,
				gfp_t gfp_mask)
{
	unsigned int chunks, cur_page, seg_len, i;
	int ret;
	struct scatterlist *s;

	if (WARN_ON(!max_segment || offset_in_page(max_segment)))
		return -EINVAL;

	/* compute the number of contiguous chunks */
	chunks = 1;
	seg_len = 0;
	for (i = 1; i < n_pages; i++) {
		seg_len += PAGE_SIZE;
		if (seg_len >= max_segment ||
		    page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1) {
			chunks++;
			seg_len = 0;
		}
	}

	ret = sg_alloc_table(sgt, chunks, gfp_mask);
	if (unlikely(ret))
		return ret;

	/* merge contiguous pages into scatterlist entries */
	cur_page = 0;
	for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
		unsigned int j, chunk_size;

		/* look for the end of the current chunk */
		seg_len = 0;
		for (j = cur_page + 1; j < n_pages; j++) {
			seg_len += PAGE_SIZE;
			if (seg_len >= max_segment ||
			    page_to_pfn(pages[j]) !=
			    page_to_pfn(pages[j - 1]) + 1)
				break;
		}

		chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
		sg_set_page(s, pages[cur_page],
			    min_t(unsigned long, size, chunk_size), offset);
		size -= chunk_size;
		offset = 0;
		cur_page = j;
	}

	return 0;
}
EXPORT_SYMBOL(__sg_alloc_table_from_pages);

/**
 * sg_alloc_table_from_pages - allocate and initialize an sg table from
 *			       an array of pages
 * @sgt:	The sg table header to use
 * @pages:	Pointer to an array of page pointers
 * @n_pages:	Number of pages in the pages array
 * @offset:	Offset from start of the first page to the start of a buffer
 * @size:	Number of valid bytes in the buffer (after offset)
 * @gfp_mask:	GFP allocation mask
 *
 * Wrapper around __sg_alloc_table_from_pages() that uses the default
 * maximum segment size, SCATTERLIST_MAX_SEGMENT.
 */
int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
			      unsigned int n_pages, unsigned int offset,
			      unsigned long size, gfp_t gfp_mask)
{
	return __sg_alloc_table_from_pages(sgt, pages, n_pages, offset, size,
					   SCATTERLIST_MAX_SEGMENT, gfp_mask);
}
EXPORT_SYMBOL(sg_alloc_table_from_pages);
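
/*
 * Illustrative sketch (not part of the original file): build a table from
 * an already-pinned page array, as a driver might after pin_user_pages().
 * The helper name and its parameters are assumptions of the example.
 */
static int example_table_from_pages(struct page **pages, unsigned int n_pages,
				    unsigned long size)
{
	struct sg_table sgt;
	int ret;

	ret = sg_alloc_table_from_pages(&sgt, pages, n_pages, 0, size,
					GFP_KERNEL);
	if (ret)
		return ret;

	/* map with dma_map_sg() and use, then unmap */

	sg_free_table(&sgt);
	return 0;
}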

#ifdef CONFIG_SGL_ALLOC

/**
 * sgl_alloc_order - allocate a scatterlist and its pages
 * @length:	Length in bytes of the scatterlist. Must be at least one
 * @order:	Second argument for alloc_pages()
 * @chainable:	Whether or not to allocate an extra element in the scatterlist
 *		for chaining purposes
 * @gfp:	Memory allocation flags
 * @nent_p:	[out] Number of entries in the scatterlist that have pages
 *
 * Returns: A pointer to an initialized scatterlist or NULL upon failure.
 */
struct scatterlist *sgl_alloc_order(unsigned long long length,
				    unsigned int order, bool chainable,
				    gfp_t gfp, unsigned int *nent_p)
{
	struct scatterlist *sgl, *sg;
	struct page *page;
	unsigned int nent, nalloc;
	u32 elem_len;

	nent = round_up(length, PAGE_SIZE << order) >> (PAGE_SHIFT + order);
	/* Check for integer overflow */
	if (length > (nent << (PAGE_SHIFT + order)))
		return NULL;
	nalloc = nent;
	if (chainable) {
		/* Check for integer overflow */
		if (nalloc + 1 < nalloc)
			return NULL;
		nalloc++;
	}
	sgl = kmalloc_array(nalloc, sizeof(struct scatterlist),
			    (gfp & ~GFP_DMA) | __GFP_ZERO);
	if (!sgl)
		return NULL;

	sg_init_table(sgl, nalloc);
	sg = sgl;
	while (length) {
		elem_len = min_t(u64, length, PAGE_SIZE << order);
		page = alloc_pages(gfp, order);
		if (!page) {
			sgl_free(sgl);
			return NULL;
		}

		sg_set_page(sg, page, elem_len, 0);
		length -= elem_len;
		sg = sg_next(sg);
	}
	WARN_ONCE(length, "length = %lld\n", length);
	if (nent_p)
		*nent_p = nent;
	return sgl;
}
EXPORT_SYMBOL(sgl_alloc_order);

/**
 * sgl_alloc - allocate a scatterlist and its pages
 * @length:	Length in bytes of the scatterlist
 * @gfp:	Memory allocation flags
 * @nent_p:	[out] Number of entries in the scatterlist
 *
 * Returns: A pointer to an initialized scatterlist or NULL upon failure.
 */
struct scatterlist *sgl_alloc(unsigned long long length, gfp_t gfp,
			      unsigned int *nent_p)
{
	return sgl_alloc_order(length, 0, false, gfp, nent_p);
}
EXPORT_SYMBOL(sgl_alloc);
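
/*
 * Illustrative sketch (not part of the original file): allocate a
 * scatterlist backed by freshly allocated order-0 pages and release it
 * again.  The helper name is hypothetical.
 */
static int example_sgl_alloc(unsigned long long length)
{
	struct scatterlist *sgl;
	unsigned int nents;

	sgl = sgl_alloc(length, GFP_KERNEL, &nents);
	if (!sgl)
		return -ENOMEM;

	/* hand sgl/nents to the consumer here */

	sgl_free(sgl);
	return 0;
}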

/**
 * sgl_free_n_order - free a scatterlist and its pages
 * @sgl:	Scatterlist with one or more elements
 * @nents:	Maximum number of elements to free
 * @order:	Second argument for __free_pages()
 *
 * Frees the pages referenced by up to @nents elements of @sgl and then
 * the scatterlist array itself.
 */
void sgl_free_n_order(struct scatterlist *sgl, int nents, int order)
{
	struct scatterlist *sg;
	struct page *page;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		if (!sg)
			break;
		page = sg_page(sg);
		if (page)
			__free_pages(page, order);
	}
	kfree(sgl);
}
EXPORT_SYMBOL(sgl_free_n_order);

/**
 * sgl_free_order - free a scatterlist and its pages
 * @sgl:	Scatterlist with one or more elements
 * @order:	Second argument for __free_pages()
 */
void sgl_free_order(struct scatterlist *sgl, int order)
{
	sgl_free_n_order(sgl, INT_MAX, order);
}
EXPORT_SYMBOL(sgl_free_order);

/**
 * sgl_free - free a scatterlist of order-0 pages and the list itself
 * @sgl:	Scatterlist with one or more elements
 */
void sgl_free(struct scatterlist *sgl)
{
	sgl_free_order(sgl, 0);
}
EXPORT_SYMBOL(sgl_free);

#endif /* CONFIG_SGL_ALLOC */

void __sg_page_iter_start(struct sg_page_iter *piter,
			  struct scatterlist *sglist, unsigned int nents,
			  unsigned long pgoffset)
{
	piter->__pg_advance = 0;
	piter->__nents = nents;

	piter->sg = sglist;
	piter->sg_pgoffset = pgoffset;
}
EXPORT_SYMBOL(__sg_page_iter_start);

static int sg_page_count(struct scatterlist *sg)
{
	return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT;
}

bool __sg_page_iter_next(struct sg_page_iter *piter)
{
	if (!piter->__nents || !piter->sg)
		return false;

	piter->sg_pgoffset += piter->__pg_advance;
	piter->__pg_advance = 1;

	while (piter->sg_pgoffset >= sg_page_count(piter->sg)) {
		piter->sg_pgoffset -= sg_page_count(piter->sg);
		piter->sg = sg_next(piter->sg);
		if (!--piter->__nents || !piter->sg)
			return false;
	}

	return true;
}
EXPORT_SYMBOL(__sg_page_iter_next);
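
/*
 * Illustrative sketch (not part of the original file): walk every page
 * covered by a scatterlist with the page iterator driven by the helpers
 * above (for_each_sg_page() from <linux/scatterlist.h> wraps them).  The
 * function name is hypothetical.
 */
static unsigned int example_count_sg_pages(struct scatterlist *sgl,
					   unsigned int nents)
{
	struct sg_page_iter piter;
	unsigned int n = 0;

	for_each_sg_page(sgl, &piter, nents, 0) {
		struct page *page = sg_page_iter_page(&piter);

		(void)page;	/* e.g. set_page_dirty(page) */
		n++;
	}

	return n;
}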

static int sg_dma_page_count(struct scatterlist *sg)
{
	return PAGE_ALIGN(sg->offset + sg_dma_len(sg)) >> PAGE_SHIFT;
}

bool __sg_page_iter_dma_next(struct sg_dma_page_iter *dma_iter)
{
	struct sg_page_iter *piter = &dma_iter->base;

	if (!piter->__nents || !piter->sg)
		return false;

	piter->sg_pgoffset += piter->__pg_advance;
	piter->__pg_advance = 1;

	while (piter->sg_pgoffset >= sg_dma_page_count(piter->sg)) {
		piter->sg_pgoffset -= sg_dma_page_count(piter->sg);
		piter->sg = sg_next(piter->sg);
		if (!--piter->__nents || !piter->sg)
			return false;
	}

	return true;
}
EXPORT_SYMBOL(__sg_page_iter_dma_next);

/**
 * sg_miter_start - start mapping iteration over a sg list
 * @miter:	sg mapping iter to be started
 * @sgl:	sg list to iterate over
 * @nents:	number of sg entries
 * @flags:	SG_MITER_* flags; one of SG_MITER_TO_SG or SG_MITER_FROM_SG
 *		must be set
 *
 * Starts mapping iterator @miter.  sg_miter_next() performs the actual
 * mapping.
 */
void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
		    unsigned int nents, unsigned int flags)
{
	memset(miter, 0, sizeof(struct sg_mapping_iter));

	__sg_page_iter_start(&miter->piter, sgl, nents, 0);
	WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
	miter->__flags = flags;
}
EXPORT_SYMBOL(sg_miter_start);

static bool sg_miter_get_next_page(struct sg_mapping_iter *miter)
{
	if (!miter->__remaining) {
		struct scatterlist *sg;

		if (!__sg_page_iter_next(&miter->piter))
			return false;

		sg = miter->piter.sg;

		miter->__offset = miter->piter.sg_pgoffset ? 0 : sg->offset;
		miter->piter.sg_pgoffset += miter->__offset >> PAGE_SHIFT;
		miter->__offset &= PAGE_SIZE - 1;
		miter->__remaining = sg->offset + sg->length -
				     (miter->piter.sg_pgoffset << PAGE_SHIFT) -
				     miter->__offset;
		miter->__remaining = min_t(unsigned long, miter->__remaining,
					   PAGE_SIZE - miter->__offset);
	}

	return true;
}

/**
 * sg_miter_skip - reposition mapping iterator
 * @miter:	sg mapping iter to be skipped
 * @offset:	number of bytes to skip
 *
 * Stops the current mapping, if any, and advances @miter by @offset bytes.
 * Returns true if @miter contains the offset location, false otherwise.
 */
bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset)
{
	sg_miter_stop(miter);

	while (offset) {
		off_t consumed;

		if (!sg_miter_get_next_page(miter))
			return false;

		consumed = min_t(off_t, offset, miter->__remaining);
		miter->__offset += consumed;
		miter->__remaining -= consumed;
		offset -= consumed;
	}

	return true;
}
EXPORT_SYMBOL(sg_miter_skip);

/**
 * sg_miter_next - proceed mapping iterator to the next mapping
 * @miter:	sg mapping iter to proceed
 *
 * Maps the next chunk of the sg list and makes it available through
 * @miter->addr and @miter->length.  If SG_MITER_ATOMIC is set, the
 * mapping is done with kmap_atomic() and must be released with
 * sg_miter_stop() before sleeping.  Returns false when the iteration is
 * exhausted.
 */
bool sg_miter_next(struct sg_mapping_iter *miter)
{
	sg_miter_stop(miter);

	/*
	 * Get to the next page if necessary.
	 * __remaining and __offset were adjusted by sg_miter_stop().
	 */
	if (!sg_miter_get_next_page(miter))
		return false;

	miter->page = sg_page_iter_page(&miter->piter);
	miter->consumed = miter->length = miter->__remaining;

	if (miter->__flags & SG_MITER_ATOMIC)
		miter->addr = kmap_atomic(miter->page) + miter->__offset;
	else
		miter->addr = kmap(miter->page) + miter->__offset;

	return true;
}
EXPORT_SYMBOL(sg_miter_next);
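
/*
 * Illustrative sketch (not part of the original file): the typical
 * mapping-iterator loop, reading a list one mapped chunk at a time.  With
 * SG_MITER_ATOMIC the code must not sleep between sg_miter_next() and
 * sg_miter_stop().  The function name is hypothetical.
 */
static size_t example_sum_sg_bytes(struct scatterlist *sgl, unsigned int nents)
{
	struct sg_mapping_iter miter;
	size_t total = 0;

	sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC | SG_MITER_FROM_SG);
	while (sg_miter_next(&miter)) {
		/* miter.addr points at miter.length mapped, readable bytes */
		total += miter.length;
	}
	sg_miter_stop(&miter);

	return total;
}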

/**
 * sg_miter_stop - stop mapping iteration
 * @miter:	sg mapping iter to be stopped
 *
 * Releases the mapping established by the last sg_miter_next() and
 * advances the iterator by @miter->consumed bytes.  If SG_MITER_ATOMIC is
 * set, this must be called in the same context as the matching
 * sg_miter_next().
 */
void sg_miter_stop(struct sg_mapping_iter *miter)
{
	WARN_ON(miter->consumed > miter->length);

	/* drop resources from the last iteration */
	if (miter->addr) {
		miter->__offset += miter->consumed;
		miter->__remaining -= miter->consumed;

		if ((miter->__flags & SG_MITER_TO_SG) &&
		    !PageSlab(miter->page))
			flush_kernel_dcache_page(miter->page);

		if (miter->__flags & SG_MITER_ATOMIC) {
			WARN_ON_ONCE(preemptible());
			kunmap_atomic(miter->addr);
		} else
			kunmap(miter->page);

		miter->page = NULL;
		miter->addr = NULL;
		miter->length = 0;
		miter->consumed = 0;
	}
}
EXPORT_SYMBOL(sg_miter_stop);

/**
 * sg_copy_buffer - Copy data between a linear buffer and an SG list
 * @sgl:		The SG list
 * @nents:		Number of SG entries
 * @buf:		Where to copy from/to
 * @buflen:		The number of bytes to copy
 * @skip:		Number of bytes to skip before copying
 * @to_buffer:		transfer direction (true == from an sg list to a
 *			buffer, false == from a buffer to an sg list)
 *
 * Returns the number of copied bytes.
 */
size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
		      size_t buflen, off_t skip, bool to_buffer)
{
	unsigned int offset = 0;
	struct sg_mapping_iter miter;
	unsigned int sg_flags = SG_MITER_ATOMIC;

	if (to_buffer)
		sg_flags |= SG_MITER_FROM_SG;
	else
		sg_flags |= SG_MITER_TO_SG;

	sg_miter_start(&miter, sgl, nents, sg_flags);

	if (!sg_miter_skip(&miter, skip))
		return 0;

	while ((offset < buflen) && sg_miter_next(&miter)) {
		unsigned int len;

		len = min(miter.length, buflen - offset);

		if (to_buffer)
			memcpy(buf + offset, miter.addr, len);
		else
			memcpy(miter.addr, buf + offset, len);

		offset += len;
	}

	sg_miter_stop(&miter);

	return offset;
}
EXPORT_SYMBOL(sg_copy_buffer);

/**
 * sg_copy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl:		The SG list
 * @nents:		Number of SG entries
 * @buf:		Where to copy from
 * @buflen:		The number of bytes to copy
 *
 * Returns the number of copied bytes.
 */
size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			   const void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, (void *)buf, buflen, 0, false);
}
EXPORT_SYMBOL(sg_copy_from_buffer);

/**
 * sg_copy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl:		The SG list
 * @nents:		Number of SG entries
 * @buf:		Where to copy to
 * @buflen:		The number of bytes to copy
 *
 * Returns the number of copied bytes.
 */
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			 void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, 0, true);
}
EXPORT_SYMBOL(sg_copy_to_buffer);
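
/*
 * Illustrative sketch (not part of the original file): round-trip data
 * between an sg list and a linear bounce buffer.  The function name and
 * buffer are assumptions of the example.
 */
static void example_bounce_copy(struct scatterlist *sgl, unsigned int nents,
				void *buf, size_t buflen)
{
	size_t copied;

	/* scatterlist -> linear buffer */
	copied = sg_copy_to_buffer(sgl, nents, buf, buflen);
	/* linear buffer -> scatterlist */
	copied = sg_copy_from_buffer(sgl, nents, buf, copied);
	(void)copied;
}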

/**
 * sg_pcopy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl:		The SG list
 * @nents:		Number of SG entries
 * @buf:		Where to copy from
 * @buflen:		The number of bytes to copy
 * @skip:		Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 */
size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			    const void *buf, size_t buflen, off_t skip)
{
	return sg_copy_buffer(sgl, nents, (void *)buf, buflen, skip, false);
}
EXPORT_SYMBOL(sg_pcopy_from_buffer);
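
/*
 * Illustrative sketch (not part of the original file): write a payload
 * into the middle of an sg list, skipping over a header that is already
 * in place.  The function name and parameters are hypothetical.
 */
static size_t example_write_payload(struct scatterlist *sgl,
				    unsigned int nents, const void *payload,
				    size_t len, off_t hdr_len)
{
	return sg_pcopy_from_buffer(sgl, nents, payload, len, hdr_len);
}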

/**
 * sg_pcopy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl:		The SG list
 * @nents:		Number of SG entries
 * @buf:		Where to copy to
 * @buflen:		The number of bytes to copy
 * @skip:		Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 */
size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			  void *buf, size_t buflen, off_t skip)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
}
EXPORT_SYMBOL(sg_pcopy_to_buffer);

/**
 * sg_zero_buffer - Zero-out a part of an SG list
 * @sgl:		The SG list
 * @nents:		Number of SG entries
 * @buflen:		The number of bytes to zero out
 * @skip:		Number of bytes to skip before zeroing
 *
 * Returns the number of bytes zeroed.
 */
size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents,
		      size_t buflen, off_t skip)
{
	unsigned int offset = 0;
	struct sg_mapping_iter miter;
	unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;

	sg_miter_start(&miter, sgl, nents, sg_flags);

	if (!sg_miter_skip(&miter, skip))
		return 0;

	while (offset < buflen && sg_miter_next(&miter)) {
		unsigned int len;

		len = min(miter.length, buflen - offset);
		memset(miter.addr, 0, len);

		offset += len;
	}

	sg_miter_stop(&miter);
	return offset;
}
EXPORT_SYMBOL(sg_zero_buffer);