Lines matching refs:chunk, i.e. references to the symbol chunk in mm/percpu.c, grouped below by enclosing function. The number before each fragment is its line number in the file; a reconstructed sketch follows each group where one helps.

In pcpu_chunk_slot() (chunk is an argument):
 218  static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
 220  	if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
 223  	return pcpu_size_to_slot(chunk->free_size);
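
Chunks sit on the pcpu_slot[] lists indexed by how much free space they have; a chunk that cannot hold even a sizeof(int) allocation is parked in slot 0. The fragments above are almost the whole function; a reconstruction, with the one elided line (the early return) filled in from mainline kernels of this era, so treat it as an approximation:

	static int pcpu_chunk_slot(const struct pcpu_chunk *chunk)
	{
		/* too small to serve even the smallest allocation: slot 0 */
		if (chunk->free_size < sizeof(int) || chunk->contig_hint < sizeof(int))
			return 0;

		return pcpu_size_to_slot(chunk->free_size);
	}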
In pcpu_chunk_addr() (chunk is an argument):
 243  static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
 246  	return (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu] +
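
The only part of this helper not captured above is the final term; in mainline it is the page index converted to a byte offset. A reconstruction (the parameter list and the shift are filled in, so an approximation):

	static unsigned long pcpu_chunk_addr(struct pcpu_chunk *chunk,
					     unsigned int cpu, int page_idx)
	{
		/* cpu's unit base within the chunk plus the page offset */
		return (unsigned long)chunk->base_addr + pcpu_unit_offsets[cpu] +
			(page_idx << PAGE_SHIFT);
	}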
In pcpu_next_unpop() (chunk is an argument):
 250  static void __maybe_unused pcpu_next_unpop(struct pcpu_chunk *chunk,
 253  	*rs = find_next_zero_bit(chunk->populated, end, *rs);
 254  	*re = find_next_bit(chunk->populated, end, *rs + 1);

In pcpu_next_pop() (chunk is an argument):
 257  static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk,
 260  	*rs = find_next_bit(chunk->populated, end, *rs);
 261  	*re = find_next_zero_bit(chunk->populated, end, *rs + 1);
In the pcpu_for_each_unpop_region() macro (chunk is a parameter):
 270  #define pcpu_for_each_unpop_region(chunk, rs, re, start, end) \
 271  	for ((rs) = (start), pcpu_next_unpop((chunk), &(rs), &(re), (end)); \
 273  	     (rs) = (re) + 1, pcpu_next_unpop((chunk), &(rs), &(re), (end)))

In the pcpu_for_each_pop_region() macro (chunk is a parameter):
 275  #define pcpu_for_each_pop_region(chunk, rs, re, start, end) \
 276  	for ((rs) = (start), pcpu_next_pop((chunk), &(rs), &(re), (end)); \
 278  	     (rs) = (re) + 1, pcpu_next_pop((chunk), &(rs), &(re), (end)))
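
pcpu_next_unpop() and pcpu_next_pop() turn the chunk->populated bitmap into half-open [*rs, *re) runs of clear or set bits, and the two macros iterate those runs. The loop condition elided at lines 272 and 277 is (rs) < (re) in mainline. A sketch of the idiom the macros enable; chunk and pcpu_unit_pages are assumed in scope and the pr_debug body is illustrative only:

	int rs, re;

	/* visit every unpopulated page run of the chunk */
	pcpu_for_each_unpop_region(chunk, rs, re, 0, pcpu_unit_pages) {
		/* [rs, re) is a maximal run of clear bits in chunk->populated */
		pr_debug("pages [%d, %d) need populating\n", rs, re);
	}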
In pcpu_count_occupied_pages() (chunk is an argument):
 329  static int pcpu_count_occupied_pages(struct pcpu_chunk *chunk, int i)
 331  	int off = chunk->map[i] & ~1;
 332  	int end = chunk->map[i + 1] & ~1;
 335  	int prev = chunk->map[i - 1];
 341  	if (!PAGE_ALIGNED(end) && i + 1 < chunk->map_used) {
 342  		int next = chunk->map[i + 1];
 343  		int nend = chunk->map[i + 2] & ~1;
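
Each chunk->map[] entry is a byte offset into the unit, with bit 0 doubling as the in-use flag of the area starting there; that is what the & ~1 masking strips. This helper counts how many whole pages the area [map[i], map[i+1]) covers, widening across a partial page when the neighboring area is free past the page boundary. A reconstruction; the two conditions and the final return are filled in from mainline of this era, so an approximation:

	static int pcpu_count_occupied_pages(struct pcpu_chunk *chunk, int i)
	{
		int off = chunk->map[i] & ~1;	/* bit 0 is the in-use flag */
		int end = chunk->map[i + 1] & ~1;

		/* a free previous area may own the head's partial page */
		if (!PAGE_ALIGNED(off) && i > 0) {
			int prev = chunk->map[i - 1];

			if (!(prev & 1) && prev <= round_down(off, PAGE_SIZE))
				off = round_down(off, PAGE_SIZE);
		}

		/* a free next area may own the tail's partial page */
		if (!PAGE_ALIGNED(end) && i + 1 < chunk->map_used) {
			int next = chunk->map[i + 1];
			int nend = chunk->map[i + 2] & ~1;

			if (!(next & 1) && nend >= round_up(end, PAGE_SIZE))
				end = round_up(end, PAGE_SIZE);
		}

		return max_t(int, PFN_DOWN(end) - PFN_UP(off), 0);
	}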
In pcpu_chunk_relocate() (chunk is an argument):
 365  static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
 367  	int nslot = pcpu_chunk_slot(chunk);
 369  	if (chunk != pcpu_reserved_chunk && oslot != nslot) {
 371  		list_move(&chunk->list, &pcpu_slot[nslot]);
 373  		list_move_tail(&chunk->list, &pcpu_slot[nslot]);
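
The slot is recomputed after every change to free_size and the chunk is moved only when it differs; the reserved chunk is kept off the slot lists entirely so ordinary allocations never see it. Reconstructed, with the head/tail choice filled in from mainline:

	static void pcpu_chunk_relocate(struct pcpu_chunk *chunk, int oslot)
	{
		int nslot = pcpu_chunk_slot(chunk);

		if (chunk != pcpu_reserved_chunk && oslot != nslot) {
			if (oslot < nslot)	/* gained space: front of list */
				list_move(&chunk->list, &pcpu_slot[nslot]);
			else			/* lost space: back of list */
				list_move_tail(&chunk->list, &pcpu_slot[nslot]);
		}
	}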
In pcpu_need_to_extend() (chunk is an argument):
 396  static int pcpu_need_to_extend(struct pcpu_chunk *chunk, bool is_atomic)
 403  	if (chunk->map_alloc <
 404  	    chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW &&
 406  		schedule_work(&chunk->map_extend_work);
 411  	if (chunk->map_alloc >= chunk->map_used + margin)
 415  	while (new_alloc < chunk->map_used + margin)
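
An atomic caller cannot allocate a bigger area map itself, so when headroom drops below PCPU_ATOMIC_MAP_MARGIN_LOW it punts to the per-chunk work item and makes do with a small fixed margin; a sleepable caller demands the larger margin up front. A reconstruction; the margin values and the pcpu_async_enabled check are taken from mainline of this era, so an approximation:

	static int pcpu_need_to_extend(struct pcpu_chunk *chunk, bool is_atomic)
	{
		int margin, new_alloc;

		if (is_atomic) {
			margin = 3;

			/* running low: let the workqueue extend the map */
			if (chunk->map_alloc <
			    chunk->map_used + PCPU_ATOMIC_MAP_MARGIN_LOW &&
			    pcpu_async_enabled)
				schedule_work(&chunk->map_extend_work);
		} else {
			margin = PCPU_ATOMIC_MAP_MARGIN_HIGH;
		}

		if (chunk->map_alloc >= chunk->map_used + margin)
			return 0;

		/* double until map_used plus the margin fits */
		new_alloc = PCPU_DFL_MAP_ALLOC;
		while (new_alloc < chunk->map_used + margin)
			new_alloc *= 2;

		return new_alloc;
	}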
In pcpu_extend_area_map() (chunk is an argument):
 434  static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
 447  	if (new_alloc <= chunk->map_alloc)
 450  	old_size = chunk->map_alloc * sizeof(chunk->map[0]);
 451  	old = chunk->map;
 455  	chunk->map_alloc = new_alloc;
 456  	chunk->map = new;

In pcpu_map_extend_workfn() (chunk is a local):
 474  	struct pcpu_chunk *chunk = container_of(work, struct pcpu_chunk,
 479  	new_alloc = pcpu_need_to_extend(chunk, false);
 483  	pcpu_extend_area_map(chunk, new_alloc);
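
The extend path allocates the new map first and swaps it in under pcpu_lock; the new_alloc <= chunk->map_alloc recheck at line 447 handles the race where another extender won. The old buffer is freed only after the lock is dropped, because pcpu_mem_free() may end up in vfree(), which can sleep. A reconstruction along those lines, with the locking and error handling filled in from mainline (approximate); pcpu_map_extend_workfn() is just this function driven from the work item:

	static int pcpu_extend_area_map(struct pcpu_chunk *chunk, int new_alloc)
	{
		int *old = NULL, *new = NULL;
		size_t old_size = 0, new_size = new_alloc * sizeof(new[0]);
		unsigned long flags;

		new = pcpu_mem_zalloc(new_size);
		if (!new)
			return -ENOMEM;

		spin_lock_irqsave(&pcpu_lock, flags);

		/* someone else extended the map in the meantime */
		if (new_alloc <= chunk->map_alloc)
			goto out_unlock;

		old_size = chunk->map_alloc * sizeof(chunk->map[0]);
		old = chunk->map;

		memcpy(new, old, old_size);

		chunk->map_alloc = new_alloc;
		chunk->map = new;
		new = NULL;

	out_unlock:
		spin_unlock_irqrestore(&pcpu_lock, flags);

		/* may sleep (vfree), so must run outside pcpu_lock */
		pcpu_mem_free(old, old_size);
		pcpu_mem_free(new, new_size);

		return 0;
	}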
In pcpu_fit_in_area() (chunk is an argument):
 504  static int pcpu_fit_in_area(struct pcpu_chunk *chunk, int off, int this_size,
 528  	pcpu_next_unpop(chunk, &rs, &re, PFN_UP(off + this_size));
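
pcpu_fit_in_area() returns the head padding needed to place size bytes at align inside a candidate free area, or -1 if it cannot fit; with pop_only it also requires every page of the placement to be populated, using pcpu_next_unpop() (line 528) to find the first hole and retrying past it. A reconstruction from mainline of this era, so an approximation:

	static int pcpu_fit_in_area(struct pcpu_chunk *chunk, int off, int this_size,
				    int size, int align, bool pop_only)
	{
		int cand_off = off;

		while (true) {
			int head = ALIGN(cand_off, align) - off;
			int page_start, page_end, rs, re;

			if (this_size < head + size)
				return -1;	/* cannot fit in this area */

			if (!pop_only)
				return head;

			/*
			 * If the first unpopulated page lies beyond the end of
			 * the placement, every page is populated; otherwise
			 * retry from the end of the unpopulated run.
			 */
			page_start = PFN_DOWN(head + off);
			page_end = PFN_UP(head + off + size);

			rs = page_start;
			pcpu_next_unpop(chunk, &rs, &re, PFN_UP(off + this_size));
			if (rs >= page_end)
				return head;
			cand_off = re * PAGE_SIZE;
		}
	}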
In pcpu_alloc_area() (chunk is an argument):
 556  static int pcpu_alloc_area(struct pcpu_chunk *chunk, int size, int align,
 559  	int oslot = pcpu_chunk_slot(chunk);
 565  	for (i = chunk->first_free, p = chunk->map + i; i < chunk->map_used; i++, p++) {
 575  		head = pcpu_fit_in_area(chunk, off, this_size, size, align,
 579  			chunk->first_free = i;
 595  		chunk->free_size -= head;
 615  			sizeof(chunk->map[0]) * (chunk->map_used - i));
 616  		chunk->map_used += nr_extra;
 620  			chunk->first_free = i;
 634  		chunk->first_free = i + 1;
 637  		if (i + 1 == chunk->map_used)
 638  			chunk->contig_hint = max_contig; /* fully scanned */
 640  		chunk->contig_hint = max(chunk->contig_hint,
 643  		chunk->free_size -= size;
 646  		*occ_pages_p = pcpu_count_occupied_pages(chunk, i);
 647  		pcpu_chunk_relocate(chunk, oslot);
 651  	chunk->contig_hint = max_contig; /* fully scanned */
 652  	pcpu_chunk_relocate(chunk, oslot);
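
The scan starts at chunk->first_free and walks the area map; a free area's size is the distance to the next boundary, with both entries masked free of the in-use bit. When an area fits, its free head and tail (if any) are split off by memmove()-inserting up to two new boundaries (lines 615-616), the area's bit 0 is set to mark it busy, and free_size, contig_hint, first_free, and the slot position are updated. The masking conventions, restated as hypothetical helpers (these names do not exist in the kernel; they only spell out the encoding):

	/* chunk->map[i] is a byte offset; bit 0 marks the area as in use */
	static inline int  pcpu_area_off(int ent)     { return ent & ~1; }
	static inline bool pcpu_area_in_use(int ent)  { return ent & 1; }
	static inline int  pcpu_area_size(const int *map, int i)
	{
		return pcpu_area_off(map[i + 1]) - pcpu_area_off(map[i]);
	}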
In pcpu_free_area() (chunk is an argument):
 671  static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme,
 674  	int oslot = pcpu_chunk_slot(chunk);
 683  	j = chunk->map_used;
 686  		off = chunk->map[k];
 696  	if (i < chunk->first_free)
 697  		chunk->first_free = i;
 699  	p = chunk->map + i;
 701  	chunk->free_size += (p[1] & ~1) - off;
 703  	*occ_pages_p = pcpu_count_occupied_pages(chunk, i);
 715  		chunk->map_used -= to_free;
 717  			(chunk->map_used - i) * sizeof(chunk->map[0]));
 720  	chunk->contig_hint = max(chunk->map[i + 1] - chunk->map[i] - 1,
 721  	pcpu_chunk_relocate(chunk, oslot);
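
Freeing locates the <offset, in-use> pair by binary search (the bisection over map_used visible at lines 683-686), clears the in-use bit, credits free_size, then merges with a free next and/or previous neighbor by dropping boundaries with a single memmove(). A reconstruction with the elided search and merge steps filled in from mainline, so an approximation:

	static void pcpu_free_area(struct pcpu_chunk *chunk, int freeme,
				   int *occ_pages_p)
	{
		int oslot = pcpu_chunk_slot(chunk);
		int off = 0;
		unsigned i, j;
		int to_free = 0;
		int *p;

		freeme |= 1;	/* search for the <offset, in-use> pair */

		/* binary search over the sorted boundary offsets */
		i = 0;
		j = chunk->map_used;
		while (i != j) {
			unsigned k = (i + j) / 2;

			off = chunk->map[k];
			if (off < freeme)
				i = k + 1;
			else if (off > freeme)
				j = k;
			else
				i = j = k;
		}
		BUG_ON(off != freeme);

		if (i < chunk->first_free)
			chunk->first_free = i;

		p = chunk->map + i;
		*p = off &= ~1;			/* clear the in-use bit */
		chunk->free_size += (p[1] & ~1) - off;

		*occ_pages_p = pcpu_count_occupied_pages(chunk, i);

		/* merge with a free next and/or previous area */
		if (!(p[1] & 1))
			to_free++;
		if (i > 0 && !(p[-1] & 1)) {
			to_free++;
			i--;
			p--;
		}
		if (to_free) {
			chunk->map_used -= to_free;
			memmove(p + 1, p + 1 + to_free,
				(chunk->map_used - i) * sizeof(chunk->map[0]));
		}

		chunk->contig_hint = max(chunk->map[i + 1] - chunk->map[i] - 1,
					 chunk->contig_hint);
		pcpu_chunk_relocate(chunk, oslot);
	}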
In pcpu_alloc_chunk() (chunk is a local):
 726  	struct pcpu_chunk *chunk;
 728  	chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size);
 729  	if (!chunk)
 732  	chunk->map = pcpu_mem_zalloc(PCPU_DFL_MAP_ALLOC *
 733  				     sizeof(chunk->map[0]));
 734  	if (!chunk->map) {
 735  		pcpu_mem_free(chunk, pcpu_chunk_struct_size);
 739  	chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
 740  	chunk->map[0] = 0;
 741  	chunk->map[1] = pcpu_unit_size | 1;
 742  	chunk->map_used = 1;
 744  	INIT_LIST_HEAD(&chunk->list);
 745  	INIT_WORK(&chunk->map_extend_work, pcpu_map_extend_workfn);
 746  	chunk->free_size = pcpu_unit_size;
 747  	chunk->contig_hint = pcpu_unit_size;
 749  	return chunk;
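
These fragments cover nearly the entire constructor. A fresh chunk is a single free area spanning the whole unit: map[0] = 0 opens it, and map[1] = pcpu_unit_size | 1 is the closing boundary with the in-use bit set, so nothing allocates or merges past the end. Reconstructed; only the two error returns are filled in:

	static struct pcpu_chunk *pcpu_alloc_chunk(void)
	{
		struct pcpu_chunk *chunk;

		chunk = pcpu_mem_zalloc(pcpu_chunk_struct_size);
		if (!chunk)
			return NULL;

		chunk->map = pcpu_mem_zalloc(PCPU_DFL_MAP_ALLOC *
						sizeof(chunk->map[0]));
		if (!chunk->map) {
			pcpu_mem_free(chunk, pcpu_chunk_struct_size);
			return NULL;
		}

		/* one free area covering the whole unit, plus the terminator */
		chunk->map_alloc = PCPU_DFL_MAP_ALLOC;
		chunk->map[0] = 0;
		chunk->map[1] = pcpu_unit_size | 1;
		chunk->map_used = 1;

		INIT_LIST_HEAD(&chunk->list);
		INIT_WORK(&chunk->map_extend_work, pcpu_map_extend_workfn);
		chunk->free_size = pcpu_unit_size;
		chunk->contig_hint = pcpu_unit_size;

		return chunk;
	}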
In pcpu_free_chunk() (chunk is an argument):
 752  static void pcpu_free_chunk(struct pcpu_chunk *chunk)
 754  	if (!chunk)
 756  	pcpu_mem_free(chunk->map, chunk->map_alloc * sizeof(chunk->map[0]));
 757  	pcpu_mem_free(chunk, pcpu_chunk_struct_size);
In pcpu_chunk_populated() (chunk is an argument):
 770  static void pcpu_chunk_populated(struct pcpu_chunk *chunk,
 777  	bitmap_set(chunk->populated, page_start, nr);
 778  	chunk->nr_populated += nr;

In pcpu_chunk_depopulated() (chunk is an argument):
 792  static void pcpu_chunk_depopulated(struct pcpu_chunk *chunk,
 799  	bitmap_clear(chunk->populated, page_start, nr);
 800  	chunk->nr_populated -= nr;
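
Both helpers run under pcpu_lock and keep the populated bitmap, the per-chunk counter, and (in mainline of this era) the global pcpu_nr_empty_pop_pages pool in sync. A reconstruction; the lockdep assertion and the global-counter updates are assumptions taken from that mainline code:

	static void pcpu_chunk_populated(struct pcpu_chunk *chunk,
					 int page_start, int page_end)
	{
		int nr = page_end - page_start;

		lockdep_assert_held(&pcpu_lock);

		bitmap_set(chunk->populated, page_start, nr);
		chunk->nr_populated += nr;
		pcpu_nr_empty_pop_pages += nr;
	}

	static void pcpu_chunk_depopulated(struct pcpu_chunk *chunk,
					   int page_start, int page_end)
	{
		int nr = page_end - page_start;

		lockdep_assert_held(&pcpu_lock);

		bitmap_clear(chunk->populated, page_start, nr);
		chunk->nr_populated -= nr;
		pcpu_nr_empty_pop_pages -= nr;
	}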
Forward declarations (chunk is an argument in each):
 819  static int pcpu_populate_chunk(struct pcpu_chunk *chunk, int off, int size);
 820  static void pcpu_depopulate_chunk(struct pcpu_chunk *chunk, int off, int size);
 822  static void pcpu_destroy_chunk(struct pcpu_chunk *chunk);
In pcpu_alloc() (chunk is a local):
 877  	struct pcpu_chunk *chunk;
 904  		chunk = pcpu_reserved_chunk;
 906  		if (size > chunk->contig_hint) {
 911  		while ((new_alloc = pcpu_need_to_extend(chunk, is_atomic))) {
 914  			    pcpu_extend_area_map(chunk, new_alloc) < 0) {
 921  		off = pcpu_alloc_area(chunk, size, align, is_atomic,
 933  		list_for_each_entry(chunk, &pcpu_slot[slot], list) {
 934  			if (size > chunk->contig_hint)
 937  			new_alloc = pcpu_need_to_extend(chunk, is_atomic);
 942  				if (pcpu_extend_area_map(chunk,
 955  			off = pcpu_alloc_area(chunk, size, align, is_atomic,
 975  		chunk = pcpu_create_chunk();
 976  		if (!chunk) {
 983  		pcpu_chunk_relocate(chunk, -1);
1003  		pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
1004  			WARN_ON(chunk->immutable);
1006  			ret = pcpu_populate_chunk(chunk, rs, re);
1011  				pcpu_free_area(chunk, off, &occ_pages);
1015  			pcpu_chunk_populated(chunk, rs, re);
1022  	if (chunk != pcpu_reserved_chunk)
1030  		memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);
1032  	ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
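
Allocation tries the reserved chunk (for module static areas), then scans the slot lists from the best-fitting slot upward, creating a new chunk as a last resort. Once an area is reserved at off, the non-atomic path populates any missing pages of the allocated range before returning, undoing the area on failure. A sketch of that tail, reconstructed from the fragments at lines 1003-1032; flags, ret, err, occ_pages, cpu, and ptr are locals of the surrounding function, and the exact locking here is filled in from mainline, so treat it as approximate:

	area_found:
		spin_unlock_irqrestore(&pcpu_lock, flags);

		/* populate if not all pages are already there */
		if (!is_atomic) {
			int page_start = PFN_DOWN(off);
			int page_end = PFN_UP(off + size);
			int rs, re;

			pcpu_for_each_unpop_region(chunk, rs, re, page_start, page_end) {
				WARN_ON(chunk->immutable);

				ret = pcpu_populate_chunk(chunk, rs, re);

				spin_lock_irqsave(&pcpu_lock, flags);
				if (ret) {
					pcpu_free_area(chunk, off, &occ_pages);
					err = "failed to populate";
					goto fail_unlock;
				}
				pcpu_chunk_populated(chunk, rs, re);
				spin_unlock_irqrestore(&pcpu_lock, flags);
			}
		}

		/* the allocated pages are no longer empty populated pages */
		if (chunk != pcpu_reserved_chunk)
			pcpu_nr_empty_pop_pages -= occ_pages;

		/* clear the areas and return address relative to base address */
		for_each_possible_cpu(cpu)
			memset((void *)pcpu_chunk_addr(chunk, cpu, 0) + off, 0, size);

		ptr = __addr_to_pcpu_ptr(chunk->base_addr + off);
		return ptr;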
In pcpu_balance_workfn() (chunk is a local):
1117  	struct pcpu_chunk *chunk, *next;
1127  	list_for_each_entry_safe(chunk, next, free_head, list) {
1128  		WARN_ON(chunk->immutable);
1131  		if (chunk == list_first_entry(free_head, struct pcpu_chunk, list))
1134  		list_move(&chunk->list, &to_free);
1139  	list_for_each_entry_safe(chunk, next, &to_free, list) {
1142  		pcpu_for_each_pop_region(chunk, rs, re, 0, pcpu_unit_pages) {
1143  			pcpu_depopulate_chunk(chunk, rs, re);
1145  			pcpu_chunk_depopulated(chunk, rs, re);
1148  		pcpu_destroy_chunk(chunk);
1179  	list_for_each_entry(chunk, &pcpu_slot[slot], list) {
1180  		nr_unpop = pcpu_unit_pages - chunk->nr_populated;
1190  		pcpu_for_each_unpop_region(chunk, rs, re, 0, pcpu_unit_pages) {
1193  			ret = pcpu_populate_chunk(chunk, rs, rs + nr);
1197  			pcpu_chunk_populated(chunk, rs, rs + nr);
1210  		chunk = pcpu_create_chunk();
1211  		if (chunk) {
1213  			pcpu_chunk_relocate(chunk, -1);
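
The balance worker runs in two passes. First, fully free chunks on the highest slot (free_head) are moved to a private to_free list, except the first entry, which is kept as a cached reserve (line 1131); each listed chunk is then depopulated region by region and destroyed. Second, it repopulates pages chunk by chunk, creating a new chunk if needed (line 1210), until the pool of empty populated pages is back at target. A sketch of the tear-down pass, reconstructed from the fragments (approximate):

	list_for_each_entry_safe(chunk, next, &to_free, list) {
		int rs, re;

		/* release every populated region, keeping the bitmap in sync */
		pcpu_for_each_pop_region(chunk, rs, re, 0, pcpu_unit_pages) {
			pcpu_depopulate_chunk(chunk, rs, re);
			spin_lock_irq(&pcpu_lock);
			pcpu_chunk_depopulated(chunk, rs, re);
			spin_unlock_irq(&pcpu_lock);
		}
		pcpu_destroy_chunk(chunk);
	}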
In free_percpu() (chunk is a local):
1234  	struct pcpu_chunk *chunk;
1247  	chunk = pcpu_chunk_addr_search(addr);
1248  	off = addr - chunk->base_addr;
1250  	pcpu_free_area(chunk, off, &occ_pages);
1252  	if (chunk != pcpu_reserved_chunk)
1256  	if (chunk->free_size == pcpu_unit_size) {
1260  		if (pos != chunk) {
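
free_percpu() converts the per-CPU pointer back to a chunk-relative offset, frees the area under pcpu_lock, and credits the freed pages to the empty populated pool (line 1252). When the chunk ends up entirely free (line 1256), the balance worker is scheduled only if another fully free chunk already exists, so one empty chunk always stays cached. A reconstruction with the conversion, locking, and trigger filled in from mainline (approximate; ancillary details such as kmemleak are omitted):

	void free_percpu(void __percpu *ptr)
	{
		void *addr;
		struct pcpu_chunk *chunk;
		unsigned long flags;
		int off, occ_pages;

		if (!ptr)
			return;

		addr = __pcpu_ptr_to_addr(ptr);

		spin_lock_irqsave(&pcpu_lock, flags);

		chunk = pcpu_chunk_addr_search(addr);
		off = addr - chunk->base_addr;

		pcpu_free_area(chunk, off, &occ_pages);

		if (chunk != pcpu_reserved_chunk)
			pcpu_nr_empty_pop_pages += occ_pages;

		/* a second fully free chunk exists: let the worker reclaim one */
		if (chunk->free_size == pcpu_unit_size) {
			struct pcpu_chunk *pos;

			list_for_each_entry(pos, &pcpu_slot[pcpu_nr_slots - 1], list)
				if (pos != chunk) {
					pcpu_schedule_balance_work();
					break;
				}
		}

		spin_unlock_irqrestore(&pcpu_lock, flags);
	}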
In percpu_init_late() (chunk is a local):
2265  	struct pcpu_chunk *chunk;
2269  	for (i = 0; (chunk = target_chunks[i]); i++) {
2279  		memcpy(map, chunk->map, size);
2280  		chunk->map = map;
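
The first and reserved chunks are set up before the slab allocator exists, so their area maps initially live in static storage; percpu_init_late() gives each a properly allocated map and copies the contents over under pcpu_lock. A reconstruction; target_chunks and the fixed early map size are taken from mainline of this era, so an approximation:

	void __init percpu_init_late(void)
	{
		struct pcpu_chunk *target_chunks[] =
			{ pcpu_first_chunk, pcpu_reserved_chunk, NULL };
		struct pcpu_chunk *chunk;
		unsigned long flags;
		int i;

		for (i = 0; (chunk = target_chunks[i]); i++) {
			int *map;
			const size_t size = PERCPU_DYNAMIC_EARLY_SLOTS * sizeof(map[0]);

			BUILD_BUG_ON(size > PAGE_SIZE);

			map = pcpu_mem_zalloc(size);
			BUG_ON(!map);

			/* swap in the heap-allocated map under pcpu_lock */
			spin_lock_irqsave(&pcpu_lock, flags);
			memcpy(map, chunk->map, size);
			chunk->map = map;
			spin_unlock_irqrestore(&pcpu_lock, flags);
		}
	}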