/*
 * This source file includes the following definitions:
 * - mem_limit_func
 * - setup_bootmem
 * - map_pages
 * - set_kernel_text_rw
 * - free_initmem
 * - mark_rodata_ro
 * - mem_init
 * - pagetable_init
 * - gateway_init
 * - parisc_bootmem_free
 * - paging_init
 * - alloc_sid
 * - free_sid
 * - get_dirty_sids
 * - recycle_sids (SMP and UP variants)
 * - flush_tlb_all (SMP and UP variants)
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/gfp.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/unistd.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <linux/compat.h>

#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/pdc_chassis.h>
#include <asm/mmzone.h>
#include <asm/sections.h>
#include <asm/msgbuf.h>
#include <asm/sparsemem.h>

extern int  data_start;
extern void parisc_kernel_start(void);	/* Kernel entry point in head.S */

#if CONFIG_PGTABLE_LEVELS == 3
/* NOTE: This layout exactly conforms to the hybrid L2/L3 page table layout
 * with the first pmd adjacent to the pgd and below it. gcc doesn't actually
 * guarantee that global objects will be laid out in memory in the same order
 * as the order of declaration, so put these in different sections and use
 * the linker script to order them. */
pmd_t pmd0[PTRS_PER_PMD] __attribute__ ((__section__ (".data..vm0.pmd"), aligned(PAGE_SIZE)));
#endif

pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__ ((__section__ (".data..vm0.pgd"), aligned(PAGE_SIZE)));
pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __attribute__ ((__section__ (".data..vm0.pte"), aligned(PAGE_SIZE)));

static struct resource data_resource = {
	.name	= "Kernel data",
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

static struct resource code_resource = {
	.name	= "Kernel code",
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

static struct resource pdcdata_resource = {
	.name	= "PDC data (Page Zero)",
	.start	= 0,
	.end	= 0x9ff,
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource sysram_resources[MAX_PHYSMEM_RANGES] __ro_after_init;

/* The following array is initialized from the firmware specific
 * information retrieved in kernel/inventory.c.
 */

physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES] __initdata;
int npmem_ranges __initdata;

#ifdef CONFIG_64BIT
#define MAX_MEM         (1UL << MAX_PHYSMEM_BITS)
#else /* !CONFIG_64BIT */
#define MAX_MEM         (3584U*1024U*1024U)
#endif /* !CONFIG_64BIT */

static unsigned long mem_limit __read_mostly = MAX_MEM;

static void __init mem_limit_func(void)
{
	char *cp, *end;
	unsigned long limit;

	/* We need this before __setup() functions are called */

	limit = MAX_MEM;
	for (cp = boot_command_line; *cp; ) {
		if (memcmp(cp, "mem=", 4) == 0) {
			cp += 4;
			limit = memparse(cp, &end);
			if (end != cp)
				break;
			cp = end;
		} else {
			while (*cp != ' ' && *cp)
				++cp;
			while (*cp == ' ')
				++cp;
		}
	}

	if (limit < mem_limit)
		mem_limit = limit;
}
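
/*
 * Worked example of the "mem=" handling above: booting with "mem=512M"
 * makes memparse() return 0x20000000, so mem_limit drops from MAX_MEM
 * to 512 MB and setup_bootmem() below truncates the detected memory
 * ranges to fit under that limit.
 */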

/* Largest gap (1 GB worth of pages) tolerated between physical memory
 * ranges when CONFIG_SPARSEMEM is off; ranges past a larger gap are
 * dropped by setup_bootmem() below. */
#define MAX_GAP (0x40000000UL >> PAGE_SHIFT)

static void __init setup_bootmem(void)
{
	unsigned long mem_max;
#ifndef CONFIG_SPARSEMEM
	physmem_range_t pmem_holes[MAX_PHYSMEM_RANGES - 1];
	int npmem_holes;
#endif
	int i, sysram_resource_count;

	disable_sr_hashing(); /* Turn off space register hashing */

	/*
	 * Sort the ranges. Since the number of ranges is typically
	 * small, and performance is not an issue here, just do
	 * a simple insertion sort.
	 */
	for (i = 1; i < npmem_ranges; i++) {
		int j;

		for (j = i; j > 0; j--) {
			physmem_range_t tmp;

			if (pmem_ranges[j-1].start_pfn <
			    pmem_ranges[j].start_pfn) {
				break;
			}
			tmp = pmem_ranges[j-1];
			pmem_ranges[j-1] = pmem_ranges[j];
			pmem_ranges[j] = tmp;
		}
	}

#ifndef CONFIG_SPARSEMEM
	/*
	 * Throw out ranges that are too far apart (controlled by
	 * MAX_GAP).
	 */
	for (i = 1; i < npmem_ranges; i++) {
		if (pmem_ranges[i].start_pfn -
			(pmem_ranges[i-1].start_pfn +
			 pmem_ranges[i-1].pages) > MAX_GAP) {
			npmem_ranges = i;
			printk("Large gap in memory detected (%ld pages). "
			       "Consider turning on CONFIG_SPARSEMEM\n",
			       pmem_ranges[i].start_pfn -
			       (pmem_ranges[i-1].start_pfn +
			        pmem_ranges[i-1].pages));
			break;
		}
	}
#endif

	/* Print the memory ranges */
	pr_info("Memory Ranges:\n");

	for (i = 0; i < npmem_ranges; i++) {
		struct resource *res = &sysram_resources[i];
		unsigned long start;
		unsigned long size;

		size = (pmem_ranges[i].pages << PAGE_SHIFT);
		start = (pmem_ranges[i].start_pfn << PAGE_SHIFT);
		pr_info("%2d) Start 0x%016lx End 0x%016lx Size %6ld MB\n",
			i, start, start + (size - 1), size >> 20);

		/* register the range as a "System RAM" resource */
		res->name = "System RAM";
		res->start = start;
		res->end = start + size - 1;
		res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
		request_resource(&iomem_resource, res);
	}

	sysram_resource_count = npmem_ranges;

	/*
	 * For 32 bit kernels we limit the amount of memory we can
	 * support, in order to preserve enough kernel address space
	 * for other purposes. For 64 bit kernels we don't normally
	 * limit the memory, but this mechanism can be used to
	 * artificially limit the amount of memory (and it is written
	 * to work with multiple memory ranges).
	 */
	mem_limit_func();	/* check for "mem=" argument */

	mem_max = 0;
	for (i = 0; i < npmem_ranges; i++) {
		unsigned long rsize;

		rsize = pmem_ranges[i].pages << PAGE_SHIFT;
		if ((mem_max + rsize) > mem_limit) {
			printk(KERN_WARNING "Memory truncated to %ld MB\n", mem_limit >> 20);
			if (mem_max == mem_limit)
				npmem_ranges = i;
			else {
				pmem_ranges[i].pages = (mem_limit >> PAGE_SHIFT)
						       - (mem_max >> PAGE_SHIFT);
				npmem_ranges = i + 1;
				mem_max = mem_limit;
			}
			break;
		}
		mem_max += rsize;
	}

	printk(KERN_INFO "Total Memory: %ld MB\n", mem_max >> 20);

#ifndef CONFIG_SPARSEMEM
	/* Merge the ranges, keeping track of the holes */
	{
		unsigned long end_pfn;
		unsigned long hole_pages;

		npmem_holes = 0;
		end_pfn = pmem_ranges[0].start_pfn + pmem_ranges[0].pages;
		for (i = 1; i < npmem_ranges; i++) {
			hole_pages = pmem_ranges[i].start_pfn - end_pfn;
			if (hole_pages) {
				pmem_holes[npmem_holes].start_pfn = end_pfn;
				pmem_holes[npmem_holes++].pages = hole_pages;
				end_pfn += hole_pages;
			}
			end_pfn += pmem_ranges[i].pages;
		}

		pmem_ranges[0].pages = end_pfn - pmem_ranges[0].start_pfn;
		npmem_ranges = 1;
	}
#endif

	/*
	 * Initialize and free the full range of memory in each range.
	 */
	max_pfn = 0;
	for (i = 0; i < npmem_ranges; i++) {
		unsigned long start_pfn;
		unsigned long npages;
		unsigned long start;
		unsigned long size;

		start_pfn = pmem_ranges[i].start_pfn;
		npages = pmem_ranges[i].pages;

		start = start_pfn << PAGE_SHIFT;
		size = npages << PAGE_SHIFT;

		/* add system RAM memblock */
		memblock_add(start, size);

		if ((start_pfn + npages) > max_pfn)
			max_pfn = start_pfn + npages;
	}

	/*
	 * We can't use memblock top-down allocations because we only
	 * created the initial mapping up to KERNEL_INITIAL_SIZE in
	 * the assembly bootup code.
	 */
	memblock_set_bottom_up(true);

	/* IOMMU is always used to access "high mem" on those boxes
	 * that can support enough mem that a PCI device couldn't
	 * directly DMA to any physical addresses.
	 * ISA DMA support will need to revisit this.
	 */
	max_low_pfn = max_pfn;

	/* reserve PAGE0 pdc memory, kernel text/data/bss & bootmap */

#define PDC_CONSOLE_IO_IODC_SIZE 32768

	memblock_reserve(0UL, (unsigned long)(PAGE0->mem_free +
				PDC_CONSOLE_IO_IODC_SIZE));
	memblock_reserve(__pa(KERNEL_BINARY_TEXT_START),
			(unsigned long)(_end - KERNEL_BINARY_TEXT_START));

#ifndef CONFIG_SPARSEMEM

	/* reserve the holes */

	for (i = 0; i < npmem_holes; i++) {
		memblock_reserve((pmem_holes[i].start_pfn << PAGE_SHIFT),
				 (pmem_holes[i].pages << PAGE_SHIFT));
	}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start) {
		printk(KERN_INFO "initrd: %08lx-%08lx\n", initrd_start, initrd_end);
		if (__pa(initrd_start) < mem_max) {
			unsigned long initrd_reserve;

			if (__pa(initrd_end) > mem_max) {
				initrd_reserve = mem_max - __pa(initrd_start);
			} else {
				initrd_reserve = initrd_end - initrd_start;
			}
			initrd_below_start_ok = 1;
			printk(KERN_INFO "initrd: reserving %08lx-%08lx (mem_max %08lx)\n", __pa(initrd_start), __pa(initrd_start) + initrd_reserve, mem_max);

			memblock_reserve(__pa(initrd_start), initrd_reserve);
		}
	}
#endif

	data_resource.start = virt_to_phys(&data_start);
	data_resource.end = virt_to_phys(_end) - 1;
	code_resource.start = virt_to_phys(_text);
	code_resource.end = virt_to_phys(&data_start) - 1;

	/* We don't know which region the kernel will be in, so try
	 * all of them.
	 */
	for (i = 0; i < sysram_resource_count; i++) {
		struct resource *res = &sysram_resources[i];
		request_resource(res, &code_resource);
		request_resource(res, &data_resource);
	}
	request_resource(&sysram_resources[0], &pdcdata_resource);

	/* Initialize Page Deallocation Table (PDT) and check for bad memory. */
	pdc_pdt_init();

	memblock_allow_resize();
	memblock_dump_all();
}

static bool kernel_set_to_readonly;

static void __init map_pages(unsigned long start_vaddr,
			     unsigned long start_paddr, unsigned long size,
			     pgprot_t pgprot, int force)
{
	pgd_t *pg_dir;
	pmd_t *pmd;
	pte_t *pg_table;
	unsigned long end_paddr;
	unsigned long start_pmd;
	unsigned long start_pte;
	unsigned long tmp1;
	unsigned long tmp2;
	unsigned long address;
	unsigned long vaddr;
	unsigned long ro_start;
	unsigned long ro_end;
	unsigned long kernel_start, kernel_end;

	ro_start = __pa((unsigned long)_text);
	ro_end   = __pa((unsigned long)&data_start);
	kernel_start = __pa((unsigned long)&__init_begin);
	kernel_end  = __pa((unsigned long)&_end);

	end_paddr = start_paddr + size;

	pg_dir = pgd_offset_k(start_vaddr);

#if PTRS_PER_PMD == 1
	start_pmd = 0;
#else
	start_pmd = ((start_vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
#endif
	start_pte = ((start_vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));

	address = start_paddr;
	vaddr = start_vaddr;
	while (address < end_paddr) {
#if PTRS_PER_PMD == 1
		pmd = (pmd_t *)__pa(pg_dir);
#else
		pmd = (pmd_t *)pgd_address(*pg_dir);

		/*
		 * pmd is physical at this point
		 */

		if (!pmd) {
			pmd = memblock_alloc(PAGE_SIZE << PMD_ORDER,
					     PAGE_SIZE << PMD_ORDER);
			if (!pmd)
				panic("pmd allocation failed.\n");
			pmd = (pmd_t *) __pa(pmd);
		}

		pgd_populate(NULL, pg_dir, __va(pmd));
#endif
		pg_dir++;

		/* now change pmd to kernel virtual addresses */

		pmd = (pmd_t *)__va(pmd) + start_pmd;
		for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++, pmd++) {

			/*
			 * pg_table is physical at this point
			 */

			pg_table = (pte_t *)pmd_address(*pmd);
			if (!pg_table) {
				pg_table = memblock_alloc(PAGE_SIZE,
							  PAGE_SIZE);
				if (!pg_table)
					panic("page table allocation failed\n");
				pg_table = (pte_t *) __pa(pg_table);
			}

			pmd_populate_kernel(NULL, pmd, __va(pg_table));

			/* now change pg_table to kernel virtual addresses */

			pg_table = (pte_t *) __va(pg_table) + start_pte;
			for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
				pte_t pte;
				pgprot_t prot;
				bool huge = false;

				if (force) {
					prot = pgprot;
				} else if (address < kernel_start || address >= kernel_end) {
					/* outside kernel memory */
					prot = PAGE_KERNEL;
				} else if (!kernel_set_to_readonly) {
					/* still initializing, allow writing to RO memory */
					prot = PAGE_KERNEL_RWX;
					huge = true;
				} else if (address >= ro_start) {
					/* Code (ro) and data areas */
					prot = (address < ro_end) ?
						PAGE_KERNEL_EXEC : PAGE_KERNEL;
					huge = true;
				} else {
					prot = PAGE_KERNEL;
				}

				pte = __mk_pte(address, prot);
				if (huge)
					pte = pte_mkhuge(pte);

				if (address >= end_paddr)
					break;

				set_pte(pg_table, pte);

				address += PAGE_SIZE;
				vaddr += PAGE_SIZE;
			}
			start_pte = 0;

			if (address >= end_paddr)
				break;
		}
		start_pmd = 0;
	}
}
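
/*
 * Usage sketch (mirroring the callers later in this file): to map a
 * physical range at its kernel virtual address with the normal
 * kernel-text protection logic applied, call
 *
 *	map_pages((unsigned long)__va(start_paddr), start_paddr,
 *		  size, PAGE_KERNEL, 0);
 *
 * Passing force=1 skips the text/rodata checks and applies pgprot
 * unconditionally, which is how set_kernel_text_rw() and gateway_init()
 * install pages with special permissions.
 */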

void __init set_kernel_text_rw(int enable_read_write)
{
	unsigned long start = (unsigned long) __init_begin;
	unsigned long end   = (unsigned long) &data_start;

	map_pages(start, __pa(start), end - start,
		  PAGE_KERNEL_RWX, enable_read_write ? 1 : 0);

	/* force the kernel to see the new page table entries */
	flush_cache_all();
	flush_tlb_all();
}

void __ref free_initmem(void)
{
	unsigned long init_begin = (unsigned long)__init_begin;
	unsigned long init_end = (unsigned long)__init_end;
	unsigned long kernel_end  = (unsigned long)&_end;

	/* Remap kernel text and data, but do not touch init section yet. */
	kernel_set_to_readonly = true;
	map_pages(init_end, __pa(init_end), kernel_end - init_end,
		  PAGE_KERNEL, 0);

	/* The init text pages are marked R-X.  We have to
	 * flush the icache and mark them RW-
	 *
	 * This is tricky, because map_pages is in the init section.
	 * Do a dummy remap of the data section first (the data
	 * section is already PAGE_KERNEL) to pull in the TLB entries
	 * for map_kernel */
	map_pages(init_begin, __pa(init_begin), init_end - init_begin,
		  PAGE_KERNEL_RWX, 1);

	/* now remap at PAGE_KERNEL since the TLB is pre-primed to execute
	 * map_pages */
	map_pages(init_begin, __pa(init_begin), init_end - init_begin,
		  PAGE_KERNEL, 1);

	/* force the kernel to see the new TLB entries */
	__flush_tlb_range(0, init_begin, kernel_end);

	/* finally dump all the instructions which were cached, since the
	 * pages are no-longer executable */
	flush_icache_range(init_begin, init_end);

	free_initmem_default(POISON_FREE_INITMEM);

	/* set up a new led state on systems shipped LED State panel */
	pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);
}

#ifdef CONFIG_STRICT_KERNEL_RWX
void mark_rodata_ro(void)
{
	/* rodata memory was already mapped with KERNEL_RO access rights by
	 * pagetable_init() and map_pages(). No need to do additional stuff here */
	unsigned long roai_size = __end_ro_after_init - __start_ro_after_init;

	pr_info("Write protected read-only-after-init data: %luk\n", roai_size >> 10);
}
#endif

/*
 * Just an arbitrary offset to serve as a "hole" between mapping areas
 * (between top of physical memory and a potential pcxl dma mapping
 * area, and below the vmalloc mapping area).
 *
 * The current 32K value just means that there will be a 32K "hole"
 * between mapping areas. That means that any out-of-bounds memory
 * accesses will hopefully be caught. The vmalloc() routines leaves
 * a hole of 4kB between each vmalloced area for the same reason.
 */

/* Leave room for gateway page expansion */
#if KERNEL_MAP_START < GATEWAY_PAGE_SIZE
#error KERNEL_MAP_START is in gateway reserved region
#endif
#define MAP_START (KERNEL_MAP_START)

#define VM_MAP_OFFSET  (32*1024)
#define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \
				     & ~(VM_MAP_OFFSET-1)))
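
/*
 * Worked example: SET_MAP_OFFSET() returns the next 32 KB boundary
 * strictly above x, e.g. x = 0x12345 gives (0x12345 + 0x8000) &
 * ~0x7fff = 0x18000, guaranteeing a non-empty guard hole between
 * consecutive mapping areas.
 */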

void *parisc_vmalloc_start __ro_after_init;
EXPORT_SYMBOL(parisc_vmalloc_start);

#ifdef CONFIG_PA11
unsigned long pcxl_dma_start __ro_after_init;
#endif

void __init mem_init(void)
{
	/* Do sanity checks on IPC (compat) structures */
	BUILD_BUG_ON(sizeof(struct ipc64_perm) != 48);
#ifndef CONFIG_64BIT
	BUILD_BUG_ON(sizeof(struct semid64_ds) != 80);
	BUILD_BUG_ON(sizeof(struct msqid64_ds) != 104);
	BUILD_BUG_ON(sizeof(struct shmid64_ds) != 104);
#endif
#ifdef CONFIG_COMPAT
	BUILD_BUG_ON(sizeof(struct compat_ipc64_perm) != sizeof(struct ipc64_perm));
	BUILD_BUG_ON(sizeof(struct compat_semid64_ds) != 80);
	BUILD_BUG_ON(sizeof(struct compat_msqid64_ds) != 104);
	BUILD_BUG_ON(sizeof(struct compat_shmid64_ds) != 104);
#endif

	/* Do sanity checks on page table constants */
	BUILD_BUG_ON(PTE_ENTRY_SIZE != sizeof(pte_t));
	BUILD_BUG_ON(PMD_ENTRY_SIZE != sizeof(pmd_t));
	BUILD_BUG_ON(PGD_ENTRY_SIZE != sizeof(pgd_t));
	BUILD_BUG_ON(PAGE_SHIFT + BITS_PER_PTE + BITS_PER_PMD + BITS_PER_PGD
			> BITS_PER_LONG);

	high_memory = __va((max_pfn << PAGE_SHIFT));
	set_max_mapnr(max_low_pfn);
	memblock_free_all();

#ifdef CONFIG_PA11
	if (boot_cpu_data.cpu_type == pcxl2 || boot_cpu_data.cpu_type == pcxl) {
		pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START);
		parisc_vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start
						+ PCXL_DMA_MAP_SIZE);
	} else
#endif
		parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);

	mem_init_print_info(NULL);

#if 0
	/*
	 * Do not expose the virtual kernel memory layout to userspace.
	 * But keep code for debugging purposes.
	 */
	printk("virtual kernel memory layout:\n"
	       "     vmalloc : 0x%px - 0x%px   (%4ld MB)\n"
	       "     fixmap  : 0x%px - 0x%px   (%4ld kB)\n"
	       "     memory  : 0x%px - 0x%px   (%4ld MB)\n"
	       "       .init : 0x%px - 0x%px   (%4ld kB)\n"
	       "       .data : 0x%px - 0x%px   (%4ld kB)\n"
	       "       .text : 0x%px - 0x%px   (%4ld kB)\n",

	       (void*)VMALLOC_START, (void*)VMALLOC_END,
	       (VMALLOC_END - VMALLOC_START) >> 20,

	       (void *)FIXMAP_START, (void *)(FIXMAP_START + FIXMAP_SIZE),
	       (unsigned long)(FIXMAP_SIZE / 1024),

	       __va(0), high_memory,
	       ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,

	       __init_begin, __init_end,
	       ((unsigned long)__init_end - (unsigned long)__init_begin) >> 10,

	       _etext, _edata,
	       ((unsigned long)_edata - (unsigned long)_etext) >> 10,

	       _text, _etext,
	       ((unsigned long)_etext - (unsigned long)_text) >> 10);
#endif
}

unsigned long *empty_zero_page __ro_after_init;
EXPORT_SYMBOL(empty_zero_page);

/*
 * pagetable_init() sets up the page tables
 *
 * Note that gateway_init() places the Linux gateway page at page 0.
 * Since gateway pages cannot be dereferenced this has the desirable
 * side effect of trapping those pesky NULL-reference errors in the
 * kernel.
 */
static void __init pagetable_init(void)
{
	int range;

	/* Map each physical memory range to its kernel vaddr */

	for (range = 0; range < npmem_ranges; range++) {
		unsigned long start_paddr;
		unsigned long end_paddr;
		unsigned long size;

		start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT;
		size = pmem_ranges[range].pages << PAGE_SHIFT;
		end_paddr = start_paddr + size;

		map_pages((unsigned long)__va(start_paddr), start_paddr,
			  size, PAGE_KERNEL, 0);
	}

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_end && initrd_end > mem_limit) {
		printk(KERN_INFO "initrd: mapping %08lx-%08lx\n", initrd_start, initrd_end);
		map_pages(initrd_start, __pa(initrd_start),
			  initrd_end - initrd_start, PAGE_KERNEL, 0);
	}
#endif

	empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!empty_zero_page)
		panic("zero page allocation failed.\n");
}

static void __init gateway_init(void)
{
	unsigned long linux_gateway_page_addr;
	/* FIXME: This is 'const' in order to trick the compiler
	   into not treating it as DP-relative data. */
	extern void * const linux_gateway_page;

	linux_gateway_page_addr = LINUX_GATEWAY_ADDR & PAGE_MASK;

	/*
	 * Setup Linux Gateway page.
	 *
	 * The Linux gateway page will reside in kernel space (on virtual
	 * page 0), so it doesn't need to be aliased into user space.
	 */
	map_pages(linux_gateway_page_addr, __pa(&linux_gateway_page),
		  PAGE_SIZE, PAGE_GATEWAY, 1);
}

static void __init parisc_bootmem_free(void)
{
	unsigned long zones_size[MAX_NR_ZONES] = { 0, };
	unsigned long holes_size[MAX_NR_ZONES] = { 0, };
	unsigned long mem_start_pfn = ~0UL, mem_end_pfn = 0, mem_size_pfn = 0;
	int i;

	for (i = 0; i < npmem_ranges; i++) {
		unsigned long start = pmem_ranges[i].start_pfn;
		unsigned long size = pmem_ranges[i].pages;
		unsigned long end = start + size;

		if (mem_start_pfn > start)
			mem_start_pfn = start;
		if (mem_end_pfn < end)
			mem_end_pfn = end;
		mem_size_pfn += size;
	}

	zones_size[0] = mem_end_pfn - mem_start_pfn;
	holes_size[0] = zones_size[0] - mem_size_pfn;

	free_area_init_node(0, zones_size, mem_start_pfn, holes_size);
}
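
/*
 * Worked example for parisc_bootmem_free(): with two ranges covering
 * pfns [0x0000, 0x1000) and [0x2000, 0x3000), the spanned size is
 * 0x3000 pages (zones_size[0]) of which 0x1000 pages are a hole
 * (holes_size[0]); all memory is handed to the single zone 0.
 */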

void __init paging_init(void)
{
	setup_bootmem();
	pagetable_init();
	gateway_init();
	flush_cache_all_local(); /* start with known state */
	flush_tlb_all_local(NULL);

	/*
	 * Mark all memblocks as present for sparsemem using
	 * memory_present() and then initialize sparsemem.
	 */
	memblocks_present();
	sparse_init();
	parisc_bootmem_free();
}

#ifdef CONFIG_PA20

/*
 * Currently, all PA20 chips have 18 bit protection IDs, which is the
 * limiting factor (space ids are 32 bits).
 */

#define NR_SPACE_IDS 262144

#else

/*
 * Currently we have a one-to-one relationship between space IDs and
 * protection IDs. Older parisc chips (PCXS, PCXT, PCXL, PCXL2) only
 * support 15 bit protection IDs, so that is the limiting factor.
 * PCXT' has 18 bit protection IDs, but only 16 bit spaceids, so it's
 * probably not worth the effort for a special case here.
 */

#define NR_SPACE_IDS 32768

#endif  /* !CONFIG_PA20 */

#define RECYCLE_THRESHOLD (NR_SPACE_IDS / 2)
#define SID_ARRAY_SIZE (NR_SPACE_IDS / (8 * sizeof(long)))
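
/*
 * Example: with CONFIG_PA20, NR_SPACE_IDS is 262144, so each bitmap
 * below is 262144 / 8 = 32 KB (4096 longs on a 64-bit build), and
 * recycling kicks in once 131072 space ids have been freed but not
 * yet flushed from the TLB.
 */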

static unsigned long space_id[SID_ARRAY_SIZE] = { 1 }; /* disallow space 0 */
static unsigned long dirty_space_id[SID_ARRAY_SIZE];
static unsigned long space_id_index;
static unsigned long free_space_ids = NR_SPACE_IDS - 1;
static unsigned long dirty_space_ids = 0;

static DEFINE_SPINLOCK(sid_lock);

unsigned long alloc_sid(void)
{
	unsigned long index;

	spin_lock(&sid_lock);

	if (free_space_ids == 0) {
		if (dirty_space_ids != 0) {
			spin_unlock(&sid_lock);
			flush_tlb_all(); /* flush_tlb_all() calls recycle_sids() */
			spin_lock(&sid_lock);
		}
		BUG_ON(free_space_ids == 0);
	}

	free_space_ids--;

	index = find_next_zero_bit(space_id, NR_SPACE_IDS, space_id_index);
	space_id[index >> SHIFT_PER_LONG] |= (1L << (index & (BITS_PER_LONG - 1)));
	space_id_index = index;

	spin_unlock(&sid_lock);

	return index << SPACEID_SHIFT;
}
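
/*
 * The value handed out is the bitmap index shifted left by
 * SPACEID_SHIFT (e.g. index 5 yields 5 << SPACEID_SHIFT); free_sid()
 * below reverses the shift to locate the bit again.
 */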

void free_sid(unsigned long spaceid)
{
	unsigned long index = spaceid >> SPACEID_SHIFT;
	unsigned long *dirty_space_offset;

	dirty_space_offset = dirty_space_id + (index >> SHIFT_PER_LONG);
	index &= (BITS_PER_LONG - 1);

	spin_lock(&sid_lock);

	BUG_ON(*dirty_space_offset & (1L << index)); /* attempt to free space id twice */

	*dirty_space_offset |= (1L << index);
	dirty_space_ids++;	/* count number of dirty space ids */

	spin_unlock(&sid_lock);
}
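
/*
 * Note that a freed space id is only marked dirty here; its bit in
 * space_id[] stays set, so it cannot be reallocated until
 * recycle_sids() clears it after a full TLB flush has purged any
 * stale translations tagged with that space id.
 */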

#ifdef CONFIG_SMP
static void get_dirty_sids(unsigned long *ndirtyptr, unsigned long *dirty_array)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	*ndirtyptr = dirty_space_ids;
	if (dirty_space_ids != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			dirty_array[i] = dirty_space_id[i];
			dirty_space_id[i] = 0;
		}
		dirty_space_ids = 0;
	}

	return;
}

static void recycle_sids(unsigned long ndirty, unsigned long *dirty_array)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	if (ndirty != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			space_id[i] ^= dirty_array[i];
		}

		free_space_ids += ndirty;
		space_id_index = 0;
	}
}

#else /* !CONFIG_SMP */

static void recycle_sids(void)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	if (dirty_space_ids != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			space_id[i] ^= dirty_space_id[i];
			dirty_space_id[i] = 0;
		}

		free_space_ids += dirty_space_ids;
		dirty_space_ids = 0;
		space_id_index = 0;
	}
}
#endif

/*
 * flush_tlb_all() calls recycle_sids(), since whenever the entire tlb is
 * purged, we can safely reuse the space ids that were released but
 * not flushed from the tlb.
 */

#ifdef CONFIG_SMP

static unsigned long recycle_ndirty;
static unsigned long recycle_dirty_array[SID_ARRAY_SIZE];
static unsigned int recycle_inuse;

void flush_tlb_all(void)
{
	int do_recycle;

	__inc_irq_stat(irq_tlb_count);
	do_recycle = 0;
	spin_lock(&sid_lock);
	if (dirty_space_ids > RECYCLE_THRESHOLD) {
		BUG_ON(recycle_inuse);  /* FIXME: Use a semaphore/wait queue here */
		get_dirty_sids(&recycle_ndirty, recycle_dirty_array);
		recycle_inuse++;
		do_recycle++;
	}
	spin_unlock(&sid_lock);
	on_each_cpu(flush_tlb_all_local, NULL, 1);
	if (do_recycle) {
		spin_lock(&sid_lock);
		recycle_sids(recycle_ndirty, recycle_dirty_array);
		recycle_inuse = 0;
		spin_unlock(&sid_lock);
	}
}
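
/*
 * Ordering note for the SMP path above: the dirty bitmap is
 * snapshotted under sid_lock before the cross-CPU flush, and the ids
 * are returned to the free pool only afterwards, so no CPU can be
 * handed a recycled space id while stale translations for it might
 * still sit in a TLB.
 */
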
#else /* !CONFIG_SMP */

void flush_tlb_all(void)
{
	__inc_irq_stat(irq_tlb_count);
	spin_lock(&sid_lock);
	flush_tlb_all_local(NULL);
	recycle_sids();
	spin_unlock(&sid_lock);
}
#endif