/*
 * SRAM allocator for Blackfin on-chip memory
 *
 * Copyright 2004-2009 Analog Devices Inc.
 *
 * Licensed under the GPL-2 or later.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/miscdevice.h>
#include <linux/ioport.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/rtc.h>
#include <linux/slab.h>
#include <asm/blackfin.h>
#include <asm/mem_map.h>
#include "blackfin_sram.h"

/* the data structure for L1 scratchpad and DATA SRAM */
struct sram_piece {
	void *paddr;
	int size;
	pid_t pid;
	struct sram_piece *next;
};

static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1sram_lock);
static DEFINE_PER_CPU(struct sram_piece, free_l1_ssram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_ssram_head);

#if L1_DATA_A_LENGTH != 0
static DEFINE_PER_CPU(struct sram_piece, free_l1_data_A_sram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_data_A_sram_head);
#endif

#if L1_DATA_B_LENGTH != 0
static DEFINE_PER_CPU(struct sram_piece, free_l1_data_B_sram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_data_B_sram_head);
#endif

#if L1_DATA_A_LENGTH || L1_DATA_B_LENGTH
static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1_data_sram_lock);
#endif

#if L1_CODE_LENGTH != 0
static DEFINE_PER_CPU_SHARED_ALIGNED(spinlock_t, l1_inst_sram_lock);
static DEFINE_PER_CPU(struct sram_piece, free_l1_inst_sram_head);
static DEFINE_PER_CPU(struct sram_piece, used_l1_inst_sram_head);
#endif

#if L2_LENGTH != 0
static spinlock_t l2_sram_lock ____cacheline_aligned_in_smp;
static struct sram_piece free_l2_sram_head, used_l2_sram_head;
#endif

static struct kmem_cache *sram_piece_cache;

/* L1 Scratchpad SRAM initialization function */
static void __init l1sram_init(void)
{
	unsigned int cpu;
	unsigned long reserve;

#ifdef CONFIG_SMP
	reserve = 0;
#else
	reserve = sizeof(struct l1_scratch_task_info);
#endif

	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
		per_cpu(free_l1_ssram_head, cpu).next =
			kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
		if (!per_cpu(free_l1_ssram_head, cpu).next) {
			printk(KERN_ERR "Failed to initialize Scratchpad data SRAM\n");
			return;
		}

		per_cpu(free_l1_ssram_head, cpu).next->paddr =
			(void *)get_l1_scratch_start_cpu(cpu) + reserve;
		per_cpu(free_l1_ssram_head, cpu).next->size =
			L1_SCRATCH_LENGTH - reserve;
		per_cpu(free_l1_ssram_head, cpu).next->pid = 0;
		per_cpu(free_l1_ssram_head, cpu).next->next = NULL;

		per_cpu(used_l1_ssram_head, cpu).next = NULL;

		/* initialize the lock protecting this CPU's scratchpad lists */
		spin_lock_init(&per_cpu(l1sram_lock, cpu));

		printk(KERN_INFO "Blackfin Scratchpad data SRAM: %d KB\n",
			L1_SCRATCH_LENGTH >> 10);
	}
}
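
/*
 * Illustration only -- not from the original source; the addresses and
 * sizes below are made up.  On a part with a 4 KiB scratchpad at
 * 0xffb00000 and a non-SMP reserve of 8 bytes, each CPU's lists after
 * l1sram_init() look like:
 *
 *	free_l1_ssram_head.next -> { paddr = 0xffb00008, size = 0xff8, pid = 0 }
 *	used_l1_ssram_head.next == NULL
 */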

static void __init l1_data_sram_init(void)
{
#if L1_DATA_A_LENGTH != 0 || L1_DATA_B_LENGTH != 0
	unsigned int cpu;
#endif
#if L1_DATA_A_LENGTH != 0
	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
		per_cpu(free_l1_data_A_sram_head, cpu).next =
			kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
		if (!per_cpu(free_l1_data_A_sram_head, cpu).next) {
			printk(KERN_ERR "Failed to initialize L1 Data A SRAM\n");
			return;
		}

		per_cpu(free_l1_data_A_sram_head, cpu).next->paddr =
			(void *)get_l1_data_a_start_cpu(cpu) + (_ebss_l1 - _sdata_l1);
		per_cpu(free_l1_data_A_sram_head, cpu).next->size =
			L1_DATA_A_LENGTH - (_ebss_l1 - _sdata_l1);
		per_cpu(free_l1_data_A_sram_head, cpu).next->pid = 0;
		per_cpu(free_l1_data_A_sram_head, cpu).next->next = NULL;

		per_cpu(used_l1_data_A_sram_head, cpu).next = NULL;

		printk(KERN_INFO "Blackfin L1 Data A SRAM: %d KB (%d KB free)\n",
			L1_DATA_A_LENGTH >> 10,
			per_cpu(free_l1_data_A_sram_head, cpu).next->size >> 10);
	}
#endif
#if L1_DATA_B_LENGTH != 0
	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
		per_cpu(free_l1_data_B_sram_head, cpu).next =
			kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
		if (!per_cpu(free_l1_data_B_sram_head, cpu).next) {
			printk(KERN_ERR "Failed to initialize L1 Data B SRAM\n");
			return;
		}

		per_cpu(free_l1_data_B_sram_head, cpu).next->paddr =
			(void *)get_l1_data_b_start_cpu(cpu) + (_ebss_b_l1 - _sdata_b_l1);
		per_cpu(free_l1_data_B_sram_head, cpu).next->size =
			L1_DATA_B_LENGTH - (_ebss_b_l1 - _sdata_b_l1);
		per_cpu(free_l1_data_B_sram_head, cpu).next->pid = 0;
		per_cpu(free_l1_data_B_sram_head, cpu).next->next = NULL;

		per_cpu(used_l1_data_B_sram_head, cpu).next = NULL;

		printk(KERN_INFO "Blackfin L1 Data B SRAM: %d KB (%d KB free)\n",
			L1_DATA_B_LENGTH >> 10,
			per_cpu(free_l1_data_B_sram_head, cpu).next->size >> 10);
	}
#endif

#if L1_DATA_A_LENGTH != 0 || L1_DATA_B_LENGTH != 0
	/* one lock per CPU protects both the Data A and Data B lists */
	for (cpu = 0; cpu < num_possible_cpus(); ++cpu)
		spin_lock_init(&per_cpu(l1_data_sram_lock, cpu));
#endif
}

static void __init l1_inst_sram_init(void)
{
#if L1_CODE_LENGTH != 0
	unsigned int cpu;
	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
		per_cpu(free_l1_inst_sram_head, cpu).next =
			kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
		if (!per_cpu(free_l1_inst_sram_head, cpu).next) {
			printk(KERN_ERR "Failed to initialize L1 Instruction SRAM\n");
			return;
		}

		per_cpu(free_l1_inst_sram_head, cpu).next->paddr =
			(void *)get_l1_code_start_cpu(cpu) + (_etext_l1 - _stext_l1);
		per_cpu(free_l1_inst_sram_head, cpu).next->size =
			L1_CODE_LENGTH - (_etext_l1 - _stext_l1);
		per_cpu(free_l1_inst_sram_head, cpu).next->pid = 0;
		per_cpu(free_l1_inst_sram_head, cpu).next->next = NULL;

		per_cpu(used_l1_inst_sram_head, cpu).next = NULL;

		printk(KERN_INFO "Blackfin L1 Instruction SRAM: %d KB (%d KB free)\n",
			L1_CODE_LENGTH >> 10,
			per_cpu(free_l1_inst_sram_head, cpu).next->size >> 10);

		/* initialize the lock protecting this CPU's instruction lists */
		spin_lock_init(&per_cpu(l1_inst_sram_lock, cpu));
	}
#endif
}

#ifdef __ADSPBF60x__
static irqreturn_t l2_ecc_err(int irq, void *dev_id)
{
	int status;

	printk(KERN_ERR "L2 ECC error\n");
	status = bfin_read32(L2CTL0_STAT);
	if (status & 0x1)
		printk(KERN_ERR "Core channel error type:0x%x, addr:0x%x\n",
			bfin_read32(L2CTL0_ET0), bfin_read32(L2CTL0_EADDR0));
	if (status & 0x2)
		printk(KERN_ERR "System channel error type:0x%x, addr:0x%x\n",
			bfin_read32(L2CTL0_ET1), bfin_read32(L2CTL0_EADDR1));

	status = status >> 8;
	if (status)
		printk(KERN_ERR "L2 Bank%d error, addr:0x%x\n",
			status, bfin_read32(L2CTL0_ERRADDR0 + status));

	panic("L2 ECC error");
	return IRQ_HANDLED;
}
#endif

static void __init l2_sram_init(void)
{
#if L2_LENGTH != 0

#ifdef __ADSPBF60x__
	int ret;

	ret = request_irq(IRQ_L2CTL0_ECC_ERR, l2_ecc_err, 0, "l2-ecc-err",
			NULL);
	if (unlikely(ret < 0)) {
		printk(KERN_ERR "Failed to request L2 ECC error interrupt\n");
		return;
	}
#endif

	free_l2_sram_head.next =
		kmem_cache_alloc(sram_piece_cache, GFP_KERNEL);
	if (!free_l2_sram_head.next) {
		printk(KERN_ERR "Failed to initialize L2 SRAM\n");
		return;
	}

	free_l2_sram_head.next->paddr =
		(void *)L2_START + (_ebss_l2 - _stext_l2);
	free_l2_sram_head.next->size =
		L2_LENGTH - (_ebss_l2 - _stext_l2);
	free_l2_sram_head.next->pid = 0;
	free_l2_sram_head.next->next = NULL;

	used_l2_sram_head.next = NULL;

	printk(KERN_INFO "Blackfin L2 SRAM: %d KB (%d KB free)\n",
		L2_LENGTH >> 10,
		free_l2_sram_head.next->size >> 10);

	/* initialize the lock protecting the L2 lists */
	spin_lock_init(&l2_sram_lock);
#endif
}

static int __init bfin_sram_init(void)
{
	sram_piece_cache = kmem_cache_create("sram_piece_cache",
				sizeof(struct sram_piece),
				0, SLAB_PANIC, NULL);

	l1sram_init();
	l1_data_sram_init();
	l1_inst_sram_init();
	l2_sram_init();

	return 0;
}
pure_initcall(bfin_sram_init);

/* SRAM allocate function */
static void *_sram_alloc(size_t size, struct sram_piece *pfree_head,
		struct sram_piece *pused_head)
{
	struct sram_piece *pslot, *plast, *pavail;

	if (size == 0 || !pfree_head || !pused_head)
		return NULL;

	/* Align the size to a 4-byte boundary */
	size = (size + 3) & ~3;

	pslot = pfree_head->next;
	plast = pfree_head;

	/* first fit: search for the first free piece that is large enough */
	while (pslot != NULL && size > pslot->size) {
		plast = pslot;
		pslot = pslot->next;
	}

	if (!pslot)
		return NULL;

	if (pslot->size == size) {
		/* exact fit: move the whole piece to the used list */
		plast->next = pslot->next;
		pavail = pslot;
	} else {
		/* use atomic so our L1 allocator can be used atomically */
		pavail = kmem_cache_alloc(sram_piece_cache, GFP_ATOMIC);

		if (!pavail)
			return NULL;

		/* split the piece: carve the allocation off the front */
		pavail->paddr = pslot->paddr;
		pavail->size = size;
		pslot->paddr += size;
		pslot->size -= size;
	}

	pavail->pid = current->pid;

	pslot = pused_head->next;
	plast = pused_head;

	/* insert the new piece into the used list, sorted by address */
	while (pslot != NULL && pavail->paddr < pslot->paddr) {
		plast = pslot;
		pslot = pslot->next;
	}

	pavail->next = pslot;
	plast->next = pavail;

	return pavail->paddr;
}
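
/*
 * Worked example for _sram_alloc(), with hypothetical addresses (not
 * from this file): given a free list holding a single piece
 * { paddr = 0xff800000, size = 0x1000 }, a request for 0x101 bytes is
 * rounded up to 0x104, the piece is split, the caller gets 0xff800000,
 * and the remaining free piece becomes { paddr = 0xff800104,
 * size = 0xefc }.  The used list is kept sorted by descending address,
 * while the free list stays sorted ascending so _sram_free() can merge
 * adjacent pieces.
 */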

/* Allocate the largest available block. */
static void *_sram_alloc_max(struct sram_piece *pfree_head,
				struct sram_piece *pused_head,
				unsigned long *psize)
{
	struct sram_piece *pslot, *pmax;

	if (!pfree_head || !pused_head)
		return NULL;

	pmax = pslot = pfree_head->next;

	/* search for the largest free piece */
	while (pslot != NULL) {
		if (pslot->size > pmax->size)
			pmax = pslot;
		pslot = pslot->next;
	}

	if (!pmax)
		return NULL;

	*psize = pmax->size;

	return _sram_alloc(*psize, pfree_head, pused_head);
}

/* SRAM free function */
static int _sram_free(const void *addr,
			struct sram_piece *pfree_head,
			struct sram_piece *pused_head)
{
	struct sram_piece *pslot, *plast, *pavail;

	if (!pfree_head || !pused_head)
		return -1;

	/* search the used list for the piece that starts at addr */
	pslot = pused_head->next;
	plast = pused_head;

	while (pslot != NULL && pslot->paddr != addr) {
		plast = pslot;
		pslot = pslot->next;
	}

	if (!pslot)
		return -1;

	plast->next = pslot->next;
	pavail = pslot;
	pavail->pid = 0;

	/* insert the freed piece back into the free list, sorted by address */
	pslot = pfree_head->next;
	plast = pfree_head;

	while (pslot != NULL && addr > pslot->paddr) {
		plast = pslot;
		pslot = pslot->next;
	}

	/* coalesce with the preceding piece if they are adjacent */
	if (plast != pfree_head && plast->paddr + plast->size == pavail->paddr) {
		plast->size += pavail->size;
		kmem_cache_free(sram_piece_cache, pavail);
	} else {
		pavail->next = plast->next;
		plast->next = pavail;
		plast = pavail;
	}

	/* coalesce with the following piece if they are adjacent */
	if (pslot && plast->paddr + plast->size == pslot->paddr) {
		plast->size += pslot->size;
		plast->next = pslot->next;
		kmem_cache_free(sram_piece_cache, pslot);
	}

	return 0;
}
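
/*
 * Coalescing example for _sram_free(), again with made-up addresses:
 * if the free list holds { 0xff800000, 0x100 } and { 0xff800200, 0x100 },
 * freeing a used piece { 0xff800100, 0x100 } merges all three into one
 * free piece { 0xff800000, 0x300 } and returns the two now-redundant
 * sram_piece descriptors to sram_piece_cache.
 */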

int sram_free(const void *addr)
{
#if L1_CODE_LENGTH != 0
	if (addr >= (void *)get_l1_code_start()
		 && addr < (void *)(get_l1_code_start() + L1_CODE_LENGTH))
		return l1_inst_sram_free(addr);
	else
#endif
#if L1_DATA_A_LENGTH != 0
	if (addr >= (void *)get_l1_data_a_start()
		 && addr < (void *)(get_l1_data_a_start() + L1_DATA_A_LENGTH))
		return l1_data_A_sram_free(addr);
	else
#endif
#if L1_DATA_B_LENGTH != 0
	if (addr >= (void *)get_l1_data_b_start()
		 && addr < (void *)(get_l1_data_b_start() + L1_DATA_B_LENGTH))
		return l1_data_B_sram_free(addr);
	else
#endif
#if L2_LENGTH != 0
	if (addr >= (void *)L2_START
		 && addr < (void *)(L2_START + L2_LENGTH))
		return l2_sram_free(addr);
	else
#endif
	return -1;
}
EXPORT_SYMBOL(sram_free);

void *l1_data_A_sram_alloc(size_t size)
{
#if L1_DATA_A_LENGTH != 0
	unsigned long flags;
	void *addr;
	unsigned int cpu;

	cpu = smp_processor_id();
	/* the per-CPU spinlock also disables interrupts */
	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

	addr = _sram_alloc(size, &per_cpu(free_l1_data_A_sram_head, cpu),
			&per_cpu(used_l1_data_A_sram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);

	pr_debug("Allocated address in l1_data_A_sram_alloc is 0x%lx+0x%lx\n",
		 (unsigned long)addr, size);

	return addr;
#else
	return NULL;
#endif
}
EXPORT_SYMBOL(l1_data_A_sram_alloc);

int l1_data_A_sram_free(const void *addr)
{
#if L1_DATA_A_LENGTH != 0
	unsigned long flags;
	int ret;
	unsigned int cpu;

	cpu = smp_processor_id();
	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

	ret = _sram_free(addr, &per_cpu(free_l1_data_A_sram_head, cpu),
			&per_cpu(used_l1_data_A_sram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);

	return ret;
#else
	return -1;
#endif
}
EXPORT_SYMBOL(l1_data_A_sram_free);

void *l1_data_B_sram_alloc(size_t size)
{
#if L1_DATA_B_LENGTH != 0
	unsigned long flags;
	void *addr;
	unsigned int cpu;

	cpu = smp_processor_id();
	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

	addr = _sram_alloc(size, &per_cpu(free_l1_data_B_sram_head, cpu),
			&per_cpu(used_l1_data_B_sram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);

	pr_debug("Allocated address in l1_data_B_sram_alloc is 0x%lx+0x%lx\n",
		 (unsigned long)addr, size);

	return addr;
#else
	return NULL;
#endif
}
EXPORT_SYMBOL(l1_data_B_sram_alloc);

int l1_data_B_sram_free(const void *addr)
{
#if L1_DATA_B_LENGTH != 0
	unsigned long flags;
	int ret;
	unsigned int cpu;

	cpu = smp_processor_id();
	spin_lock_irqsave(&per_cpu(l1_data_sram_lock, cpu), flags);

	ret = _sram_free(addr, &per_cpu(free_l1_data_B_sram_head, cpu),
			&per_cpu(used_l1_data_B_sram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1_data_sram_lock, cpu), flags);

	return ret;
#else
	return -1;
#endif
}
EXPORT_SYMBOL(l1_data_B_sram_free);

void *l1_data_sram_alloc(size_t size)
{
	/* try bank A first, then fall back to bank B */
	void *addr = l1_data_A_sram_alloc(size);

	if (!addr)
		addr = l1_data_B_sram_alloc(size);

	return addr;
}
EXPORT_SYMBOL(l1_data_sram_alloc);

void *l1_data_sram_zalloc(size_t size)
{
	void *addr = l1_data_sram_alloc(size);

	if (addr)
		memset(addr, 0x00, size);

	return addr;
}
EXPORT_SYMBOL(l1_data_sram_zalloc);

int l1_data_sram_free(const void *addr)
{
	int ret;
	ret = l1_data_A_sram_free(addr);
	if (ret == -1)
		ret = l1_data_B_sram_free(addr);
	return ret;
}
EXPORT_SYMBOL(l1_data_sram_free);
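
/*
 * Minimal usage sketch (illustrative only, not part of the original
 * file).  A driver might stage a descriptor in L1 data SRAM like this;
 * "struct dma_desc" is a hypothetical caller-defined type:
 *
 *	struct dma_desc *d = l1_data_sram_zalloc(sizeof(*d));
 *	if (!d)
 *		return -ENOMEM;
 *	...
 *	l1_data_sram_free(d);
 */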

void *l1_inst_sram_alloc(size_t size)
{
#if L1_CODE_LENGTH != 0
	unsigned long flags;
	void *addr;
	unsigned int cpu;

	cpu = smp_processor_id();
	spin_lock_irqsave(&per_cpu(l1_inst_sram_lock, cpu), flags);

	addr = _sram_alloc(size, &per_cpu(free_l1_inst_sram_head, cpu),
			&per_cpu(used_l1_inst_sram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1_inst_sram_lock, cpu), flags);

	pr_debug("Allocated address in l1_inst_sram_alloc is 0x%lx+0x%lx\n",
		 (unsigned long)addr, size);

	return addr;
#else
	return NULL;
#endif
}
EXPORT_SYMBOL(l1_inst_sram_alloc);

int l1_inst_sram_free(const void *addr)
{
#if L1_CODE_LENGTH != 0
	unsigned long flags;
	int ret;
	unsigned int cpu;

	cpu = smp_processor_id();
	spin_lock_irqsave(&per_cpu(l1_inst_sram_lock, cpu), flags);

	ret = _sram_free(addr, &per_cpu(free_l1_inst_sram_head, cpu),
			&per_cpu(used_l1_inst_sram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1_inst_sram_lock, cpu), flags);

	return ret;
#else
	return -1;
#endif
}
EXPORT_SYMBOL(l1_inst_sram_free);

/* L1 Scratchpad memory allocate function */
void *l1sram_alloc(size_t size)
{
	unsigned long flags;
	void *addr;
	unsigned int cpu;

	cpu = smp_processor_id();
	spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);

	addr = _sram_alloc(size, &per_cpu(free_l1_ssram_head, cpu),
			&per_cpu(used_l1_ssram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);

	return addr;
}

/* Allocate the largest available block of L1 Scratchpad memory */
void *l1sram_alloc_max(size_t *psize)
{
	unsigned long flags;
	void *addr;
	unsigned int cpu;

	cpu = smp_processor_id();
	spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);

	addr = _sram_alloc_max(&per_cpu(free_l1_ssram_head, cpu),
			&per_cpu(used_l1_ssram_head, cpu), psize);

	spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);

	return addr;
}

/* L1 Scratchpad memory free function */
int l1sram_free(const void *addr)
{
	unsigned long flags;
	int ret;
	unsigned int cpu;

	cpu = smp_processor_id();
	spin_lock_irqsave(&per_cpu(l1sram_lock, cpu), flags);

	ret = _sram_free(addr, &per_cpu(free_l1_ssram_head, cpu),
			&per_cpu(used_l1_ssram_head, cpu));

	spin_unlock_irqrestore(&per_cpu(l1sram_lock, cpu), flags);

	return ret;
}

void *l2_sram_alloc(size_t size)
{
#if L2_LENGTH != 0
	unsigned long flags;
	void *addr;

	spin_lock_irqsave(&l2_sram_lock, flags);

	addr = _sram_alloc(size, &free_l2_sram_head,
			&used_l2_sram_head);

	spin_unlock_irqrestore(&l2_sram_lock, flags);

	pr_debug("Allocated address in l2_sram_alloc is 0x%lx+0x%lx\n",
		 (unsigned long)addr, size);

	return addr;
#else
	return NULL;
#endif
}
EXPORT_SYMBOL(l2_sram_alloc);

void *l2_sram_zalloc(size_t size)
{
	void *addr = l2_sram_alloc(size);

	if (addr)
		memset(addr, 0x00, size);

	return addr;
}
EXPORT_SYMBOL(l2_sram_zalloc);

int l2_sram_free(const void *addr)
{
#if L2_LENGTH != 0
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&l2_sram_lock, flags);

	ret = _sram_free(addr, &free_l2_sram_head,
			&used_l2_sram_head);

	spin_unlock_irqrestore(&l2_sram_lock, flags);

	return ret;
#else
	return -1;
#endif
}
EXPORT_SYMBOL(l2_sram_free);

int sram_free_with_lsl(const void *addr)
{
	struct sram_list_struct *lsl, **tmp;
	struct mm_struct *mm = current->mm;
	int ret = -1;

	for (tmp = &mm->context.sram_list; *tmp; tmp = &(*tmp)->next)
		if ((*tmp)->addr == addr) {
			lsl = *tmp;
			ret = sram_free(addr);
			*tmp = lsl->next;
			kfree(lsl);
			break;
		}

	return ret;
}
EXPORT_SYMBOL(sram_free_with_lsl);

/* Allocate memory and keep it in an L1 SRAM List (lsl) so that the
 * resources are tracked.  These are designed for userspace so that when
 * a process exits, we can safely reap its resources.
 */
void *sram_alloc_with_lsl(size_t size, unsigned long flags)
{
	void *addr = NULL;
	struct sram_list_struct *lsl = NULL;
	struct mm_struct *mm = current->mm;

	lsl = kzalloc(sizeof(struct sram_list_struct), GFP_KERNEL);
	if (!lsl)
		return NULL;

	/* try the requested regions in order of preference */
	if (flags & L1_INST_SRAM)
		addr = l1_inst_sram_alloc(size);

	if (addr == NULL && (flags & L1_DATA_A_SRAM))
		addr = l1_data_A_sram_alloc(size);

	if (addr == NULL && (flags & L1_DATA_B_SRAM))
		addr = l1_data_B_sram_alloc(size);

	if (addr == NULL && (flags & L2_SRAM))
		addr = l2_sram_alloc(size);

	if (addr == NULL) {
		kfree(lsl);
		return NULL;
	}
	lsl->addr = addr;
	lsl->length = size;
	lsl->next = mm->context.sram_list;
	mm->context.sram_list = lsl;
	return addr;
}
EXPORT_SYMBOL(sram_alloc_with_lsl);
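
#if 0	/* Usage sketch -- hypothetical example, not part of the original file */
static int sram_lsl_example(void)
{
	/* prefer L1 data bank A, fall back to L2 */
	void *p = sram_alloc_with_lsl(256, L1_DATA_A_SRAM | L2_SRAM);

	if (!p)
		return -ENOMEM;

	/* ... use p ... */

	/* returns 0 on success, -1 if p was not on this mm's sram_list */
	return sram_free_with_lsl(p);
}
#endif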

#ifdef CONFIG_PROC_FS
/* Once we get a real allocator, we'll throw all of this away.
 * Until then, we need some sort of visibility into the L1 alloc.
 */
/* Need to keep line of output the same.  Currently, that is 44 bytes
 * (including newline).
 */
static int _sram_proc_show(struct seq_file *m, const char *desc,
		struct sram_piece *pfree_head,
		struct sram_piece *pused_head)
{
	struct sram_piece *pslot;

	if (!pfree_head || !pused_head)
		return -1;

	seq_printf(m, "--- SRAM %-14s Size   PID State     \n", desc);

	/* walk the used list, then the free list */
	pslot = pused_head->next;

	while (pslot != NULL) {
		seq_printf(m, "%p-%p %10i %5i %-10s\n",
			pslot->paddr, pslot->paddr + pslot->size,
			pslot->size, pslot->pid, "ALLOCATED");

		pslot = pslot->next;
	}

	pslot = pfree_head->next;

	while (pslot != NULL) {
		seq_printf(m, "%p-%p %10i %5i %-10s\n",
			pslot->paddr, pslot->paddr + pslot->size,
			pslot->size, pslot->pid, "FREE");

		pslot = pslot->next;
	}

	return 0;
}
static int sram_proc_show(struct seq_file *m, void *v)
{
	unsigned int cpu;

	for (cpu = 0; cpu < num_possible_cpus(); ++cpu) {
		if (_sram_proc_show(m, "Scratchpad",
			&per_cpu(free_l1_ssram_head, cpu),
			&per_cpu(used_l1_ssram_head, cpu)))
			goto not_done;
#if L1_DATA_A_LENGTH != 0
		if (_sram_proc_show(m, "L1 Data A",
			&per_cpu(free_l1_data_A_sram_head, cpu),
			&per_cpu(used_l1_data_A_sram_head, cpu)))
			goto not_done;
#endif
#if L1_DATA_B_LENGTH != 0
		if (_sram_proc_show(m, "L1 Data B",
			&per_cpu(free_l1_data_B_sram_head, cpu),
			&per_cpu(used_l1_data_B_sram_head, cpu)))
			goto not_done;
#endif
#if L1_CODE_LENGTH != 0
		if (_sram_proc_show(m, "L1 Instruction",
			&per_cpu(free_l1_inst_sram_head, cpu),
			&per_cpu(used_l1_inst_sram_head, cpu)))
			goto not_done;
#endif
	}
#if L2_LENGTH != 0
	if (_sram_proc_show(m, "L2", &free_l2_sram_head, &used_l2_sram_head))
		goto not_done;
#endif
 not_done:
	return 0;
}

static int sram_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, sram_proc_show, NULL);
}

static const struct file_operations sram_proc_ops = {
	.open = sram_proc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init sram_proc_init(void)
{
	struct proc_dir_entry *ptr;

	ptr = proc_create("sram", S_IRUGO, NULL, &sram_proc_ops);
	if (!ptr) {
		printk(KERN_WARNING "unable to create /proc/sram\n");
		return -1;
	}
	return 0;
}
late_initcall(sram_proc_init);
#endif