/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 *
 * Based on i386 version, copyright (C) 2001 Rusty Russell.
 */

#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <asm/pgtable.h>
#include <asm/homecache.h>
#include <arch/opcode.h>

#ifdef MODULE_DEBUG
#define DEBUGP printk
#else
#define DEBUGP(fmt...)
#endif

/*
 * Allocate some address space in the range MEM_MODULE_START to
 * MEM_MODULE_END and populate it with memory.
 */
void *module_alloc(unsigned long size)
{
	struct page **pages;
	pgprot_t prot_rwx = __pgprot(_PAGE_KERNEL | _PAGE_KERNEL_EXEC);
	struct vm_struct *area;
	int i = 0;
	int npages;

	npages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
	pages = kmalloc(npages * sizeof(struct page *), GFP_KERNEL);
	if (pages == NULL)
		return NULL;
	for (; i < npages; ++i) {
		pages[i] = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
		if (!pages[i])
			goto error;
	}

	area = __get_vm_area(size, VM_ALLOC, MEM_MODULE_START, MEM_MODULE_END);
	if (!area)
		goto error;
	area->nr_pages = npages;
	area->pages = pages;

	if (map_vm_area(area, prot_rwx, pages)) {
		vunmap(area->addr);
		goto error;
	}

	return area->addr;

error:
	while (--i >= 0)
		__free_page(pages[i]);
	kfree(pages);
	return NULL;
}


/* Free memory returned from module_alloc */
void module_memfree(void *module_region)
{
	vfree(module_region);

	/* Globally flush the L1 icache. */
	flush_remote(0, HV_FLUSH_EVICT_L1I, cpu_online_mask,
		     0, 0, 0, NULL, NULL, 0);

	/*
	 * FIXME: Add module_arch_freeing_init to trim exception
	 * table entries.
	 */
}

#ifdef __tilegx__
/*
 * Validate that the high 16 bits of "value" are just the sign-extension of
 * the low 48 bits.
 */
static int validate_hw2_last(long value, struct module *me)
{
	/*
	 * Shifting up and back down sign-extends from bit 47, so any
	 * difference means the high 16 bits held real payload.
	 */
	if (((value << 16) >> 16) != value) {
		pr_warn("module %s: Out of range HW2_LAST value %#lx\n",
			me->name, value);
		return 0;
	}
	return 1;
}

/*
 * Validate that "value" isn't too big to hold in a JumpOff relocation.
 */
static int validate_jumpoff(long value)
{
	/* Determine size of jump offset. */
	int shift = __builtin_clzl(get_JumpOff_X1(create_JumpOff_X1(-1)));

	/* Check to see if it fits into the relocation slot. */
	long f = get_JumpOff_X1(create_JumpOff_X1(value));
	f = (f << shift) >> shift;	/* sign-extend the extracted field */

	return f == value;
}
#endif

int apply_relocate_add(Elf_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *me)
{
	unsigned int i;
	Elf_Rela *rel = (void *)sechdrs[relsec].sh_addr;
	Elf_Sym *sym;
	u64 *location;
	unsigned long value;

	DEBUGP("Applying relocate section %u to %u\n", relsec,
	       sechdrs[relsec].sh_info);
	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* This is where to make the change */
		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;
		/*
		 * This is the symbol it is referring to.
		 * Note that all undefined symbols have been resolved.
		 */
		sym = (Elf_Sym *)sechdrs[symindex].sh_addr
			+ ELF_R_SYM(rel[i].r_info);
		value = sym->st_value + rel[i].r_addend;

		switch (ELF_R_TYPE(rel[i].r_info)) {

#ifdef __LITTLE_ENDIAN
# define MUNGE(func) \
	(*location = ((*location & ~func(-1)) | func(value)))
#else
/*
 * Instructions are always little-endian, so when we read them as data,
 * we have to swap them around before and after modifying them.
 */
# define MUNGE(func) \
	(*location = swab64((swab64(*location) & ~func(-1)) | func(value)))
#endif

#ifndef __tilegx__
		case R_TILE_32:
			*(uint32_t *)location = value;
			break;
		case R_TILE_IMM16_X0_HA:
			value = (value + 0x8000) >> 16;
			/*FALLTHROUGH*/
		case R_TILE_IMM16_X0_LO:
			MUNGE(create_Imm16_X0);
			break;
		case R_TILE_IMM16_X1_HA:
			value = (value + 0x8000) >> 16;
			/*FALLTHROUGH*/
		case R_TILE_IMM16_X1_LO:
			MUNGE(create_Imm16_X1);
			break;
		case R_TILE_JOFFLONG_X1:
			value -= (unsigned long) location;  /* pc-relative */
			value = (long) value >> 3;     /* count by instrs */
			MUNGE(create_JOffLong_X1);
			break;
#else
		case R_TILEGX_64:
			*location = value;
			break;
		case R_TILEGX_IMM16_X0_HW2_LAST:
			if (!validate_hw2_last(value, me))
				return -ENOEXEC;
			value >>= 16;
			/*FALLTHROUGH*/
		case R_TILEGX_IMM16_X0_HW1:
			value >>= 16;
			/*FALLTHROUGH*/
		case R_TILEGX_IMM16_X0_HW0:
			MUNGE(create_Imm16_X0);
			break;
		case R_TILEGX_IMM16_X1_HW2_LAST:
			if (!validate_hw2_last(value, me))
				return -ENOEXEC;
			value >>= 16;
			/*FALLTHROUGH*/
		case R_TILEGX_IMM16_X1_HW1:
			value >>= 16;
			/*FALLTHROUGH*/
		case R_TILEGX_IMM16_X1_HW0:
			MUNGE(create_Imm16_X1);
			break;
		case R_TILEGX_JUMPOFF_X1:
			value -= (unsigned long) location;  /* pc-relative */
			value = (long) value >> 3;     /* count by instrs */
			if (!validate_jumpoff(value)) {
				pr_warn("module %s: Out of range jump to %#llx at %#llx (%p)\n",
					me->name,
					sym->st_value + rel[i].r_addend,
					rel[i].r_offset, location);
				return -ENOEXEC;
			}
			MUNGE(create_JumpOff_X1);
			break;
#endif

#undef MUNGE

		default:
			pr_err("module %s: Unknown relocation: %d\n",
			       me->name, (int) ELF_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}
	}
	return 0;
}
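
/*
 * Worked example (hypothetical value, for illustration only): how the
 * IMM16_X0 HW0/HW1/HW2_LAST cases above split a 48-bit tilegx address
 * across three 16-bit instruction immediates via the shared fall-through:
 *
 *   value = 0x123456789abc   (bits 63:48 are zero, matching the
 *                             sign-extension of bit 47, so
 *                             validate_hw2_last() accepts it)
 *
 *   R_TILEGX_IMM16_X0_HW2_LAST: value >>= 16 twice, MUNGE -> 0x1234
 *   R_TILEGX_IMM16_X0_HW1:      value >>= 16 once,  MUNGE -> 0x5678
 *   R_TILEGX_IMM16_X0_HW0:      no shift,           MUNGE -> 0x9abc
 *
 * In each case MUNGE(create_Imm16_X0) clears the immediate field of the
 * instruction bundle (the bits set by create_Imm16_X0(-1)) and ORs in
 * the low 16 bits of the shifted value.
 */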