Lines matching refs: res (kernel/resource.c)
131 int res = seq_open(file, &resource_op); in ioports_open() local
132 if (!res) { in ioports_open()
136 return res; in ioports_open()
141 int res = seq_open(file, &resource_op); in iomem_open() local
142 if (!res) { in iomem_open()
146 return res; in iomem_open()
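
Both openers above follow the same seq_open() idiom. A minimal sketch of that pattern, with hypothetical my_seq_ops and my_root standing in for resource_op and the ioport/iomem root resources:

#include <linux/fs.h>
#include <linux/ioport.h>
#include <linux/seq_file.h>

static struct resource my_root;                /* hypothetical root resource */
extern const struct seq_operations my_seq_ops; /* assumed defined elsewhere, like resource_op */

static int my_open(struct inode *inode, struct file *file)
{
        int res = seq_open(file, &my_seq_ops);

        /* seq_open() returns 0 on success; only then is the root resource
         * stashed for the seq_file show callbacks to walk. */
        if (!res) {
                struct seq_file *m = file->private_data;

                m->private = &my_root;
        }
        return res;
}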
173 static void free_resource(struct resource *res) in free_resource() argument
175 if (!res) in free_resource()
178 if (!PageSlab(virt_to_head_page(res))) { in free_resource()
180 res->sibling = bootmem_resource_free; in free_resource()
181 bootmem_resource_free = res; in free_resource()
184 kfree(res); in free_resource()
190 struct resource *res = NULL; in alloc_resource() local
194 res = bootmem_resource_free; in alloc_resource()
195 bootmem_resource_free = res->sibling; in alloc_resource()
199 if (res) in alloc_resource()
200 memset(res, 0, sizeof(struct resource)); in alloc_resource()
202 res = kzalloc(sizeof(struct resource), flags); in alloc_resource()
204 return res; in alloc_resource()
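
alloc_resource() and free_resource() are private helpers: entries handed over from bootmem are recycled on a free list, everything else goes through kzalloc()/kfree(). Code outside this file normally supplies its own struct resource and inserts it with request_resource(); a hedged sketch with an illustrative MMIO window:

#include <linux/init.h>
#include <linux/ioport.h>

static struct resource my_regs = {
        .name  = "my-device-regs",      /* hypothetical device */
        .start = 0xfed40000,            /* illustrative physical range */
        .end   = 0xfed40fff,
        .flags = IORESOURCE_MEM,
};

static int __init my_claim_regs(void)
{
        /* Insert under the iomem root; fails if the range conflicts
         * with an existing sibling. */
        return request_resource(&iomem_resource, &my_regs);
}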
342 static int find_next_iomem_res(struct resource *res, char *name, in find_next_iomem_res() argument
349 BUG_ON(!res); in find_next_iomem_res()
351 start = res->start; in find_next_iomem_res()
352 end = res->end; in find_next_iomem_res()
361 if (p->flags != res->flags) in find_next_iomem_res()
377 if (res->start < p->start) in find_next_iomem_res()
378 res->start = p->start; in find_next_iomem_res()
379 if (res->end > p->end) in find_next_iomem_res()
380 res->end = p->end; in find_next_iomem_res()
398 struct resource res; in walk_iomem_res() local
402 res.start = start; in walk_iomem_res()
403 res.end = end; in walk_iomem_res()
404 res.flags = flags; in walk_iomem_res()
405 orig_end = res.end; in walk_iomem_res()
406 while ((res.start < res.end) && in walk_iomem_res()
407 (!find_next_iomem_res(&res, name, false))) { in walk_iomem_res()
408 ret = (*func)(res.start, res.end, arg); in walk_iomem_res()
411 res.start = res.end + 1; in walk_iomem_res()
412 res.end = orig_end; in walk_iomem_res()
427 struct resource res; in walk_system_ram_res() local
431 res.start = start; in walk_system_ram_res()
432 res.end = end; in walk_system_ram_res()
433 res.flags = IORESOURCE_MEM | IORESOURCE_BUSY; in walk_system_ram_res()
434 orig_end = res.end; in walk_system_ram_res()
435 while ((res.start < res.end) && in walk_system_ram_res()
436 (!find_next_iomem_res(&res, "System RAM", true))) { in walk_system_ram_res()
437 ret = (*func)(res.start, res.end, arg); in walk_system_ram_res()
440 res.start = res.end + 1; in walk_system_ram_res()
441 res.end = orig_end; in walk_system_ram_res()
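
A hedged sketch of how walk_system_ram_res() is driven, assuming the callback signature used here (physical start/end addresses as u64); a non-zero return from the callback stops the walk early. Function names are illustrative:

#include <linux/ioport.h>
#include <linux/kernel.h>

/* Accumulate the number of bytes of busy "System RAM" seen by the walk. */
static int my_count_ram(u64 start, u64 end, void *arg)
{
        u64 *bytes = arg;

        *bytes += end - start + 1;
        return 0;       /* keep walking */
}

static u64 my_total_ram(void)
{
        u64 bytes = 0;

        walk_system_ram_res(0, ULLONG_MAX, &bytes, my_count_ram);
        return bytes;
}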
456 struct resource res; in walk_system_ram_range() local
461 res.start = (u64) start_pfn << PAGE_SHIFT; in walk_system_ram_range()
462 res.end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1; in walk_system_ram_range()
463 res.flags = IORESOURCE_MEM | IORESOURCE_BUSY; in walk_system_ram_range()
464 orig_end = res.end; in walk_system_ram_range()
465 while ((res.start < res.end) && in walk_system_ram_range()
466 (find_next_iomem_res(&res, "System RAM", true) >= 0)) { in walk_system_ram_range()
467 pfn = (res.start + PAGE_SIZE - 1) >> PAGE_SHIFT; in walk_system_ram_range()
468 end_pfn = (res.end + 1) >> PAGE_SHIFT; in walk_system_ram_range()
473 res.start = res.end + 1; in walk_system_ram_range()
474 res.end = orig_end; in walk_system_ram_range()
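
walk_system_ram_range() is the page-frame variant: the range is given in pfns and the callback receives a starting pfn plus a page count instead of byte addresses. A hedged sketch with illustrative names:

#include <linux/ioport.h>

static int my_count_pages(unsigned long start_pfn, unsigned long nr_pages,
                          void *arg)
{
        unsigned long *total = arg;

        *total += nr_pages;
        return 0;       /* keep walking */
}

static unsigned long my_ram_pages_in_range(unsigned long start_pfn,
                                           unsigned long nr_pages)
{
        unsigned long total = 0;

        walk_system_ram_range(start_pfn, nr_pages, &total, my_count_pages);
        return total;
}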
554 static void resource_clip(struct resource *res, resource_size_t min, in resource_clip() argument
557 if (res->start < min) in resource_clip()
558 res->start = min; in resource_clip()
559 if (res->end > max) in resource_clip()
560 res->end = max; in resource_clip()
739 struct resource *res; in lookup_resource() local
742 for (res = root->child; res; res = res->sibling) { in lookup_resource()
743 if (res->start == start) in lookup_resource()
748 return res; in lookup_resource()
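
lookup_resource() only matches a direct child of the given root whose start address is exactly the one passed in, as the loop above shows. A hedged one-liner, using the legacy keyboard controller port as an illustrative address:

#include <linux/ioport.h>
#include <linux/types.h>

static bool my_port_is_claimed(void)
{
        /* NULL means no direct child of ioport_resource starts at 0x60. */
        return lookup_resource(&ioport_resource, 0x0060) != NULL;
}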
876 static int __adjust_resource(struct resource *res, resource_size_t start, in __adjust_resource() argument
879 struct resource *tmp, *parent = res->parent; in __adjust_resource()
889 if (res->sibling && (res->sibling->start <= end)) in __adjust_resource()
893 if (tmp != res) { in __adjust_resource()
894 while (tmp->sibling != res) in __adjust_resource()
901 for (tmp = res->child; tmp; tmp = tmp->sibling) in __adjust_resource()
905 res->start = start; in __adjust_resource()
906 res->end = end; in __adjust_resource()
923 int adjust_resource(struct resource *res, resource_size_t start, in adjust_resource() argument
929 result = __adjust_resource(res, start, size); in adjust_resource()
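
adjust_resource(), the locking wrapper around __adjust_resource(), resizes an already-inserted resource in place; note that the third argument is a size, not an end address. A hedged sketch that grows a region by a caller-supplied amount:

#include <linux/ioport.h>

static int my_grow_region(struct resource *res, resource_size_t extra)
{
        /* __adjust_resource() recomputes end = start + size - 1 and checks
         * that the new extent still fits the parent and its siblings. */
        return adjust_resource(res, res->start, resource_size(res) + extra);
}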
941 struct resource *res = alloc_resource(GFP_ATOMIC); in __reserve_region_with_split() local
944 if (!res) in __reserve_region_with_split()
947 res->name = name; in __reserve_region_with_split()
948 res->start = start; in __reserve_region_with_split()
949 res->end = end; in __reserve_region_with_split()
950 res->flags = IORESOURCE_BUSY; in __reserve_region_with_split()
954 conflict = __request_resource(parent, res); in __reserve_region_with_split()
958 res = next_res; in __reserve_region_with_split()
964 if (conflict->start <= res->start && in __reserve_region_with_split()
965 conflict->end >= res->end) { in __reserve_region_with_split()
966 free_resource(res); in __reserve_region_with_split()
972 if (conflict->start > res->start) { in __reserve_region_with_split()
973 end = res->end; in __reserve_region_with_split()
974 res->end = conflict->start - 1; in __reserve_region_with_split()
978 free_resource(res); in __reserve_region_with_split()
987 res->start = conflict->end + 1; in __reserve_region_with_split()
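
The public entry point for this code is reserve_region_with_split(): it marks a range busy even when parts of it already have owners, carving the reservation into pieces around existing children instead of failing outright. A hedged sketch with an illustrative range and name:

#include <linux/init.h>
#include <linux/ioport.h>

static void __init my_reserve_hole(void)
{
        reserve_region_with_split(&iomem_resource,
                                  0xfed00000, 0xfedfffff,   /* illustrative */
                                  "chipset-hole");
}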
1028 resource_size_t resource_alignment(struct resource *res) in resource_alignment() argument
1030 switch (res->flags & (IORESOURCE_SIZEALIGN | IORESOURCE_STARTALIGN)) { in resource_alignment()
1032 return resource_size(res); in resource_alignment()
1034 return res->start; in resource_alignment()
1066 struct resource *res = alloc_resource(GFP_KERNEL); in __request_region() local
1068 if (!res) in __request_region()
1071 res->name = name; in __request_region()
1072 res->start = start; in __request_region()
1073 res->end = start + n - 1; in __request_region()
1074 res->flags = resource_type(parent); in __request_region()
1075 res->flags |= IORESOURCE_BUSY | flags; in __request_region()
1082 conflict = __request_resource(parent, res); in __request_region()
1101 free_resource(res); in __request_region()
1102 res = NULL; in __request_region()
1106 return res; in __request_region()
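
Drivers normally reach __request_region() through the request_region()/request_mem_region() helpers. A hedged sketch of the usual probe-time pairing with ioremap(); the base address, size, and names are hypothetical:

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/ioport.h>

#define MYDEV_BASE 0xfe000000UL         /* illustrative MMIO window */
#define MYDEV_SIZE 0x1000UL

static void __iomem *mydev_regs;

static int mydev_probe_io(void)
{
        /* Claim the window first so two drivers cannot map it at once. */
        if (!request_mem_region(MYDEV_BASE, MYDEV_SIZE, "mydev"))
                return -EBUSY;

        mydev_regs = ioremap(MYDEV_BASE, MYDEV_SIZE);
        if (!mydev_regs) {
                release_mem_region(MYDEV_BASE, MYDEV_SIZE);
                return -ENOMEM;
        }
        return 0;
}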
1130 struct resource *res = *p; in __release_region() local
1132 if (!res) in __release_region()
1134 if (res->start <= start && res->end >= end) { in __release_region()
1135 if (!(res->flags & IORESOURCE_BUSY)) { in __release_region()
1136 p = &res->child; in __release_region()
1139 if (res->start != start || res->end != end) in __release_region()
1141 *p = res->sibling; in __release_region()
1143 if (res->flags & IORESOURCE_MUXED) in __release_region()
1145 free_resource(res); in __release_region()
1148 p = &res->sibling; in __release_region()
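
The matching teardown for the sketch above goes through release_mem_region(), which lands in __release_region(); for IORESOURCE_MUXED regions the release path also wakes any waiter muxed on the same range:

static void mydev_remove_io(void)
{
        iounmap(mydev_regs);
        release_mem_region(MYDEV_BASE, MYDEV_SIZE);
}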
1184 struct resource *res; in release_mem_region_adjustable() local
1199 while ((res = *p)) { in release_mem_region_adjustable()
1200 if (res->start >= end) in release_mem_region_adjustable()
1204 if (res->start > start || res->end < end) { in release_mem_region_adjustable()
1205 p = &res->sibling; in release_mem_region_adjustable()
1209 if (!(res->flags & IORESOURCE_MEM)) in release_mem_region_adjustable()
1212 if (!(res->flags & IORESOURCE_BUSY)) { in release_mem_region_adjustable()
1213 p = &res->child; in release_mem_region_adjustable()
1218 if (res->start == start && res->end == end) { in release_mem_region_adjustable()
1220 *p = res->sibling; in release_mem_region_adjustable()
1221 free_resource(res); in release_mem_region_adjustable()
1223 } else if (res->start == start && res->end != end) { in release_mem_region_adjustable()
1225 ret = __adjust_resource(res, end + 1, in release_mem_region_adjustable()
1226 res->end - end); in release_mem_region_adjustable()
1227 } else if (res->start != start && res->end == end) { in release_mem_region_adjustable()
1229 ret = __adjust_resource(res, res->start, in release_mem_region_adjustable()
1230 start - res->start); in release_mem_region_adjustable()
1237 new_res->name = res->name; in release_mem_region_adjustable()
1239 new_res->end = res->end; in release_mem_region_adjustable()
1240 new_res->flags = res->flags; in release_mem_region_adjustable()
1241 new_res->parent = res->parent; in release_mem_region_adjustable()
1242 new_res->sibling = res->sibling; in release_mem_region_adjustable()
1245 ret = __adjust_resource(res, res->start, in release_mem_region_adjustable()
1246 start - res->start); in release_mem_region_adjustable()
1249 res->sibling = new_res; in release_mem_region_adjustable()
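
release_mem_region_adjustable() exists for memory hot-remove, where the range being released may be only part of a busy "System RAM" entry: the code above shrinks the existing resource or splits it in two (via the preallocated new_res) rather than refusing. A hedged sketch of a caller, assuming the int-returning variant listed here:

#include <linux/ioport.h>

static int my_release_ram(resource_size_t start, resource_size_t size)
{
        /* Non-zero return means no matching busy region could be adjusted. */
        return release_mem_region_adjustable(&iomem_resource, start, size);
}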
1314 static int devm_resource_match(struct device *dev, void *res, void *data) in devm_resource_match() argument
1316 struct resource **ptr = res; in devm_resource_match()
1341 static void devm_region_release(struct device *dev, void *res) in devm_region_release() argument
1343 struct region_devres *this = res; in devm_region_release()
1348 static int devm_region_match(struct device *dev, void *res, void *match_data) in devm_region_match() argument
1350 struct region_devres *this = res, *match = match_data; in devm_region_match()
1361 struct resource *res; in __devm_request_region() local
1372 res = __request_region(parent, start, n, name, 0); in __devm_request_region()
1373 if (res) in __devm_request_region()
1378 return res; in __devm_request_region()
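
__devm_request_region() backs the devm_request_region()/devm_request_mem_region() helpers: the devres entry recorded here releases the region automatically when the device is unbound. A hedged sketch of a platform driver probe using it; the driver and resource index are illustrative:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static int mydev_devm_probe(struct platform_device *pdev)
{
        struct resource *r = platform_get_resource(pdev, IORESOURCE_MEM, 0);

        if (!r)
                return -ENODEV;
        /* No explicit release_mem_region(): devres undoes this on unbind. */
        if (!devm_request_mem_region(&pdev->dev, r->start, resource_size(r),
                                     dev_name(&pdev->dev)))
                return -EBUSY;
        return 0;
}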
1411 struct resource *res = reserve + x; in reserve_setup() local
1412 res->name = "reserved"; in reserve_setup()
1413 res->start = io_start; in reserve_setup()
1414 res->end = io_start + io_num - 1; in reserve_setup()
1415 res->flags = IORESOURCE_BUSY; in reserve_setup()
1416 res->child = NULL; in reserve_setup()
1417 if (request_resource(res->start >= 0x10000 ? &iomem_resource : &ioport_resource, res) == 0) in reserve_setup()
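
For context: reserve_setup() backs the reserve= early boot parameter; the 0x10000 test above routes low values into ioport_resource and anything higher into iomem_resource.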
1513 struct resource_entry *resource_list_create_entry(struct resource *res, in resource_list_create_entry() argument
1521 entry->res = res ? res : &entry->__res; in resource_list_create_entry()
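
resource_list_create_entry() belongs to the resource_list_* helpers declared in linux/resource_ext.h; when res is NULL the entry's embedded __res is used instead. A hedged sketch of adding a host-bridge-style window to such a list; names are illustrative:

#include <linux/errno.h>
#include <linux/list.h>
#include <linux/resource_ext.h>

static int my_add_window(struct list_head *resources, struct resource *win)
{
        /* extra_size of 0: no private data trails the entry. */
        struct resource_entry *entry = resource_list_create_entry(win, 0);

        if (!entry)
                return -ENOMEM;
        resource_list_add_tail(entry, resources);
        return 0;
}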