Lines Matching refs:cpu

51 		sysfs_cpus[num].cpu.hotpluggable = 1;  in arch_register_cpu()
54 return register_cpu(&sysfs_cpus[num].cpu, num); in arch_register_cpu()
60 unregister_cpu(&sysfs_cpus[num].cpu); in arch_unregister_cpu()
69 return register_cpu(&sysfs_cpus[num].cpu, num); in arch_register_cpu()
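
The first group of hits (lines 51-69) is the CPU registration glue: each logical CPU owns an embedded struct cpu inside a sysfs_cpus[] array, and arch_register_cpu()/arch_unregister_cpu() hand that embedded structure to the generic driver core (the register_cpu() call shows up twice, presumably once per build-configuration variant). A minimal sketch of how the matched lines fit together follows; the struct ia64_cpu wrapper and the omitted ACPI/CPEI checks that normally guard the hotpluggable flag are assumptions, not part of the search output.

#include <linux/cpu.h>

/* Assumed wrapper: the hits only show that sysfs_cpus[num].cpu is a struct cpu. */
struct ia64_cpu {
	struct cpu cpu;
};
static struct ia64_cpu sysfs_cpus[NR_CPUS];

int arch_register_cpu(int num)
{
	/* Line 51: flag the CPU as hot-removable before it is registered. */
	sysfs_cpus[num].cpu.hotpluggable = 1;

	/* Lines 54/69: create /sys/devices/system/cpu/cpu<num> via the core. */
	return register_cpu(&sysfs_cpus[num].cpu, num);
}

void arch_unregister_cpu(int num)
{
	/* Line 60: remove the sysfs entry again on hot-unplug. */
	unregister_cpu(&sysfs_cpus[num].cpu);
}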
142 static void cache_shared_cpu_map_setup(unsigned int cpu, in cache_shared_cpu_map_setup() argument
149 if (cpu_data(cpu)->threads_per_core <= 1 && in cache_shared_cpu_map_setup()
150 cpu_data(cpu)->cores_per_socket <= 1) { in cache_shared_cpu_map_setup()
151 cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map); in cache_shared_cpu_map_setup()
164 if (cpu_data(cpu)->socket_id == cpu_data(j)->socket_id in cache_shared_cpu_map_setup()
177 static void cache_shared_cpu_map_setup(unsigned int cpu, in cache_shared_cpu_map_setup() argument
180 cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map); in cache_shared_cpu_map_setup()
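
The two cache_shared_cpu_map_setup() definitions (lines 142 and 177) populate a per-cache-leaf cpumask. The larger variant (lines 149-164) short-circuits when the CPU has neither hardware threads nor multiple cores per socket and otherwise compares socket_id values to find sharing siblings; the trivial variant (line 180) just marks the calling CPU. Below is a hedged sketch of the shortcut path, assuming a deliberately minimal struct cache_info that carries only the two members visible in the hits (cpu_data() is the architecture's per-CPU info accessor from <asm/processor.h>).

#include <linux/cpumask.h>
#include <linux/kobject.h>

/* Assumed minimal layout; the real descriptor also carries size/level/type. */
struct cache_info {
	cpumask_t shared_cpu_map;
	struct kobject kobj;	/* each leaf has a kobject, see lines 389/408 */
};

static void cache_shared_cpu_map_setup(unsigned int cpu,
				       struct cache_info *this_leaf)
{
	/*
	 * Lines 149-151: with one thread per core and one core per socket no
	 * other logical CPU can share this cache, so only the caller's bit
	 * goes into the map and the sibling scan is skipped.
	 */
	if (cpu_data(cpu)->threads_per_core <= 1 &&
	    cpu_data(cpu)->cores_per_socket <= 1) {
		cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
		return;
	}

	/*
	 * Line 164: otherwise every possible CPU whose socket_id matches the
	 * caller's (and whose core/thread ids match the reported sharer) is
	 * added to shared_cpu_map; that loop is omitted from this sketch.
	 */
}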
299 static void cpu_cache_sysfs_exit(unsigned int cpu) in cpu_cache_sysfs_exit() argument
301 kfree(all_cpu_cache_info[cpu].cache_leaves); in cpu_cache_sysfs_exit()
302 all_cpu_cache_info[cpu].cache_leaves = NULL; in cpu_cache_sysfs_exit()
303 all_cpu_cache_info[cpu].num_cache_leaves = 0; in cpu_cache_sysfs_exit()
304 memset(&all_cpu_cache_info[cpu].kobj, 0, sizeof(struct kobject)); in cpu_cache_sysfs_exit()
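
cpu_cache_sysfs_exit() (lines 299-304) is shown almost completely by the hits: it frees the per-CPU leaf array and clears the bookkeeping so a later re-add starts from a clean slate. A reconstruction is below; the element layout of all_cpu_cache_info is inferred from the member accesses in the hits, and the exact field types are assumptions.

#include <linux/slab.h>
#include <linux/string.h>

/* Layout inferred from the accesses in the hits; exact types are assumed. */
struct cpu_cache_info {
	struct cache_info *cache_leaves;
	unsigned int num_cache_leaves;
	struct kobject kobj;
};
static struct cpu_cache_info all_cpu_cache_info[NR_CPUS];

static void cpu_cache_sysfs_exit(unsigned int cpu)
{
	kfree(all_cpu_cache_info[cpu].cache_leaves);	/* line 301 */
	all_cpu_cache_info[cpu].cache_leaves = NULL;	/* line 302 */
	all_cpu_cache_info[cpu].num_cache_leaves = 0;	/* line 303 */

	/* Line 304: zero the kobject so kobject_init_and_add() can reuse it. */
	memset(&all_cpu_cache_info[cpu].kobj, 0, sizeof(struct kobject));
}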
308 static int cpu_cache_sysfs_init(unsigned int cpu) in cpu_cache_sysfs_init() argument
337 cache_shared_cpu_map_setup(cpu, in cpu_cache_sysfs_init()
343 all_cpu_cache_info[cpu].cache_leaves = this_cache; in cpu_cache_sysfs_init()
344 all_cpu_cache_info[cpu].num_cache_leaves = num_cache_leaves; in cpu_cache_sysfs_init()
346 memset(&all_cpu_cache_info[cpu].kobj, 0, sizeof(struct kobject)); in cpu_cache_sysfs_init()
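
cpu_cache_sysfs_init() (lines 308-346) is the counterpart: it enumerates the cache hierarchy, wires up the shared-CPU maps, and publishes the result in all_cpu_cache_info. Only lines 337-346 appear in the hits, so the sketch below stands in for the enumeration step with a fixed single leaf and reuses the all_cpu_cache_info layout sketched above; treat the allocation and the leaf count as placeholders.

static int cpu_cache_sysfs_init(unsigned int cpu)
{
	struct cache_info *this_cache;
	unsigned int num_cache_leaves = 1;	/* placeholder for real discovery */
	unsigned int i;

	this_cache = kcalloc(num_cache_leaves, sizeof(*this_cache), GFP_KERNEL);
	if (!this_cache)
		return -ENOMEM;

	/* Line 337: record which CPUs share each discovered cache leaf. */
	for (i = 0; i < num_cache_leaves; i++)
		cache_shared_cpu_map_setup(cpu, &this_cache[i]);

	/* Lines 343-346: publish the leaves and start from a zeroed kobject. */
	all_cpu_cache_info[cpu].cache_leaves = this_cache;
	all_cpu_cache_info[cpu].num_cache_leaves = num_cache_leaves;
	memset(&all_cpu_cache_info[cpu].kobj, 0, sizeof(struct kobject));

	return 0;
}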
354 unsigned int cpu = sys_dev->id; in cache_add_dev() local
360 if (all_cpu_cache_info[cpu].kobj.parent) in cache_add_dev()
364 retval = set_cpus_allowed_ptr(current, cpumask_of(cpu)); in cache_add_dev()
368 retval = cpu_cache_sysfs_init(cpu); in cache_add_dev()
373 retval = kobject_init_and_add(&all_cpu_cache_info[cpu].kobj, in cache_add_dev()
377 cpu_cache_sysfs_exit(cpu); in cache_add_dev()
381 for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++) { in cache_add_dev()
382 this_object = LEAF_KOBJECT_PTR(cpu,i); in cache_add_dev()
385 &all_cpu_cache_info[cpu].kobj, in cache_add_dev()
389 kobject_put(&(LEAF_KOBJECT_PTR(cpu,j)->kobj)); in cache_add_dev()
391 kobject_put(&all_cpu_cache_info[cpu].kobj); in cache_add_dev()
392 cpu_cache_sysfs_exit(cpu); in cache_add_dev()
397 kobject_uevent(&all_cpu_cache_info[cpu].kobj, KOBJ_ADD); in cache_add_dev()
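
cache_add_dev() (lines 354-397) ties everything to sysfs: it pins the current task to the target CPU so the cache queries run locally, builds a per-CPU "cache" kobject under the CPU's device, adds one kobject per leaf, and unwinds with kobject_put()/cpu_cache_sysfs_exit() on failure. In the sketch below the kobj_type names and the LEAF_KOBJECT_PTR() definition are assumptions; the overall flow and the LEAF_KOBJECT_PTR() uses come from the hits. A full implementation would also restore the caller's original CPU affinity after the init step, which is omitted here.

#include <linux/device.h>
#include <linux/sched.h>

/* Assumed helper: map (cpu, leaf index) to the leaf's cache_info. */
#define LEAF_KOBJECT_PTR(x, y)	(&all_cpu_cache_info[x].cache_leaves[y])

/* Assumed kobj_type objects; the real ones supply the sysfs attribute hooks. */
static struct kobj_type cache_ktype_percpu_entry;
static struct kobj_type cache_ktype;

static int cache_add_dev(struct device *sys_dev)
{
	unsigned int cpu = sys_dev->id;		/* line 354 */
	unsigned int i, j;
	struct cache_info *this_object;
	int retval;

	/* Line 360: already registered for this CPU, nothing to do. */
	if (all_cpu_cache_info[cpu].kobj.parent)
		return 0;

	/* Line 364: run the cache queries on the CPU being added. */
	retval = set_cpus_allowed_ptr(current, cpumask_of(cpu));
	if (retval)
		return retval;

	retval = cpu_cache_sysfs_init(cpu);	/* line 368 */
	if (retval < 0)
		return retval;

	/* Line 373: per-CPU "cache" directory under the CPU device. */
	retval = kobject_init_and_add(&all_cpu_cache_info[cpu].kobj,
				      &cache_ktype_percpu_entry,
				      &sys_dev->kobj, "%s", "cache");
	if (retval < 0) {
		cpu_cache_sysfs_exit(cpu);	/* line 377 */
		return retval;
	}

	/* Lines 381-392: one "index<i>" kobject per leaf, unwound on error. */
	for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++) {
		this_object = LEAF_KOBJECT_PTR(cpu, i);
		retval = kobject_init_and_add(&this_object->kobj, &cache_ktype,
					      &all_cpu_cache_info[cpu].kobj,
					      "index%1u", i);
		if (retval) {
			for (j = 0; j < i; j++)
				kobject_put(&(LEAF_KOBJECT_PTR(cpu, j)->kobj));
			kobject_put(&all_cpu_cache_info[cpu].kobj);
			cpu_cache_sysfs_exit(cpu);
			return retval;
		}
	}

	/* Line 397: announce the new directory to udev. */
	kobject_uevent(&all_cpu_cache_info[cpu].kobj, KOBJ_ADD);
	return 0;
}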
404 unsigned int cpu = sys_dev->id; in cache_remove_dev() local
407 for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++) in cache_remove_dev()
408 kobject_put(&(LEAF_KOBJECT_PTR(cpu,i)->kobj)); in cache_remove_dev()
410 if (all_cpu_cache_info[cpu].kobj.parent) { in cache_remove_dev()
411 kobject_put(&all_cpu_cache_info[cpu].kobj); in cache_remove_dev()
412 memset(&all_cpu_cache_info[cpu].kobj, in cache_remove_dev()
417 cpu_cache_sysfs_exit(cpu); in cache_remove_dev()
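
cache_remove_dev() (lines 404-417) is the inverse and is almost fully visible in the hits: drop every leaf kobject, then the per-CPU "cache" kobject (only if it was actually added), and free the bookkeeping via cpu_cache_sysfs_exit(). Reconstructed from the matched lines:

static void cache_remove_dev(struct device *sys_dev)
{
	unsigned int cpu = sys_dev->id;		/* line 404 */
	unsigned int i;

	/* Lines 407-408: release each leaf kobject created by cache_add_dev(). */
	for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++)
		kobject_put(&(LEAF_KOBJECT_PTR(cpu, i)->kobj));

	/* Lines 410-412: tear down the per-CPU directory only if it exists. */
	if (all_cpu_cache_info[cpu].kobj.parent) {
		kobject_put(&all_cpu_cache_info[cpu].kobj);
		memset(&all_cpu_cache_info[cpu].kobj, 0,
		       sizeof(struct kobject));
	}

	cpu_cache_sysfs_exit(cpu);		/* line 417 */
}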
429 unsigned int cpu = (unsigned long)hcpu; in cache_cpu_callback() local
432 sys_dev = get_cpu_device(cpu); in cache_cpu_callback()
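
The final hits (lines 429-432) are from cache_cpu_callback(), the CPU hotplug notifier that drives the add/remove paths above. Only the cpu decode and the get_cpu_device() call appear in the search output; the switch on the hotplug action below is a plausible reconstruction of the usual notifier shape, not something the hits show.

#include <linux/notifier.h>

static int cache_cpu_callback(struct notifier_block *nfb,
			      unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;		/* line 429 */
	struct device *sys_dev = get_cpu_device(cpu);	/* line 432 */

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		cache_add_dev(sys_dev);			/* assumed dispatch */
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		cache_remove_dev(sys_dev);		/* assumed dispatch */
		break;
	}
	return NOTIFY_OK;
}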