/linux-4.1.27/security/tomoyo/
Makefile
    3: targets += builtin-policy.h
    6: $(objtree)/scripts/basic/bin2c <$(firstword $(wildcard $(obj)/policy/$(1).conf $(srctree)/$(src)/policy/$(1).conf.default) /dev/null); \
   12: $(obj)/builtin-policy.h: $(wildcard $(obj)/policy/*.conf $(src)/policy/*.conf.default) FORCE
   13: $(call if_changed,policy)
   15: $(obj)/common.o: $(obj)/builtin-policy.h
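The rule above pipes each policy/*.conf file through scripts/basic/bin2c, so builtin-policy.h ends up holding the policies as C string initializers that common.c can #include (see its line 2741 below). A minimal sketch of what the generated header could look like; the variable names and string contents here are illustrative assumptions, not the actual generated output:

/* Hypothetical shape of the generated builtin-policy.h: bin2c renders
 * each .conf file as a C string literal for the built-in loader. */
static char tomoyo_builtin_profile[] __initdata =
	"PROFILE_VERSION=20110903\n";
static char tomoyo_builtin_exception_policy[] __initdata =
	"initialize_domain /sbin/init from any\n";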
load_policy.c
   12: * Path to the policy loader. (default = CONFIG_SECURITY_TOMOYO_POLICY_LOADER)
   17: * tomoyo_loader_setup - Set policy loader.
   19: * @str: Program to use as a policy loader (e.g. /sbin/tomoyo-init ).
   71: * tomoyo_load_policy - Run external policy loader to load policy.
   78: * /sbin/tomoyo-init reads policy files in /etc/tomoyo/ directory and
   98: printk(KERN_INFO "Calling %s to load policy. Please wait.\n",
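tomoyo_loader_setup() at line 17 is the handler behind a kernel command-line option that overrides the default loader path. A sketch of the wiring, reconstructed around the matches above under the assumption that it follows the standard __setup() pattern:

/* Sketch only; not the complete file. */
static const char *tomoyo_loader;

static int __init tomoyo_loader_setup(char *str)
{
	tomoyo_loader = str;	/* remember the user-supplied loader path */
	return 0;
}
__setup("TOMOYO_loader=", tomoyo_loader_setup);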
common.c
    171: /* Permit policy management by non-root user? */
    866: * tomoyo_write_manager - Write manager policy.
    886: * tomoyo_read_manager - Read manager policy.
    911: * tomoyo_manager - Check whether the current process is a policy manager.
    913: * Returns true if the current process is permitted to modify policy
   1089: * tomoyo_write_domain2 - Write domain policy.
   1138: * tomoyo_write_domain - Write domain policy.
   1549: * tomoyo_read_domain2 - Read domain policy.
   1572: * tomoyo_read_domain - Read domain policy.
   1695: * tomoyo_write_exception - Write exception policy.
   1849: * tomoyo_read_exception - Read exception policy.
   1991: * violated the policy in enforcing mode, TOMOYO_RETRY_REQUEST if the
   1992: * supervisor decided to retry the access request which violated the policy in
   2120: * Waits for access requests which violated policy in enforcing mode.
   2133: * tomoyo_read_query - Read access requests which violated policy in enforcing mode.
   2251: [TOMOYO_MEMORY_POLICY] = "policy:",
   2264: * @index: Index for policy type.
   2525: /* Call the policy handler. */
   2538: * tomoyo_parse_policy - Parse a policy line.
   2598: /* Read a line and dispatch it to the policy handler. */
   2719: "policy must be initialized.\n");
   2729: * tomoyo_load_builtin_policy - Load built-in policy.
   2736: * This include file is manually created and contains built-in policy
   2741: #include "builtin-policy.h"
memory.c
   30: /* Memory currently used by policy/audit log/query. */
   32: /* Memory quota for "policy"/"audit log"/"query". */
/linux-4.1.27/drivers/cpufreq/
cpufreq_userspace.c
    26: * @policy: pointer to policy struct where freq is being set
    31: static int cpufreq_set(struct cpufreq_policy *policy, unsigned int freq)
    35: pr_debug("cpufreq_set for cpu %u, freq %u kHz\n", policy->cpu, freq);
    38: if (!per_cpu(cpu_is_managed, policy->cpu))
    41: ret = __cpufreq_driver_target(policy, freq, CPUFREQ_RELATION_L);
    47: static ssize_t show_speed(struct cpufreq_policy *policy, char *buf)
    49: return sprintf(buf, "%u\n", policy->cur);
    52: static int cpufreq_governor_userspace(struct cpufreq_policy *policy,
    55: unsigned int cpu = policy->cpu;
    60: BUG_ON(!policy->cur);
    77: cpu, policy->min, policy->max,
    78: policy->cur);
    80: if (policy->max < policy->cur)
    81: __cpufreq_driver_target(policy, policy->max,
    83: else if (policy->min > policy->cur)
    84: __cpufreq_driver_target(policy, policy->min,
   115: MODULE_DESCRIPTION("CPUfreq policy governor 'userspace'");
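The userspace governor above illustrates the 4.1-era governor contract: a single ->governor() callback multiplexed over CPUFREQ_GOV_* events (dispatched by __cpufreq_governor() in cpufreq.c below). A minimal sketch of that contract with a hypothetical governor ("pinned", all names invented here) that just pins each policy to its minimum frequency:

#include <linux/cpufreq.h>
#include <linux/init.h>
#include <linux/module.h>

/* Hypothetical governor: on START/LIMITS, clamp to policy->min. */
static int cpufreq_governor_pinned(struct cpufreq_policy *policy,
				   unsigned int event)
{
	switch (event) {
	case CPUFREQ_GOV_START:
	case CPUFREQ_GOV_LIMITS:
		__cpufreq_driver_target(policy, policy->min,
					CPUFREQ_RELATION_L);
		break;
	default:	/* STOP and POLICY_INIT/EXIT need no work here */
		break;
	}
	return 0;
}

static struct cpufreq_governor cpufreq_gov_pinned = {
	.name		= "pinned",
	.governor	= cpufreq_governor_pinned,
	.owner		= THIS_MODULE,
};

static int __init cpufreq_gov_pinned_init(void)
{
	return cpufreq_register_governor(&cpufreq_gov_pinned);
}

static void __exit cpufreq_gov_pinned_exit(void)
{
	cpufreq_unregister_governor(&cpufreq_gov_pinned);
}

module_init(cpufreq_gov_pinned_init);
module_exit(cpufreq_gov_pinned_exit);
MODULE_LICENSE("GPL");

This is the same skeleton cpufreq_performance.c and cpufreq_powersave.c (below) use, only with min in place of max/min respectively.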
unicore2-cpufreq.c
   28: static int ucv2_verify_speed(struct cpufreq_policy *policy)
   30: if (policy->cpu)
   33: cpufreq_verify_within_cpu_limits(policy);
   37: static int ucv2_target(struct cpufreq_policy *policy,
   44: freqs.old = policy->cur;
   47: cpufreq_freq_transition_begin(policy, &freqs);
   48: ret = clk_set_rate(policy->clk, target_freq * 1000);
   49: cpufreq_freq_transition_end(policy, &freqs, ret);
   54: static int __init ucv2_cpu_init(struct cpufreq_policy *policy)
   56: if (policy->cpu != 0)
   59: policy->min = policy->cpuinfo.min_freq = 250000;
   60: policy->max = policy->cpuinfo.max_freq = 1000000;
   61: policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
   62: policy->clk = clk_get(NULL, "MAIN_CLK");
   63: return PTR_ERR_OR_ZERO(policy->clk);
cpufreq.c
     74: static int __cpufreq_governor(struct cpufreq_policy *policy,
     76: static unsigned int __cpufreq_get(struct cpufreq_policy *policy);
     80: * Two notifier lists: the "policy" list is involved in the
     81: * validation process for a new CPU frequency policy; the
    115: struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
    118: return &policy->kobj;
    164: * - policy->cpus with all possible CPUs
    166: int cpufreq_generic_init(struct cpufreq_policy *policy,
    172: ret = cpufreq_table_validate_and_show(policy, table);
    178: policy->cpuinfo.transition_latency = transition_latency;
    184: cpumask_setall(policy->cpus);
    192: struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
    194: if (!policy || IS_ERR(policy->clk)) {
    196: __func__, policy ? "clk" : "policy", cpu);
    200: return clk_get_rate(policy->clk) / 1000;
    212: struct cpufreq_policy *policy = NULL;
    226: policy = per_cpu(cpufreq_cpu_data, cpu);
    227: if (policy)
    228: kobject_get(&policy->kobj);
    233: if (!policy)
    236: return policy;
    240: void cpufreq_cpu_put(struct cpufreq_policy *policy)
    242: kobject_put(&policy->kobj);
    283: static void __cpufreq_notify_transition(struct cpufreq_policy *policy,
    303: if ((policy) && (policy->cpu == freqs->cpu) &&
    304: (policy->cur) && (policy->cur != freqs->old)) {
    306: freqs->old, policy->cur);
    307: freqs->old = policy->cur;
    322: if (likely(policy) && likely(policy->cpu == freqs->cpu))
    323: policy->cur = freqs->new;
    336: static void cpufreq_notify_transition(struct cpufreq_policy *policy,
    339: for_each_cpu(freqs->cpu, policy->cpus)
    340: __cpufreq_notify_transition(policy, freqs, state);
    344: static void cpufreq_notify_post_transition(struct cpufreq_policy *policy,
    347: cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
    352: cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
    353: cpufreq_notify_transition(policy, freqs, CPUFREQ_POSTCHANGE);
    356: void cpufreq_freq_transition_begin(struct cpufreq_policy *policy,
    369: && current == policy->transition_task);
    372: wait_event(policy->transition_wait, !policy->transition_ongoing);
    374: spin_lock(&policy->transition_lock);
    376: if (unlikely(policy->transition_ongoing)) {
    377: spin_unlock(&policy->transition_lock);
    381: policy->transition_ongoing = true;
    382: policy->transition_task = current;
    384: spin_unlock(&policy->transition_lock);
    386: cpufreq_notify_transition(policy, freqs, CPUFREQ_PRECHANGE);
    390: void cpufreq_freq_transition_end(struct cpufreq_policy *policy,
    393: if (unlikely(WARN_ON(!policy->transition_ongoing)))
    396: cpufreq_notify_post_transition(policy, freqs, transition_failed);
    398: policy->transition_ongoing = false;
    399: policy->transition_task = NULL;
    401: wake_up(&policy->transition_wait);
    451: static int cpufreq_parse_governor(char *str_governor, unsigned int *policy,
    461: *policy = CPUFREQ_POLICY_PERFORMANCE;
    465: *policy = CPUFREQ_POLICY_POWERSAVE;
    501: * Write out information from cpufreq_driver->policy[cpu]; object must be
    507: (struct cpufreq_policy *policy, char *buf) \
    509: return sprintf(buf, "%u\n", policy->object); \
    518: static ssize_t show_scaling_cur_freq(struct cpufreq_policy *policy, char *buf)
    523: ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
    525: ret = sprintf(buf, "%u\n", policy->cur);
    529: static int cpufreq_set_policy(struct cpufreq_policy *policy,
    537: (struct cpufreq_policy *policy, const char *buf, size_t count) \
    542: ret = cpufreq_get_policy(&new_policy, policy->cpu); \
    551: ret = cpufreq_set_policy(policy, &new_policy); \
    553: policy->user_policy.object = temp; \
    564: static ssize_t show_cpuinfo_cur_freq(struct cpufreq_policy *policy,
    567: unsigned int cur_freq = __cpufreq_get(policy);
    574: * show_scaling_governor - show the current policy for the specified CPU
    576: static ssize_t show_scaling_governor(struct cpufreq_policy *policy, char *buf)
    578: if (policy->policy == CPUFREQ_POLICY_POWERSAVE)
    580: else if (policy->policy == CPUFREQ_POLICY_PERFORMANCE)
    582: else if (policy->governor)
    584: policy->governor->name);
    589: * store_scaling_governor - store policy for the specified CPU
    591: static ssize_t store_scaling_governor(struct cpufreq_policy *policy,
    598: ret = cpufreq_get_policy(&new_policy, policy->cpu);
    606: if (cpufreq_parse_governor(str_governor, &new_policy.policy,
    610: ret = cpufreq_set_policy(policy, &new_policy);
    612: policy->user_policy.policy = policy->policy;
    613: policy->user_policy.governor = policy->governor;
    624: static ssize_t show_scaling_driver(struct cpufreq_policy *policy, char *buf)
    632: static ssize_t show_scaling_available_governors(struct cpufreq_policy *policy,
    675: static ssize_t show_related_cpus(struct cpufreq_policy *policy, char *buf)
    677: return cpufreq_show_cpus(policy->related_cpus, buf);
    683: static ssize_t show_affected_cpus(struct cpufreq_policy *policy, char *buf)
    685: return cpufreq_show_cpus(policy->cpus, buf);
    688: static ssize_t store_scaling_setspeed(struct cpufreq_policy *policy,
    694: if (!policy->governor || !policy->governor->store_setspeed)
    701: policy->governor->store_setspeed(policy, freq);
    706: static ssize_t show_scaling_setspeed(struct cpufreq_policy *policy, char *buf)
    708: if (!policy->governor || !policy->governor->show_setspeed)
    711: return policy->governor->show_setspeed(policy, buf);
    717: static ssize_t show_bios_limit(struct cpufreq_policy *policy, char *buf)
    722: ret = cpufreq_driver->bios_limit(policy->cpu, &limit);
    726: return sprintf(buf, "%u\n", policy->cpuinfo.max_freq);
    764: struct cpufreq_policy *policy = to_policy(kobj);
    771: down_read(&policy->rwsem);
    774: ret = fattr->show(policy, buf);
    778: up_read(&policy->rwsem);
    787: struct cpufreq_policy *policy = to_policy(kobj);
    793: if (!cpu_online(policy->cpu))
    799: down_write(&policy->rwsem);
    802: ret = fattr->store(policy, buf, count);
    806: up_write(&policy->rwsem);
    817: struct cpufreq_policy *policy = to_policy(kobj);
    819: complete(&policy->kobj_unregister);
    877: static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
    882: for_each_cpu(j, policy->cpus) {
    885: if (j == policy->cpu)
    890: ret = sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
    898: static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
    907: ret = sysfs_create_file(&policy->kobj, &((*drv_attr)->attr));
    913: ret = sysfs_create_file(&policy->kobj, &cpuinfo_cur_freq.attr);
    918: ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
    923: ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
    928: return cpufreq_add_dev_symlink(policy);
    931: static void cpufreq_init_policy(struct cpufreq_policy *policy)
    937: memcpy(&new_policy, policy, sizeof(*policy));
    940: gov = find_governor(per_cpu(cpufreq_cpu_governor, policy->cpu));
    943: policy->governor->name, policy->cpu);
    949: /* Use the default policy if it's valid. */
    951: cpufreq_parse_governor(gov->name, &new_policy.policy, NULL);
    953: /* set default policy */
    954: ret = cpufreq_set_policy(policy, &new_policy);
    956: pr_debug("setting policy failed\n");
    958: cpufreq_driver->exit(policy);
    962: static int cpufreq_add_policy_cpu(struct cpufreq_policy *policy,
    969: ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
    976: down_write(&policy->rwsem);
    980: cpumask_set_cpu(cpu, policy->cpus);
    981: per_cpu(cpufreq_cpu_data, cpu) = policy;
    984: up_write(&policy->rwsem);
    987: ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
    989: ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
    997: return sysfs_create_link(&dev->kobj, &policy->kobj, "cpufreq");
   1002: struct cpufreq_policy *policy;
   1007: policy = per_cpu(cpufreq_cpu_data_fallback, cpu);
   1011: if (policy)
   1012: policy->governor = NULL;
   1014: return policy;
   1019: struct cpufreq_policy *policy;
   1021: policy = kzalloc(sizeof(*policy), GFP_KERNEL);
   1022: if (!policy)
   1025: if (!alloc_cpumask_var(&policy->cpus, GFP_KERNEL))
   1028: if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
   1031: INIT_LIST_HEAD(&policy->policy_list);
   1032: init_rwsem(&policy->rwsem);
   1033: spin_lock_init(&policy->transition_lock);
   1034: init_waitqueue_head(&policy->transition_wait);
   1035: init_completion(&policy->kobj_unregister);
   1036: INIT_WORK(&policy->update, handle_update);
   1038: return policy;
   1041: free_cpumask_var(policy->cpus);
   1043: kfree(policy);
   1048: static void cpufreq_policy_put_kobj(struct cpufreq_policy *policy)
   1054: CPUFREQ_REMOVE_POLICY, policy);
   1056: down_read(&policy->rwsem);
   1057: kobj = &policy->kobj;
   1058: cmp = &policy->kobj_unregister;
   1059: up_read(&policy->rwsem);
   1072: static void cpufreq_policy_free(struct cpufreq_policy *policy)
   1074: free_cpumask_var(policy->related_cpus);
   1075: free_cpumask_var(policy->cpus);
   1076: kfree(policy);
   1079: static int update_policy_cpu(struct cpufreq_policy *policy, unsigned int cpu,
   1084: if (WARN_ON(cpu == policy->cpu))
   1087: /* Move kobject to the new policy->cpu */
   1088: ret = kobject_move(&policy->kobj, &cpu_dev->kobj);
   1094: down_write(&policy->rwsem);
   1095: policy->cpu = cpu;
   1096: up_write(&policy->rwsem);
   1105: struct cpufreq_policy *policy;
   1116: policy = cpufreq_cpu_get_raw(cpu);
   1117: if (unlikely(policy))
   1125: for_each_policy(policy) {
   1126: if (cpumask_test_cpu(cpu, policy->related_cpus)) {
   1128: ret = cpufreq_add_policy_cpu(policy, cpu, dev);
   1136: * Restore the saved policy when doing light-weight init and fall back
   1139: policy = recover_policy ? cpufreq_policy_restore(cpu) : NULL;
   1140: if (!policy) {
   1142: policy = cpufreq_policy_alloc();
   1143: if (!policy)
   1148: * In the resume path, since we restore a saved policy, the assignment
   1149: * to policy->cpu is like an update of the existing policy, rather than
   1153: if (recover_policy && cpu != policy->cpu)
   1154: WARN_ON(update_policy_cpu(policy, cpu, dev));
   1156: policy->cpu = cpu;
   1158: cpumask_copy(policy->cpus, cpumask_of(cpu));
   1163: ret = cpufreq_driver->init(policy);
   1169: down_write(&policy->rwsem);
   1171: /* related cpus should at least have policy->cpus */
   1172: cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
   1178: cpumask_and(policy->cpus, policy->cpus, cpu_online_mask);
   1181: policy->user_policy.min = policy->min;
   1182: policy->user_policy.max = policy->max;
   1185: ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq,
   1188: pr_err("%s: failed to init policy->kobj: %d\n",
   1195: for_each_cpu(j, policy->cpus)
   1196: per_cpu(cpufreq_cpu_data, j) = policy;
   1200: policy->cur = cpufreq_driver->get(policy->cpu);
   1201: if (!policy->cur) {
   1217: * for the next freq which is >= policy->cur ('cur' must be set by now,
   1221: * We are passing target-freq as "policy->cur - 1" otherwise
   1222: * __cpufreq_driver_target() would simply fail, as policy->cur will be
   1228: ret = cpufreq_frequency_table_get_index(policy, policy->cur);
   1232: __func__, policy->cpu, policy->cur);
   1233: ret = __cpufreq_driver_target(policy, policy->cur - 1,
   1243: __func__, policy->cpu, policy->cur);
   1248: CPUFREQ_START, policy);
   1251: ret = cpufreq_add_dev_interface(policy, dev);
   1255: CPUFREQ_CREATE_POLICY, policy);
   1259: list_add(&policy->policy_list, &cpufreq_policy_list);
   1262: cpufreq_init_policy(policy);
   1265: policy->user_policy.policy = policy->policy;
   1266: policy->user_policy.governor = policy->governor;
   1268: up_write(&policy->rwsem);
   1270: kobject_uevent(&policy->kobj, KOBJ_ADD);
   1274: /* Callback for handling stuff after policy is ready */
   1276: cpufreq_driver->ready(policy);
   1285: for_each_cpu(j, policy->cpus)
   1290: kobject_put(&policy->kobj);
   1291: wait_for_completion(&policy->kobj_unregister);
   1294: up_write(&policy->rwsem);
   1297: cpufreq_driver->exit(policy);
   1302: cpufreq_policy_put_kobj(policy);
   1304: cpufreq_policy_free(policy);
   1332: struct cpufreq_policy *policy;
   1338: policy = per_cpu(cpufreq_cpu_data, cpu);
   1340: /* Save the policy somewhere when doing a light-weight tear-down */
   1342: per_cpu(cpufreq_cpu_data_fallback, cpu) = policy;
   1346: if (!policy) {
   1352: ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
   1359: policy->governor->name, CPUFREQ_NAME_LEN);
   1362: down_read(&policy->rwsem);
   1363: cpus = cpumask_weight(policy->cpus);
   1364: up_read(&policy->rwsem);
   1366: if (cpu != policy->cpu) {
   1370: int new_cpu = cpumask_any_but(policy->cpus, cpu);
   1374: ret = update_policy_cpu(policy, new_cpu, cpu_dev);
   1376: if (sysfs_create_link(&cpu_dev->kobj, &policy->kobj,
   1384: pr_debug("%s: policy Kobject moved to cpu: %d from: %d\n",
   1387: cpufreq_driver->stop_cpu(policy);
   1399: struct cpufreq_policy *policy;
   1402: policy = per_cpu(cpufreq_cpu_data, cpu);
   1406: if (!policy) {
   1411: down_write(&policy->rwsem);
   1412: cpus = cpumask_weight(policy->cpus);
   1415: cpumask_clear_cpu(cpu, policy->cpus);
   1416: up_write(&policy->rwsem);
   1418: /* If cpu is last user of policy, free policy */
   1421: ret = __cpufreq_governor(policy,
   1431: cpufreq_policy_put_kobj(policy);
   1439: cpufreq_driver->exit(policy);
   1441: /* Remove policy from list of active policies */
   1443: list_del(&policy->policy_list);
   1447: cpufreq_policy_free(policy);
   1449: ret = __cpufreq_governor(policy, CPUFREQ_GOV_START);
   1451: ret = __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
   1485: struct cpufreq_policy *policy =
   1487: unsigned int cpu = policy->cpu;
   1495: * @policy: policy managing CPUs
   1501: static void cpufreq_out_of_sync(struct cpufreq_policy *policy,
   1507: policy->cur, new_freq);
   1509: freqs.old = policy->cur;
   1512: cpufreq_freq_transition_begin(policy, &freqs);
   1513: cpufreq_freq_transition_end(policy, &freqs, 0);
   1517: * cpufreq_quick_get - get the CPU frequency (in kHz) from policy->cur
   1525: struct cpufreq_policy *policy;
   1531: policy = cpufreq_cpu_get(cpu);
   1532: if (policy) {
   1533: ret_freq = policy->cur;
   1534: cpufreq_cpu_put(policy);
   1549: struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
   1552: if (policy) {
   1553: ret_freq = policy->max;
   1554: cpufreq_cpu_put(policy);
   1561: static unsigned int __cpufreq_get(struct cpufreq_policy *policy)
   1568: ret_freq = cpufreq_driver->get(policy->cpu);
   1570: if (ret_freq && policy->cur &&
   1574: if (unlikely(ret_freq != policy->cur)) {
   1575: cpufreq_out_of_sync(policy, ret_freq);
   1576: schedule_work(&policy->update);
   1591: struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
   1594: if (policy) {
   1595: down_read(&policy->rwsem);
   1596: ret_freq = __cpufreq_get(policy);
   1597: up_read(&policy->rwsem);
   1599: cpufreq_cpu_put(policy);
   1617: int cpufreq_generic_suspend(struct cpufreq_policy *policy)
   1621: if (!policy->suspend_freq) {
   1627: policy->suspend_freq);
   1629: ret = __cpufreq_driver_target(policy, policy->suspend_freq,
   1633: __func__, policy->suspend_freq, ret);
   1649: struct cpufreq_policy *policy;
   1659: for_each_policy(policy) {
   1660: if (__cpufreq_governor(policy, CPUFREQ_GOV_STOP))
   1661: pr_err("%s: Failed to stop governor for policy: %p\n",
   1662: __func__, policy);
   1664: && cpufreq_driver->suspend(policy))
   1666: policy);
   1681: struct cpufreq_policy *policy;
   1693: for_each_policy(policy) {
   1694: if (cpufreq_driver->resume && cpufreq_driver->resume(policy))
   1696: policy);
   1697: else if (__cpufreq_governor(policy, CPUFREQ_GOV_START)
   1698: || __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS))
   1699: pr_err("%s: Failed to start governor for policy: %p\n",
   1700: __func__, policy);
   1708: policy = cpufreq_cpu_get_raw(cpumask_first(cpu_online_mask));
   1709: if (WARN_ON(!policy))
   1712: schedule_work(&policy->update);
   1757: * changes in cpufreq policy.
   1828: static int __target_intermediate(struct cpufreq_policy *policy,
   1833: freqs->new = cpufreq_driver->get_intermediate(policy, index);
   1840: __func__, policy->cpu, freqs->old, freqs->new);
   1842: cpufreq_freq_transition_begin(policy, freqs);
   1843: ret = cpufreq_driver->target_intermediate(policy, index);
   1844: cpufreq_freq_transition_end(policy, freqs, ret);
   1853: static int __target_index(struct cpufreq_policy *policy,
   1856: struct cpufreq_freqs freqs = {.old = policy->cur, .flags = 0};
   1865: retval = __target_intermediate(policy, &freqs, index);
   1877: __func__, policy->cpu, freqs.old, freqs.new);
   1879: cpufreq_freq_transition_begin(policy, &freqs);
   1882: retval = cpufreq_driver->target_index(policy, index);
   1888: cpufreq_freq_transition_end(policy, &freqs, retval);
   1898: freqs.new = policy->restore_freq;
   1899: cpufreq_freq_transition_begin(policy, &freqs);
   1900: cpufreq_freq_transition_end(policy, &freqs, 0);
   1907: int __cpufreq_driver_target(struct cpufreq_policy *policy,
   1918: if (target_freq > policy->max)
   1919: target_freq = policy->max;
   1920: if (target_freq < policy->min)
   1921: target_freq = policy->min;
   1924: policy->cpu, target_freq, relation, old_target_freq);
   1932: if (target_freq == policy->cur)
   1936: policy->restore_freq = policy->cur;
   1939: retval = cpufreq_driver->target(policy, target_freq, relation);
   1944: freq_table = cpufreq_frequency_get_table(policy->cpu);
   1950: retval = cpufreq_frequency_table_target(policy, freq_table,
   1957: if (freq_table[index].frequency == policy->cur) {
   1962: retval = __target_index(policy, freq_table, index);
   1970: int cpufreq_driver_target(struct cpufreq_policy *policy,
   1976: down_write(&policy->rwsem);
   1978: ret = __cpufreq_driver_target(policy, target_freq, relation);
   1980: up_write(&policy->rwsem);
   1986: static int __cpufreq_governor(struct cpufreq_policy *policy,
   2008: if (!policy->governor)
   2011: if (policy->governor->max_transition_latency &&
   2012: policy->cpuinfo.transition_latency >
   2013: policy->governor->max_transition_latency) {
   2018: policy->governor->name, gov->name);
   2019: policy->governor = gov;
   2024: if (!try_module_get(policy->governor->owner))
   2028: policy->cpu, event);
   2031: if ((policy->governor_enabled && event == CPUFREQ_GOV_START)
   2032: || (!policy->governor_enabled
   2039: policy->governor_enabled = false;
   2041: policy->governor_enabled = true;
   2045: ret = policy->governor->governor(policy, event);
   2049: policy->governor->initialized++;
   2051: policy->governor->initialized--;
   2056: policy->governor_enabled = true;
   2058: policy->governor_enabled = false;
   2064: module_put(policy->governor->owner);
   2124: * @policy: struct cpufreq_policy into which the current cpufreq_policy
   2127: * Reads the current cpufreq policy.
   2129: int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu)
   2132: if (!policy)
   2139: memcpy(policy, cpu_policy, sizeof(*policy));
   2147: * policy : current policy.
   2148: * new_policy: policy to be set.
   2150: static int cpufreq_set_policy(struct cpufreq_policy *policy,
   2156: pr_debug("setting new policy for CPU %u: %u - %u kHz\n",
   2159: memcpy(&new_policy->cpuinfo, &policy->cpuinfo, sizeof(policy->cpuinfo));
   2161: if (new_policy->min > policy->max || new_policy->max < policy->min)
   2185: /* notification of the new policy */
   2189: policy->min = new_policy->min;
   2190: policy->max = new_policy->max;
   2193: policy->min, policy->max);
   2196: policy->policy = new_policy->policy;
   2201: if (new_policy->governor == policy->governor)
   2207: old_gov = policy->governor;
   2210: __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
   2211: up_write(&policy->rwsem);
   2212: __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
   2213: down_write(&policy->rwsem);
   2217: policy->governor = new_policy->governor;
   2218: if (!__cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT)) {
   2219: if (!__cpufreq_governor(policy, CPUFREQ_GOV_START))
   2222: up_write(&policy->rwsem);
   2223: __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
   2224: down_write(&policy->rwsem);
   2228: pr_debug("starting governor %s failed\n", policy->governor->name);
   2230: policy->governor = old_gov;
   2231: __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_INIT);
   2232: __cpufreq_governor(policy, CPUFREQ_GOV_START);
   2239: return __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
   2243: * cpufreq_update_policy - re-evaluate an existing cpufreq policy
   2246: * Useful for policy notifiers which have different necessities
   2251: struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
   2255: if (!policy)
   2258: down_write(&policy->rwsem);
   2260: pr_debug("updating policy for CPU %u\n", cpu);
   2261: memcpy(&new_policy, policy, sizeof(*policy));
   2262: new_policy.min = policy->user_policy.min;
   2263: new_policy.max = policy->user_policy.max;
   2264: new_policy.policy = policy->user_policy.policy;
   2265: new_policy.governor = policy->user_policy.governor;
   2278: if (!policy->cur) {
   2280: policy->cur = new_policy.cur;
   2282: if (policy->cur != new_policy.cur && has_target())
   2283: cpufreq_out_of_sync(policy, new_policy.cur);
   2287: ret = cpufreq_set_policy(policy, &new_policy);
   2290: up_write(&policy->rwsem);
   2292: cpufreq_cpu_put(policy);
   2336: struct cpufreq_policy *policy;
   2339: for_each_policy(policy) {
   2340: freq_table = cpufreq_frequency_get_table(policy->cpu);
   2342: ret = cpufreq_frequency_table_cpuinfo(policy,
   2349: policy->user_policy.max = policy->max;
   2350: __cpufreq_governor(policy, CPUFREQ_GOV_LIMITS);
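cpufreq_freq_transition_begin()/end() (lines 356/390 above) serialize frequency transitions and fire the PRECHANGE/POSTCHANGE notifiers; every driver that programs the hardware itself brackets the change with them, as the drivers below do. A sketch of that contract, where my_target() and my_hw_set_speed() are illustrative names, not kernel APIs:

/* Placeholder for real PLL/divider programming. */
static int my_hw_set_speed(unsigned int khz)
{
	return 0;
}

static int my_target(struct cpufreq_policy *policy,
		     unsigned int target_freq, unsigned int relation)
{
	struct cpufreq_freqs freqs;
	int ret;

	freqs.old = policy->cur;
	freqs.new = target_freq;

	cpufreq_freq_transition_begin(policy, &freqs);	/* PRECHANGE */
	ret = my_hw_set_speed(target_freq);
	cpufreq_freq_transition_end(policy, &freqs, ret); /* POSTCHANGE;
							   * a nonzero ret
							   * marks failure */
	return ret;
}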
sh-cpufreq.c
   41: static int sh_cpufreq_target(struct cpufreq_policy *policy,
   45: unsigned int cpu = policy->cpu;
   62: if (freq < (policy->min * 1000) || freq > (policy->max * 1000))
   71: cpufreq_freq_transition_begin(policy, &freqs);
   74: cpufreq_freq_transition_end(policy, &freqs, 0);
   81: static int sh_cpufreq_verify(struct cpufreq_policy *policy)
   83: struct clk *cpuclk = &per_cpu(sh_cpuclk, policy->cpu);
   88: return cpufreq_frequency_table_verify(policy, freq_table);
   90: cpufreq_verify_within_cpu_limits(policy);
   92: policy->min = (clk_round_rate(cpuclk, 1) + 500) / 1000;
   93: policy->max = (clk_round_rate(cpuclk, ~0UL) + 500) / 1000;
   95: cpufreq_verify_within_cpu_limits(policy);
   99: static int sh_cpufreq_cpu_init(struct cpufreq_policy *policy)
  101: unsigned int cpu = policy->cpu;
  118: result = cpufreq_table_validate_and_show(policy, freq_table);
  125: policy->min = policy->cpuinfo.min_freq =
  127: policy->max = policy->cpuinfo.max_freq =
  131: policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
  135: policy->min / 1000, policy->min % 1000,
  136: policy->max / 1000, policy->max % 1000);
  141: static int sh_cpufreq_cpu_exit(struct cpufreq_policy *policy)
  143: unsigned int cpu = policy->cpu;
freq_table.c
   21: int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
   43: policy->min = policy->cpuinfo.min_freq = min_freq;
   44: policy->max = policy->cpuinfo.max_freq = max_freq;
   46: if (policy->min == ~0)
   54: int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
   61: pr_debug("request for verification of policy (%u - %u kHz) for cpu %u\n",
   62: policy->min, policy->max, policy->cpu);
   64: cpufreq_verify_within_cpu_limits(policy);
   69: if ((freq >= policy->min) && (freq <= policy->max)) {
   74: if ((next_larger > freq) && (freq > policy->max))
   79: policy->max = next_larger;
   80: cpufreq_verify_within_cpu_limits(policy);
   84: policy->min, policy->max, policy->cpu);
   91: * Generic routine to verify policy & frequency table, requires driver to set
   92: * policy->freq_table prior to it.
   94: int cpufreq_generic_frequency_table_verify(struct cpufreq_policy *policy)
   97: cpufreq_frequency_get_table(policy->cpu);
  101: return cpufreq_frequency_table_verify(policy, table);
  105: int cpufreq_frequency_table_target(struct cpufreq_policy *policy,
  123: target_freq, relation, policy->cpu);
  139: if ((freq < policy->min) || (freq > policy->max))
  197: int cpufreq_frequency_table_get_index(struct cpufreq_policy *policy,
  202: table = cpufreq_frequency_get_table(policy->cpu);
  219: static ssize_t show_available_freqs(struct cpufreq_policy *policy, char *buf,
  223: struct cpufreq_frequency_table *pos, *table = policy->freq_table;
  259: static ssize_t scaling_available_frequencies_show(struct cpufreq_policy *policy,
  262: return show_available_freqs(policy, buf, false);
  271: static ssize_t scaling_boost_frequencies_show(struct cpufreq_policy *policy,
  274: return show_available_freqs(policy, buf, true);
  288: int cpufreq_table_validate_and_show(struct cpufreq_policy *policy,
  291: int ret = cpufreq_frequency_table_cpuinfo(policy, table);
  294: policy->freq_table = table;
  304: struct cpufreq_policy *policy = cpufreq_cpu_get_raw(cpu);
  305: return policy ? policy->freq_table : NULL;
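These helpers are normally driven from a driver's ->init(): declare the table, then cpufreq_table_validate_and_show() (line 288) derives cpuinfo.min_freq/max_freq via cpufreq_frequency_table_cpuinfo() and publishes the table as policy->freq_table. A sketch with a hypothetical three-entry table (my_table/my_table_init are illustrative names):

static struct cpufreq_frequency_table my_table[] = {
	{ .frequency = 250000 },	/* entries are in kHz */
	{ .frequency = 500000 },
	{ .frequency = 1000000 },
	{ .frequency = CPUFREQ_TABLE_END },
};

static int my_table_init(struct cpufreq_policy *policy)
{
	/* fills cpuinfo limits and sets policy->freq_table */
	return cpufreq_table_validate_and_show(policy, my_table);
}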
longrun.c
   30: * longrun_get_policy - get the current LongRun policy
   31: * @policy: struct cpufreq_policy where current policy is written into
   33: * Reads the current LongRun policy by access to MSR_TMTA_LONGRUN_FLAGS
   36: static void longrun_get_policy(struct cpufreq_policy *policy)
   43: policy->policy = CPUFREQ_POLICY_PERFORMANCE;
   45: policy->policy = CPUFREQ_POLICY_POWERSAVE;
   54: policy->min = policy->max = longrun_high_freq;
   56: policy->min = longrun_low_freq + msr_lo *
   58: policy->max = longrun_low_freq + msr_hi *
   61: policy->cpu = 0;
   66: * longrun_set_policy - sets a new CPUFreq policy
   67: * @policy: new policy
   69: * Sets a new CPUFreq policy on LongRun-capable processors. This function
   72: static int longrun_set_policy(struct cpufreq_policy *policy)
   77: if (!policy)
   84: pctg_lo = (policy->min - longrun_low_freq) /
   86: pctg_hi = (policy->max - longrun_low_freq) /
   98: switch (policy->policy) {
  120: * longrun_verify_policy - verifies a new CPUFreq policy
  121: * @policy: the policy to verify
  123: * Validates a new CPUFreq policy. This function has to be called with
  126: static int longrun_verify_policy(struct cpufreq_policy *policy)
  128: if (!policy)
  131: policy->cpu = 0;
  132: cpufreq_verify_within_cpu_limits(policy);
  134: if ((policy->policy != CPUFREQ_POLICY_POWERSAVE) &&
  135: (policy->policy != CPUFREQ_POLICY_PERFORMANCE))
  257: static int longrun_cpu_init(struct cpufreq_policy *policy)
  262: if (policy->cpu != 0)
  270: /* cpuinfo and default policy values */
  271: policy->cpuinfo.min_freq = longrun_low_freq;
  272: policy->cpuinfo.max_freq = longrun_high_freq;
  273: policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
  274: longrun_get_policy(policy);
cpufreq_stats.c
   44: static ssize_t show_total_trans(struct cpufreq_policy *policy, char *buf)
   46: return sprintf(buf, "%d\n", policy->stats->total_trans);
   49: static ssize_t show_time_in_state(struct cpufreq_policy *policy, char *buf)
   51: struct cpufreq_stats *stats = policy->stats;
   65: static ssize_t show_trans_table(struct cpufreq_policy *policy, char *buf)
   67: struct cpufreq_stats *stats = policy->stats;
  133: static void __cpufreq_stats_free_table(struct cpufreq_policy *policy)
  135: struct cpufreq_stats *stats = policy->stats;
  143: sysfs_remove_group(&policy->kobj, &stats_attr_group);
  146: policy->stats = NULL;
  151: struct cpufreq_policy *policy;
  153: policy = cpufreq_cpu_get(cpu);
  154: if (!policy)
  157: __cpufreq_stats_free_table(policy);
  159: cpufreq_cpu_put(policy);
  162: static int __cpufreq_stats_create_table(struct cpufreq_policy *policy)
  167: unsigned int cpu = policy->cpu;
  176: if (policy->stats)
  213: stats->last_index = freq_table_get_index(stats, policy->cur);
  215: policy->stats = stats;
  216: ret = sysfs_create_group(&policy->kobj, &stats_attr_group);
  221: policy->stats = NULL;
  231: struct cpufreq_policy *policy;
  234: * "likely(!policy)" because normally cpufreq_stats will be registered
  237: policy = cpufreq_cpu_get(cpu);
  238: if (likely(!policy))
  241: __cpufreq_stats_create_table(policy);
  243: cpufreq_cpu_put(policy);
  250: struct cpufreq_policy *policy = data;
  253: ret = __cpufreq_stats_create_table(policy);
  255: __cpufreq_stats_free_table(policy);
  264: struct cpufreq_policy *policy = cpufreq_cpu_get(freq->cpu);
  268: if (!policy) {
  269: pr_err("%s: No policy found\n", __func__);
  276: if (!policy->stats) {
  281: stats = policy->stats;
  302: cpufreq_cpu_put(policy);
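cpufreq_stats collects its data from the core's transition notifications (cpufreq_stat_notifier_trans() at line 264 above). A minimal sketch of the same mechanism for a hypothetical listener; my_trans_notifier/my_nb are illustrative names:

#include <linux/cpufreq.h>
#include <linux/notifier.h>

static int my_trans_notifier(struct notifier_block *nb,
			     unsigned long val, void *data)
{
	struct cpufreq_freqs *freq = data;

	/* only act once the change has actually happened */
	if (val == CPUFREQ_POSTCHANGE)
		pr_info("cpu%u: %u -> %u kHz\n",
			freq->cpu, freq->old, freq->new);
	return NOTIFY_OK;
}

static struct notifier_block my_nb = {
	.notifier_call = my_trans_notifier,
};

/* register with:
 *	cpufreq_register_notifier(&my_nb, CPUFREQ_TRANSITION_NOTIFIER);
 */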
amd_freq_sensitivity.c
   40: static unsigned int amd_powersave_bias_target(struct cpufreq_policy *policy,
   47: struct cpu_data_t *data = &per_cpu(cpu_data, policy->cpu);
   48: struct dbs_data *od_data = policy->governor_data;
   51: od_data->cdata->get_cpu_dbs_info_s(policy->cpu);
   56: rdmsr_on_cpu(policy->cpu, MSR_AMD64_FREQ_SENSITIVITY_ACTUAL,
   58: rdmsr_on_cpu(policy->cpu, MSR_AMD64_FREQ_SENSITIVITY_REFERENCE,
   65: freq_next = policy->cur;
   74: freq_next = policy->cur;
   85: if (data->freq_prev == policy->cur)
   86: freq_next = policy->cur;
   88: if (freq_next > policy->cur)
   89: freq_next = policy->cur;
   90: else if (freq_next < policy->cur)
   91: freq_next = policy->min;
   95: cpufreq_frequency_table_target(policy,
   96: od_info->freq_table, policy->cur - 1,
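amd_powersave_bias_target() replaces ondemand's default powersave_bias frequency selection. My assumption is that it is installed through ondemand's registration hook; the handler name comes from the matches above, while the bias value and init function here are placeholders:

/* Assumed wiring, not copied from the file. */
static int __init amd_freq_sensitivity_init_sketch(void)
{
	od_register_powersave_bias_handler(amd_powersave_bias_target,
					   400 /* illustrative bias */);
	return 0;
}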
ppc_cbe_cpufreq.c
   68: static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
   76: cpu = of_get_cpu_node(policy->cpu, NULL);
   81: pr_debug("init cpufreq on CPU %d\n", policy->cpu);
   86: if (!cbe_get_cpu_pmd_regs(policy->cpu) ||
   87: !cbe_get_cpu_mic_tm_regs(policy->cpu)) {
  113: policy->cpuinfo.transition_latency = 25000;
  115: cur_pmode = cbe_cpufreq_get_pmode(policy->cpu);
  118: policy->cur = cbe_freqs[cur_pmode].frequency;
  121: cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu));
  124: /* this ensures that policy->cpuinfo_min
  125: * and policy->cpuinfo_max are set correctly */
  126: return cpufreq_table_validate_and_show(policy, cbe_freqs);
  129: static int cbe_cpufreq_target(struct cpufreq_policy *policy,
  134: policy->cpu,
  138: return set_pmode(policy->cpu, cbe_pmode_new);
integrator-cpufreq.c
   57: * Validate the speed policy.
   59: static int integrator_verify_policy(struct cpufreq_policy *policy)
   63: cpufreq_verify_within_cpu_limits(policy);
   65: vco = icst_hz_to_vco(&cclk_params, policy->max * 1000);
   66: policy->max = icst_hz(&cclk_params, vco) / 1000;
   68: vco = icst_hz_to_vco(&cclk_params, policy->min * 1000);
   69: policy->min = icst_hz(&cclk_params, vco) / 1000;
   71: cpufreq_verify_within_cpu_limits(policy);
   76: static int integrator_set_target(struct cpufreq_policy *policy,
   81: int cpu = policy->cpu;
  115: if (target_freq > policy->max)
  116: target_freq = policy->max;
  125: cpufreq_freq_transition_begin(policy, &freqs);
  146: cpufreq_freq_transition_end(policy, &freqs, 0);
  181: static int integrator_cpufreq_init(struct cpufreq_policy *policy)
  184: /* set default policy and cpuinfo */
  185: policy->max = policy->cpuinfo.max_freq = 160000;
  186: policy->min = policy->cpuinfo.min_freq = 12000;
  187: policy->cpuinfo.transition_latency = 1000000; /* 1 ms, assumed */
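integrator_verify_policy() shows the standard ->verify() shape: clamp to the cpuinfo limits, snap min and max to rates the hardware can actually produce, then clamp again so min <= max still holds. For a clk-based driver (compare davinci-cpufreq.c below) the same pattern looks roughly like this; my_verify and my_clk are illustrative names:

static struct clk *my_clk;	/* obtained in the driver's ->init() */

static int my_verify(struct cpufreq_policy *policy)
{
	cpufreq_verify_within_cpu_limits(policy);
	/* round both bounds (kHz) to achievable clock rates (Hz) */
	policy->min = clk_round_rate(my_clk, policy->min * 1000) / 1000;
	policy->max = clk_round_rate(my_clk, policy->max * 1000) / 1000;
	cpufreq_verify_within_cpu_limits(policy);
	return 0;
}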
cpufreq_performance.c
   19: static int cpufreq_governor_performance(struct cpufreq_policy *policy,
   26: policy->max, event);
   27: __cpufreq_driver_target(policy, policy->max,
   56: MODULE_DESCRIPTION("CPUfreq policy governor 'performance'");
cpufreq_powersave.c
   19: static int cpufreq_governor_powersave(struct cpufreq_policy *policy,
   26: policy->min, event);
   27: __cpufreq_driver_target(policy, policy->min,
   56: MODULE_DESCRIPTION("CPUfreq policy governor 'powersave'");
cpufreq-nforce2.c
  244: * nforce2_target - set a new CPUFreq policy
  245: * @policy: new policy
  250: * Sets a new CPUFreq policy.
  252: static int nforce2_target(struct cpufreq_policy *policy,
  259: if ((target_freq > policy->max) || (target_freq < policy->min))
  264: freqs.old = nforce2_get(policy->cpu);
  273: cpufreq_freq_transition_begin(policy, &freqs);
  288: cpufreq_freq_transition_end(policy, &freqs, 0);
  294: * nforce2_verify - verifies a new CPUFreq policy
  295: * @policy: new policy
  297: static int nforce2_verify(struct cpufreq_policy *policy)
  301: fsb_pol_max = policy->max / (fid * 100);
  303: if (policy->min < (fsb_pol_max * fid * 100))
  304: policy->max = (fsb_pol_max + 1) * fid * 100;
  306: cpufreq_verify_within_cpu_limits(policy);
  310: static int nforce2_cpu_init(struct cpufreq_policy *policy)
  316: if (policy->cpu != 0)
  359: /* cpuinfo and default policy values */
  360: policy->min = policy->cpuinfo.min_freq = min_fsb * fid * 100;
  361: policy->max = policy->cpuinfo.max_freq = max_fsb * fid * 100;
  362: policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
  367: static int nforce2_cpu_exit(struct cpufreq_policy *policy)
davinci-cpufreq.c
   41: static int davinci_verify_speed(struct cpufreq_policy *policy)
   48: return cpufreq_frequency_table_verify(policy, freq_table);
   50: if (policy->cpu)
   53: cpufreq_verify_within_cpu_limits(policy);
   54: policy->min = clk_round_rate(armclk, policy->min * 1000) / 1000;
   55: policy->max = clk_round_rate(armclk, policy->max * 1000) / 1000;
   56: cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
   57: policy->cpuinfo.max_freq);
   61: static int davinci_target(struct cpufreq_policy *policy, unsigned int idx)
   68: old_freq = policy->cur;
   95: static int davinci_cpu_init(struct cpufreq_policy *policy)
  101: if (policy->cpu != 0)
  111: policy->clk = cpufreq.armclk;
  119: return cpufreq_generic_init(policy, freq_table, 2000 * 1000);
at32ap-cpufreq.c
   29: static int at32_set_target(struct cpufreq_policy *policy, unsigned int index)
   33: old_freq = policy->cur;
   44: clk_set_rate(policy->clk, new_freq * 1000);
   52: static int at32_cpufreq_driver_init(struct cpufreq_policy *policy)
   58: if (policy->cpu != 0)
   70: policy->cpuinfo.transition_latency = 0;
   98: policy->clk = cpuclk;
  101: retval = cpufreq_table_validate_and_show(policy, freq_table);
gx-suspmod.c
  140: /* For the default policy, we want at least some processing power
  254: static void gx_set_cpuspeed(struct cpufreq_policy *policy, unsigned int khz)
  268: cpufreq_freq_transition_begin(policy, &freqs);
  317: cpufreq_freq_transition_end(policy, &freqs, 0);
  335: static int cpufreq_gx_verify(struct cpufreq_policy *policy)
  340: if (!stock_freq || !policy)
  343: policy->cpu = 0;
  344: cpufreq_verify_within_limits(policy, (stock_freq / max_duration),
  348: * within policy->min and policy->max. If it is not, policy->max
  350: * policy->min may not be decreased, though. This way we guarantee a
  353: tmp_freq = gx_validate_speed(policy->min, &tmp1, &tmp2);
  354: if (tmp_freq < policy->min)
  356: policy->min = tmp_freq;
  357: if (policy->min > policy->max)
  358: policy->max = tmp_freq;
  359: tmp_freq = gx_validate_speed(policy->max, &tmp1, &tmp2);
  360: if (tmp_freq > policy->max)
  362: policy->max = tmp_freq;
  363: if (policy->max < policy->min)
  364: policy->max = policy->min;
  365: cpufreq_verify_within_limits(policy, (stock_freq / max_duration),
  375: static int cpufreq_gx_target(struct cpufreq_policy *policy,
  382: if (!stock_freq || !policy)
  385: policy->cpu = 0;
  388: while (tmp_freq < policy->min) {
  392: while (tmp_freq > policy->max) {
  397: gx_set_cpuspeed(policy, tmp_freq);
  402: static int cpufreq_gx_cpu_init(struct cpufreq_policy *policy)
  406: if (!policy || policy->cpu != 0)
  422: policy->cpu = 0;
  425: policy->min = maxfreq / max_duration;
  427: policy->min = maxfreq / POLICY_MIN_DIV;
  428: policy->max = maxfreq;
  429: policy->cpuinfo.min_freq = maxfreq / max_duration;
  430: policy->cpuinfo.max_freq = maxfreq;
  431: policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
s3c64xx-cpufreq.c
   56: static int s3c64xx_cpufreq_set_target(struct cpufreq_policy *policy,
   63: old_freq = clk_get_rate(policy->clk) / 1000;
   80: ret = clk_set_rate(policy->clk, new_freq * 1000);
   95: if (clk_set_rate(policy->clk, old_freq * 1000) < 0)
  104: clk_get_rate(policy->clk) / 1000);
  148: static int s3c64xx_cpufreq_driver_init(struct cpufreq_policy *policy)
  153: if (policy->cpu != 0)
  161: policy->clk = clk_get(NULL, "armclk");
  162: if (IS_ERR(policy->clk)) {
  164: PTR_ERR(policy->clk));
  165: return PTR_ERR(policy->clk);
  184: r = clk_round_rate(policy->clk, freq->frequency * 1000);
  194: if (!vddarm && freq->frequency > clk_get_rate(policy->clk) / 1000)
  202: ret = cpufreq_generic_init(policy, s3c64xx_freq_table,
  208: clk_put(policy->clk);
cpufreq_governor.c
   38: struct cpufreq_policy *policy;
   63: policy = cdbs->cur_policy;
   66: for_each_cpu(j, policy->cpus) {
  171: void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
  177: if (!policy->governor_enabled)
  190: for_each_cpu(i, policy->cpus)
  199: gov_cancel_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy)
  205: for_each_cpu(i, policy->cpus) {
  242: int cpufreq_governor_dbs(struct cpufreq_policy *policy,
  252: unsigned int sampling_rate, latency, ignore_nice, j, cpu = policy->cpu;
  257: dbs_data = policy->governor_data;
  269: policy->governor_data = dbs_data;
  291: rc = sysfs_create_group(get_governor_parent_kobj(policy),
  299: policy->governor_data = dbs_data;
  301: /* policy latency is in ns. Convert it to us first */
  302: latency = policy->cpuinfo.transition_latency / 1000;
  313: (!policy->governor->initialized)) {
  326: sysfs_remove_group(get_governor_parent_kobj(policy),
  333: (policy->governor->initialized == 1)) {
  345: policy->governor_data = NULL;
  367: if (!policy->cur)
  372: for_each_cpu(j, policy->cpus) {
  378: j_cdbs->cur_policy = policy;
  399: cs_dbs_info->requested_freq = policy->cur;
  411: gov_queue_work(dbs_data, policy,
  419: gov_cancel_work(dbs_data, policy);
  436: if (policy->max < cpu_cdbs->cur_policy->cur)
  438: policy->max, CPUFREQ_RELATION_H);
  439: else if (policy->min > cpu_cdbs->cur_policy->cur)
  441: policy->min, CPUFREQ_RELATION_L);
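cpufreq_governor_dbs() (line 242) is the shared entry point for the sampling ("dbs") governors; a concrete governor simply forwards its ->governor() callback to it with its own common_dbs_data, exactly as ondemand does in cpufreq_ondemand.c below (line 594). A sketch, with my_dbs_cdata standing in for a governor's descriptor:

static struct common_dbs_data my_dbs_cdata;	/* illustrative */

static int my_cpufreq_governor_dbs(struct cpufreq_policy *policy,
				   unsigned int event)
{
	/* all event handling is delegated to the common dbs code */
	return cpufreq_governor_dbs(policy, &my_dbs_cdata, event);
}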
omap-cpufreq.c
   42: static int omap_target(struct cpufreq_policy *policy, unsigned int index)
   49: old_freq = policy->cur;
   53: ret = clk_round_rate(policy->clk, freq);
   91: ret = clk_set_rate(policy->clk, new_freq * 1000);
   99: clk_set_rate(policy->clk, old_freq * 1000);
  113: static int omap_cpu_init(struct cpufreq_policy *policy)
  117: policy->clk = clk_get(NULL, "cpufreq_ck");
  118: if (IS_ERR(policy->clk))
  119: return PTR_ERR(policy->clk);
  126: __func__, policy->cpu, result);
  134: result = cpufreq_generic_init(policy, freq_table, 300 * 1000);
  140: clk_put(policy->clk);
  144: static int omap_cpu_exit(struct cpufreq_policy *policy)
  147: clk_put(policy->clk);
H A D | cpufreq_ondemand.c | 76 static unsigned int generic_powersave_bias_target(struct cpufreq_policy *policy, generic_powersave_bias_target() argument 84 policy->cpu); generic_powersave_bias_target() 85 struct dbs_data *dbs_data = policy->governor_data; generic_powersave_bias_target() 94 cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_next, generic_powersave_bias_target() 102 cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg, generic_powersave_bias_target() 106 cpufreq_frequency_table_target(policy, dbs_info->freq_table, freq_avg, generic_powersave_bias_target() 135 static void dbs_freq_increase(struct cpufreq_policy *policy, unsigned int freq) dbs_freq_increase() argument 137 struct dbs_data *dbs_data = policy->governor_data; dbs_freq_increase() 141 freq = od_ops.powersave_bias_target(policy, freq, dbs_freq_increase() 143 else if (policy->cur == policy->max) dbs_freq_increase() 146 __cpufreq_driver_target(policy, freq, od_tuners->powersave_bias ? dbs_freq_increase() 158 struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy; od_check_cpu() local 159 struct dbs_data *dbs_data = policy->governor_data; od_check_cpu() 167 if (policy->cur < policy->max) od_check_cpu() 170 dbs_freq_increase(policy, policy->max); od_check_cpu() 175 min_f = policy->cpuinfo.min_freq; od_check_cpu() 176 max_f = policy->cpuinfo.max_freq; od_check_cpu() 183 __cpufreq_driver_target(policy, freq_next, od_check_cpu() 188 freq_next = od_ops.powersave_bias_target(policy, freq_next, od_check_cpu() 190 __cpufreq_driver_target(policy, freq_next, CPUFREQ_RELATION_C); od_check_cpu() 262 struct cpufreq_policy *policy; for_each_online_cpu() local 266 policy = cpufreq_cpu_get(cpu); for_each_online_cpu() 267 if (!policy) for_each_online_cpu() 269 if (policy->governor != &cpufreq_gov_ondemand) { for_each_online_cpu() 270 cpufreq_cpu_put(policy); for_each_online_cpu() 274 cpufreq_cpu_put(policy); for_each_online_cpu() 548 struct cpufreq_policy *policy; od_set_powersave_bias() local 562 policy = per_cpu(od_cpu_dbs_info, cpu).cdbs.cur_policy; for_each_online_cpu() 563 if (!policy) for_each_online_cpu() 566 cpumask_or(&done, &done, policy->cpus); for_each_online_cpu() 568 if (policy->governor != &cpufreq_gov_ondemand) for_each_online_cpu() 571 dbs_data = policy->governor_data; for_each_online_cpu() 594 static int od_cpufreq_governor_dbs(struct cpufreq_policy *policy, od_cpufreq_governor_dbs() argument 597 return cpufreq_governor_dbs(policy, &od_dbs_cdata, event); od_cpufreq_governor_dbs()
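od_check_cpu() above maps the measured load linearly onto the CPU's frequency span; the arithmetic around min_f and max_f reduces to one proportional formula. A small sketch, with load a percentage in [0, 100]:

    #include <linux/cpufreq.h>

    /* Proportional load-to-frequency mapping, as ondemand computes it. */
    static unsigned int example_load_to_freq(struct cpufreq_policy *policy,
                                             unsigned int load)
    {
            unsigned int min_f = policy->cpuinfo.min_freq;
            unsigned int max_f = policy->cpuinfo.max_freq;

            return min_f + load * (max_f - min_f) / 100;
    }

The result is then handed to __cpufreq_driver_target() with CPUFREQ_RELATION_C, which picks the closest entry in the driver's frequency table.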
|
H A D | pxa3xx-cpufreq.c | 90 static int setup_freqs_table(struct cpufreq_policy *policy, setup_freqs_table() argument 111 return cpufreq_table_validate_and_show(policy, table); setup_freqs_table() 158 static int pxa3xx_cpufreq_set(struct cpufreq_policy *policy, unsigned int index) pxa3xx_cpufreq_set() argument 163 if (policy->cpu != 0) pxa3xx_cpufreq_set() 176 static int pxa3xx_cpufreq_init(struct cpufreq_policy *policy) pxa3xx_cpufreq_init() argument 180 /* set default policy and cpuinfo */ pxa3xx_cpufreq_init() 181 policy->min = policy->cpuinfo.min_freq = 104000; pxa3xx_cpufreq_init() 182 policy->max = policy->cpuinfo.max_freq = pxa3xx_cpufreq_init() 184 policy->cpuinfo.transition_latency = 1000; /* FIXME: 1 ms, assumed */ pxa3xx_cpufreq_init() 187 ret = setup_freqs_table(policy, pxa300_freqs, pxa3xx_cpufreq_init() 191 ret = setup_freqs_table(policy, pxa320_freqs, pxa3xx_cpufreq_init()
|
H A D | loongson2_cpufreq.c | 48 static int loongson2_cpufreq_target(struct cpufreq_policy *policy, loongson2_cpufreq_target() argument 51 unsigned int cpu = policy->cpu; loongson2_cpufreq_target() 65 clk_set_rate(policy->clk, freq * 1000); loongson2_cpufreq_target() 70 static int loongson2_cpufreq_cpu_init(struct cpufreq_policy *policy) loongson2_cpufreq_cpu_init() argument 101 policy->clk = cpuclk; loongson2_cpufreq_cpu_init() 102 return cpufreq_generic_init(policy, &loongson2_clockmod_table[0], 0); loongson2_cpufreq_cpu_init() 105 static int loongson2_cpufreq_exit(struct cpufreq_policy *policy) loongson2_cpufreq_exit() argument 107 clk_put(policy->clk); loongson2_cpufreq_exit()
|
H A D | sfi-cpufreq.c | 57 static int sfi_cpufreq_target(struct cpufreq_policy *policy, unsigned int index) sfi_cpufreq_target() argument 62 next_perf_state = policy->freq_table[index].driver_data; sfi_cpufreq_target() 64 rdmsr_on_cpu(policy->cpu, MSR_IA32_PERF_CTL, &lo, &hi); sfi_cpufreq_target() 68 wrmsr_on_cpu(policy->cpu, MSR_IA32_PERF_CTL, lo, hi); sfi_cpufreq_target() 73 static int sfi_cpufreq_cpu_init(struct cpufreq_policy *policy) sfi_cpufreq_cpu_init() argument 75 policy->shared_type = CPUFREQ_SHARED_TYPE_HW; sfi_cpufreq_cpu_init() 76 policy->cpuinfo.transition_latency = 100000; /* 100us */ sfi_cpufreq_cpu_init() 78 return cpufreq_table_validate_and_show(policy, freq_table); sfi_cpufreq_cpu_init()
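sfi_cpufreq_target() above is the classic read-modify-write on MSR_IA32_PERF_CTL for the policy's CPU. A sketch of the same sequence; EXAMPLE_PERF_CTL_MASK is an assumed width for the control field (the driver uses its own mask constant), and error returns from the MSR helpers are ignored for brevity.

    #include <linux/cpufreq.h>
    #include <asm/msr.h>

    #define EXAMPLE_PERF_CTL_MASK  0xffff  /* assumed control-field width */

    /* Read-modify-write the P-state control MSR on the policy's CPU. */
    static int example_msr_target(struct cpufreq_policy *policy, u32 ctrl_val)
    {
            u32 lo, hi;

            rdmsr_on_cpu(policy->cpu, MSR_IA32_PERF_CTL, &lo, &hi);
            lo = (lo & ~EXAMPLE_PERF_CTL_MASK) |
                 (ctrl_val & EXAMPLE_PERF_CTL_MASK);
            wrmsr_on_cpu(policy->cpu, MSR_IA32_PERF_CTL, lo, hi);

            return 0;
    }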
|
H A D | qoriq-cpufreq.c | 123 static void set_affected_cpus(struct cpufreq_policy *policy) set_affected_cpus() argument 126 struct cpumask *dstp = policy->cpus; set_affected_cpus() 129 np = cpu_to_clk_node(policy->cpu); set_affected_cpus() 197 static int qoriq_cpufreq_cpu_init(struct cpufreq_policy *policy) qoriq_cpufreq_cpu_init() argument 205 unsigned int cpu = policy->cpu; qoriq_cpufreq_cpu_init() 216 policy->clk = of_clk_get(np, 0); qoriq_cpufreq_cpu_init() 217 if (IS_ERR(policy->clk)) { qoriq_cpufreq_cpu_init() 258 ret = cpufreq_table_validate_and_show(policy, table); qoriq_cpufreq_cpu_init() 267 set_affected_cpus(policy); qoriq_cpufreq_cpu_init() 268 policy->driver_data = data; qoriq_cpufreq_cpu_init() 273 policy->cpuinfo.transition_latency = u64temp + 1; qoriq_cpufreq_cpu_init() 284 policy->driver_data = NULL; qoriq_cpufreq_cpu_init() 292 static int __exit qoriq_cpufreq_cpu_exit(struct cpufreq_policy *policy) qoriq_cpufreq_cpu_exit() argument 294 struct cpu_data *data = policy->driver_data; qoriq_cpufreq_cpu_exit() 299 policy->driver_data = NULL; qoriq_cpufreq_cpu_exit() 304 static int qoriq_cpufreq_target(struct cpufreq_policy *policy, qoriq_cpufreq_target() argument 308 struct cpu_data *data = policy->driver_data; qoriq_cpufreq_target() 311 return clk_set_parent(policy->clk, parent); qoriq_cpufreq_target()
|
H A D | tegra-cpufreq.c | 50 static unsigned int tegra_get_intermediate(struct cpufreq_policy *policy, tegra_get_intermediate() argument 57 * - we are already at it, i.e. policy->cur == ifreq tegra_get_intermediate() 60 if ((freq_table[index].frequency == ifreq) || (policy->cur == ifreq)) tegra_get_intermediate() 66 static int tegra_target_intermediate(struct cpufreq_policy *policy, tegra_target_intermediate() argument 92 static int tegra_target(struct cpufreq_policy *policy, unsigned int index) tegra_target() argument 137 static int tegra_cpu_init(struct cpufreq_policy *policy) tegra_cpu_init() argument 141 if (policy->cpu >= NUM_CPUS) tegra_cpu_init() 148 ret = cpufreq_generic_init(policy, freq_table, 300 * 1000); tegra_cpu_init() 155 policy->clk = cpu_clk; tegra_cpu_init() 156 policy->suspend_freq = freq_table[0].frequency; tegra_cpu_init() 160 static int tegra_cpu_exit(struct cpufreq_policy *policy) tegra_cpu_exit() argument
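tegra registers .get_intermediate/.target_intermediate so the core can park the CPU on a stable backup clock while the main PLL relocks. A sketch of that callback contract; cpu_clk, pll_p_clk and freq_table are assumed handles, and returning 0 from get_intermediate() tells the core no intermediate hop is needed.

    #include <linux/clk.h>
    #include <linux/cpufreq.h>

    extern struct clk *cpu_clk, *pll_p_clk;        /* assumed clock handles */
    extern struct cpufreq_frequency_table freq_table[];

    static unsigned int example_get_intermediate(struct cpufreq_policy *policy,
                                                 unsigned int index)
    {
            unsigned int ifreq = clk_get_rate(pll_p_clk) / 1000;

            /* 0 means "no intermediate hop": either the target is the
             * backup rate already, or we are currently running at it. */
            if (freq_table[index].frequency == ifreq || policy->cur == ifreq)
                    return 0;

            return ifreq;
    }

    static int example_target_intermediate(struct cpufreq_policy *policy,
                                           unsigned int index)
    {
            /* Park the CPU on the stable backup PLL while the main relocks. */
            return clk_set_parent(cpu_clk, pll_p_clk);
    }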
|
H A D | dbx500-cpufreq.c | 22 static int dbx500_cpufreq_target(struct cpufreq_policy *policy, dbx500_cpufreq_target() argument 29 static int dbx500_cpufreq_init(struct cpufreq_policy *policy) dbx500_cpufreq_init() argument 31 policy->clk = armss_clk; dbx500_cpufreq_init() 32 return cpufreq_generic_init(policy, freq_table, 20 * 1000); dbx500_cpufreq_init()
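dbx500 above is about as small as a cpufreq driver gets: one clock, one table, and cpufreq_generic_init() to wire the policy up. A self-contained sketch of that minimal form, with illustrative table entries and example_* names standing in for the real ones (dbx500's clock is armss_clk):

    #include <linux/clk.h>
    #include <linux/cpufreq.h>

    static struct clk *example_clk;                /* set up at probe time */

    static struct cpufreq_frequency_table example_freq_table[] = {
            { .frequency = 200000 },               /* kHz, illustrative */
            { .frequency = 400000 },
            { .frequency = CPUFREQ_TABLE_END },
    };

    static int example_target(struct cpufreq_policy *policy, unsigned int index)
    {
            return clk_set_rate(policy->clk,
                                example_freq_table[index].frequency * 1000);
    }

    static int example_init(struct cpufreq_policy *policy)
    {
            policy->clk = example_clk;
            /* table + transition latency in ns (20 us, as dbx500 uses) */
            return cpufreq_generic_init(policy, example_freq_table, 20 * 1000);
    }

    static struct cpufreq_driver example_driver = {
            .flags          = CPUFREQ_STICKY,
            .verify         = cpufreq_generic_frequency_table_verify,
            .target_index   = example_target,
            .get            = cpufreq_generic_get,
            .init           = example_init,
            .name           = "example",
            .attr           = cpufreq_generic_attr,
    };

The platform's probe code would then call cpufreq_register_driver(&example_driver).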
|
H A D | ia64-acpi-cpufreq.c | 139 struct cpufreq_policy *policy, processor_set_freq() 150 set_cpus_allowed_ptr(current, cpumask_of(policy->cpu)); processor_set_freq() 151 if (smp_processor_id() != policy->cpu) { processor_set_freq() 210 struct cpufreq_policy *policy, acpi_cpufreq_target() 213 return processor_set_freq(acpi_io_data[policy->cpu], policy, index); acpi_cpufreq_target() 218 struct cpufreq_policy *policy) acpi_cpufreq_cpu_init() 221 unsigned int cpu = policy->cpu; acpi_cpufreq_cpu_init() 266 policy->cpuinfo.transition_latency = 0; acpi_cpufreq_cpu_init() 269 policy->cpuinfo.transition_latency) { acpi_cpufreq_cpu_init() 270 policy->cpuinfo.transition_latency = acpi_cpufreq_cpu_init() 286 result = cpufreq_table_validate_and_show(policy, data->freq_table); acpi_cpufreq_cpu_init() 327 struct cpufreq_policy *policy) acpi_cpufreq_cpu_exit() 329 struct cpufreq_acpi_io *data = acpi_io_data[policy->cpu]; acpi_cpufreq_cpu_exit() 334 acpi_io_data[policy->cpu] = NULL; acpi_cpufreq_cpu_exit() 336 policy->cpu); acpi_cpufreq_cpu_exit() 137 processor_set_freq( struct cpufreq_acpi_io *data, struct cpufreq_policy *policy, int state) processor_set_freq() argument 209 acpi_cpufreq_target( struct cpufreq_policy *policy, unsigned int index) acpi_cpufreq_target() argument 217 acpi_cpufreq_cpu_init( struct cpufreq_policy *policy) acpi_cpufreq_cpu_init() argument 326 acpi_cpufreq_cpu_exit( struct cpufreq_policy *policy) acpi_cpufreq_cpu_exit() argument
|
H A D | acpi-cpufreq.c | 145 static ssize_t show_freqdomain_cpus(struct cpufreq_policy *policy, char *buf) show_freqdomain_cpus() argument 147 struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); show_freqdomain_cpus() 172 static ssize_t store_cpb(struct cpufreq_policy *policy, const char *buf, store_cpb() argument 178 static ssize_t show_cpb(struct cpufreq_policy *policy, char *buf) show_cpb() argument 408 static int acpi_cpufreq_target(struct cpufreq_policy *policy, acpi_cpufreq_target() argument 411 struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); acpi_cpufreq_target() 459 if (policy->shared_type != CPUFREQ_SHARED_TYPE_ANY) acpi_cpufreq_target() 460 cmd.mask = policy->cpus; acpi_cpufreq_target() 462 cmd.mask = cpumask_of(policy->cpu); acpi_cpufreq_target() 470 policy->cpu); acpi_cpufreq_target() 643 static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy) acpi_cpufreq_cpu_init() argument 647 unsigned int cpu = policy->cpu; acpi_cpufreq_cpu_init() 650 struct cpuinfo_x86 *c = &cpu_data(policy->cpu); acpi_cpufreq_cpu_init() 686 policy->shared_type = perf->shared_type; acpi_cpufreq_cpu_init() 689 * Will let policy->cpus know about dependency only when software acpi_cpufreq_cpu_init() 692 if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL || acpi_cpufreq_cpu_init() 693 policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) { acpi_cpufreq_cpu_init() 694 cpumask_copy(policy->cpus, perf->shared_cpu_map); acpi_cpufreq_cpu_init() 700 if (bios_with_sw_any_bug && !policy_is_shared(policy)) { acpi_cpufreq_cpu_init() 701 policy->shared_type = CPUFREQ_SHARED_TYPE_ALL; acpi_cpufreq_cpu_init() 702 cpumask_copy(policy->cpus, cpu_core_mask(cpu)); acpi_cpufreq_cpu_init() 706 cpumask_clear(policy->cpus); acpi_cpufreq_cpu_init() 707 cpumask_set_cpu(cpu, policy->cpus); acpi_cpufreq_cpu_init() 709 policy->shared_type = CPUFREQ_SHARED_TYPE_HW; acpi_cpufreq_cpu_init() 764 policy->cpuinfo.transition_latency = 0; acpi_cpufreq_cpu_init() 767 policy->cpuinfo.transition_latency) acpi_cpufreq_cpu_init() 768 policy->cpuinfo.transition_latency = acpi_cpufreq_cpu_init() 774 policy->cpuinfo.transition_latency > 20 * 1000) { acpi_cpufreq_cpu_init() 775 policy->cpuinfo.transition_latency = 20 * 1000; acpi_cpufreq_cpu_init() 794 result = cpufreq_table_validate_and_show(policy, data->freq_table); acpi_cpufreq_cpu_init() 798 if (perf->states[0].core_frequency * 1000 != policy->cpuinfo.max_freq) acpi_cpufreq_cpu_init() 804 * The core will not set policy->cur, because acpi_cpufreq_cpu_init() 809 policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu); acpi_cpufreq_cpu_init() 850 static int acpi_cpufreq_cpu_exit(struct cpufreq_policy *policy) acpi_cpufreq_cpu_exit() argument 852 struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); acpi_cpufreq_cpu_exit() 857 per_cpu(acfreq_data, policy->cpu) = NULL; acpi_cpufreq_cpu_exit() 859 policy->cpu); acpi_cpufreq_cpu_exit() 868 static int acpi_cpufreq_resume(struct cpufreq_policy *policy) acpi_cpufreq_resume() argument 870 struct acpi_cpufreq_data *data = per_cpu(acfreq_data, policy->cpu); acpi_cpufreq_resume()
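acpi_cpufreq_cpu_init() above derives policy->cpus from the ACPI coordination type: with software coordination (ALL/ANY) every CPU in the domain shares one policy, while hardware coordination leaves each CPU in its own. The same logic, condensed:

    #include <linux/cpufreq.h>
    #include <linux/cpumask.h>

    /* Software coordination (ALL/ANY): the whole domain shares one policy.
     * Otherwise fall back to one CPU per policy, hardware-coordinated. */
    static void example_apply_shared_type(struct cpufreq_policy *policy,
                                          const struct cpumask *shared_cpu_map)
    {
            if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
                policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) {
                    cpumask_copy(policy->cpus, shared_cpu_map);
            } else {
                    cpumask_clear(policy->cpus);
                    cpumask_set_cpu(policy->cpu, policy->cpus);
                    policy->shared_type = CPUFREQ_SHARED_TYPE_HW;
            }
    }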
|
H A D | exynos-cpufreq.c | 50 struct cpufreq_policy *policy = cpufreq_cpu_get(0); exynos_cpufreq_scale() local 58 old_freq = policy->cur; exynos_cpufreq_scale() 61 * The policy max have been changed so that we cannot get proper exynos_cpufreq_scale() 63 * policy and get the index from the raw frequency table. exynos_cpufreq_scale() 127 cpufreq_cpu_put(policy); exynos_cpufreq_scale() 132 static int exynos_target(struct cpufreq_policy *policy, unsigned int index) exynos_target() argument 137 static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy) exynos_cpufreq_cpu_init() argument 139 policy->clk = exynos_info->cpu_clk; exynos_cpufreq_cpu_init() 140 policy->suspend_freq = locking_frequency; exynos_cpufreq_cpu_init() 141 return cpufreq_generic_init(policy, exynos_info->freq_table, 100000); exynos_cpufreq_cpu_init()
|
H A D | ls1x-cpufreq.c | 46 static int ls1x_cpufreq_target(struct cpufreq_policy *policy, ls1x_cpufreq_target() argument 51 old_freq = policy->cur; ls1x_cpufreq_target() 52 new_freq = policy->freq_table[index].frequency; ls1x_cpufreq_target() 64 clk_set_parent(policy->clk, ls1x_cpufreq.osc_clk); ls1x_cpufreq_target() 70 clk_set_parent(policy->clk, ls1x_cpufreq.mux_clk); ls1x_cpufreq_target() 75 static int ls1x_cpufreq_init(struct cpufreq_policy *policy) ls1x_cpufreq_init() argument 105 policy->clk = ls1x_cpufreq.clk; ls1x_cpufreq_init() 106 ret = cpufreq_generic_init(policy, freq_tbl, 0); ls1x_cpufreq_init() 113 static int ls1x_cpufreq_exit(struct cpufreq_policy *policy) ls1x_cpufreq_exit() argument 115 kfree(policy->freq_table); ls1x_cpufreq_exit()
|
H A D | cpufreq_conservative.c | 27 struct cpufreq_policy *policy) get_freq_target() 29 unsigned int freq_target = (cs_tuners->freq_step * policy->max) / 100; get_freq_target() 50 struct cpufreq_policy *policy = dbs_info->cdbs.cur_policy; cs_check_cpu() local 51 struct dbs_data *dbs_data = policy->governor_data; cs_check_cpu() 66 if (dbs_info->requested_freq == policy->max) cs_check_cpu() 69 dbs_info->requested_freq += get_freq_target(cs_tuners, policy); cs_check_cpu() 71 if (dbs_info->requested_freq > policy->max) cs_check_cpu() 72 dbs_info->requested_freq = policy->max; cs_check_cpu() 74 __cpufreq_driver_target(policy, dbs_info->requested_freq, cs_check_cpu() 90 if (policy->cur == policy->min) cs_check_cpu() 93 freq_target = get_freq_target(cs_tuners, policy); cs_check_cpu() 97 dbs_info->requested_freq = policy->min; cs_check_cpu() 99 __cpufreq_driver_target(policy, dbs_info->requested_freq, cs_check_cpu() 133 struct cpufreq_policy *policy; dbs_cpufreq_notifier() local 138 policy = dbs_info->cdbs.cur_policy; dbs_cpufreq_notifier() 144 if (dbs_info->requested_freq > policy->max dbs_cpufreq_notifier() 145 || dbs_info->requested_freq < policy->min) dbs_cpufreq_notifier() 371 static int cs_cpufreq_governor_dbs(struct cpufreq_policy *policy, cs_cpufreq_governor_dbs() argument 374 return cpufreq_governor_dbs(policy, &cs_dbs_cdata, event); cs_cpufreq_governor_dbs() 26 get_freq_target(struct cs_dbs_tuners *cs_tuners, struct cpufreq_policy *policy) get_freq_target() argument
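get_freq_target() above sizes each conservative step as freq_step percent of policy->max. Condensed below, with the governor's small floor so a tiny freq_step still makes progress (the stock default floor is 5):

    #include <linux/cpufreq.h>

    /* Step size is freq_step percent of policy->max (both in kHz). */
    static unsigned int example_freq_target(unsigned int freq_step_pct,
                                            struct cpufreq_policy *policy)
    {
            unsigned int freq_target = (freq_step_pct * policy->max) / 100;

            /* Floor keeps a tiny freq_step from rounding down to zero. */
            if (unlikely(freq_target == 0))
                    freq_target = 5;

            return freq_target;
    }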
|
H A D | sc520_freq.c | 56 static int sc520_freq_target(struct cpufreq_policy *policy, unsigned int state) sc520_freq_target() argument 75 static int sc520_freq_cpu_init(struct cpufreq_policy *policy) sc520_freq_cpu_init() argument 84 /* cpuinfo and default policy values */ sc520_freq_cpu_init() 85 policy->cpuinfo.transition_latency = 1000000; /* 1ms */ sc520_freq_cpu_init() 87 return cpufreq_table_validate_and_show(policy, sc520_freq_table); sc520_freq_cpu_init()
|
H A D | speedstep-ich.c | 252 * speedstep_target - set a new CPUFreq policy 253 * @policy: new policy 256 * Sets a new CPUFreq policy. 258 static int speedstep_target(struct cpufreq_policy *policy, unsigned int index) speedstep_target() argument 262 policy_cpu = cpumask_any_and(policy->cpus, cpu_online_mask); speedstep_target() 272 struct cpufreq_policy *policy; member in struct:get_freqs 284 &get_freqs->policy->cpuinfo.transition_latency, get_freqs_on_cpu() 288 static int speedstep_cpu_init(struct cpufreq_policy *policy) speedstep_cpu_init() argument 295 cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu)); speedstep_cpu_init() 297 policy_cpu = cpumask_any_and(policy->cpus, cpu_online_mask); speedstep_cpu_init() 300 gf.policy = policy; speedstep_cpu_init() 305 return cpufreq_table_validate_and_show(policy, speedstep_freqs); speedstep_cpu_init()
|
H A D | speedstep-centrino.c | 232 static int centrino_cpu_init_table(struct cpufreq_policy *policy) centrino_cpu_init_table() argument 234 struct cpuinfo_x86 *cpu = &cpu_data(policy->cpu); centrino_cpu_init_table() 259 per_cpu(centrino_model, policy->cpu) = model; centrino_cpu_init_table() 268 static inline int centrino_cpu_init_table(struct cpufreq_policy *policy) centrino_cpu_init_table() argument 343 static int centrino_cpu_init(struct cpufreq_policy *policy) centrino_cpu_init() argument 345 struct cpuinfo_x86 *cpu = &cpu_data(policy->cpu); centrino_cpu_init() 357 if (policy->cpu != 0) centrino_cpu_init() 365 per_cpu(centrino_cpu, policy->cpu) = &cpu_ids[i]; centrino_cpu_init() 367 if (!per_cpu(centrino_cpu, policy->cpu)) { centrino_cpu_init() 374 if (centrino_cpu_init_table(policy)) centrino_cpu_init() 395 policy->cpuinfo.transition_latency = 10000; centrino_cpu_init() 398 return cpufreq_table_validate_and_show(policy, centrino_cpu_init() 399 per_cpu(centrino_model, policy->cpu)->op_points); centrino_cpu_init() 402 static int centrino_cpu_exit(struct cpufreq_policy *policy) centrino_cpu_exit() argument 404 unsigned int cpu = policy->cpu; centrino_cpu_exit() 415 * centrino_setpolicy - set a new CPUFreq policy 416 * @policy: new policy 419 * Sets a new CPUFreq policy. 421 static int centrino_target(struct cpufreq_policy *policy, unsigned int index) centrino_target() argument 423 unsigned int msr, oldmsr = 0, h = 0, cpu = policy->cpu; centrino_target() 439 for_each_cpu(j, policy->cpus) { centrino_target() 446 if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) centrino_target() 447 good_cpu = cpumask_any_and(policy->cpus, centrino_target() 481 if (policy->shared_type == CPUFREQ_SHARED_TYPE_ANY) centrino_target() 490 * We have sent callbacks to policy->cpus and centrino_target()
|
H A D | cpufreq-dt.c | 39 static int set_target(struct cpufreq_policy *policy, unsigned int index) set_target() argument 42 struct cpufreq_frequency_table *freq_table = policy->freq_table; set_target() 43 struct clk *cpu_clk = policy->clk; set_target() 44 struct private_data *priv = policy->driver_data; set_target() 185 static int cpufreq_init(struct cpufreq_policy *policy) cpufreq_init() argument 198 ret = allocate_resources(policy->cpu, &cpu_dev, &cpu_reg, &cpu_clk); cpufreq_init() 206 dev_err(cpu_dev, "failed to find cpu%d node\n", policy->cpu); cpufreq_init() 285 policy->driver_data = priv; cpufreq_init() 287 policy->clk = cpu_clk; cpufreq_init() 288 ret = cpufreq_table_validate_and_show(policy, freq_table); cpufreq_init() 295 policy->cpuinfo.transition_latency = transition_latency; cpufreq_init() 299 cpumask_setall(policy->cpus); cpufreq_init() 320 static int cpufreq_exit(struct cpufreq_policy *policy) cpufreq_exit() argument 322 struct private_data *priv = policy->driver_data; cpufreq_exit() 325 dev_pm_opp_free_cpufreq_table(priv->cpu_dev, &policy->freq_table); cpufreq_exit() 327 clk_put(policy->clk); cpufreq_exit() 335 static void cpufreq_ready(struct cpufreq_policy *policy) cpufreq_ready() argument 337 struct private_data *priv = policy->driver_data; cpufreq_ready() 349 policy->related_cpus); cpufreq_ready()
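cpufreq-dt builds its frequency table from the device's OPPs rather than a static array. A sketch of that init path, with the clock/regulator plumbing and error unwinding of the real driver elided:

    #include <linux/cpu.h>
    #include <linux/cpufreq.h>
    #include <linux/pm_opp.h>

    static int example_dt_init(struct cpufreq_policy *policy)
    {
            struct device *cpu_dev = get_cpu_device(policy->cpu);
            struct cpufreq_frequency_table *freq_table;
            int ret;

            /* Build the cpufreq table from the OPPs declared for this CPU. */
            ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
            if (ret)
                    return ret;

            ret = cpufreq_table_validate_and_show(policy, freq_table);
            if (ret)
                    dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);

            return ret;
    }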
|
H A D | sparc-us3-cpufreq.c | 96 static int us3_freq_target(struct cpufreq_policy *policy, unsigned int index) us3_freq_target() argument 98 unsigned int cpu = policy->cpu; us3_freq_target() 135 static int __init us3_freq_cpu_init(struct cpufreq_policy *policy) us3_freq_cpu_init() argument 137 unsigned int cpu = policy->cpu; us3_freq_cpu_init() 151 policy->cpuinfo.transition_latency = 0; us3_freq_cpu_init() 152 policy->cur = clock_tick; us3_freq_cpu_init() 154 return cpufreq_table_validate_and_show(policy, table); us3_freq_cpu_init() 157 static int us3_freq_cpu_exit(struct cpufreq_policy *policy) us3_freq_cpu_exit() argument 160 us3_freq_target(policy, 0); us3_freq_cpu_exit()
|
H A D | cpufreq_governor.h | 46 * - gov_pol: One governor instance per policy 84 (struct cpufreq_policy *policy, char *buf) \ 86 struct dbs_data *dbs_data = policy->governor_data; \ 100 (struct cpufreq_policy *policy, const char *buf, size_t count) \ 102 struct dbs_data *dbs_data = policy->governor_data; \ 172 /* Per policy Governors sysfs tunables */ 199 struct attribute_group *attr_group_gov_pol; /* one governor - policy */ 218 /* Governor Per policy data */ 232 unsigned int (*powersave_bias_target)(struct cpufreq_policy *policy, 234 void (*freq_increase)(struct cpufreq_policy *policy, unsigned int freq); 261 (struct cpufreq_policy *policy, char *buf) \ 263 struct dbs_data *dbs_data = policy->governor_data; \ 272 int cpufreq_governor_dbs(struct cpufreq_policy *policy, 274 void gov_queue_work(struct dbs_data *dbs_data, struct cpufreq_policy *policy,
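The show/store macros in cpufreq_governor.h stamp out one pair of sysfs accessors per tunable, each reading the governor data hung off policy->governor_data. A simplified sketch of the pattern for a single tunable; the real macros go through a dbs_data/tuners indirection and distinguish system-wide from per-policy instances, while here a hypothetical struct example_tuners is hung directly off governor_data.

    #include <linux/cpufreq.h>
    #include <linux/kernel.h>

    struct example_tuners {
            unsigned int sampling_rate;
    };

    #define show_one(file_name)                                             \
    static ssize_t show_##file_name                                         \
    (struct cpufreq_policy *policy, char *buf)                              \
    {                                                                       \
            struct example_tuners *tuners = policy->governor_data;          \
            return sprintf(buf, "%u\n", tuners->file_name);                 \
    }

    #define store_one(file_name)                                            \
    static ssize_t store_##file_name                                        \
    (struct cpufreq_policy *policy, const char *buf, size_t count)          \
    {                                                                       \
            struct example_tuners *tuners = policy->governor_data;          \
            unsigned int input;                                             \
            if (kstrtouint(buf, 10, &input))                                \
                    return -EINVAL;                                         \
            tuners->file_name = input;                                      \
            return count;                                                   \
    }

    show_one(sampling_rate);
    store_one(sampling_rate);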
|
H A D | cris-artpec3-cpufreq.c | 30 static int cris_freq_target(struct cpufreq_policy *policy, unsigned int state) cris_freq_target() argument 50 static int cris_freq_cpu_init(struct cpufreq_policy *policy) cris_freq_cpu_init() argument 52 return cpufreq_generic_init(policy, cris_freq_table, 1000000); cris_freq_cpu_init()
|
H A D | cris-etraxfs-cpufreq.c | 30 static int cris_freq_target(struct cpufreq_policy *policy, unsigned int state) cris_freq_target() argument 50 static int cris_freq_cpu_init(struct cpufreq_policy *policy) cris_freq_cpu_init() argument 52 return cpufreq_generic_init(policy, cris_freq_table, 1000000); cris_freq_cpu_init()
|
H A D | p4-clockmod.c | 108 static int cpufreq_p4_target(struct cpufreq_policy *policy, unsigned int index) cpufreq_p4_target() argument 116 for_each_cpu(i, policy->cpus) cpufreq_p4_target() 168 static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy) cpufreq_p4_cpu_init() argument 170 struct cpuinfo_x86 *c = &cpu_data(policy->cpu); cpufreq_p4_cpu_init() 175 cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu)); cpufreq_p4_cpu_init() 185 has_N44_O17_errata[policy->cpu] = 1; cpufreq_p4_cpu_init() 192 cpufreq_p4_setdc(policy->cpu, DC_DISABLE); cpufreq_p4_cpu_init() 202 if ((i < 2) && (has_N44_O17_errata[policy->cpu])) cpufreq_p4_cpu_init() 208 /* cpuinfo and default policy values */ cpufreq_p4_cpu_init() 212 policy->cpuinfo.transition_latency = 10000001; cpufreq_p4_cpu_init() 214 return cpufreq_table_validate_and_show(policy, &p4clockmod_table[0]); cpufreq_p4_cpu_init()
|
H A D | arm_big_little.c | 193 static int bL_cpufreq_set_target(struct cpufreq_policy *policy, bL_cpufreq_set_target() argument 196 u32 cpu = policy->cpu, cur_cluster, new_cluster, actual_cluster; bL_cpufreq_set_target() 429 static int bL_cpufreq_init(struct cpufreq_policy *policy) bL_cpufreq_init() argument 431 u32 cur_cluster = cpu_to_cluster(policy->cpu); bL_cpufreq_init() 435 cpu_dev = get_cpu_device(policy->cpu); bL_cpufreq_init() 438 policy->cpu); bL_cpufreq_init() 446 ret = cpufreq_table_validate_and_show(policy, freq_table[cur_cluster]); bL_cpufreq_init() 449 policy->cpu, cur_cluster); bL_cpufreq_init() 457 cpumask_copy(policy->cpus, topology_core_cpumask(policy->cpu)); bL_cpufreq_init() 459 for_each_cpu(cpu, policy->cpus) bL_cpufreq_init() 463 per_cpu(physical_cluster, policy->cpu) = A15_CLUSTER; bL_cpufreq_init() 467 policy->cpuinfo.transition_latency = bL_cpufreq_init() 470 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; bL_cpufreq_init() 473 per_cpu(cpu_last_req_freq, policy->cpu) = clk_get_cpu_rate(policy->cpu); bL_cpufreq_init() 475 dev_info(cpu_dev, "%s: CPU %d initialized\n", __func__, policy->cpu); bL_cpufreq_init() 479 static int bL_cpufreq_exit(struct cpufreq_policy *policy) bL_cpufreq_exit() argument 483 cpu_dev = get_cpu_device(policy->cpu); bL_cpufreq_exit() 486 policy->cpu); bL_cpufreq_exit() 491 dev_dbg(cpu_dev, "%s: Exited, cpu: %d\n", __func__, policy->cpu); bL_cpufreq_exit()
|
H A D | ppc_cbe_cpufreq_pmi.c | 96 struct cpufreq_policy *policy = data; pmi_notifier() local 101 * and CPUFREQ_NOTIFY policy events?) pmi_notifier() 106 cbe_freqs = cpufreq_frequency_get_table(policy->cpu); pmi_notifier() 107 node = cbe_cpu_to_node(policy->cpu); pmi_notifier() 115 cpufreq_verify_within_limits(policy, 0, pmi_notifier()
|
H A D | intel_pstate.c | 697 * policy, or by cpu specific default values determined through intel_pstate_get_min_max() 885 #define ICPU(model, policy) \ 887 (unsigned long)&policy } 961 static int intel_pstate_set_policy(struct cpufreq_policy *policy) intel_pstate_set_policy() argument 963 if (!policy->cpuinfo.max_freq) intel_pstate_set_policy() 966 if (policy->policy == CPUFREQ_POLICY_PERFORMANCE && intel_pstate_set_policy() 967 policy->max >= policy->cpuinfo.max_freq) { intel_pstate_set_policy() 978 limits.min_policy_pct = (policy->min * 100) / policy->cpuinfo.max_freq; intel_pstate_set_policy() 983 limits.max_policy_pct = (policy->max * 100) / policy->cpuinfo.max_freq; intel_pstate_set_policy() 994 static int intel_pstate_verify_policy(struct cpufreq_policy *policy) intel_pstate_verify_policy() argument 996 cpufreq_verify_within_cpu_limits(policy); intel_pstate_verify_policy() 998 if (policy->policy != CPUFREQ_POLICY_POWERSAVE && intel_pstate_verify_policy() 999 policy->policy != CPUFREQ_POLICY_PERFORMANCE) intel_pstate_verify_policy() 1005 static void intel_pstate_stop_cpu(struct cpufreq_policy *policy) intel_pstate_stop_cpu() argument 1007 int cpu_num = policy->cpu; intel_pstate_stop_cpu() 1019 static int intel_pstate_cpu_init(struct cpufreq_policy *policy) intel_pstate_cpu_init() argument 1024 rc = intel_pstate_init_cpu(policy->cpu); intel_pstate_cpu_init() 1028 cpu = all_cpu_data[policy->cpu]; intel_pstate_cpu_init() 1031 policy->policy = CPUFREQ_POLICY_PERFORMANCE; intel_pstate_cpu_init() 1033 policy->policy = CPUFREQ_POLICY_POWERSAVE; intel_pstate_cpu_init() 1035 policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling; intel_pstate_cpu_init() 1036 policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling; intel_pstate_cpu_init() 1038 /* cpuinfo and default policy values */ intel_pstate_cpu_init() 1039 policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling; intel_pstate_cpu_init() 1041 policy->cpuinfo.max_freq = limits.turbo_disabled ? intel_pstate_cpu_init() 1043 policy->cpuinfo.max_freq *= cpu->pstate.scaling; intel_pstate_cpu_init() 1045 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; intel_pstate_cpu_init() 1046 cpumask_set_cpu(policy->cpu, policy->cpus); intel_pstate_cpu_init() 1076 static void copy_pid_params(struct pstate_adjust_policy *policy) copy_pid_params() argument 1078 pid_params.sample_rate_ms = policy->sample_rate_ms; copy_pid_params() 1079 pid_params.p_gain_pct = policy->p_gain_pct; copy_pid_params() 1080 pid_params.i_gain_pct = policy->i_gain_pct; copy_pid_params() 1081 pid_params.d_gain_pct = policy->d_gain_pct; copy_pid_params() 1082 pid_params.deadband = policy->deadband; copy_pid_params() 1083 pid_params.setpoint = policy->setpoint; copy_pid_params()
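intel_pstate_set_policy() above converts the policy's kHz bounds into percentages of the maximum frequency, then clamps them to [0, 100] before they reach the P-state selection code. The same computation in isolation:

    #include <linux/cpufreq.h>
    #include <linux/kernel.h>

    /* Turn the policy's min/max (kHz) into clamped percentages of the
     * maximum frequency, as set_policy does above. */
    static void example_percent_limits(struct cpufreq_policy *policy,
                                       int *min_pct, int *max_pct)
    {
            *min_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
            *min_pct = clamp_t(int, *min_pct, 0, 100);

            *max_pct = (policy->max * 100) / policy->cpuinfo.max_freq;
            *max_pct = clamp_t(int, *max_pct, 0, 100);
    }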
|
H A D | powernow-k6.c | 139 static int powernow_k6_target(struct cpufreq_policy *policy, powernow_k6_target() argument 153 static int powernow_k6_cpu_init(struct cpufreq_policy *policy) powernow_k6_cpu_init() argument 159 if (policy->cpu != 0) powernow_k6_cpu_init() 213 /* cpuinfo and default policy values */ 214 policy->cpuinfo.transition_latency = 500000; 216 return cpufreq_table_validate_and_show(policy, clock_ratio); 220 static int powernow_k6_cpu_exit(struct cpufreq_policy *policy) powernow_k6_cpu_exit() argument 228 freqs.old = policy->cur; powernow_k6_cpu_exit() 232 cpufreq_freq_transition_begin(policy, &freqs); powernow_k6_cpu_exit() 233 powernow_k6_target(policy, i); powernow_k6_cpu_exit() 234 cpufreq_freq_transition_end(policy, &freqs, 0); powernow_k6_cpu_exit()
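powernow_k6_cpu_exit() above (and longhaul_exit() further down) shows what a driver must do when it changes frequency outside the core's normal target path, for instance to force a safe speed at module exit: bracket the change with the transition notifiers itself. The bare pattern:

    #include <linux/cpufreq.h>

    /* Frequency change outside the core's target path: the driver must
     * post the transition notifiers itself. */
    static void example_forced_switch(struct cpufreq_policy *policy,
                                      unsigned int new_khz)
    {
            struct cpufreq_freqs freqs = {
                    .old = policy->cur,
                    .new = new_khz,
            };

            cpufreq_freq_transition_begin(policy, &freqs);
            /* ... program the hardware to new_khz here ... */
            cpufreq_freq_transition_end(policy, &freqs, 0); /* 0: no failure */
    }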
|
H A D | pcc-cpufreq.c | 112 static int pcc_cpufreq_verify(struct cpufreq_policy *policy) pcc_cpufreq_verify() argument 114 cpufreq_verify_within_cpu_limits(policy); pcc_cpufreq_verify() 197 static int pcc_cpufreq_target(struct cpufreq_policy *policy, pcc_cpufreq_target() argument 207 cpu = policy->cpu; pcc_cpufreq_target() 215 freqs.old = policy->cur; pcc_cpufreq_target() 217 cpufreq_freq_transition_begin(policy, &freqs); pcc_cpufreq_target() 234 cpufreq_freq_transition_end(policy, &freqs, status != CMD_COMPLETE); pcc_cpufreq_target() 537 static int pcc_cpufreq_cpu_init(struct cpufreq_policy *policy) pcc_cpufreq_cpu_init() argument 539 unsigned int cpu = policy->cpu; pcc_cpufreq_cpu_init() 553 policy->max = policy->cpuinfo.max_freq = pcc_cpufreq_cpu_init() 555 policy->min = policy->cpuinfo.min_freq = pcc_cpufreq_cpu_init() 558 pr_debug("init: policy->max is %d, policy->min is %d\n", pcc_cpufreq_cpu_init() 559 policy->max, policy->min); pcc_cpufreq_cpu_init() 564 static int pcc_cpufreq_cpu_exit(struct cpufreq_policy *policy) pcc_cpufreq_cpu_exit() argument
|
H A D | pasemi-cpufreq.c | 137 static int pas_cpufreq_cpu_init(struct cpufreq_policy *policy) pas_cpufreq_cpu_init() argument 147 cpu = of_get_cpu_node(policy->cpu, NULL); pas_cpufreq_cpu_init() 186 pr_debug("init cpufreq on CPU %d\n", policy->cpu); pas_cpufreq_cpu_init() 206 cur_astate = get_cur_astate(policy->cpu); 209 policy->cur = pas_freqs[cur_astate].frequency; 210 ppc_proc_freq = policy->cur * 1000ul; 212 return cpufreq_generic_init(policy, pas_freqs, get_gizmo_latency()); 223 static int pas_cpufreq_cpu_exit(struct cpufreq_policy *policy) pas_cpufreq_cpu_exit() argument 240 static int pas_cpufreq_target(struct cpufreq_policy *policy, pas_cpufreq_target() argument 246 policy->cpu, pas_cpufreq_target()
|
H A D | speedstep-smi.c | 216 * speedstep_target - set a new CPUFreq policy 217 * @policy: new policy 220 * Sets a new CPUFreq policy/freq. 222 static int speedstep_target(struct cpufreq_policy *policy, unsigned int index) speedstep_target() argument 230 static int speedstep_cpu_init(struct cpufreq_policy *policy) speedstep_cpu_init() argument 236 if (policy->cpu != 0) speedstep_cpu_init() 268 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; speedstep_cpu_init() 269 return cpufreq_table_validate_and_show(policy, speedstep_freqs); speedstep_cpu_init() 280 static int speedstep_resume(struct cpufreq_policy *policy) speedstep_resume() argument
|
H A D | e_powersaver.c | 78 static int eps_acpi_exit(struct cpufreq_policy *policy) eps_acpi_exit() argument 107 struct cpufreq_policy *policy, eps_set_state() 154 static int eps_target(struct cpufreq_policy *policy, unsigned int index) eps_target() argument 157 unsigned int cpu = policy->cpu; eps_target() 167 ret = eps_set_state(centaur, policy, dest_state); eps_target() 173 static int eps_cpu_init(struct cpufreq_policy *policy) eps_cpu_init() argument 193 if (policy->cpu != 0) eps_cpu_init() 299 if (!acpi_processor_get_bios_limit(policy->cpu, &limit)) { eps_cpu_init() 303 eps_acpi_exit(policy); eps_cpu_init() 369 policy->cpuinfo.transition_latency = 140000; /* 844mV -> 700mV in ns */ eps_cpu_init() 371 ret = cpufreq_table_validate_and_show(policy, ¢aur->freq_table[0]); eps_cpu_init() 380 static int eps_cpu_exit(struct cpufreq_policy *policy) eps_cpu_exit() argument 382 unsigned int cpu = policy->cpu; eps_cpu_exit() 106 eps_set_state(struct eps_cpu_data *centaur, struct cpufreq_policy *policy, u32 dest_state) eps_set_state() argument
|
H A D | blackfin-cpufreq.c | 130 static int bfin_target(struct cpufreq_policy *policy, unsigned int index) bfin_target() argument 151 ret = cpu_set_cclk(policy->cpu, new_freq * 1000); bfin_target() 177 static int __bfin_cpu_init(struct cpufreq_policy *policy) __bfin_cpu_init() argument 185 if (policy->cpu == CPUFREQ_CPU) __bfin_cpu_init() 188 policy->cpuinfo.transition_latency = 50000; /* 50us assumed */ __bfin_cpu_init() 190 return cpufreq_table_validate_and_show(policy, bfin_freq_table); __bfin_cpu_init()
|
H A D | pxa2xx-cpufreq.c | 86 /* Use the run mode frequencies for the CPUFREQ_POLICY_PERFORMANCE policy */ 100 /* Use the turbo mode frequencies for the CPUFREQ_POLICY_POWERSAVE policy */ 270 static int pxa_set_target(struct cpufreq_policy *policy, unsigned int idx) pxa_set_target() argument 279 /* Get the current policy */ pxa_set_target() 290 if (vcc_core && new_freq_cpu > policy->cur) { pxa_set_target() 353 if (vcc_core && new_freq_cpu < policy->cur) pxa_set_target() 359 static int pxa_cpufreq_init(struct cpufreq_policy *policy) pxa_cpufreq_init() argument 374 /* set default policy and cpuinfo */ pxa_cpufreq_init() 375 policy->cpuinfo.transition_latency = 1000; /* FIXME: 1 ms, assumed */ pxa_cpufreq_init() 406 * Set the policy's minimum and maximum frequencies from the tables pxa_cpufreq_init() 414 cpufreq_table_validate_and_show(policy, pxa255_freq_table); pxa_cpufreq_init() 417 cpufreq_table_validate_and_show(policy, pxa27x_freq_table); pxa_cpufreq_init()
|
H A D | sparc-us2e-cpufreq.c | 248 static int us2e_freq_target(struct cpufreq_policy *policy, unsigned int index) us2e_freq_target() argument 250 unsigned int cpu = policy->cpu; us2e_freq_target() 276 static int __init us2e_freq_cpu_init(struct cpufreq_policy *policy) us2e_freq_cpu_init() argument 278 unsigned int cpu = policy->cpu; us2e_freq_cpu_init() 296 policy->cpuinfo.transition_latency = 0; us2e_freq_cpu_init() 297 policy->cur = clock_tick; us2e_freq_cpu_init() 299 return cpufreq_table_validate_and_show(policy, table); us2e_freq_cpu_init() 302 static int us2e_freq_cpu_exit(struct cpufreq_policy *policy) us2e_freq_cpu_exit() argument 305 us2e_freq_target(policy, 0); us2e_freq_cpu_exit()
|
H A D | exynos5440-cpufreq.c | 211 static int exynos_target(struct cpufreq_policy *policy, unsigned int index) exynos_target() argument 219 freqs.old = policy->cur; exynos_target() 222 cpufreq_freq_transition_begin(policy, &freqs); exynos_target() 225 for_each_cpu(i, policy->cpus) { exynos_target() 239 struct cpufreq_policy *policy = cpufreq_cpu_get(0); /* boot CPU */ exynos_cpufreq_work() local 247 freqs.old = policy->cur; exynos_cpufreq_work() 261 cpufreq_freq_transition_end(policy, &freqs, 0); exynos_cpufreq_work() 263 cpufreq_cpu_put(policy); exynos_cpufreq_work() 301 static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy) exynos_cpufreq_cpu_init() argument 303 policy->clk = dvfs_info->cpu_clk; exynos_cpufreq_cpu_init() 304 return cpufreq_generic_init(policy, dvfs_info->freq_table, exynos_cpufreq_cpu_init()
|
H A D | powernv-cpufreq.c | 159 static ssize_t cpuinfo_nominal_freq_show(struct cpufreq_policy *policy, cpuinfo_nominal_freq_show() argument 342 * mask policy->cpus 344 static int powernv_cpufreq_target_index(struct cpufreq_policy *policy, powernv_cpufreq_target_index() argument 360 * if current CPU is within policy->cpus (core) powernv_cpufreq_target_index() 362 smp_call_function_any(policy->cpus, set_pstate, &freq_data, 1); powernv_cpufreq_target_index() 367 static int powernv_cpufreq_cpu_init(struct cpufreq_policy *policy) powernv_cpufreq_cpu_init() argument 371 base = cpu_first_thread_sibling(policy->cpu); powernv_cpufreq_cpu_init() 374 cpumask_set_cpu(base + i, policy->cpus); powernv_cpufreq_cpu_init() 376 return cpufreq_table_validate_and_show(policy, powernv_freqs); powernv_cpufreq_cpu_init() 398 static void powernv_cpufreq_stop_cpu(struct cpufreq_policy *policy) powernv_cpufreq_stop_cpu() argument 403 smp_call_function_single(policy->cpu, set_pstate, &freq_data, 1); powernv_cpufreq_stop_cpu()
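powernv_cpufreq_target_index() above uses smp_call_function_any() because the P-state control register can only be written from a thread of the core being changed, and any CPU in policy->cpus will do. A sketch of the dispatch, with example_* names and the actual register write stubbed out:

    #include <linux/cpufreq.h>
    #include <linux/smp.h>

    struct example_freq_data {
            unsigned int pstate_id;
    };

    /* Runs on one CPU inside policy->cpus; this is where the real driver
     * writes the power management control register. */
    static void example_set_pstate(void *data)
    {
            struct example_freq_data *freq_data = data;

            (void)freq_data;        /* register write stubbed out */
    }

    static int example_target_index(struct cpufreq_policy *policy,
                                    struct example_freq_data *freq_data)
    {
            /* Any thread of the affected core may perform the write. */
            smp_call_function_any(policy->cpus, example_set_pstate,
                                  freq_data, 1);
            return 0;
    }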
|
H A D | elanfreq.c | 108 static int elanfreq_target(struct cpufreq_policy *policy, elanfreq_target() argument 147 static int elanfreq_cpu_init(struct cpufreq_policy *policy) elanfreq_cpu_init() argument 166 /* cpuinfo and default policy values */ elanfreq_cpu_init() 167 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; elanfreq_cpu_init() 169 return cpufreq_table_validate_and_show(policy, elanfreq_table); elanfreq_cpu_init()
|
H A D | s3c24xx-cpufreq.c | 153 static int s3c_cpufreq_settarget(struct cpufreq_policy *policy, s3c_cpufreq_settarget() argument 215 cpufreq_freq_transition_begin(policy, &freqs.freqs); s3c_cpufreq_settarget() 259 cpufreq_freq_transition_end(policy, &freqs.freqs, 0); s3c_cpufreq_settarget() 275 static int s3c_cpufreq_target(struct cpufreq_policy *policy, s3c_cpufreq_target() argument 290 s3c_freq_dbg("%s: policy %p, target %u, relation %u\n", s3c_cpufreq_target() 291 __func__, policy, target_freq, relation); s3c_cpufreq_target() 294 if (cpufreq_frequency_table_target(policy, ftab, s3c_cpufreq_target() 321 tmp_policy.min = policy->min * 1000; s3c_cpufreq_target() 322 tmp_policy.max = policy->max * 1000; s3c_cpufreq_target() 323 tmp_policy.cpu = policy->cpu; s3c_cpufreq_target() 346 return s3c_cpufreq_settarget(policy, target_freq, pll); s3c_cpufreq_target() 364 static int s3c_cpufreq_init(struct cpufreq_policy *policy) s3c_cpufreq_init() argument 366 policy->clk = clk_arm; s3c_cpufreq_init() 367 return cpufreq_generic_init(policy, ftab, cpu_cur.info->latency); s3c_cpufreq_init() 398 static int s3c_cpufreq_suspend(struct cpufreq_policy *policy) s3c_cpufreq_suspend() argument 407 static int s3c_cpufreq_resume(struct cpufreq_policy *policy) s3c_cpufreq_resume() argument 411 s3c_freq_dbg("%s: resuming with policy %p\n", __func__, policy); s3c_cpufreq_resume()
|
H A D | s5pv210-cpufreq.c | 224 static int s5pv210_target(struct cpufreq_policy *policy, unsigned int index) s5pv210_target() argument 243 old_freq = policy->cur; s5pv210_target() 247 if (cpufreq_frequency_table_target(policy, s5pv210_freq_table, s5pv210_target() 504 static int s5pv210_cpu_init(struct cpufreq_policy *policy) s5pv210_cpu_init() argument 509 policy->clk = clk_get(NULL, "armclk"); s5pv210_cpu_init() 510 if (IS_ERR(policy->clk)) s5pv210_cpu_init() 511 return PTR_ERR(policy->clk); s5pv210_cpu_init() 525 if (policy->cpu != 0) { s5pv210_cpu_init() 549 policy->suspend_freq = SLEEP_FREQ; s5pv210_cpu_init() 550 return cpufreq_generic_init(policy, s5pv210_freq_table, 40000); s5pv210_cpu_init() 555 clk_put(policy->clk); s5pv210_cpu_init()
|
H A D | kirkwood-cpufreq.c | 54 static int kirkwood_cpufreq_target(struct cpufreq_policy *policy, kirkwood_cpufreq_target() argument 90 static int kirkwood_cpufreq_cpu_init(struct cpufreq_policy *policy) kirkwood_cpufreq_cpu_init() argument 92 return cpufreq_generic_init(policy, kirkwood_freq_table, 5000); kirkwood_cpufreq_cpu_init()
|
H A D | s3c2416-cpufreq.c | 219 static int s3c2416_cpufreq_set_target(struct cpufreq_policy *policy, s3c2416_cpufreq_set_target() argument 336 static int s3c2416_cpufreq_driver_init(struct cpufreq_policy *policy) s3c2416_cpufreq_driver_init() argument 344 if (policy->cpu != 0) s3c2416_cpufreq_driver_init() 364 policy->cpuinfo.max_freq = 400000; s3c2416_cpufreq_driver_init() 369 policy->cpuinfo.max_freq = 534000; s3c2416_cpufreq_driver_init() 454 ret = cpufreq_generic_init(policy, s3c_freq->freq_table, s3c2416_cpufreq_driver_init()
|
H A D | imx6q-cpufreq.c | 41 static int imx6q_set_target(struct cpufreq_policy *policy, unsigned int index) imx6q_set_target() argument 139 static int imx6q_cpufreq_init(struct cpufreq_policy *policy) imx6q_cpufreq_init() argument 141 policy->clk = arm_clk; imx6q_cpufreq_init() 142 return cpufreq_generic_init(policy, freq_table, transition_latency); imx6q_cpufreq_init()
|
H A D | sa1110-cpufreq.c | 232 static int sa1110_target(struct cpufreq_policy *policy, unsigned int ppcr) sa1110_target() argument 247 if (policy->max < 147500) { sa1110_target() 307 static int __init sa1110_cpu_init(struct cpufreq_policy *policy) sa1110_cpu_init() argument 309 return cpufreq_generic_init(policy, sa11x0_freq_table, CPUFREQ_ETERNAL); sa1110_cpu_init()
|
H A D | spear-cpufreq.c | 103 static int spear_cpufreq_target(struct cpufreq_policy *policy, spear_cpufreq_target() argument 153 static int spear_cpufreq_init(struct cpufreq_policy *policy) spear_cpufreq_init() argument 155 policy->clk = spear_cpufreq.clk; spear_cpufreq_init() 156 return cpufreq_generic_init(policy, spear_cpufreq.freq_tbl, spear_cpufreq_init()
|
H A D | pmac32-cpufreq.c | 334 static int do_set_cpu_speed(struct cpufreq_policy *policy, int speed_mode) do_set_cpu_speed() argument 364 static int pmac_cpufreq_target( struct cpufreq_policy *policy, pmac_cpufreq_target() argument 369 rc = do_set_cpu_speed(policy, index); pmac_cpufreq_target() 375 static int pmac_cpufreq_cpu_init(struct cpufreq_policy *policy) pmac_cpufreq_cpu_init() argument 377 return cpufreq_generic_init(policy, pmac_cpu_freqs, transition_latency); pmac_cpufreq_cpu_init() 399 static int pmac_cpufreq_suspend(struct cpufreq_policy *policy) pmac_cpufreq_suspend() argument 411 do_set_cpu_speed(policy, CPUFREQ_HIGH); pmac_cpufreq_suspend() 415 static int pmac_cpufreq_resume(struct cpufreq_policy *policy) pmac_cpufreq_resume() argument 427 do_set_cpu_speed(policy, sleep_freq == low_freq ? pmac_cpufreq_resume()
|
H A D | longhaul.c | 245 static int longhaul_setstate(struct cpufreq_policy *policy, longhaul_setstate() argument 629 static int longhaul_target(struct cpufreq_policy *policy, longhaul_target() argument 638 retval = longhaul_setstate(policy, table_index); longhaul_target() 653 retval = longhaul_setstate(policy, i); longhaul_target() 662 retval = longhaul_setstate(policy, table_index); longhaul_target() 771 static int longhaul_cpu_init(struct cpufreq_policy *policy) longhaul_cpu_init() argument 909 policy->cpuinfo.transition_latency = 200000; /* nsec */ longhaul_cpu_init() 911 return cpufreq_table_validate_and_show(policy, longhaul_table); longhaul_cpu_init() 969 struct cpufreq_policy *policy = cpufreq_cpu_get(0); longhaul_exit() local 976 freqs.old = policy->cur; longhaul_exit() 980 cpufreq_freq_transition_begin(policy, &freqs); longhaul_exit() 981 longhaul_setstate(policy, i); longhaul_exit() 982 cpufreq_freq_transition_end(policy, &freqs, 0); longhaul_exit() 987 cpufreq_cpu_put(policy); longhaul_exit()
|
H A D | powernow-k7.c | 250 static int powernow_target(struct cpufreq_policy *policy, unsigned int index) powernow_target() argument 605 static int powernow_cpu_init(struct cpufreq_policy *policy) powernow_cpu_init() argument 610 if (policy->cpu != 0) powernow_cpu_init() 654 policy->cpuinfo.transition_latency = powernow_cpu_init() 657 return cpufreq_table_validate_and_show(policy, powernow_table); powernow_cpu_init() 660 static int powernow_cpu_exit(struct cpufreq_policy *policy) powernow_cpu_exit() argument
|
H A D | maple-cpufreq.c | 131 static int maple_cpufreq_target(struct cpufreq_policy *policy, maple_cpufreq_target() argument 142 static int maple_cpufreq_cpu_init(struct cpufreq_policy *policy) maple_cpufreq_cpu_init() argument 144 return cpufreq_generic_init(policy, maple_cpu_freqs, 12000); maple_cpufreq_cpu_init()
|
H A D | sa1100-cpufreq.c | 180 static int sa1100_target(struct cpufreq_policy *policy, unsigned int ppcr) sa1100_target() argument 198 static int __init sa1100_cpu_init(struct cpufreq_policy *policy) sa1100_cpu_init() argument 200 return cpufreq_generic_init(policy, sa11x0_freq_table, CPUFREQ_ETERNAL); sa1100_cpu_init()
|
/linux-4.1.27/scripts/selinux/mdp/ |
H A D | Makefile | 5 clean-files := policy.* file_contexts
|
H A D | mdp.c | 3 * mdp - make dummy policy 5 * When pointed at a kernel tree, builds a dummy policy for that kernel 139 printf("Wrote policy, but cannot open %s for writing\n", ctxout); main()
|
/linux-4.1.27/drivers/staging/lustre/lustre/ptlrpc/ |
H A D | nrs.c | 59 static int nrs_policy_init(struct ptlrpc_nrs_policy *policy) nrs_policy_init() argument 61 return policy->pol_desc->pd_ops->op_policy_init != NULL ? nrs_policy_init() 62 policy->pol_desc->pd_ops->op_policy_init(policy) : 0; nrs_policy_init() 65 static void nrs_policy_fini(struct ptlrpc_nrs_policy *policy) nrs_policy_fini() argument 67 LASSERT(policy->pol_ref == 0); nrs_policy_fini() 68 LASSERT(policy->pol_req_queued == 0); nrs_policy_fini() 70 if (policy->pol_desc->pd_ops->op_policy_fini != NULL) nrs_policy_fini() 71 policy->pol_desc->pd_ops->op_policy_fini(policy); nrs_policy_fini() 74 static int nrs_policy_ctl_locked(struct ptlrpc_nrs_policy *policy, nrs_policy_ctl_locked() argument 78 * The policy may be stopped, but the lprocfs files and nrs_policy_ctl_locked() 80 * Do not perform the ctl operation if the policy is stopped, as nrs_policy_ctl_locked() 81 * policy->pol_private will be NULL in such a case. nrs_policy_ctl_locked() 83 if (policy->pol_state == NRS_POL_STATE_STOPPED) nrs_policy_ctl_locked() 86 return policy->pol_desc->pd_ops->op_policy_ctl != NULL ? nrs_policy_ctl_locked() 87 policy->pol_desc->pd_ops->op_policy_ctl(policy, opc, arg) : nrs_policy_ctl_locked() 91 static void nrs_policy_stop0(struct ptlrpc_nrs_policy *policy) nrs_policy_stop0() argument 93 struct ptlrpc_nrs *nrs = policy->pol_nrs; nrs_policy_stop0() 95 if (policy->pol_desc->pd_ops->op_policy_stop != NULL) { nrs_policy_stop0() 98 policy->pol_desc->pd_ops->op_policy_stop(policy); nrs_policy_stop0() 103 LASSERT(list_empty(&policy->pol_list_queued)); nrs_policy_stop0() 104 LASSERT(policy->pol_req_queued == 0 && nrs_policy_stop0() 105 policy->pol_req_started == 0); nrs_policy_stop0() 107 policy->pol_private = NULL; nrs_policy_stop0() 109 policy->pol_state = NRS_POL_STATE_STOPPED; nrs_policy_stop0() 111 if (atomic_dec_and_test(&policy->pol_desc->pd_refs)) nrs_policy_stop0() 112 module_put(policy->pol_desc->pd_owner); nrs_policy_stop0() 115 static int nrs_policy_stop_locked(struct ptlrpc_nrs_policy *policy) nrs_policy_stop_locked() argument 117 struct ptlrpc_nrs *nrs = policy->pol_nrs; nrs_policy_stop_locked() 119 if (nrs->nrs_policy_fallback == policy && !nrs->nrs_stopping) nrs_policy_stop_locked() 122 if (policy->pol_state == NRS_POL_STATE_STARTING) nrs_policy_stop_locked() 126 if (policy->pol_state != NRS_POL_STATE_STARTED) nrs_policy_stop_locked() 129 policy->pol_state = NRS_POL_STATE_STOPPING; nrs_policy_stop_locked() 132 if (nrs->nrs_policy_primary == policy) { nrs_policy_stop_locked() 136 LASSERT(nrs->nrs_policy_fallback == policy); nrs_policy_stop_locked() 141 if (policy->pol_ref == 1) nrs_policy_stop_locked() 142 nrs_policy_stop0(policy); nrs_policy_stop_locked() 148 * Transitions the \a nrs NRS head's primary policy to 149 * ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPING and if the policy has no 171 * Transitions a policy across the ptlrpc_nrs_pol_state range of values, in 172 * response to an lprocfs command to start a policy. 174 * If a primary policy different to the current one is specified, this function 175 * will transition the new policy to the 178 * the old primary policy (if there is one) to 180 * references on the policy to ptlrpc_nrs_pol_stae::NRS_POL_STATE_STOPPED. 182 * If the fallback policy is specified, this is taken to indicate an instruction 183 * to stop the current primary policy, without substituting it with another 184 * primary policy, so the primary policy (if any) is transitioned to 186 * references on the policy to ptlrpc_nrs_pol_stae::NRS_POL_STATE_STOPPED. 
In 187 * this case, the fallback policy is only left active in the NRS head. 189 static int nrs_policy_start_locked(struct ptlrpc_nrs_policy *policy) nrs_policy_start_locked() argument 191 struct ptlrpc_nrs *nrs = policy->pol_nrs; nrs_policy_start_locked() 201 LASSERT(policy->pol_state != NRS_POL_STATE_STARTING); nrs_policy_start_locked() 203 if (policy->pol_state == NRS_POL_STATE_STOPPING) nrs_policy_start_locked() 206 if (policy->pol_flags & PTLRPC_NRS_FL_FALLBACK) { nrs_policy_start_locked() 208 * This is for cases in which the user sets the policy to the nrs_policy_start_locked() 209 * fallback policy (currently fifo for all services); i.e. the nrs_policy_start_locked() 210 * user is resetting the policy to the default; so we stop the nrs_policy_start_locked() 211 * primary policy, if any. nrs_policy_start_locked() 213 if (policy == nrs->nrs_policy_fallback) { nrs_policy_start_locked() 219 * If we reach here, we must be setting up the fallback policy nrs_policy_start_locked() 220 * at service startup time, and only a single policy with the nrs_policy_start_locked() 227 * Shouldn't start primary policy if w/o fallback policy. nrs_policy_start_locked() 232 if (policy->pol_state == NRS_POL_STATE_STARTED) nrs_policy_start_locked() 240 if (atomic_inc_return(&policy->pol_desc->pd_refs) == 1 && nrs_policy_start_locked() 241 !try_module_get(policy->pol_desc->pd_owner)) { nrs_policy_start_locked() 242 atomic_dec(&policy->pol_desc->pd_refs); nrs_policy_start_locked() 243 CERROR("NRS: cannot get module for policy %s; is it alive?\n", nrs_policy_start_locked() 244 policy->pol_desc->pd_name); nrs_policy_start_locked() 249 * Serialize policy starting across the NRS head nrs_policy_start_locked() 253 policy->pol_state = NRS_POL_STATE_STARTING; nrs_policy_start_locked() 255 if (policy->pol_desc->pd_ops->op_policy_start) { nrs_policy_start_locked() 258 rc = policy->pol_desc->pd_ops->op_policy_start(policy); nrs_policy_start_locked() 262 if (atomic_dec_and_test(&policy->pol_desc->pd_refs)) nrs_policy_start_locked() 263 module_put(policy->pol_desc->pd_owner); nrs_policy_start_locked() 265 policy->pol_state = NRS_POL_STATE_STOPPED; nrs_policy_start_locked() 270 policy->pol_state = NRS_POL_STATE_STARTED; nrs_policy_start_locked() 272 if (policy->pol_flags & PTLRPC_NRS_FL_FALLBACK) { nrs_policy_start_locked() 276 nrs->nrs_policy_fallback = policy; nrs_policy_start_locked() 279 * Try to stop the current primary policy if there is one. nrs_policy_start_locked() 284 * And set the newly-started policy as the primary one. nrs_policy_start_locked() 286 nrs->nrs_policy_primary = policy; nrs_policy_start_locked() 296 * Increases the policy's usage reference count. 
298 static inline void nrs_policy_get_locked(struct ptlrpc_nrs_policy *policy) nrs_policy_get_locked() argument 300 policy->pol_ref++; nrs_policy_get_locked() 304 * Decreases the policy's usage reference count, and stops the policy in case it 309 static void nrs_policy_put_locked(struct ptlrpc_nrs_policy *policy) nrs_policy_put_locked() argument 311 LASSERT(policy->pol_ref > 0); nrs_policy_put_locked() 313 policy->pol_ref--; nrs_policy_put_locked() 314 if (unlikely(policy->pol_ref == 0 && nrs_policy_put_locked() 315 policy->pol_state == NRS_POL_STATE_STOPPING)) nrs_policy_put_locked() 316 nrs_policy_stop0(policy); nrs_policy_put_locked() 319 static void nrs_policy_put(struct ptlrpc_nrs_policy *policy) nrs_policy_put() argument 321 spin_lock(&policy->pol_nrs->nrs_lock); nrs_policy_put() 322 nrs_policy_put_locked(policy); nrs_policy_put() 323 spin_unlock(&policy->pol_nrs->nrs_lock); nrs_policy_put() 327 * Find and return a policy by name. 346 * policy instance resource. 350 struct ptlrpc_nrs_policy *policy = res->res_policy; nrs_resource_put() local 352 if (policy->pol_desc->pd_ops->op_res_put != NULL) { nrs_resource_put() 357 policy->pol_desc->pd_ops->op_res_put(policy, res); nrs_resource_put() 364 * \a nrq if it is to be handled by \a policy. 366 * \param[in] policy the policy 379 struct ptlrpc_nrs_resource *nrs_resource_get(struct ptlrpc_nrs_policy *policy, nrs_resource_get() argument 391 rc = policy->pol_desc->pd_ops->op_res_get(policy, nrq, res, nrs_resource_get() 401 tmp->res_policy = policy; nrs_resource_get() 414 * Obtains resources for the resource hierarchies and policy references for 415 * the fallback and current primary policy (if any), that will later be used 423 * request from a policy on the regular NRS head to a 424 * policy on the HP NRS head (via 441 * Obtain policy references. nrs_resource_get_safe() 464 * A primary policy may exist which may not wish to serve a nrs_resource_get_safe() 466 * reference on the policy as it will not be used for this nrs_resource_get_safe() 516 * Obtains an NRS request from \a policy for handling or examination; the 519 * Calling into this function implies we already know the policy has a request 522 * \param[in] policy the policy from which a request 525 * from the policy. 
526 * \param[in] force when set, it will force a policy to return a request if it 532 struct ptlrpc_nrs_request *nrs_request_get(struct ptlrpc_nrs_policy *policy, nrs_request_get() argument 537 LASSERT(policy->pol_req_queued > 0); nrs_request_get() 539 nrq = policy->pol_desc->pd_ops->op_req_get(policy, peek, force); nrs_request_get() 541 LASSERT(ergo(nrq != NULL, nrs_request_policy(nrq) == policy)); nrs_request_get() 549 * function attempts to enqueue the request first on the primary policy 558 struct ptlrpc_nrs_policy *policy; nrs_request_enqueue() local 563 * Try in descending order, because the primary policy (if any) is nrs_request_enqueue() 571 policy = nrq->nr_res_ptrs[i]->res_policy; nrs_request_enqueue() 573 rc = policy->pol_desc->pd_ops->op_req_enqueue(policy, nrq); nrs_request_enqueue() 575 policy->pol_nrs->nrs_req_queued++; nrs_request_enqueue() 576 policy->pol_req_queued++; nrs_request_enqueue() 581 * Should never get here, as at least the primary policy's nrs_request_enqueue() 598 struct ptlrpc_nrs_policy *policy = nrs_request_policy(nrq); nrs_request_stop() local 600 if (policy->pol_desc->pd_ops->op_req_stop) nrs_request_stop() 601 policy->pol_desc->pd_ops->op_req_stop(policy, nrq); nrs_request_stop() 603 LASSERT(policy->pol_nrs->nrs_req_started > 0); nrs_request_stop() 604 LASSERT(policy->pol_req_started > 0); nrs_request_stop() 606 policy->pol_nrs->nrs_req_started--; nrs_request_stop() 607 policy->pol_req_started--; nrs_request_stop() 613 * Handles opcodes that are common to all policy types within NRS core, and 614 * passes any unknown opcodes to the policy-specific control function. 616 * \param[in] nrs the NRS head this policy belongs to. 617 * \param[in] name the human-readable policy name; should be the same as 622 * the policy at some level, or generic policy status 631 struct ptlrpc_nrs_policy *policy; nrs_policy_ctl() local 636 policy = nrs_policy_find_locked(nrs, name); nrs_policy_ctl() 637 if (policy == NULL) { nrs_policy_ctl() 644 * Unknown opcode, pass it down to the policy-specific control nrs_policy_ctl() 648 rc = nrs_policy_ctl_locked(policy, opc, arg); nrs_policy_ctl() 652 * Start \e policy nrs_policy_ctl() 655 rc = nrs_policy_start_locked(policy); nrs_policy_ctl() 659 if (policy != NULL) nrs_policy_ctl() 660 nrs_policy_put_locked(policy); nrs_policy_ctl() 668 * Unregisters a policy by name. 670 * \param[in] nrs the NRS head this policy belongs to. 
671 * \param[in] name the human-readable policy name; should be the same as 679 struct ptlrpc_nrs_policy *policy = NULL; nrs_policy_unregister() local 683 policy = nrs_policy_find_locked(nrs, name); nrs_policy_unregister() 684 if (policy == NULL) { nrs_policy_unregister() 687 CERROR("Can't find NRS policy %s\n", name); nrs_policy_unregister() 691 if (policy->pol_ref > 1) { nrs_policy_unregister() 693 (int)policy->pol_ref); nrs_policy_unregister() 694 nrs_policy_put_locked(policy); nrs_policy_unregister() 700 LASSERT(policy->pol_req_queued == 0); nrs_policy_unregister() 701 LASSERT(policy->pol_req_started == 0); nrs_policy_unregister() 703 if (policy->pol_state != NRS_POL_STATE_STOPPED) { nrs_policy_unregister() 704 nrs_policy_stop_locked(policy); nrs_policy_unregister() 705 LASSERT(policy->pol_state == NRS_POL_STATE_STOPPED); nrs_policy_unregister() 708 list_del(&policy->pol_list); nrs_policy_unregister() 711 nrs_policy_put_locked(policy); nrs_policy_unregister() 715 nrs_policy_fini(policy); nrs_policy_unregister() 717 LASSERT(policy->pol_private == NULL); nrs_policy_unregister() 718 OBD_FREE_PTR(policy); nrs_policy_unregister() 724 * Register a policy from \policy descriptor \a desc with NRS head \a nrs. 726 * \param[in] nrs the NRS head on which the policy will be registered. 727 * \param[in] desc the policy descriptor from which the information will be 728 * obtained to register the policy. 736 struct ptlrpc_nrs_policy *policy; nrs_policy_register() local 749 OBD_CPT_ALLOC_GFP(policy, svcpt->scp_service->srv_cptable, nrs_policy_register() 750 svcpt->scp_cpt, sizeof(*policy), GFP_NOFS); nrs_policy_register() 751 if (policy == NULL) nrs_policy_register() 754 policy->pol_nrs = nrs; nrs_policy_register() 755 policy->pol_desc = desc; nrs_policy_register() 756 policy->pol_state = NRS_POL_STATE_STOPPED; nrs_policy_register() 757 policy->pol_flags = desc->pd_flags; nrs_policy_register() 759 INIT_LIST_HEAD(&policy->pol_list); nrs_policy_register() 760 INIT_LIST_HEAD(&policy->pol_list_queued); nrs_policy_register() 762 rc = nrs_policy_init(policy); nrs_policy_register() 764 OBD_FREE_PTR(policy); nrs_policy_register() 770 tmp = nrs_policy_find_locked(nrs, policy->pol_desc->pd_name); nrs_policy_register() 772 CERROR("NRS policy %s has been registered, can't register it for %s\n", nrs_policy_register() 773 policy->pol_desc->pd_name, nrs_policy_register() 778 nrs_policy_fini(policy); nrs_policy_register() 779 OBD_FREE_PTR(policy); nrs_policy_register() 784 list_add_tail(&policy->pol_list, &nrs->nrs_policy_list); nrs_policy_register() 787 if (policy->pol_flags & PTLRPC_NRS_FL_REG_START) nrs_policy_register() 788 rc = nrs_policy_start_locked(policy); nrs_policy_register() 793 (void) nrs_policy_unregister(nrs, policy->pol_desc->pd_name); nrs_policy_register() 806 struct ptlrpc_nrs_policy *policy; ptlrpc_nrs_req_add_nolock() local 814 policy = nrs_request_policy(&req->rq_nrq); ptlrpc_nrs_req_add_nolock() 816 * Add the policy to the NRS head's list of policies with enqueued ptlrpc_nrs_req_add_nolock() 819 if (unlikely(list_empty(&policy->pol_list_queued))) ptlrpc_nrs_req_add_nolock() 820 list_add_tail(&policy->pol_list_queued, ptlrpc_nrs_req_add_nolock() 821 &policy->pol_nrs->nrs_policy_queued); ptlrpc_nrs_req_add_nolock() 842 * Returns a boolean predicate indicating whether the policy described by 846 * \param[in] desc the policy descriptor 848 * \retval false the policy is not compatible with the service 849 * \retval true the policy is compatible with the service 884 CERROR("Failed to register NRS 
policy %s for partition %d of service %s: %d\n", nrs_register_policies_locked() 990 struct ptlrpc_nrs_policy *policy; nrs_svcpt_cleanup_locked() local 1001 list_for_each_entry_safe(policy, tmp, &nrs->nrs_policy_list, nrs_svcpt_cleanup_locked() 1003 rc = nrs_policy_unregister(nrs, policy->pol_desc->pd_name); nrs_svcpt_cleanup_locked() 1020 * Returns the descriptor for a policy as identified by by \a name. 1022 * \param[in] name the policy name 1024 * \retval the policy descriptor 1039 * Removes the policy from all supported NRS heads of all partitions of all 1042 * \param[in] desc the policy descriptor to unregister 1045 * \retval 0 successfully unregistered policy on all supported NRS heads 1074 * Ignore -ENOENT as the policy may not have registered ptlrpc_service_for_each_part() 1080 CERROR("Failed to unregister NRS policy %s for partition %d of service %s: %d\n", ptlrpc_service_for_each_part() 1100 * Registers a new policy with NRS core. 1102 * The function will only succeed if policy registration with all compatible 1106 * time when registering a policy that ships with NRS core, or in a 1109 * \param[in] conf configuration information for the new policy to register 1133 * registration might fail. In such a case, some policy instances may ptlrpc_nrs_policy_register() 1136 * from a policy unless the service is unregistering, we just disallow ptlrpc_nrs_policy_register() 1142 CERROR("NRS: failing to register policy %s. Please check policy flags; external policies cannot act as fallback policies, or be started immediately upon registration without interaction with lprocfs\n", ptlrpc_nrs_policy_register() 1150 CERROR("NRS: failing to register policy %s which has already been registered with NRS core!\n", ptlrpc_nrs_policy_register() 1173 * ptlrpc), do not register the policy with all compatible services, ptlrpc_nrs_policy_register() 1183 * Register the new policy on all compatible services ptlrpc_nrs_policy_register() 1203 CERROR("Failed to register NRS policy %s for partition %d of service %s: %d\n", ptlrpc_service_for_each_part() 1253 * Unregisters a previously registered policy with NRS core. All instances of 1254 * the policy on all NRS heads of all supported services are removed. 
1261 * \param[in] conf configuration information for the policy to unregister 1274 CERROR("Unable to unregister a fallback policy, unless the PTLRPC service is stopping.\n"); ptlrpc_nrs_policy_unregister() 1284 CERROR("Failing to unregister NRS policy %s which has not been registered with NRS core!\n", ptlrpc_nrs_policy_unregister() 1295 CERROR("Please first stop policy %s on all service partitions and then retry to unregister the policy.\n", ptlrpc_nrs_policy_unregister() 1300 CDEBUG(D_INFO, "Unregistering policy %s from NRS core.\n", ptlrpc_nrs_policy_unregister() 1477 static void nrs_request_removed(struct ptlrpc_nrs_policy *policy) nrs_request_removed() argument 1479 LASSERT(policy->pol_nrs->nrs_req_queued > 0); nrs_request_removed() 1480 LASSERT(policy->pol_req_queued > 0); nrs_request_removed() 1482 policy->pol_nrs->nrs_req_queued--; nrs_request_removed() 1483 policy->pol_req_queued--; nrs_request_removed() 1486 * If the policy has no more requests queued, remove it from nrs_request_removed() 1489 if (unlikely(policy->pol_req_queued == 0)) { nrs_request_removed() 1490 list_del_init(&policy->pol_list_queued); nrs_request_removed() 1494 * current policy to the end so that we can round robin over nrs_request_removed() 1497 } else if (policy->pol_req_queued != policy->pol_nrs->nrs_req_queued) { nrs_request_removed() 1498 LASSERT(policy->pol_req_queued < nrs_request_removed() 1499 policy->pol_nrs->nrs_req_queued); nrs_request_removed() 1501 list_move_tail(&policy->pol_list_queued, nrs_request_removed() 1502 &policy->pol_nrs->nrs_policy_queued); nrs_request_removed() 1515 * from the policy. 1516 * \param[in] force when set, it will force a policy to return a request if it 1527 struct ptlrpc_nrs_policy *policy; ptlrpc_nrs_req_get_nolock0() local 1532 * inactive, because the user can change policy status at runtime. ptlrpc_nrs_req_get_nolock0() 1534 list_for_each_entry(policy, &nrs->nrs_policy_queued, ptlrpc_nrs_req_get_nolock0() 1536 nrq = nrs_request_get(policy, peek, force); ptlrpc_nrs_req_get_nolock0() 1541 policy->pol_req_started++; ptlrpc_nrs_req_get_nolock0() 1542 policy->pol_nrs->nrs_req_started++; ptlrpc_nrs_req_get_nolock0() 1544 nrs_request_removed(policy); ptlrpc_nrs_req_get_nolock0() 1555 * Dequeues request \a req from the policy it has been enqueued on. 1561 struct ptlrpc_nrs_policy *policy = nrs_request_policy(&req->rq_nrq); ptlrpc_nrs_req_del_nolock() local 1563 policy->pol_desc->pd_ops->op_req_dequeue(policy, &req->rq_nrq); ptlrpc_nrs_req_del_nolock() 1567 nrs_request_removed(policy); ptlrpc_nrs_req_del_nolock() 1633 * Carries out a control operation \a opc on the policy identified by the 1637 * \param[in] svc the service the policy belongs to. 1638 * \param[in] queue whether to carry out the command on the policy which 1641 * \param[in] name the policy to act upon, by human-readable name 1734 * Removes all policy descriptors from nrs_core::nrs_policies, and frees the 1735 * policy descriptors. 1738 * instances of any policies, because each service will have stopped its policy
|
H A D | nrs_fifo.c | 30 * Network Request Scheduler (NRS) FIFO policy 32 * Handles RPCs in a FIFO manner, as received from the network. This policy is 34 * default and fallback policy for all types of RPCs on all PTLRPC service 36 * the policy is the one enabled at PTLRPC service partition startup time, and 37 * fallback means the policy is used to handle RPCs that are not handled 38 * successfully or are not handled at all by any primary policy that may be 58 * The FIFO policy is a logical wrapper around previous, non-NRS functionality. 67 * Is called before the policy transitions into 69 * policy-specific private data structure. 71 * \param[in] policy The policy to start 79 static int nrs_fifo_start(struct ptlrpc_nrs_policy *policy) nrs_fifo_start() argument 83 OBD_CPT_ALLOC_PTR(head, nrs_pol2cptab(policy), nrs_pol2cptid(policy)); nrs_fifo_start() 88 policy->pol_private = head; nrs_fifo_start() 93 * Is called before the policy transitions into 94 * ptlrpc_nrs_pol_state::NRS_POL_STATE_STOPPED; deallocates the policy-specific 97 * \param[in] policy The policy to stop 101 static void nrs_fifo_stop(struct ptlrpc_nrs_policy *policy) nrs_fifo_stop() argument 103 struct nrs_fifo_head *head = policy->pol_private; nrs_fifo_stop() 112 * Is called for obtaining a FIFO policy resource. 114 * \param[in] policy The policy on which the request is being asked for 116 * \param[in] parent Parent resource, unused in this policy 119 * policy 121 * \retval 1 The FIFO policy only has a one-level resource hierarchy, as since 129 static int nrs_fifo_res_get(struct ptlrpc_nrs_policy *policy, nrs_fifo_res_get() argument 138 *resp = &((struct nrs_fifo_head *)policy->pol_private)->fh_res; nrs_fifo_res_get() 143 * Called when getting a request from the FIFO policy for handling, or just 144 * peeking; removes the request from the policy when it is to be handled. 146 * \param[in] policy The policy 149 * from the policy. 150 * \param[in] force Force the policy to return a request; unused in this 151 * policy 160 struct ptlrpc_nrs_request *nrs_fifo_req_get(struct ptlrpc_nrs_policy *policy, nrs_fifo_req_get() argument 163 struct nrs_fifo_head *head = policy->pol_private; nrs_fifo_req_get() 178 policy->pol_desc->pd_name, libcfs_id2str(req->rq_peer), nrs_fifo_req_get() 186 * Adds request \a nrq to \a policy's list of queued requests 188 * \param[in] policy The policy 194 static int nrs_fifo_req_add(struct ptlrpc_nrs_policy *policy, nrs_fifo_req_add() argument 211 * Removes request \a nrq from \a policy's list of queued requests. 213 * \param[in] policy The policy 216 static void nrs_fifo_req_del(struct ptlrpc_nrs_policy *policy, nrs_fifo_req_del() argument 227 * \param[in] policy The policy handling the request 233 static void nrs_fifo_req_stop(struct ptlrpc_nrs_policy *policy, nrs_fifo_req_stop() argument 240 policy->pol_desc->pd_name, libcfs_id2str(req->rq_peer), nrs_fifo_req_stop() 245 * FIFO policy operations 258 * FIFO policy configuration
|
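The FIFO excerpt above already supplies a complete set of per-policy callbacks. As a rough sketch of how such a policy would be packaged and handed to NRS core, the fragment below fills in a registration descriptor at module load time; the ptlrpc_nrs_pol_conf field names (nc_name, nc_ops, nc_compat, nc_owner) and the compatibility helper nrs_policy_compat_all are assumptions inferred from the descriptor documentation in lustre_net.h further down, not verified against this tree:

  static const struct ptlrpc_nrs_pol_ops nrs_myfifo_ops = {
          .op_policy_start = nrs_fifo_start,   /* allocate pol_private */
          .op_policy_stop  = nrs_fifo_stop,    /* free pol_private */
          .op_res_get      = nrs_fifo_res_get, /* one-level resource hierarchy */
          .op_req_get      = nrs_fifo_req_get, /* peek at/remove the list head */
          .op_req_enqueue  = nrs_fifo_req_add, /* append at the list tail */
          .op_req_dequeue  = nrs_fifo_req_del,
          .op_req_stop     = nrs_fifo_req_stop,
  };

  static struct ptlrpc_nrs_pol_conf nrs_myfifo_conf = {
          .nc_name   = "myfifo",              /* name used by lprocfs/ctl ops */
          .nc_ops    = &nrs_myfifo_ops,
          .nc_compat = nrs_policy_compat_all, /* register with every service */
          .nc_owner  = THIS_MODULE,           /* external policy: may not use the
                                               * FALLBACK or REG_START flags */
  };

  static int __init nrs_myfifo_init(void)
  {
          /* Succeeds only if registration works on all compatible services. */
          return ptlrpc_nrs_policy_register(&nrs_myfifo_conf);
  }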
H A D | sec.c | 58 * policy registers * 66 int sptlrpc_register_policy(struct ptlrpc_sec_policy *policy) sptlrpc_register_policy() argument 68 __u16 number = policy->sp_policy; sptlrpc_register_policy() 70 LASSERT(policy->sp_name); sptlrpc_register_policy() 71 LASSERT(policy->sp_cops); sptlrpc_register_policy() 72 LASSERT(policy->sp_sops); sptlrpc_register_policy() 82 policies[number] = policy; sptlrpc_register_policy() 85 CDEBUG(D_SEC, "%s: registered\n", policy->sp_name); sptlrpc_register_policy() 90 int sptlrpc_unregister_policy(struct ptlrpc_sec_policy *policy) sptlrpc_unregister_policy() argument 92 __u16 number = policy->sp_policy; sptlrpc_unregister_policy() 99 CERROR("%s: already unregistered\n", policy->sp_name); sptlrpc_unregister_policy() 103 LASSERT(policies[number] == policy); sptlrpc_unregister_policy() 107 CDEBUG(D_SEC, "%s: unregistered\n", policy->sp_name); sptlrpc_unregister_policy() 117 struct ptlrpc_sec_policy *policy; sptlrpc_wireflavor2policy() local 126 policy = policies[number]; sptlrpc_wireflavor2policy() 127 if (policy && !try_module_get(policy->sp_owner)) sptlrpc_wireflavor2policy() 128 policy = NULL; sptlrpc_wireflavor2policy() 129 if (policy == NULL) sptlrpc_wireflavor2policy() 133 if (policy != NULL || flag != 0 || sptlrpc_wireflavor2policy() 151 return policy; sptlrpc_wireflavor2policy() 1006 CERROR("reply policy %u doesn't match request policy %u\n", do_cli_unwrap_reply() 1210 struct ptlrpc_sec_policy *policy = sec->ps_policy; sec_cop_flush_ctx_cache() local 1212 LASSERT(policy->sp_cops); sec_cop_flush_ctx_cache() 1213 LASSERT(policy->sp_cops->flush_ctx_cache); sec_cop_flush_ctx_cache() 1215 return policy->sp_cops->flush_ctx_cache(sec, uid, grace, force); sec_cop_flush_ctx_cache() 1220 struct ptlrpc_sec_policy *policy = sec->ps_policy; sec_cop_destroy_sec() local 1224 LASSERT(policy->sp_cops->destroy_sec); sec_cop_destroy_sec() 1228 policy->sp_cops->destroy_sec(sec); sec_cop_destroy_sec() 1229 sptlrpc_policy_put(policy); sec_cop_destroy_sec() 1272 * policy module is responsible for taking reference of import 1280 struct ptlrpc_sec_policy *policy; sptlrpc_sec_create() local 1292 policy = sptlrpc_policy_get(svc_ctx->sc_policy); sptlrpc_sec_create() 1302 policy = sptlrpc_wireflavor2policy(sf->sf_rpc); sptlrpc_sec_create() 1303 if (!policy) { sptlrpc_sec_create() 1309 sec = policy->sp_cops->create_sec(imp, svc_ctx, sf); sptlrpc_sec_create() 1315 if (sec->ps_gc_interval && policy->sp_cops->gc_ctx) sptlrpc_sec_create() 1318 sptlrpc_policy_put(policy); sptlrpc_sec_create() 1537 struct ptlrpc_sec_policy *policy; sptlrpc_cli_alloc_reqbuf() local 1546 policy = ctx->cc_sec->ps_policy; sptlrpc_cli_alloc_reqbuf() 1547 rc = policy->sp_cops->alloc_reqbuf(ctx->cc_sec, req, msgsize); sptlrpc_cli_alloc_reqbuf() 1567 struct ptlrpc_sec_policy *policy; sptlrpc_cli_free_reqbuf() local 1577 policy = ctx->cc_sec->ps_policy; sptlrpc_cli_free_reqbuf() 1578 policy->sp_cops->free_reqbuf(ctx->cc_sec, req); sptlrpc_cli_free_reqbuf() 1664 struct ptlrpc_sec_policy *policy; sptlrpc_cli_alloc_repbuf() local 1673 policy = ctx->cc_sec->ps_policy; sptlrpc_cli_alloc_repbuf() 1674 return policy->sp_cops->alloc_repbuf(ctx->cc_sec, req, msgsize); sptlrpc_cli_alloc_repbuf() 1684 struct ptlrpc_sec_policy *policy; sptlrpc_cli_free_repbuf() local 1695 policy = ctx->cc_sec->ps_policy; sptlrpc_cli_free_repbuf() 1696 policy->sp_cops->free_repbuf(ctx->cc_sec, req); sptlrpc_cli_free_repbuf() 1703 struct ptlrpc_sec_policy *policy = ctx->cc_sec->ps_policy; sptlrpc_cli_install_rvs_ctx() local 1705 if 
(!policy->sp_cops->install_rctx) sptlrpc_cli_install_rvs_ctx() 1707 return policy->sp_cops->install_rctx(imp, ctx->cc_sec, ctx); sptlrpc_cli_install_rvs_ctx() 1713 struct ptlrpc_sec_policy *policy = ctx->sc_policy; sptlrpc_svc_install_rvs_ctx() local 1715 if (!policy->sp_sops->install_rctx) sptlrpc_svc_install_rvs_ctx() 1717 return policy->sp_sops->install_rctx(imp, ctx); sptlrpc_svc_install_rvs_ctx() 2021 struct ptlrpc_sec_policy *policy; sptlrpc_svc_unwrap_request() local 2049 policy = sptlrpc_wireflavor2policy(req->rq_flvr.sf_rpc); sptlrpc_svc_unwrap_request() 2050 if (!policy) { sptlrpc_svc_unwrap_request() 2055 LASSERT(policy->sp_sops->accept); sptlrpc_svc_unwrap_request() 2056 rc = policy->sp_sops->accept(req); sptlrpc_svc_unwrap_request() 2057 sptlrpc_policy_put(policy); sptlrpc_svc_unwrap_request() 2080 struct ptlrpc_sec_policy *policy; sptlrpc_svc_alloc_rs() local 2087 policy = req->rq_svc_ctx->sc_policy; sptlrpc_svc_alloc_rs() 2088 LASSERT(policy->sp_sops->alloc_rs); sptlrpc_svc_alloc_rs() 2090 rc = policy->sp_sops->alloc_rs(req, msglen); sptlrpc_svc_alloc_rs() 2108 rc = policy->sp_sops->alloc_rs(req, msglen); sptlrpc_svc_alloc_rs() 2129 struct ptlrpc_sec_policy *policy; sptlrpc_svc_wrap_reply() local 2135 policy = req->rq_svc_ctx->sc_policy; sptlrpc_svc_wrap_reply() 2136 LASSERT(policy->sp_sops->authorize); sptlrpc_svc_wrap_reply() 2138 rc = policy->sp_sops->authorize(req); sptlrpc_svc_wrap_reply() 2149 struct ptlrpc_sec_policy *policy; sptlrpc_svc_free_rs() local 2155 policy = rs->rs_svc_ctx->sc_policy; sptlrpc_svc_free_rs() 2156 LASSERT(policy->sp_sops->free_rs); sptlrpc_svc_free_rs() 2159 policy->sp_sops->free_rs(rs); sptlrpc_svc_free_rs()
|
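The LASSERTs in sptlrpc_register_policy() above spell out the registration contract: a policy supplies a name plus client-side and server-side operation vectors, and is slotted into the policies[] array by its sp_policy number, while sp_owner lets sptlrpc_wireflavor2policy() pin the module with try_module_get(). A minimal module sketch under those assumptions (the policy number constant and the two op vectors are hypothetical):

  static struct ptlrpc_sec_policy my_sec_policy = {
          .sp_owner  = THIS_MODULE,           /* pinned while contexts are live */
          .sp_name   = "sec.mypolicy",
          .sp_policy = SPTLRPC_POLICY_MYSEC,  /* hypothetical policy number */
          .sp_cops   = &my_sec_cops,          /* client ops (create_sec, ...) */
          .sp_sops   = &my_sec_sops,          /* server ops (accept, alloc_rs, ...) */
  };

  static int __init my_sec_init(void)
  {
          return sptlrpc_register_policy(&my_sec_policy);
  }

  static void __exit my_sec_exit(void)
  {
          sptlrpc_unregister_policy(&my_sec_policy);
  }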
H A D | ptlrpc_internal.h | 102 * Protects nrs_core::nrs_policies, serializes external policy 109 * List of all policy descriptors registered with NRS core; protected 174 static inline int nrs_pol2cptid(const struct ptlrpc_nrs_policy *policy) nrs_pol2cptid() argument 176 return policy->pol_nrs->nrs_svcpt->scp_cpt; nrs_pol2cptid() 180 struct ptlrpc_service *nrs_pol2svc(struct ptlrpc_nrs_policy *policy) nrs_pol2svc() argument 182 return policy->pol_nrs->nrs_svcpt->scp_service; nrs_pol2svc() 186 struct ptlrpc_service_part *nrs_pol2svcpt(struct ptlrpc_nrs_policy *policy) nrs_pol2svcpt() argument 188 return policy->pol_nrs->nrs_svcpt; nrs_pol2svcpt() 192 struct cfs_cpt_table *nrs_pol2cptab(struct ptlrpc_nrs_policy *policy) nrs_pol2cptab() argument 194 return nrs_pol2svc(policy)->srv_cptable; nrs_pol2cptab()
|
H A D | lproc_ptlrpc.c | 428 * \param[in] state The policy state 449 * Obtains status information for \a policy. 453 * \param[in] policy The policy 456 void nrs_policy_get_info_locked(struct ptlrpc_nrs_policy *policy, nrs_policy_get_info_locked() argument 459 LASSERT(policy != NULL); nrs_policy_get_info_locked() 461 assert_spin_locked(&policy->pol_nrs->nrs_lock); nrs_policy_get_info_locked() 463 memcpy(info->pi_name, policy->pol_desc->pd_name, NRS_POL_NAME_MAX); nrs_policy_get_info_locked() 465 info->pi_fallback = !!(policy->pol_flags & PTLRPC_NRS_FL_FALLBACK); nrs_policy_get_info_locked() 466 info->pi_state = policy->pol_state; nrs_policy_get_info_locked() 471 info->pi_req_queued = policy->pol_req_queued; nrs_policy_get_info_locked() 472 info->pi_req_started = policy->pol_req_started; nrs_policy_get_info_locked() 476 * Reads and prints policy status information for all policies of a PTLRPC 484 struct ptlrpc_nrs_policy *policy; ptlrpc_lprocfs_nrs_seq_show() local 494 * Serialize NRS core lprocfs operations with policy registration/ ptlrpc_lprocfs_nrs_seq_show() 523 list_for_each_entry(policy, &nrs->nrs_policy_list, ptlrpc_service_for_each_part() 527 nrs_policy_get_info_locked(policy, &tmp); ptlrpc_service_for_each_part() 549 * instances of the same policy in different ptlrpc_service_for_each_part() 630 * The longest valid command string is the maximum policy name size, plus the 636 * Starts and stops a given policy on a PTLRPC service. 638 * Commands consist of the policy name, followed by an optional [reg|hp] token; 709 * Serialize NRS core lprocfs operations with policy registration/ ptlrpc_lprocfs_nrs_seq_write()
|
/linux-4.1.27/drivers/md/ |
H A D | dm-cache-policy-cleaner.c | 4 * writeback cache policy supporting flushing out dirty cache blocks. 9 #include "dm-cache-policy.h" 38 struct policy { struct 39 struct dm_cache_policy policy; member in struct:policy 66 static struct policy *to_policy(struct dm_cache_policy *p) to_policy() 68 return container_of(p, struct policy, policy); to_policy() 97 static int alloc_cache_blocks_with_hash(struct policy *p, dm_cblock_t cache_size) alloc_cache_blocks_with_hash() 119 static void free_cache_blocks_and_hash(struct policy *p) free_cache_blocks_and_hash() 125 static struct wb_cache_entry *alloc_cache_entry(struct policy *p) alloc_cache_entry() 140 static struct wb_cache_entry *lookup_cache_entry(struct policy *p, dm_oblock_t oblock) lookup_cache_entry() 159 static void insert_cache_hash_entry(struct policy *p, struct wb_cache_entry *e) insert_cache_hash_entry() 171 /* Public interface (see dm-cache-policy.h */ wb_map() 177 struct policy *p = to_policy(pe); wb_map() 204 struct policy *p = to_policy(pe); wb_lookup() 226 struct policy *p = to_policy(pe); __set_clear_dirty() 249 struct policy *p = to_policy(pe); wb_set_dirty() 259 struct policy *p = to_policy(pe); wb_clear_dirty() 267 static void add_cache_entry(struct policy *p, struct wb_cache_entry *e) add_cache_entry() 281 struct policy *p = to_policy(pe); wb_load_mapping() 299 struct policy *p = to_policy(pe); wb_destroy() 305 static struct wb_cache_entry *__wb_force_remove_mapping(struct policy *p, dm_oblock_t oblock) __wb_force_remove_mapping() 319 struct policy *p = to_policy(pe); wb_remove_mapping() 334 struct policy *p = to_policy(pe); wb_force_mapping() 345 static struct wb_cache_entry *get_next_dirty_entry(struct policy *p) get_next_dirty_entry() 365 struct policy *p = to_policy(pe); wb_writeback_work() 388 /* Init the policy plugin interface function pointers. */ init_policy_functions() 389 static void init_policy_functions(struct policy *p) init_policy_functions() 391 p->policy.destroy = wb_destroy; init_policy_functions() 392 p->policy.map = wb_map; init_policy_functions() 393 p->policy.lookup = wb_lookup; init_policy_functions() 394 p->policy.set_dirty = wb_set_dirty; init_policy_functions() 395 p->policy.clear_dirty = wb_clear_dirty; init_policy_functions() 396 p->policy.load_mapping = wb_load_mapping; init_policy_functions() 397 p->policy.walk_mappings = NULL; init_policy_functions() 398 p->policy.remove_mapping = wb_remove_mapping; init_policy_functions() 399 p->policy.writeback_work = wb_writeback_work; init_policy_functions() 400 p->policy.force_mapping = wb_force_mapping; init_policy_functions() 401 p->policy.residency = wb_residency; init_policy_functions() 402 p->policy.tick = NULL; init_policy_functions() 410 struct policy *p = kzalloc(sizeof(*p), GFP_KERNEL); wb_create() 427 return &p->policy; wb_create() 468 MODULE_DESCRIPTION("cleaner cache policy");
|
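The cleaner policy above shows the embedding idiom every dm-cache policy uses: the generic struct dm_cache_policy sits as a member of a larger policy-private struct, and to_policy() recovers the outer object with container_of(). The same pattern reduced to a minimal sketch (names hypothetical):

  #include <linux/kernel.h>       /* container_of() */
  #include "dm-cache-policy.h"

  struct my_policy {
          struct dm_cache_policy policy;  /* generic part handed to the core */
          spinlock_t lock;                /* policy-private state follows */
  };

  static struct my_policy *to_my_policy(struct dm_cache_policy *p)
  {
          /* Valid because the core only ever sees &my->policy. */
          return container_of(p, struct my_policy, policy);
  }

  static int my_lookup(struct dm_cache_policy *pe, dm_oblock_t oblock,
                       dm_cblock_t *cblock)
  {
          struct my_policy *p = to_my_policy(pe);
          /* ... consult p's private structures here ... */
          return -ENOENT;         /* block not mapped */
  }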
H A D | dm-cache-policy-internal.h | 10 #include "dm-cache-policy.h" 15 * Little inline functions that simplify calling the policy methods. 109 * Creates a new cache policy given a policy name, a cache size, an origin size and the block size. 115 * Destroys the policy. This drops references to the policy module as well 117 * the policy->destroy method directly.
|
H A D | dm-cache-policy.h | 16 /* FIXME: make it clear which methods are optional. Get debug policy to 21 * The cache policy makes the important decisions about which blocks get to 25 * policy. This returns an instruction telling the core target what to do. 34 * This block is currently on the origin device, but the policy wants to 43 * This block is currently on the origin device. The policy wants to 57 * approach avoids having transactional semantics in the policy (ie, the 58 * core informing the policy when a migration is complete), and hence makes 61 * In general policy methods should never block, except in the case of the 73 * When issuing a POLICY_REPLACE the policy needs to make a callback to 97 * The cache policy object. Just a bunch of methods. It is envisaged that 98 * this structure will be embedded in a bigger, policy specific structure 122 * instructions. If denied and the policy would have 128 * policy about this sooner, so it can recycle that 156 * mapping from the metadata device into the policy. 199 * queue merging has occurred). To stop the policy being fooled by 200 * these the core target sends regular tick() calls to the policy. 201 * The policy should only count an entry as hit once per tick. 214 * Book keeping ptr for the policy register, not for general use. 222 * We maintain a little register of the different policy types. 233 * what gets passed on the target line to select your policy.
|
H A D | dm-cache-policy.c | 7 #include "dm-cache-policy-internal.h" 15 #define DM_MSG_PREFIX "cache-policy" 91 DMWARN("attempt to register policy under duplicate name %s", type->name); dm_cache_policy_register() 121 DMWARN("unknown policy type"); dm_cache_policy_create()
|
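dm-cache-policy.c above keeps a register of policy types keyed by type->name, the string users put on the target line. Below is a sketch of how a policy module would describe itself to that register; the dm_cache_policy_type field names (name, version, hint_size, owner, create) are assumptions inferred from the dm_cache_policy_get_name/_get_version/_get_hint_size accessors used by the metadata code further down:

  static struct dm_cache_policy_type my_policy_type = {
          .name      = "mypolicy",  /* selects this policy on the target line */
          .version   = {1, 0, 0},   /* major version must match saved hints */
          .hint_size = 4,           /* one 32bit hint stored per cblock */
          .owner     = THIS_MODULE,
          .create    = my_create,   /* returns a new struct dm_cache_policy */
  };

  static int __init my_policy_init(void)
  {
          return dm_cache_policy_register(&my_policy_type);
  }

  static void __exit my_policy_exit(void)
  {
          dm_cache_policy_unregister(&my_policy_type);
  }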
H A D | dm-cache-metadata.h | 11 #include "dm-cache-policy-internal.h" 90 struct dm_cache_policy *policy, 119 * The policy is invited to save a 32bit hint value for every cblock (eg, 120 * for a hit count). These are stored against the policy name. If 126 * rather than querying the policy for each cblock, we let it walk its data
|
H A D | dm-cache-policy-mq.c | 7 #include "dm-cache-policy.h" 17 #define DM_MSG_PREFIX "cache-policy-mq" 418 struct dm_cache_policy policy; member in struct:mq_policy 538 * Now we get to the meat of the policy. This section deals with deciding 733 * value for the promotion_threshold is crucial to this policy. 974 * Public interface, via the policy struct. See dm-cache-policy.h for a 980 return container_of(p, struct mq_policy, policy); to_mq_policy() 1345 /* Init the policy plugin interface function pointers. */ init_policy_functions() 1348 mq->policy.destroy = mq_destroy; init_policy_functions() 1349 mq->policy.map = mq_map; init_policy_functions() 1350 mq->policy.lookup = mq_lookup; init_policy_functions() 1351 mq->policy.set_dirty = mq_set_dirty; init_policy_functions() 1352 mq->policy.clear_dirty = mq_clear_dirty; init_policy_functions() 1353 mq->policy.load_mapping = mq_load_mapping; init_policy_functions() 1354 mq->policy.walk_mappings = mq_walk_mappings; init_policy_functions() 1355 mq->policy.remove_mapping = mq_remove_mapping; init_policy_functions() 1356 mq->policy.remove_cblock = mq_remove_cblock; init_policy_functions() 1357 mq->policy.writeback_work = mq_writeback_work; init_policy_functions() 1358 mq->policy.force_mapping = mq_force_mapping; init_policy_functions() 1359 mq->policy.residency = mq_residency; init_policy_functions() 1360 mq->policy.tick = mq_tick; init_policy_functions() 1361 mq->policy.emit_config_values = mq_emit_config_values; init_policy_functions() 1362 mq->policy.set_config_value = mq_set_config_value; init_policy_functions() 1410 return &mq->policy; mq_create() 1489 MODULE_DESCRIPTION("mq cache policy");
|
H A D | dm-cache-metadata.c | 1068 struct dm_cache_policy *policy) policy_unchanged() 1070 const char *policy_name = dm_cache_policy_get_name(policy); policy_unchanged() 1071 const unsigned *policy_version = dm_cache_policy_get_version(policy); policy_unchanged() 1072 size_t policy_hint_size = dm_cache_policy_get_hint_size(policy); policy_unchanged() 1075 * Ensure policy names match. policy_unchanged() 1081 * Ensure policy major versions match. policy_unchanged() 1087 * Ensure policy hint sizes match. policy_unchanged() 1101 struct dm_cache_policy *policy) hints_array_available() 1103 return cmd->clean_when_opened && policy_unchanged(cmd, policy) && hints_array_available() 1138 struct dm_cache_policy *policy, __load_mappings() 1148 thunk.hints_valid = hints_array_available(cmd, policy); __load_mappings() 1154 struct dm_cache_policy *policy, dm_cache_load_mappings() 1160 r = __load_mappings(cmd, policy, fn, context); dm_cache_load_mappings() 1304 static int begin_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy) begin_hints() argument 1309 const char *policy_name = dm_cache_policy_get_name(policy); begin_hints() 1310 const unsigned *policy_version = dm_cache_policy_get_version(policy); begin_hints() 1316 if (!policy_unchanged(cmd, policy)) { begin_hints() 1320 hint_size = dm_cache_policy_get_hint_size(policy); begin_hints() 1362 static int write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy) write_hints() argument 1366 r = begin_hints(cmd, policy); write_hints() 1372 return policy_walk_mappings(policy, save_hint, cmd); write_hints() 1375 int dm_cache_write_hints(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy) dm_cache_write_hints() argument 1380 r = write_hints(cmd, policy); dm_cache_write_hints() 1067 policy_unchanged(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy) policy_unchanged() argument 1100 hints_array_available(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy) hints_array_available() argument 1137 __load_mappings(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy, load_mapping_fn fn, void *context) __load_mappings() argument 1153 dm_cache_load_mappings(struct dm_cache_metadata *cmd, struct dm_cache_policy *policy, load_mapping_fn fn, void *context) dm_cache_load_mappings() argument
|
H A D | dm-cache-target.c | 269 struct dm_cache_policy *policy; member in struct:cache 535 policy_set_dirty(cache->policy, oblock); set_dirty() 542 policy_clear_dirty(cache->policy, oblock); clear_dirty() 931 policy_force_mapping(cache->policy, mg->new_oblock, mg->old_oblock); migration_failure() 938 policy_remove_mapping(cache->policy, mg->new_oblock); migration_failure() 959 policy_force_mapping(cache->policy, mg->new_oblock, migration_success_pre_commit() 969 policy_remove_mapping(cache->policy, mg->new_oblock); migration_success_pre_commit() 1002 policy_remove_mapping(cache->policy, mg->old_oblock); migration_success_post_commit() 1503 r = policy_map(cache->policy, block, true, can_migrate, discarded_block, process_bio() 1570 DMERR_LIMIT("%s: erroring bio, unknown policy op: %u", __func__, process_bio() 1695 r = policy_writeback_work(cache->policy, &oblock, &cblock); writeback_some_dirty_blocks() 1701 policy_set_dirty(cache->policy, oblock); writeback_some_dirty_blocks() 1723 r = policy_remove_cblock(cache->policy, to_cblock(begin)); process_invalidation_request() 1877 policy_tick(cache->policy); do_waker() 1943 if (cache->policy) destroy() 1944 dm_cache_policy_destroy(cache->policy); destroy() 1972 * <policy> <#policy args> [<policy arg>]* 1982 * policy : the replacement policy to use 1983 * #policy args : an even number of policy arguments corresponding 1984 * to key/value pairs passed to the policy 1985 * policy args : key/value pairs passed to the policy 2183 {0, 1024, "Invalid number of policy arguments"}, parse_policy() 2265 r = policy_set_config_value(cache->policy, key, value); set_config_value() 2278 DMWARN("Odd number of policy arguments given but they should be <key> <value> pairs."); set_config_values() 2302 *error = "Error creating cache's policy"; create_cache_policy() 2305 cache->policy = p; create_cache_policy() 2418 *error = "Error setting cache policy's config values"; cache_create() 2424 dm_cache_policy_get_hint_size(cache->policy)); cache_create() 2653 r = policy_map(cache->policy, block, false, can_migrate, discarded_block, __cache_map() 2660 DMERR_LIMIT("Unexpected return from cache replacement policy: %d", r); __cache_map() 2710 DMERR_LIMIT("%s: erroring bio: unknown policy op: %u", __func__, __cache_map() 2743 policy_tick(cache->policy); cache_end_io() 2807 r3 = dm_cache_write_hints(cache->cmd, cache->policy); sync_metadata() 2842 r = policy_load_mapping(cache->policy, oblock, cblock, hint, hint_valid); load_mapping() 3006 r = dm_cache_load_mappings(cache->cmd, cache->policy, cache_preresume() 3057 * <policy name> <#policy args> <policy args>* 3093 residency = policy_residency(cache->policy); cache_status() 3126 DMEMIT("%s ", dm_cache_policy_get_name(cache->policy)); cache_status() 3128 r = policy_emit_config_values(cache->policy, result + sz, maxlen - sz); cache_status()
|
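The constructor format documented in dm-cache-target.c maps directly onto a dmsetup table line. A hypothetical example (device names, sizes and the policy tunable are invented for illustration) that selects the mq policy with one key/value pair:

  # 0 <origin len> cache <metadata dev> <cache dev> <origin dev>
  #   <block size> <#feature args> [<feature>] <policy> <#policy args> [<key> <value>]
  dmsetup create cached --table \
    "0 41943040 cache /dev/mapper/meta /dev/mapper/fast /dev/mapper/slow \
     512 1 writeback mq 2 sequential_threshold 1024"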
/linux-4.1.27/fs/ext4/ |
H A D | crypto_policy.c | 6 * This contains encryption policy functions for ext4 26 * check whether the policy is consistent with the encryption context 30 struct inode *inode, const struct ext4_encryption_policy *policy) ext4_is_encryption_context_consistent_with_policy() 38 return (memcmp(ctx.master_key_descriptor, policy->master_key_descriptor, ext4_is_encryption_context_consistent_with_policy() 41 policy->flags) && ext4_is_encryption_context_consistent_with_policy() 43 policy->contents_encryption_mode) && ext4_is_encryption_context_consistent_with_policy() 45 policy->filenames_encryption_mode)); ext4_is_encryption_context_consistent_with_policy() 49 struct inode *inode, const struct ext4_encryption_policy *policy) ext4_create_encryption_context_from_policy() 55 memcpy(ctx.master_key_descriptor, policy->master_key_descriptor, ext4_create_encryption_context_from_policy() 57 if (!ext4_valid_contents_enc_mode(policy->contents_encryption_mode)) { ext4_create_encryption_context_from_policy() 60 policy->contents_encryption_mode); ext4_create_encryption_context_from_policy() 63 if (!ext4_valid_filenames_enc_mode(policy->filenames_encryption_mode)) { ext4_create_encryption_context_from_policy() 66 policy->filenames_encryption_mode); ext4_create_encryption_context_from_policy() 69 if (policy->flags & ~EXT4_POLICY_FLAGS_VALID) ext4_create_encryption_context_from_policy() 71 ctx.contents_encryption_mode = policy->contents_encryption_mode; ext4_create_encryption_context_from_policy() 72 ctx.filenames_encryption_mode = policy->filenames_encryption_mode; ext4_create_encryption_context_from_policy() 73 ctx.flags = policy->flags; ext4_create_encryption_context_from_policy() 85 int ext4_process_policy(const struct ext4_encryption_policy *policy, ext4_process_policy() argument 88 if (policy->version != 0) ext4_process_policy() 95 policy); ext4_process_policy() 98 if (ext4_is_encryption_context_consistent_with_policy(inode, policy)) ext4_process_policy() 106 int ext4_get_policy(struct inode *inode, struct ext4_encryption_policy *policy) ext4_get_policy() argument 117 policy->version = 0; ext4_get_policy() 118 policy->contents_encryption_mode = ctx.contents_encryption_mode; ext4_get_policy() 119 policy->filenames_encryption_mode = ctx.filenames_encryption_mode; ext4_get_policy() 120 policy->flags = ctx.flags; ext4_get_policy() 121 memcpy(&policy->master_key_descriptor, ctx.master_key_descriptor, ext4_get_policy() 29 ext4_is_encryption_context_consistent_with_policy( struct inode *inode, const struct ext4_encryption_policy *policy) ext4_is_encryption_context_consistent_with_policy() argument 48 ext4_create_encryption_context_from_policy( struct inode *inode, const struct ext4_encryption_policy *policy) ext4_create_encryption_context_from_policy() argument
|
H A D | ioctl.c | 630 struct ext4_encryption_policy policy; ext4_ioctl() local 633 if (copy_from_user(&policy, ext4_ioctl() 635 sizeof(policy))) { ext4_ioctl() 640 err = ext4_process_policy(&policy, inode); ext4_ioctl() 685 struct ext4_encryption_policy policy; ext4_ioctl() local 690 err = ext4_get_policy(inode, &policy); ext4_ioctl() 693 if (copy_to_user((void *)arg, &policy, sizeof(policy))) ext4_ioctl()
|
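On the other side of these copy_from_user()/copy_to_user() calls sits a plain struct ext4_encryption_policy. A userspace sketch that sets a policy on a directory; the ioctl request number, the struct layout, and the numeric mode values are redeclared here from memory of this kernel generation's headers and should be treated as assumptions (normally you would include the kernel's own ext4 definitions):

  #include <stdio.h>
  #include <string.h>
  #include <fcntl.h>
  #include <unistd.h>
  #include <sys/ioctl.h>
  #include <linux/types.h>

  struct ext4_encryption_policy {         /* assumed to mirror the kernel layout */
          __u8 version;
          __u8 contents_encryption_mode;
          __u8 filenames_encryption_mode;
          __u8 flags;
          __u8 master_key_descriptor[8];
  } __attribute__((packed));

  #define EXT4_IOC_SET_ENCRYPTION_POLICY \
          _IOR('f', 19, struct ext4_encryption_policy)  /* assumed number */

  int main(int argc, char **argv)
  {
          struct ext4_encryption_policy pol;
          int fd;

          if (argc < 2 || (fd = open(argv[1], O_RDONLY)) < 0)
                  return 1;

          memset(&pol, 0, sizeof(pol));
          pol.version = 0;                       /* only version 0 is accepted */
          pol.contents_encryption_mode = 1;      /* assumed: AES-256-XTS */
          pol.filenames_encryption_mode = 4;     /* assumed: AES-256-CTS */
          memcpy(pol.master_key_descriptor, "01234567", 8);

          if (ioctl(fd, EXT4_IOC_SET_ENCRYPTION_POLICY, &pol) < 0)
                  perror("EXT4_IOC_SET_ENCRYPTION_POLICY");
          return 0;
  }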
/linux-4.1.27/arch/powerpc/platforms/cell/ |
H A D | cpufreq_spudemand.c | 37 struct cpufreq_policy *policy; member in struct:spu_gov_info_struct 48 cpu = info->policy->cpu; calc_freq() 55 return info->policy->max * info->busy_spus / FIXED_1; calc_freq() 66 /* after cancel_delayed_work_sync we unset info->policy */ spu_gov_work() 67 BUG_ON(info->policy == NULL); spu_gov_work() 70 __cpufreq_driver_target(info->policy, target_freq, CPUFREQ_RELATION_H); spu_gov_work() 73 schedule_delayed_work_on(info->policy->cpu, &info->work, delay); spu_gov_work() 80 schedule_delayed_work_on(info->policy->cpu, &info->work, delay); spu_gov_init_work() 88 static int spu_gov_govern(struct cpufreq_policy *policy, unsigned int event) spu_gov_govern() argument 90 unsigned int cpu = policy->cpu; spu_gov_govern() 105 if (!policy->cur) { spu_gov_govern() 106 printk(KERN_ERR "no cpu specified in policy\n"); spu_gov_govern() 112 for_each_cpu(i, policy->cpus) { spu_gov_govern() 114 affected_info->policy = policy; spu_gov_govern() 129 for_each_cpu (i, policy->cpus) { spu_gov_govern() 131 info->policy = NULL; spu_gov_govern()
|
/linux-4.1.27/drivers/block/drbd/ |
H A D | drbd_nla.h | 5 const struct nla_policy *policy);
|
H A D | drbd_nla.c | 31 const struct nla_policy *policy) drbd_nla_parse_nested() 37 err = nla_parse_nested(tb, maxtype, nla, policy); drbd_nla_parse_nested() 30 drbd_nla_parse_nested(struct nlattr *tb[], int maxtype, struct nlattr *nla, const struct nla_policy *policy) drbd_nla_parse_nested() argument
|
/linux-4.1.27/security/selinux/include/ |
H A D | initial_sid_to_string.h | 29 "policy",
|
H A D | netnode.h | 5 * mapping is maintained as part of the normal policy but a fast cache is
|
H A D | netport.h | 5 * mapping is maintained as part of the normal policy but a fast cache is
|
H A D | security.h | 21 /* Identify specific policy version changes */ 39 /* Range of policy versions we understand */ 86 * available at the kernel policy version >= POLICYDB_VERSION_BOUNDARY 219 u32 policyload; /* times of policy reloaded */
|
H A D | avc.h | 81 * the policy contains an explicit dontaudit rule for that avc_audit_required() 121 * with the policy. This function is typically called by
|
/linux-4.1.27/net/tipc/ |
H A D | netlink.c | 74 .policy = tipc_nl_policy, 79 .policy = tipc_nl_policy, 85 .policy = tipc_nl_policy, 90 .policy = tipc_nl_policy, 95 .policy = tipc_nl_policy, 100 .policy = tipc_nl_policy, 106 .policy = tipc_nl_policy, 111 .policy = tipc_nl_policy, 116 .policy = tipc_nl_policy, 122 .policy = tipc_nl_policy, 127 .policy = tipc_nl_policy, 132 .policy = tipc_nl_policy, 137 .policy = tipc_nl_policy, 142 .policy = tipc_nl_policy, 147 .policy = tipc_nl_policy,
|
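Every entry in tipc's table pairs a command with the same top-level tipc_nl_policy, which the generic netlink core applies to validate attributes before the handler runs (in this kernel generation the per-op .policy member is consulted by genetlink itself). The pattern in isolation, with hypothetical command/attribute names:

  #include <net/genetlink.h>

  static const struct nla_policy my_nl_policy[MY_ATTR_MAX + 1] = {
          [MY_ATTR_NAME]  = { .type = NLA_NUL_STRING },
          [MY_ATTR_VALUE] = { .type = NLA_U32 },
  };

  static const struct genl_ops my_genl_ops[] = {
          {
                  .cmd    = MY_CMD_SET,
                  .doit   = my_cmd_set,   /* reached only if attributes validate */
                  .policy = my_nl_policy, /* applied by the genetlink core */
          },
  };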
/linux-4.1.27/include/linux/ |
H A D | cpufreq.h | 57 unsigned int policy; /* see above */ member in struct:cpufreq_real_policy 68 unsigned int cpu; /* cpu nr of CPU managing this policy */ 76 unsigned int restore_freq; /* = policy->cur before transition */ 79 unsigned int policy; /* see above */ member in struct:cpufreq_policy 96 * - Any routine that wants to read from the policy structure will 98 * - Any routine that will write to the policy structure and/or may take away 99 * the policy altogether (eg. CPU hotplug), will hold this lock in write 129 void cpufreq_cpu_put(struct cpufreq_policy *policy); 135 static inline void cpufreq_cpu_put(struct cpufreq_policy *policy) { } cpufreq_cpu_put() argument 138 static inline bool policy_is_shared(struct cpufreq_policy *policy) policy_is_shared() argument 140 return cpumask_weight(policy->cpus) > 1; policy_is_shared() 157 int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu); 160 struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy); 226 int (*init)(struct cpufreq_policy *policy); 227 int (*verify)(struct cpufreq_policy *policy); 230 int (*setpolicy)(struct cpufreq_policy *policy); 233 * On failure, should always restore frequency to policy->restore_freq 236 int (*target)(struct cpufreq_policy *policy, 239 int (*target_index)(struct cpufreq_policy *policy, 256 unsigned int (*get_intermediate)(struct cpufreq_policy *policy, 258 int (*target_intermediate)(struct cpufreq_policy *policy, 267 int (*exit)(struct cpufreq_policy *policy); 268 void (*stop_cpu)(struct cpufreq_policy *policy); 269 int (*suspend)(struct cpufreq_policy *policy); 270 int (*resume)(struct cpufreq_policy *policy); 273 void (*ready)(struct cpufreq_policy *policy); 323 static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy, cpufreq_verify_within_limits() argument 326 if (policy->min < min) cpufreq_verify_within_limits() 327 policy->min = min; cpufreq_verify_within_limits() 328 if (policy->max < min) cpufreq_verify_within_limits() 329 policy->max = min; cpufreq_verify_within_limits() 330 if (policy->min > max) cpufreq_verify_within_limits() 331 policy->min = max; cpufreq_verify_within_limits() 332 if (policy->max > max) cpufreq_verify_within_limits() 333 policy->max = max; cpufreq_verify_within_limits() 334 if (policy->min > policy->max) cpufreq_verify_within_limits() 335 policy->min = policy->max; cpufreq_verify_within_limits() 340 cpufreq_verify_within_cpu_limits(struct cpufreq_policy *policy) cpufreq_verify_within_cpu_limits() argument 342 cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, cpufreq_verify_within_cpu_limits() 343 policy->cpuinfo.max_freq); cpufreq_verify_within_cpu_limits() 349 int cpufreq_generic_suspend(struct cpufreq_policy *policy); 378 void cpufreq_freq_transition_begin(struct cpufreq_policy *policy, cpufreq_resume() 380 void cpufreq_freq_transition_end(struct cpufreq_policy *policy, cpufreq_resume() 443 int (*governor) (struct cpufreq_policy *policy, 445 ssize_t (*show_setspeed) (struct cpufreq_policy *policy, 447 int (*store_setspeed) (struct cpufreq_policy *policy, 457 int cpufreq_driver_target(struct cpufreq_policy *policy, 460 int __cpufreq_driver_target(struct cpufreq_policy *policy, 556 int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy, 559 int cpufreq_frequency_table_verify(struct cpufreq_policy *policy, 561 int cpufreq_generic_frequency_table_verify(struct cpufreq_policy *policy); 563 int cpufreq_frequency_table_target(struct cpufreq_policy *policy, 568 int 
cpufreq_frequency_table_get_index(struct cpufreq_policy *policy, 597 int cpufreq_table_validate_and_show(struct cpufreq_policy *policy, 601 int cpufreq_generic_init(struct cpufreq_policy *policy,
|
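The clamping helpers above are what a cpufreq driver's ->verify() callback is built from: they squeeze the user-requested policy->min/max into the hardware bounds recorded in policy->cpuinfo. A minimal sketch of such a callback:

  static int my_cpufreq_verify(struct cpufreq_policy *policy)
  {
          /* Clamp min/max into [cpuinfo.min_freq, cpuinfo.max_freq] and keep
           * min <= max, exactly as cpufreq_verify_within_limits() does. */
          cpufreq_verify_within_cpu_limits(policy);
          return 0;
  }

Drivers with a frequency table can usually point ->verify at cpufreq_generic_frequency_table_verify(), declared in the same header, instead of writing their own.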
H A D | ioprio.h | 64 if (task->policy == SCHED_IDLE) task_nice_ioclass() 66 else if (task->policy == SCHED_FIFO || task->policy == SCHED_RR) task_nice_ioclass()
|
H A D | mempolicy.h | 22 * Describe a memory policy. 25 * For VMA related allocations the VMA policy is preferred, otherwise 26 * the process policy is used. Interrupts ignore the memory policy 29 * Locking policy for interleave: 34 * Freeing policy: 38 * Duplicating policy objects: 60 * The default fast path of a NULL MPOL_DEFAULT policy is always inlined. 112 * carry the policy. As a special twist the pseudo mm is indexed in pages, not 120 struct mempolicy *policy; member in struct:sp_node
|
H A D | shmem_fs.h | 22 struct shared_policy policy; /* NUMA memory alloc policy */ member in struct:shmem_inode_info 37 struct mempolicy *mpol; /* default memory policy for mappings */
|
/linux-4.1.27/tools/power/cpupower/lib/ |
H A D | cpufreq.c | 62 void cpufreq_put_policy(struct cpufreq_policy *policy) cpufreq_put_policy() argument 64 if ((!policy) || (!policy->governor)) cpufreq_put_policy() 67 free(policy->governor); cpufreq_put_policy() 68 policy->governor = NULL; cpufreq_put_policy() 69 free(policy); cpufreq_put_policy() 150 int cpufreq_set_policy(unsigned int cpu, struct cpufreq_policy *policy) cpufreq_set_policy() argument 152 if (!policy || !(policy->governor)) cpufreq_set_policy() 155 return sysfs_set_freq_policy(cpu, policy); cpufreq_set_policy()
|
H A D | sysfs.c | 249 struct cpufreq_policy *policy; sysfs_get_freq_policy() local 251 policy = malloc(sizeof(struct cpufreq_policy)); sysfs_get_freq_policy() 252 if (!policy) sysfs_get_freq_policy() 255 policy->governor = sysfs_cpufreq_get_one_string(cpu, SCALING_GOVERNOR); sysfs_get_freq_policy() 256 if (!policy->governor) { sysfs_get_freq_policy() 257 free(policy); sysfs_get_freq_policy() 260 policy->min = sysfs_cpufreq_get_one_value(cpu, SCALING_MIN_FREQ); sysfs_get_freq_policy() 261 policy->max = sysfs_cpufreq_get_one_value(cpu, SCALING_MAX_FREQ); sysfs_get_freq_policy() 262 if ((!policy->min) || (!policy->max)) { sysfs_get_freq_policy() 263 free(policy->governor); sysfs_get_freq_policy() 264 free(policy); sysfs_get_freq_policy() 268 return policy; sysfs_get_freq_policy() 583 int sysfs_set_freq_policy(unsigned int cpu, struct cpufreq_policy *policy) sysfs_set_freq_policy() argument 592 if (!policy || !(policy->governor)) sysfs_set_freq_policy() 595 if (policy->max < policy->min) sysfs_set_freq_policy() 598 if (verify_gov(gov, policy->governor)) sysfs_set_freq_policy() 601 snprintf(min, SYSFS_PATH_MAX, "%lu", policy->min); sysfs_set_freq_policy() 602 snprintf(max, SYSFS_PATH_MAX, "%lu", policy->max); sysfs_set_freq_policy() 605 write_max_first = (old_min && (policy->max < old_min) ? 0 : 1); sysfs_set_freq_policy()
|
H A D | cpufreq.h | 93 * considerations by cpufreq policy notifiers in the kernel. 112 /* determine CPUfreq policy currently used 121 extern void cpufreq_put_policy(struct cpufreq_policy *policy); 190 /* set new cpufreq policy 192 * Tries to set the passed policy as new policy as close as possible, 196 extern int cpufreq_set_policy(unsigned int cpu, struct cpufreq_policy *policy); 199 /* modify a policy by only changing min/max freq or governor
|
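Together the calls above form the cpupower library's read-modify-write cycle for a CPU's policy. A userspace sketch that pins CPU 0 to its lowest frequency and releases the policy object afterwards (needs root; error handling abbreviated):

  #include <stdio.h>
  #include "cpufreq.h"    /* tools/power/cpupower/lib */

  int main(void)
  {
          struct cpufreq_policy *pol = cpufreq_get_policy(0);

          if (!pol)
                  return 1;

          pol->max = pol->min;            /* collapse the range to the minimum */
          if (cpufreq_set_policy(0, pol)) /* writes scaling_{min,max}_freq + governor */
                  fprintf(stderr, "failed to set policy (root required?)\n");

          cpufreq_put_policy(pol);        /* frees the governor string and struct */
          return 0;
  }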
/linux-4.1.27/security/apparmor/ |
H A D | crypto.c | 4 * This file contains AppArmor policy loading interface function definitions. 13 * Fns to provide a checksum of policy that has been loaded; this can be 14 * compared to userspace policy compiles to check that loaded policy is what 90 aa_info_message("AppArmor sha1 policy hashing enabled"); init_profile_hash()
|
H A D | policy.c | 4 * This file contains AppArmor policy manipulation functions 15 * AppArmor policy is based around profiles, which contain the rules a 27 * namespace://profile - used by policy 52 * eg. if a mail program starts an editor, the policy might make the 56 * is preserved. This feature isn't exploited by AppArmor reference policy 87 #include "include/policy.h" 119 * policy_init - initialize a policy structure 120 * @policy: policy to initialize (NOT NULL) 122 * @name: name of the policy, init will make a copy of it (NOT NULL) 126 * Returns: true if policy init successful 128 static bool policy_init(struct aa_policy *policy, const char *prefix, policy_init() argument 133 policy->hname = kmalloc(strlen(prefix) + strlen(name) + 3, policy_init() 135 if (policy->hname) policy_init() 136 sprintf(policy->hname, "%s//%s", prefix, name); policy_init() 138 policy->hname = kstrdup(name, GFP_KERNEL); policy_init() 139 if (!policy->hname) policy_init() 142 policy->name = (char *)hname_tail(policy->hname); policy_init() 143 INIT_LIST_HEAD(&policy->list); policy_init() 144 INIT_LIST_HEAD(&policy->profiles); policy_init() 150 * policy_destroy - free the elements referenced by @policy 151 * @policy: policy that is to have its elements freed (NOT NULL) 153 static void policy_destroy(struct aa_policy *policy) policy_destroy() argument 156 if (on_list_rcu(&policy->profiles)) { policy_destroy() 158 "policy '%s' still contains profiles\n", policy_destroy() 159 __func__, policy->name); policy_destroy() 162 if (on_list_rcu(&policy->list)) { policy_destroy() 163 AA_ERROR("%s: internal error, policy '%s' still on list\n", policy_destroy() 164 __func__, policy->name); policy_destroy() 169 kzfree(policy->hname); policy_destroy() 173 * __policy_find - find a policy by @name on a policy list 179 * Returns: unrefcounted policy that match @name or NULL if not found 183 struct aa_policy *policy; __policy_find() local 185 list_for_each_entry_rcu(policy, head, list) { list_for_each_entry_rcu() 186 if (!strcmp(policy->name, name)) list_for_each_entry_rcu() 187 return policy; list_for_each_entry_rcu() 193 * __policy_strn_find - find a policy that's name matches @len chars of @str 200 * Returns: unrefcounted policy that match @str or NULL if not found 203 * other wise it allows searching for policy by a partial match of name 208 struct aa_policy *policy; __policy_strn_find() local 210 list_for_each_entry_rcu(policy, head, list) { list_for_each_entry_rcu() 211 if (aa_strneq(policy->name, str, len)) list_for_each_entry_rcu() 212 return policy; list_for_each_entry_rcu() 610 aa_put_dfa(profile->policy.dfa); aa_free_profile() 787 * Returns: unrefcounted policy or NULL if not found 792 struct aa_policy *policy; __lookup_parent() local 796 policy = &ns->base; __lookup_parent() 799 profile = __strn_find_child(&policy->profiles, hname, __lookup_parent() 803 policy = &profile->base; __lookup_parent() 894 * aa_audit_policy - Do auditing of policy changes 895 * @op: policy operation being performed 920 * aa_may_manage_policy - can the current task manage policy 921 * @op: the policy manipulation operation being done 923 * Returns: true if the task is allowed to manipulate policy 927 /* check if loading policy is locked out */ aa_may_manage_policy() 934 audit_policy(op, GFP_KERNEL, NULL, "not policy admin", -EACCES); aa_may_manage_policy() 1094 struct aa_policy *policy; aa_replace_profiles() local 1116 /* no ref on policy only use inside lock */ aa_replace_profiles() 1117 policy = __lookup_parent(ns, ent->new->base.hname); 
aa_replace_profiles() 1118 if (!policy) { aa_replace_profiles() 1128 } else if (policy != &ns->base) { aa_replace_profiles() 1130 struct aa_profile *p = (struct aa_profile *) policy; aa_replace_profiles()
|
H A D | Makefile | 6 path.o domain.o policy.o policy_unpack.o procattr.o lsm.o \ 37 # required by policy load to map policy ordering of RLIMITs to internal
|
H A D | policy_unpack.c | 4 * This file contains AppArmor functions for unpacking policy loaded from 15 * AppArmor uses a serialized binary format for loading policy. To find 16 * policy format documentation look in Documentation/security/apparmor.txt 17 * All policy is validated before it is used. 29 #include "include/policy.h" 83 * audit_iface - do audit message for policy unpacking/load/replace/remove 580 /* generic policy dfa - optional and may be NULL */ unpack_profile() 581 profile->policy.dfa = unpack_dfa(e); unpack_profile() 582 if (IS_ERR(profile->policy.dfa)) { unpack_profile() 583 error = PTR_ERR(profile->policy.dfa); unpack_profile() 584 profile->policy.dfa = NULL; unpack_profile() 587 if (!unpack_u32(e, &profile->policy.start[0], "start")) unpack_profile() 589 profile->policy.start[0] = DFA_START; unpack_profile() 592 profile->policy.start[i] = unpack_profile() 593 aa_dfa_next(profile->policy.dfa, unpack_profile() 594 profile->policy.start[0], unpack_profile()
|
H A D | resource.c | 20 #include "include/policy.h" 67 * aa_map_resource - map compiled policy resource to internal # 68 @resource: flattened policy resource number 72 * rlimit resource can vary based on architecture, map the compiled policy
|
H A D | lib.c | 31 * Split a namespace name from a profile name (see policy.c for naming 86 * It is possible that policy being loaded from the user is larger than
|
/linux-4.1.27/drivers/net/wireless/cw1200/ |
H A D | txrx.h | 36 struct tx_policy policy; member in struct:tx_policy_cache_entry 45 spinlock_t lock; /* Protect policy cache */ 49 /* TX policy cache */ 50 /* The TX policy cache exists to work around an overcomplicated WSM API. 52 * It uses a "tx retry policy id" instead, so driver code has to sync 53 * Linux tx retry sequences with a retry policy table in the device.
|
H A D | txrx.c | 48 /* TX policy cache implementation */ 50 static void tx_policy_dump(struct tx_policy *policy) tx_policy_dump() argument 52 pr_debug("[TX policy] %.1X%.1X%.1X%.1X%.1X%.1X%.1X%.1X %.1X%.1X%.1X%.1X%.1X%.1X%.1X%.1X %.1X%.1X%.1X%.1X%.1X%.1X%.1X%.1X: %d\n", tx_policy_dump() 53 policy->raw[0] & 0x0F, policy->raw[0] >> 4, tx_policy_dump() 54 policy->raw[1] & 0x0F, policy->raw[1] >> 4, tx_policy_dump() 55 policy->raw[2] & 0x0F, policy->raw[2] >> 4, tx_policy_dump() 56 policy->raw[3] & 0x0F, policy->raw[3] >> 4, tx_policy_dump() 57 policy->raw[4] & 0x0F, policy->raw[4] >> 4, tx_policy_dump() 58 policy->raw[5] & 0x0F, policy->raw[5] >> 4, tx_policy_dump() 59 policy->raw[6] & 0x0F, policy->raw[6] >> 4, tx_policy_dump() 60 policy->raw[7] & 0x0F, policy->raw[7] >> 4, tx_policy_dump() 61 policy->raw[8] & 0x0F, policy->raw[8] >> 4, tx_policy_dump() 62 policy->raw[9] & 0x0F, policy->raw[9] >> 4, tx_policy_dump() 63 policy->raw[10] & 0x0F, policy->raw[10] >> 4, tx_policy_dump() 64 policy->raw[11] & 0x0F, policy->raw[11] >> 4, tx_policy_dump() 65 policy->defined); tx_policy_dump() 69 /* [out] */ struct tx_policy *policy, tx_policy_build() 76 memset(policy, 0, sizeof(*policy)); tx_policy_build() 107 /* Re-fill policy trying to keep every requested rate and with tx_policy_build() 125 * policy. tx_policy_build() 173 policy->defined = cw1200_get_tx_rate(priv, &rates[0])->hw_value + 1; tx_policy_build() 187 policy->tbl[off] |= __cpu_to_le32(retries << shift); tx_policy_build() 188 policy->retry_count += retries; tx_policy_build() 191 pr_debug("[TX policy] Policy (%zu): %d:%d, %d:%d, %d:%d, %d:%d\n", tx_policy_build() 224 /* First search for policy in "used" list */ tx_policy_find() 226 if (tx_policy_is_equal(wanted, &it->policy)) tx_policy_find() 231 if (tx_policy_is_equal(wanted, &it->policy)) tx_policy_find() 240 ++entry->policy.usage_count; tx_policy_use() 247 int ret = --entry->policy.usage_count; tx_policy_release() 268 if (WARN_ON(entry->policy.usage_count)) { tx_policy_clean() 269 entry->policy.usage_count = 0; tx_policy_clean() 272 memset(&entry->policy, 0, sizeof(entry->policy)); tx_policy_clean() 282 /* External TX policy cache API */ 316 pr_debug("[TX policy] Used TX policy: %d\n", idx); tx_policy_get() 321 /* If policy is not found create a new one tx_policy_get() 326 entry->policy = wanted; tx_policy_get() 328 pr_debug("[TX policy] New TX policy: %d\n", idx); tx_policy_get() 329 tx_policy_dump(&entry->policy); tx_policy_get() 366 struct tx_policy *src = &cache->cache[i].policy; tx_policy_upload() 384 pr_debug("[TX policy] Upload %d policies\n", arg.num); tx_policy_upload() 393 pr_debug("[TX] TX policy upload.\n"); tx_policy_upload_work() 668 pr_debug("[TX] TX policy renew.\n"); cw1200_tx_h_rate_policy() 68 tx_policy_build(const struct cw1200_common *priv, struct tx_policy *policy, struct ieee80211_tx_rate *rates, size_t count) tx_policy_build() argument
|
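tx_policy_build() above stores one 4-bit retry count per rate in consecutive nibbles of the policy table, which is also why tx_policy_dump() prints raw[] nibble by nibble. The packing arithmetic on its own, as a plain C sketch:

  #include <stdint.h>

  /* Store a 4-bit retry count for rate index 'idx' in the nibble table. */
  static void set_retries(uint32_t *tbl, unsigned int idx, unsigned int retries)
  {
          unsigned int off = idx >> 3;            /* 8 nibbles per 32-bit word */
          unsigned int shift = (idx & 7) << 2;    /* bit offset of the nibble */

          tbl[off] &= ~(0xFu << shift);
          tbl[off] |= (retries & 0xFu) << shift;
  }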
/linux-4.1.27/security/apparmor/include/ |
H A D | crypto.h | 4 * This file contains AppArmor policy loading interface function definitions. 17 #include "policy.h"
|
H A D | policy_unpack.h | 4 * This file contains AppArmor policy loading interface function definitions.
|
H A D | policy.h | 4 * This file contains AppArmor policy definitions. 82 * @list: list policy object is on 106 * @base: common policy 143 /* struct aa_policydb - match engine for a policy 148 /* Generic policy DFA specific rule types will be subsections of it */ 176 * @policy: general match rules governing policy 217 struct aa_policydb policy; member in struct:aa_profile
|
/linux-4.1.27/drivers/power/ |
H A D | wm8350_power.c | 90 struct wm8350_charger_policy *policy) wm8350_charger_config() 94 if (!policy) { wm8350_charger_config() 96 "No charger policy, charger not configured.\n"); wm8350_charger_config() 101 if (policy->fast_limit_USB_mA > 500) { wm8350_charger_config() 106 eoc_mA = WM8350_CHG_EOC_mA(policy->eoc_mA); wm8350_charger_config() 113 reg | eoc_mA | policy->trickle_start_mV | wm8350_charger_config() 120 WM8350_CHG_FAST_LIMIT_mA(policy->fast_limit_USB_mA); wm8350_charger_config() 122 policy->charge_mV | policy->trickle_charge_USB_mA | wm8350_charger_config() 124 policy->charge_timeout)); wm8350_charger_config() 128 WM8350_CHG_FAST_LIMIT_mA(policy->fast_limit_mA); wm8350_charger_config() 130 policy->charge_mV | policy->trickle_charge_mA | wm8350_charger_config() 132 policy->charge_timeout)); wm8350_charger_config() 191 struct wm8350_charger_policy *policy = power->policy; wm8350_charger_handler() local 211 wm8350_charger_config(wm8350, policy); wm8350_charger_handler() 232 wm8350_charger_config(wm8350, policy); wm8350_charger_handler() 473 struct wm8350_charger_policy *policy = power->policy; wm8350_power_probe() local 499 if (wm8350_charger_config(wm8350, policy) == 0) { wm8350_power_probe() 89 wm8350_charger_config(struct wm8350 *wm8350, struct wm8350_charger_policy *policy) wm8350_charger_config() argument
|
/linux-4.1.27/drivers/staging/lustre/lustre/include/ |
H A D | lustre_net.h | 530 * Activate the policy. 546 * ORR policy operations 558 * NRS policy operations. 560 * These determine the behaviour of a policy, and are called in response to 565 * Called during policy registration; this operation is optional. 567 * \param[in,out] policy The policy being initialized 569 int (*op_policy_init) (struct ptlrpc_nrs_policy *policy); 571 * Called during policy unregistration; this operation is optional. 573 * \param[in,out] policy The policy being unregistered/finalized 575 void (*op_policy_fini) (struct ptlrpc_nrs_policy *policy); 577 * Called when activating a policy via lprocfs; policies allocate and 580 * \param[in,out] policy The policy being started 584 int (*op_policy_start) (struct ptlrpc_nrs_policy *policy); 586 * Called when deactivating a policy via lprocfs; policies deallocate 589 * \param[in,out] policy The policy being stopped 593 void (*op_policy_stop) (struct ptlrpc_nrs_policy *policy); 595 * Used for policy-specific operations; i.e. not generic ones like 599 * \param[in,out] policy The policy carrying out operation \a opc 609 int (*op_policy_ctl) (struct ptlrpc_nrs_policy *policy, 618 * \param[in,out] policy The policy we're getting resources for. 623 * fallback policy in an NRS head should 648 int (*op_res_get) (struct ptlrpc_nrs_policy *policy, 657 * \param[in,out] policy The policy the resource belongs to 664 void (*op_res_put) (struct ptlrpc_nrs_policy *policy, 668 * Obtains a request for handling from the policy, and optionally 669 * removes the request from the policy; this operation is mandatory. 671 * \param[in,out] policy The policy to poll 674 * request is not removed from the policy. 675 * \param[in] force When set, it will force a policy to return a 684 (*op_req_get) (struct ptlrpc_nrs_policy *policy, bool peek, 687 * Called when attempting to add a request to a policy for later 690 * \param[in,out] policy The policy on which to enqueue \a nrq 698 int (*op_req_enqueue) (struct ptlrpc_nrs_policy *policy, 701 * Removes a request from the policy's set of pending requests. Normally 702 * called after a request has been polled successfully from the policy 705 * \param[in,out] policy The policy the request \a nrq belongs to 710 void (*op_req_dequeue) (struct ptlrpc_nrs_policy *policy, 716 * \param[in,out] policy The policy which is stopping to handle request 724 void (*op_req_stop) (struct ptlrpc_nrs_policy *policy, 727 * Registers the policy's lprocfs interface with a PTLRPC service. 736 * Unegisters the policy's lprocfs interface with a PTLRPC service. 738 * In cases of failed policy registration in 740 * service which has not registered the policy successfully, so 754 * Fallback policy, use this flag only on a single supported policy per 760 * Start policy immediately after registering. 764 * This is a policy registering from a module different to the one NRS 789 * policies, of which one and only one policy is acting as the fallback policy, 790 * and optionally a different policy may be acting as the primary policy. For 792 * enqueue the RPC using the primary policy (if any). 
The fallback policy is 794 * - when there was no primary policy in the 797 * - when the primary policy that was at the 802 * - when the primary policy that was at the 830 * Primary policy, which is the preferred policy for handling RPCs 834 * Fallback policy, which is the backup policy for handling RPCs 854 * This NRS head is in progress of starting a policy 869 * Service compatibility predicate; this determines whether a policy is adequate 872 * XXX:This should give the same result during policy registration and 882 * Human-readable policy name 886 * NRS operations for this policy 901 * Owner module for this policy descriptor; policies registering from a 913 * NRS policy registering descriptor 915 * Is used to hold a description of a policy that can be passed to NRS core in 916 * order to register the policy with NRS heads in different PTLRPC services. 920 * Human-readable policy name 928 * NRS operations for this policy 942 * Owner module for this policy descriptor. 946 * - If one or more instances of the policy are at a state where they 950 * call into the policy's ptlrpc_nrs_pol_ops() handlers. A reference 958 * - During external policy registration, because this should happen in 963 * - During external policy unregistration, because this should happen 964 * in a module's exit() function, and any attempts to start a policy 969 * and cleanup, and policy registration, unregistration and policy 974 * - During any policy-specific lprocfs operations, because a reference 992 * NRS policy state 998 * Not a valid policy state. 1003 * transition here when the user selects a different policy to act 1016 * A policy is in this state in two cases: 1017 * - it is the fallback policy, which is always in this state. 1018 * - it has been activated by the user; i.e. it is the primary policy, 1024 * NRS policy information 1026 * Used for obtaining information for the status of a policy via lprocfs 1034 * Current policy state 1038 * # RPCs enqueued for later dispatching by the policy 1042 * # RPCs started for dispatch by the policy 1046 * Is this a fallback policy? 1052 * NRS policy 1054 * There is one instance of this for each policy in each NRS head of each 1069 * Current state of this policy 1077 * # RPCs enqueued for later dispatching by the policy 1081 * # RPCs started for dispatch by the policy 1085 * Usage Reference count taken on the policy instance 1089 * The NRS head this policy has been created at 1093 * Private policy data; varies by policy type 1097 * Policy descriptor for this policy instance. 1106 * - Inside NRS policies, in the policy's private data in 1109 * policies; e.g. on a policy that performs round robin or similar order 1111 * client NID. On a policy which performs round robin scheduling across 1114 * performed by the policy. 1117 * in policy instances are the parent entities, with all scheduling entities 1118 * a policy schedules across being the children, thus forming a simple resource 1120 * future if the ability to have more than one primary policy is added. 1132 * policy instances; i.e. those are top-level ones. 1136 * The policy associated with this resource. 1149 * FIFO policy 1151 * This policy is a logical wrapper around previous, non-NRS functionality. 1153 * policy is currently used as the fallback policy, and the only enabled policy 1159 * Private data structure for the FIFO policy 1163 * Resource object for policy instance. 1198 * policy that was used to enqueue the request. 
1214 * Fields for the FIFO policy 1542 void nrs_policy_get_info_locked(struct ptlrpc_nrs_policy *policy, 2237 * Service compatibility function; the policy is compatible with all services. 2239 * \param[in] svc The service the policy is attempting to register with. 2240 * \param[in] desc The policy descriptor 2242 * \retval true The policy is compatible with the service 2253 * Service compatibility function; the policy is compatible with only a specific 2257 * \param[in] svc The service the policy is attempting to register with. 2258 * \param[in] desc The policy descriptor 2260 * \retval false The policy is not compatible with the service 2261 * \retval true The policy is compatible with the service 2903 /* ptlrpc daemon bind policy */ 2918 /* ptlrpc daemon load policy 2939 void ptlrpcd_add_req(struct ptlrpc_request *req, pdl_policy_t policy, int idx);
|
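The ptlrpc_nrs_pol_ops block above defines an NRS policy as a vtable of enqueue/dequeue/get hooks, with a FIFO policy serving as the fallback. As a hedged illustration of that ops-vtable pattern, here is a standalone C sketch with a toy FIFO policy; nrs_request, nrs_pol_ops and the op_req_* members are simplified stand-ins for the lustre types, not the real definitions.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical, simplified stand-ins for the lustre structures. */
struct nrs_request {
	struct nrs_request *next;
	int id;
};

struct nrs_policy;

/* Mirrors the shape of the ops table documented above. */
struct nrs_pol_ops {
	int  (*op_req_enqueue)(struct nrs_policy *policy,
			       struct nrs_request *nrq);
	void (*op_req_dequeue)(struct nrs_policy *policy,
			       struct nrs_request *nrq);
	struct nrs_request *(*op_req_get)(struct nrs_policy *policy,
					  bool peek);
};

struct nrs_policy {
	const struct nrs_pol_ops *ops;
	struct nrs_request *head, *tail;	/* private FIFO state */
};

static int fifo_req_enqueue(struct nrs_policy *policy, struct nrs_request *nrq)
{
	nrq->next = NULL;
	if (policy->tail)
		policy->tail->next = nrq;
	else
		policy->head = nrq;
	policy->tail = nrq;
	return 0;
}

static struct nrs_request *fifo_req_get(struct nrs_policy *policy, bool peek)
{
	struct nrs_request *nrq = policy->head;

	/* Per the op_req_get contract above: peek leaves the request queued. */
	if (nrq && !peek) {
		policy->head = nrq->next;
		if (!policy->head)
			policy->tail = NULL;
	}
	return nrq;
}

static void fifo_req_dequeue(struct nrs_policy *policy, struct nrs_request *nrq)
{
	/* Removal already happened in fifo_req_get() for this toy policy. */
	(void)policy;
	(void)nrq;
}

static const struct nrs_pol_ops fifo_ops = {
	.op_req_enqueue	= fifo_req_enqueue,
	.op_req_dequeue	= fifo_req_dequeue,
	.op_req_get	= fifo_req_get,
};

int main(void)
{
	struct nrs_policy pol = { .ops = &fifo_ops };
	struct nrs_request a = { .id = 1 }, b = { .id = 2 };
	struct nrs_request *out;

	pol.ops->op_req_enqueue(&pol, &a);
	pol.ops->op_req_enqueue(&pol, &b);
	while ((out = pol.ops->op_req_get(&pol, false)) != NULL)
		printf("dispatching request %d\n", out->id);
	return 0;
}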
H A D | lustre_sec.h | 76 * | 4b (bulk svc) | 4b (bulk type) | 4b (svc) | 4b (mech) | 4b (policy) | 140 #define MAKE_FLVR(policy, mech, svc, btype, bsvc) \ 141 (((__u32)(policy) << FLVR_POLICY_OFFSET) | \ 520 * client side policy operation vector. 530 * When necessary, policy module is responsible for taking reference 548 * Notify that this ptlrpc_sec is going to die. Optionally, policy 557 * Given \a vcred, lookup and/or create its context. The policy module 570 * Called then the reference of \a ctx dropped to 0. The policy module 690 * server side policy operation vector. 798 __u16 sp_policy; /* policy number */ 898 __u8 bsd_data[0]; /* policy-specific token */ 927 int sptlrpc_register_policy(struct ptlrpc_sec_policy *policy); 928 int sptlrpc_unregister_policy(struct ptlrpc_sec_policy *policy); 938 struct ptlrpc_sec_policy *sptlrpc_policy_get(struct ptlrpc_sec_policy *policy) sptlrpc_policy_get() argument 940 __module_get(policy->sp_owner); sptlrpc_policy_get() 941 return policy; sptlrpc_policy_get() 945 void sptlrpc_policy_put(struct ptlrpc_sec_policy *policy) sptlrpc_policy_put() argument 947 module_put(policy->sp_owner); sptlrpc_policy_put() 1002 * internal apis which only used by policy implementation
|
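MAKE_FLVR above packs five 4-bit fields into a single __u32 security flavor, in the nibble order given by the layout comment (policy in the low nibble, bulk service highest). A small self-contained demo of that packing; the FLVR_*_OFFSET values here are inferred from the layout comment, not copied from lustre_sec.h.

#include <stdint.h>
#include <stdio.h>

/* Nibble offsets assumed from the documented layout:
 * | 4b (bulk svc) | 4b (bulk type) | 4b (svc) | 4b (mech) | 4b (policy) | */
#define FLVR_POLICY_OFFSET	0
#define FLVR_MECH_OFFSET	4
#define FLVR_SVC_OFFSET		8
#define FLVR_BTYPE_OFFSET	12
#define FLVR_BSVC_OFFSET	16

#define MAKE_FLVR(policy, mech, svc, btype, bsvc)		\
	(((uint32_t)(policy) << FLVR_POLICY_OFFSET) |		\
	 ((uint32_t)(mech)   << FLVR_MECH_OFFSET)   |		\
	 ((uint32_t)(svc)    << FLVR_SVC_OFFSET)    |		\
	 ((uint32_t)(btype)  << FLVR_BTYPE_OFFSET)  |		\
	 ((uint32_t)(bsvc)   << FLVR_BSVC_OFFSET))

int main(void)
{
	uint32_t flvr = MAKE_FLVR(1, 2, 1, 0, 0);

	printf("flavor = 0x%05x, policy nibble = %u\n", flvr, flvr & 0xf);
	return 0;
}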
/linux-4.1.27/tools/power/cpupower/utils/ |
H A D | cpufreq-info.c | 58 struct cpufreq_policy *policy; proc_cpufreq_output() local 67 policy = cpufreq_get_policy(cpu); proc_cpufreq_output() 68 if (!policy) proc_cpufreq_output() 74 min_pctg = (policy->min * 100) / max; proc_cpufreq_output() 75 max_pctg = (policy->max * 100) / max; proc_cpufreq_output() 78 cpu , policy->min, max ? min_pctg : 0, policy->max, proc_cpufreq_output() 79 max ? max_pctg : 0, policy->governor); proc_cpufreq_output() 81 cpufreq_put_policy(policy); proc_cpufreq_output() 255 struct cpufreq_policy *policy; debug_output_one() local 334 policy = cpufreq_get_policy(cpu); debug_output_one() 335 if (policy) { debug_output_one() 336 printf(_(" current policy: frequency should be within ")); debug_output_one() 337 print_speed(policy->min); debug_output_one() 339 print_speed(policy->max); debug_output_one() 344 policy->governor); debug_output_one() 345 cpufreq_put_policy(policy); debug_output_one() 432 /* --policy / -p */ 436 struct cpufreq_policy *policy = cpufreq_get_policy(cpu); get_policy() local 437 if (!policy) get_policy() 439 printf("%lu %lu %s\n", policy->min, policy->max, policy->governor); get_policy() 440 cpufreq_put_policy(policy); get_policy() 545 { .name = "policy", .has_arg = no_argument, .flag = NULL, .val = 'p'},
|
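cpufreq-info fills struct cpufreq_policy through libcpufreq's cpufreq_get_policy(); the same min/max/governor triple is also exported through sysfs. A standalone sketch that reads it for cpu0 (the paths are the standard cpufreq sysfs attributes; error handling is kept minimal):

#include <stdio.h>
#include <string.h>

static int read_attr(const char *attr, char *buf, size_t len)
{
	char path[128];
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/devices/system/cpu/cpu0/cpufreq/%s", attr);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (!fgets(buf, len, f)) {
		fclose(f);
		return -1;
	}
	buf[strcspn(buf, "\n")] = '\0';	/* strip trailing newline */
	fclose(f);
	return 0;
}

int main(void)
{
	char min[32], max[32], gov[32];

	if (read_attr("scaling_min_freq", min, sizeof(min)) ||
	    read_attr("scaling_max_freq", max, sizeof(max)) ||
	    read_attr("scaling_governor", gov, sizeof(gov))) {
		fprintf(stderr, "cpufreq sysfs not available\n");
		return 1;
	}
	/* Same min/max/governor triple that get_policy() above prints. */
	printf("%s %s %s\n", min, max, gov);
	return 0;
}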
/linux-4.1.27/mm/ |
H A D | mempolicy.c | 2 * Simple NUMA memory policy for the Linux kernel. 8 * NUMA policy allows the user to give hints in which node(s) memory should 13 * The VMA policy has priority over the process policy for a page fault. 19 * for anonymous memory. For process policy an process counter 32 * process policy. 35 * use the process policy. This is what Linux always did 38 * The process policy is applied for most non interrupt memory allocations 40 * try to allocate on the local CPU. The VMA policy is only applied for memory 43 * Currently there are a few corner cases in swapping where the policy 44 * is not applied, but the majority should be handled. When process policy 48 * requesting a lower zone just use default policy. This implies that 52 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between 57 fix mmap readahead to honour policy and enable policy for any page cache 60 global policy for page cache? currently it uses process policy. Requires 62 handle mremap for shared memory (currently ignored for the policy) 64 make bind policy root only? It can trigger oom much faster and the 116 * run-time system-wide default policy => local allocation 207 * any, for the new policy. mpol_new() has already validated the nodes 208 * parameter with respect to the policy mode and flags. But, we need to 250 * This function just creates a new policy, does some check and simple 256 struct mempolicy *policy; mpol_new() local 285 policy = kmem_cache_alloc(policy_cache, GFP_KERNEL); mpol_new() 286 if (!policy) mpol_new() 288 atomic_set(&policy->refcnt, 1); mpol_new() 289 policy->mode = mode; mpol_new() 290 policy->flags = flags; mpol_new() 292 return policy; mpol_new() 384 * mpol_rebind_policy - Migrate a policy to a different set of nodes 659 * Apply policy to a single VMA 694 /* Step 2: apply policy to a range and do splits. */ mbind_range() 755 /* Set the process memory policy */ do_set_mempolicy() 793 * Return nodemask for policy for get_mempolicy() query 832 /* Retrieve NUMA policy */ do_get_mempolicy() 833 static long do_get_mempolicy(int *policy, nodemask_t *nmask, do_get_mempolicy() argument 848 *policy = 0; /* just so it's initialized */ do_get_mempolicy() 857 * Do NOT fall back to task policy if the do_get_mempolicy() 858 * vma/shared policy at addr is NULL. We do_get_mempolicy() 882 *policy = err; do_get_mempolicy() 885 *policy = current->il_next; do_get_mempolicy() 891 *policy = pol == &default_policy ? MPOL_DEFAULT : do_get_mempolicy() 895 * the policy to userspace. do_get_mempolicy() 897 *policy |= (pol->flags & MPOL_MODE_FLAGS); do_get_mempolicy() 1091 * Allocate a new page for page migration based on vma policy. 1115 * if !vma, alloc_page_vma() will use task or system default policy new_page() 1175 * If we are using the default policy then operation do_mbind() 1322 /* Set the process memory policy */ SYSCALL_DEFINE3() 1437 /* Retrieve NUMA policy */ SYSCALL_DEFINE5() 1438 SYSCALL_DEFINE5(get_mempolicy, int __user *, policy, SYSCALL_DEFINE5() 1454 if (policy && put_user(pval, policy)) SYSCALL_DEFINE5() 1465 COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy, COMPAT_SYSCALL_DEFINE5() 1481 err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags); COMPAT_SYSCALL_DEFINE5() 1556 * shmem_alloc_page() passes MPOL_F_SHARED policy with __get_vma_policy() 1571 * @vma: virtual memory area whose policy is sought 1572 * @addr: address in @vma for shared policy lookup 1574 * Returns effective policy for a VMA at specified address. 
1575 * Falls back to current->mempolicy or system default policy, as necessary. 1614 static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone) apply_policy_zone() argument 1621 * if policy->v.nodes has movable memory only, apply_policy_zone() 1622 * we apply policy when gfp_zone(gfp) = ZONE_MOVABLE only. apply_policy_zone() 1624 * policy->v.nodes intersects with node_states[N_MEMORY]. apply_policy_zone() 1626 * policy->v.nodes has movable memory only. apply_policy_zone() 1628 if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY])) apply_policy_zone() 1638 static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy) policy_nodemask() argument 1641 if (unlikely(policy->mode == MPOL_BIND) && policy_nodemask() 1642 apply_policy_zone(policy, gfp_zone(gfp)) && policy_nodemask() 1643 cpuset_nodemask_valid_mems_allowed(&policy->v.nodes)) policy_nodemask() 1644 return &policy->v.nodes; policy_nodemask() 1650 static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy, policy_zonelist() argument 1653 switch (policy->mode) { policy_zonelist() 1655 if (!(policy->flags & MPOL_F_LOCAL)) policy_zonelist() 1656 nd = policy->v.preferred_node; policy_zonelist() 1666 unlikely(!node_isset(nd, policy->v.nodes))) policy_zonelist() 1667 nd = first_node(policy->v.nodes); policy_zonelist() 1676 static unsigned interleave_nodes(struct mempolicy *policy) interleave_nodes() argument 1682 next = next_node(nid, policy->v.nodes); interleave_nodes() 1684 next = first_node(policy->v.nodes); interleave_nodes() 1691 * Depending on the memory policy provide a node from which to allocate the 1696 struct mempolicy *policy; mempolicy_slab_node() local 1702 policy = current->mempolicy; mempolicy_slab_node() 1703 if (!policy || policy->flags & MPOL_F_LOCAL) mempolicy_slab_node() 1706 switch (policy->mode) { mempolicy_slab_node() 1711 return policy->v.preferred_node; mempolicy_slab_node() 1714 return interleave_nodes(policy); mempolicy_slab_node() 1718 * Follow bind policy behavior and start allocation at the mempolicy_slab_node() 1726 &policy->v.nodes, mempolicy_slab_node() 1796 * @vma: virtual memory area whose policy is sought 1797 * @addr: address in @vma for shared policy lookup and interleave policy 1804 * If the effective policy is 'BIND', returns a pointer to the mempolicy's 1833 * to indicate default policy. Otherwise, extract the policy nodemask 1834 * for 'bind' or 'interleave' policy into the argument nodemask, or 1836 * 'preferred' or 'local' policy and return 'true' to indicate presence 1883 * policy. Otherwise, check for intersection between mask and the policy 1884 * nodemask for 'bind' or 'interleave' policy. For 'preferred' or 'local' 1885 * policy, always return true since it may allocate elsewhere on fallback. 1923 /* Allocate a page in interleaved policy. 1951 * @node: Which node to prefer for allocation (modulo policy). 1955 * a NUMA policy associated with the VMA or the current process. 1988 * For hugepage allocation and non-interleave policy which alloc_pages_vma() 1994 * If the policy is interleave, or does not allow the current alloc_pages_vma() 2032 * interrupt context and apply the current process NUMA policy. 2144 * Shared memory backing store policy support. 2184 /* Insert a new shared policy into the list. */ 2205 new->policy ? 
new->policy->mode : 0); sp_insert() 2208 /* Find shared policy intersecting idx */ 2220 mpol_get(sn->policy); mpol_shared_policy_lookup() 2221 pol = sn->policy; mpol_shared_policy_lookup() 2229 mpol_put(n->policy); sp_free() 2234 * mpol_misplaced - check whether current page node is valid in policy 2240 * Lookup current policy node id for vma,addr and "compare to" page's 2287 * use current page if in policy nodemask, mpol_misplaced() 2332 node->policy = pol; sp_node_init() 2356 /* Replace a policy range. */ shared_policy_replace() 2377 /* Old policy spanning whole new range. */ shared_policy_replace() 2382 *mpol_new = *n->policy; shared_policy_replace() 2423 * mpol_shared_policy_init - initialize shared policy for inode 2424 * @sp: pointer to inode shared policy 2427 * Install non-NULL @mpol in inode's shared policy rb-tree. 2457 /* Create pseudo-vma that contains just the policy */ mpol_shared_policy_init() 2459 pvma.vm_end = TASK_SIZE; /* policy covers entire file */ mpol_shared_policy_init() 2495 /* Free a backing policy store on inode delete. */ mpol_free_shared_policy() 2587 * Set interleaving policy for system init. Interleaving is only 2616 /* Reset policy of current process to default */ numa_default_policy()
|
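The do_set_mempolicy()/do_get_mempolicy() paths above implement the set_mempolicy(2) and get_mempolicy(2) syscalls. A minimal runnable userspace example using the numaif.h wrappers (compile with -lnuma; assumes node 0 exists):

#include <numaif.h>
#include <stdio.h>

int main(void)
{
	unsigned long nodemask = 1UL << 0;	/* node 0 only */
	int mode = -1;

	/* Install an MPOL_BIND task policy (the do_set_mempolicy() path). */
	if (set_mempolicy(MPOL_BIND, &nodemask, 8 * sizeof(nodemask)) != 0) {
		perror("set_mempolicy");
		return 1;
	}

	/* With no address and no flags, this reports the task policy. */
	if (get_mempolicy(&mode, NULL, 0, NULL, 0) != 0) {
		perror("get_mempolicy");
		return 1;
	}
	printf("mode = %d (MPOL_BIND = %d)\n", mode, MPOL_BIND);

	/* Reset to the default policy. */
	set_mempolicy(MPOL_DEFAULT, NULL, 0);
	return 0;
}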
/linux-4.1.27/net/netfilter/ |
H A D | xt_policy.c | 1 /* IP tables module for matching IPsec policy 21 MODULE_DESCRIPTION("Xtables: IPsec policy match"); 136 pr_info("neither incoming nor outgoing policy selected\n"); policy_mt_check() 141 pr_info("output policy not valid in PREROUTING and INPUT\n"); policy_mt_check() 146 pr_info("input policy not valid in POSTROUTING and OUTPUT\n"); policy_mt_check() 150 pr_info("too many policy elements\n"); policy_mt_check() 158 .name = "policy", 166 .name = "policy",
|
H A D | xt_CT.c | 134 pr_info("Timeout policy base is empty\n"); xt_ct_set_timeout() 149 pr_info("No such timeout policy \"%s\"\n", timeout_name); xt_ct_set_timeout() 155 pr_info("Timeout policy `%s' can only be used by L3 protocol " xt_ct_set_timeout() 159 /* Make sure the timeout policy matches any existing protocol tracker, xt_ct_set_timeout() 165 pr_info("Timeout policy `%s' can only be used by L4 protocol " xt_ct_set_timeout()
|
H A D | nfnetlink_cttimeout.c | 114 /* You cannot replace one timeout policy by another of cttimeout_new_timeout() 519 .policy = cttimeout_nla_policy }, 522 .policy = cttimeout_nla_policy }, 525 .policy = cttimeout_nla_policy }, 528 .policy = cttimeout_nla_policy }, 531 .policy = cttimeout_nla_policy },
|
H A D | nfnetlink_acct.c | 376 .policy = nfnl_acct_policy }, 379 .policy = nfnl_acct_policy }, 382 .policy = nfnl_acct_policy }, 385 .policy = nfnl_acct_policy },
|
H A D | nft_counter.c | 93 .policy = nft_counter_policy,
|
H A D | nft_limit.c | 99 .policy = nft_limit_policy,
|
/linux-4.1.27/lib/ |
H A D | nlattr.c | 31 const struct nla_policy *policy) validate_nla() 39 pt = &policy[type]; validate_nla() 114 * @policy: validation policy 117 * specified policy. Attributes with a type exceeding maxtype will be 123 const struct nla_policy *policy) nla_validate() 129 err = validate_nla(nla, maxtype, policy); nla_for_each_attr() 141 * nla_policy_len - Determine the max. length of a policy 142 * @policy: policy to use 145 * Determines the max. length of the policy. It is currently used 173 * @policy: validation policy 178 * reasons. policy may be set to NULL if no validation is required. 183 int len, const struct nla_policy *policy) nla_parse() 194 if (policy) { nla_for_each_attr() 195 err = validate_nla(nla, maxtype, policy); nla_for_each_attr() 30 validate_nla(const struct nlattr *nla, int maxtype, const struct nla_policy *policy) validate_nla() argument 122 nla_validate(const struct nlattr *head, int len, int maxtype, const struct nla_policy *policy) nla_validate() argument 182 nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head, int len, const struct nla_policy *policy) nla_parse() argument
|
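nla_parse() walks the attribute stream and runs validate_nla() on each attribute against the caller's policy table. A minimal kernel-style sketch (4.1-era signatures) of defining a policy and parsing a nested attribute against it; MY_ATTR_* and the 15-byte string limit are hypothetical:

#include <linux/kernel.h>
#include <net/netlink.h>

enum {
	MY_ATTR_UNSPEC,
	MY_ATTR_PORT,		/* u16 */
	MY_ATTR_NAME,		/* NUL-terminated string */
	__MY_ATTR_MAX,
};
#define MY_ATTR_MAX (__MY_ATTR_MAX - 1)

static const struct nla_policy my_policy[MY_ATTR_MAX + 1] = {
	[MY_ATTR_PORT] = { .type = NLA_U16 },
	[MY_ATTR_NAME] = { .type = NLA_NUL_STRING, .len = 15 },
};

static int my_parse(const struct nlattr *nested)
{
	struct nlattr *tb[MY_ATTR_MAX + 1];
	int err;

	/* validate_nla() is applied to every attribute found. */
	err = nla_parse_nested(tb, MY_ATTR_MAX, nested, my_policy);
	if (err)
		return err;

	if (tb[MY_ATTR_PORT])
		pr_info("port=%u\n", nla_get_u16(tb[MY_ATTR_PORT]));
	return 0;
}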
/linux-4.1.27/include/uapi/linux/ |
H A D | mempolicy.h | 52 to policy */ 53 #define MPOL_MF_MOVE_ALL (1<<2) /* Move every page to conform to policy */ 69 #define MPOL_F_MOF (1 << 3) /* this policy wants migrate on fault */
|
H A D | capability.h | 261 /* Allow setting zone reclaim policy */ 322 The base kernel enforces no MAC policy. 323 An LSM may enforce a MAC policy, and if it does and it chooses 324 to implement capability based overrides of that policy, this is 331 An LSM may enforce a MAC policy, and if it does and it chooses 333 policy or the data required to maintain it, this is the
|
H A D | atmsvc.h | 45 * Some policy stuff for atmsigd and for net/atm/svc.c. Both have to agree on
|
H A D | ipv6_route.h | 32 #define RTF_POLICY 0x04000000 /* policy route */
|
H A D | xfrm.h | 46 /* Selector, used as selector both on policy rules (SPD) and SAs. */ 264 XFRM_AE_CU=64, /* Event cause is policy update */ 415 #define XFRM_POLICY_LOCALOK 1 /* Allow user to override global policy */ 431 struct xfrm_userpolicy_info policy; member in struct:xfrm_user_acquire
|
/linux-4.1.27/net/bridge/netfilter/ |
H A D | ebtable_filter.c | 20 .policy = EBT_ACCEPT, 24 .policy = EBT_ACCEPT, 28 .policy = EBT_ACCEPT,
|
H A D | ebtable_nat.c | 20 .policy = EBT_ACCEPT, 24 .policy = EBT_ACCEPT, 28 .policy = EBT_ACCEPT,
|
H A D | ebtable_broute.c | 23 .policy = EBT_ACCEPT,
|
/linux-4.1.27/security/integrity/ima/ |
H A D | ima_main.c | 173 * bitmask based on the appraise/audit/measurement policy. process_measurement() 259 * ima_file_mmap - based on policy, collect/store measurement. 264 * policy decision. 267 * is in policy and IMA-appraisal is in enforcing mode, return -EACCES. 277 * ima_bprm_check - based on policy, collect/store measurement. 287 * is in policy and IMA-appraisal is in enforcing mode, return -EACCES. 295 * ima_path_check - based on policy, collect/store measurement. 299 * Measure files based on the ima_must_measure() policy decision. 302 * is in policy and IMA-appraisal is in enforcing mode, return -EACCES. 313 * ima_module_check - based on policy, collect/store/appraise measurement. 316 * Measure/appraise kernel modules based on policy. 319 * is in policy and IMA-appraisal is in enforcing mode, return -EACCES.
|
H A D | ima_fs.c | 306 * ima_open_policy: sequentialize access to the policy file 319 * ima_release_policy - start using the new measure policy rules. 321 * Initially, ima_measure points to the default policy rules, now 322 * point to the new policy rules, and remove the securityfs policy file, 323 * assuming a valid policy. 329 pr_info("IMA: policy update %s\n", cause); ima_release_policy() 385 ima_policy = securityfs_create_file("policy", ima_fs_init()
|
H A D | ima_policy.c | 10 * - initialize default measure policy rules 67 * Without LSM specific knowledge, the default policy can only be 174 * Although the IMA policy does not change, the LSM policy can be 176 * stale LSM policy. 178 * Update the IMA LSM based rules to reflect the reloaded LSM policy. 315 * @inode: pointer to an inode for which the policy decision is being made 322 * (There is no need for locking when walking the policy list, 360 * loaded policy. Based on this flag, the decision to short circuit 422 * policy. Once updated, the policy is locked, no additional rules can be 423 * added to the policy. 747 * @rule - ima measurement policy rule 749 * Uses a mutex to protect the policy list from multiple concurrent writers. 780 NULL, op, "invalid-policy", result, ima_parse_add_rule() 792 /* ima_delete_rules called to cleanup invalid policy */ ima_delete_rules()
|
H A D | ima.h | 46 /* current content of the policy */ 152 /* IMA policy related functions */ 226 /* LSM based policy rules require audit */
|
/linux-4.1.27/security/selinux/ |
H A D | xfrm.c | 138 * Authorize the deletion of a labeled SA or policy rule. 153 * LSM hook implementation that authorizes that a flow can use a xfrm policy 161 * "non-labeled" policy. This would prevent inadvertent "leaks". */ selinux_xfrm_policy_lookup() 176 * the given policy, flow combo. 186 /* unlabeled policy and labeled SA can't match */ selinux_xfrm_state_pol_flow_match() 189 /* unlabeled policy and unlabeled SA match all flows */ selinux_xfrm_state_pol_flow_match() 193 /* unlabeled SA and labeled policy can't match */ selinux_xfrm_state_pol_flow_match() 205 /* We don't need a separate SA Vs. policy polmatch check since the SA selinux_xfrm_state_pol_flow_match() 206 * is now of the same label as the flow and a flow Vs. policy polmatch selinux_xfrm_state_pol_flow_match() 294 * for policy cloning. 425 * non-IPsec communication unless explicitly allowed by policy. */ selinux_xfrm_sock_rcv_skb() 468 * non-IPsec communication unless explicitly allowed by policy. */ selinux_xfrm_postroute_last()
|
H A D | netnode.c | 5 * mapping is maintained as part of the normal policy but a fast cache is 188 * sel_netnode_sid_slow - Lookup the SID of a network address using the policy 195 * security policy. The result is added to the network address table to 258 * can't be found then the policy is queried and the result is added to the
|
H A D | netport.c | 5 * mapping is maintained as part of the normal policy but a fast cache is 137 * sel_netport_sid_slow - Lookup the SID of a network address using the policy 144 * policy. The result is added to the network port table to speedup future 193 * then the policy is queried and the result is added to the table to speedup
|
H A D | netif.c | 124 * sel_netif_sid_slow - Lookup the SID of a network interface using the policy 131 * security policy. The result is added to the network interface table to 198 * can't be found then the policy is queried and the result is added to the
|
/linux-4.1.27/net/xfrm/ |
H A D | xfrm_policy.c | 282 struct xfrm_policy *policy; xfrm_policy_alloc() local 284 policy = kzalloc(sizeof(struct xfrm_policy), gfp); xfrm_policy_alloc() 286 if (policy) { xfrm_policy_alloc() 287 write_pnet(&policy->xp_net, net); xfrm_policy_alloc() 288 INIT_LIST_HEAD(&policy->walk.all); xfrm_policy_alloc() 289 INIT_HLIST_NODE(&policy->bydst); xfrm_policy_alloc() 290 INIT_HLIST_NODE(&policy->byidx); xfrm_policy_alloc() 291 rwlock_init(&policy->lock); xfrm_policy_alloc() 292 atomic_set(&policy->refcnt, 1); xfrm_policy_alloc() 293 skb_queue_head_init(&policy->polq.hold_queue); xfrm_policy_alloc() 294 setup_timer(&policy->timer, xfrm_policy_timer, xfrm_policy_alloc() 295 (unsigned long)policy); xfrm_policy_alloc() 296 setup_timer(&policy->polq.hold_timer, xfrm_policy_queue_process, xfrm_policy_alloc() 297 (unsigned long)policy); xfrm_policy_alloc() 298 policy->flo.ops = &xfrm_policy_fc_ops; xfrm_policy_alloc() 300 return policy; xfrm_policy_alloc() 306 void xfrm_policy_destroy(struct xfrm_policy *policy) xfrm_policy_destroy() argument 308 BUG_ON(!policy->walk.dead); xfrm_policy_destroy() 310 if (del_timer(&policy->timer) || del_timer(&policy->polq.hold_timer)) xfrm_policy_destroy() 313 security_xfrm_policy_free(policy->security); xfrm_policy_destroy() 314 kfree(policy); xfrm_policy_destroy() 330 static void xfrm_policy_kill(struct xfrm_policy *policy) xfrm_policy_kill() argument 332 policy->walk.dead = 1; xfrm_policy_kill() 334 atomic_inc(&policy->genid); xfrm_policy_kill() 336 if (del_timer(&policy->polq.hold_timer)) xfrm_policy_kill() 337 xfrm_pol_put(policy); xfrm_policy_kill() 338 xfrm_queue_purge(&policy->polq.hold_queue); xfrm_policy_kill() 340 if (del_timer(&policy->timer)) xfrm_policy_kill() 341 xfrm_pol_put(policy); xfrm_policy_kill() 343 xfrm_pol_put(policy); xfrm_policy_kill() 353 /* calculate policy hash thresholds */ __get_hash_thresh() 581 struct xfrm_policy *policy; xfrm_hash_rebuild() local 627 list_for_each_entry_reverse(policy, &net->xfrm.policy_all, walk.all) { xfrm_hash_rebuild() 629 chain = policy_hash_bysel(net, &policy->selector, xfrm_hash_rebuild() 630 policy->family, xfrm_hash_rebuild() 631 xfrm_policy_id2dir(policy->index)); hlist_for_each_entry() 633 if (policy->priority >= pol->priority) hlist_for_each_entry() 639 hlist_add_behind(&policy->bydst, newpos); 641 hlist_add_head(&policy->bydst, chain); 732 static bool xfrm_policy_mark_match(struct xfrm_policy *policy, xfrm_policy_mark_match() argument 735 u32 mark = policy->mark.v & policy->mark.m; xfrm_policy_mark_match() 737 if (policy->mark.v == pol->mark.v && policy->mark.m == pol->mark.m) xfrm_policy_mark_match() 741 policy->priority == pol->priority) xfrm_policy_mark_match() 747 int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl) xfrm_policy_insert() argument 749 struct net *net = xp_net(policy); xfrm_policy_insert() 756 chain = policy_hash_bysel(net, &policy->selector, policy->family, dir); xfrm_policy_insert() 760 if (pol->type == policy->type && hlist_for_each_entry() 761 !selector_cmp(&pol->selector, &policy->selector) && hlist_for_each_entry() 762 xfrm_policy_mark_match(policy, pol) && hlist_for_each_entry() 763 xfrm_sec_ctx_match(pol->security, policy->security) && hlist_for_each_entry() 770 if (policy->priority > pol->priority) hlist_for_each_entry() 772 } else if (policy->priority >= pol->priority) { hlist_for_each_entry() 780 hlist_add_behind(&policy->bydst, newpos); 782 hlist_add_head(&policy->bydst, chain); 783 __xfrm_policy_link(policy, dir); 787 if (policy->family == AF_INET) 793 
xfrm_policy_requeue(delpol, policy); 796 policy->index = delpol ? delpol->index : xfrm_gen_index(net, dir, policy->index); 797 hlist_add_head(&policy->byidx, net->xfrm.policy_byidx+idx_hash(net, policy->index)); 798 policy->curlft.add_time = get_seconds(); 799 policy->curlft.use_time = 0; 800 if (!mod_timer(&policy->timer, jiffies + HZ)) 801 xfrm_pol_hold(policy); 1063 * Find policy to apply to this flow. 1065 * Returns 0 if policy found, else an -errno. 1323 * allowed to delete or replace socket policy. xfrm_sk_policy_insert() 1392 /* Resolve list of templates for the flow, given policy. */ 1395 xfrm_tmpl_resolve_one(struct xfrm_policy *policy, const struct flowi *fl, xfrm_tmpl_resolve_one() argument 1398 struct net *net = xp_net(policy); xfrm_tmpl_resolve_one() 1405 for (nx = 0, i = 0; i < policy->xfrm_nr; i++) { xfrm_tmpl_resolve_one() 1409 struct xfrm_tmpl *tmpl = &policy->xfrm_vec[i]; xfrm_tmpl_resolve_one() 1423 x = xfrm_state_find(remote, local, fl, tmpl, policy, &error, family); xfrm_tmpl_resolve_one() 1634 static struct dst_entry *xfrm_bundle_create(struct xfrm_policy *policy, xfrm_bundle_create() argument 1639 struct net *net = xp_net(policy); xfrm_bundle_create() 1651 int family = policy->selector.family; xfrm_bundle_create() 2141 * either because the policy blocks, has no transformations or xfrm_bundle_lookup() 2364 * check policy restrictions. At the moment we make this in maximally 2366 * have policy cached at them. 2645 * When a policy's bundle is pruned, we dst_free() the XFRM xfrm_dst_check() 3228 /* target policy has been deleted */ xfrm_policy_migrate() 3308 /* Stage 1 - find policy */ xfrm_migrate() 3329 /* Stage 3 - update policy */ xfrm_migrate()
|
/linux-4.1.27/drivers/staging/lustre/lustre/lmv/ |
H A D | lproc_lmv.c | 100 enum placement_policy policy; lmv_placement_seq_write() local 116 policy = placement_name2policy(dummy, len); lmv_placement_seq_write() 117 if (policy != PLACEMENT_INVAL_POLICY) { lmv_placement_seq_write() 119 lmv->lmv_placement = policy; lmv_placement_seq_write() 122 CERROR("Invalid placement policy \"%s\"!\n", dummy); lmv_placement_seq_write()
|
/linux-4.1.27/fs/ceph/ |
H A D | ioctl.h | 10 * CEPH_IOC_GET_LAYOUT - get file layout or dir layout policy 12 * CEPH_IOC_SET_LAYOUT_POLICY - set dir layout policy 19 * Files get a new layout based on the policy set on the containing 21 * you examine the layout for a file or the policy on a directory. 27 * SET_LAYOUT_POLICY will let you set a layout policy (default layout)
|
/linux-4.1.27/block/ |
H A D | blk-flush.c | 99 unsigned int policy = 0; blk_flush_policy() local 102 policy |= REQ_FSEQ_DATA; blk_flush_policy() 106 policy |= REQ_FSEQ_PREFLUSH; blk_flush_policy() 108 policy |= REQ_FSEQ_POSTFLUSH; blk_flush_policy() 110 return policy; blk_flush_policy() 375 unsigned int policy = blk_flush_policy(fflags, rq); blk_insert_flush() local 379 * @policy now records what operations need to be done. Adjust blk_insert_flush() 392 if (!policy) { blk_insert_flush() 407 if ((policy & REQ_FSEQ_DATA) && blk_insert_flush() 408 !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) { blk_insert_flush() 428 blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0); blk_insert_flush() 434 blk_flush_complete_seq(rq, fq, REQ_FSEQ_ACTIONS & ~policy, 0); blk_insert_flush()
|
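blk_flush_policy() reduces the queue's flush capabilities and the request's flags to a bitmask of the flush steps that are actually required. A standalone reconstruction of that decision logic; the flag values here are illustrative, not the kernel's.

#include <stdio.h>

#define REQ_FLUSH		(1u << 0)	/* preflush requested */
#define REQ_FUA			(1u << 1)	/* forced unit access requested */
#define REQ_FSEQ_PREFLUSH	(1u << 2)
#define REQ_FSEQ_DATA		(1u << 3)
#define REQ_FSEQ_POSTFLUSH	(1u << 4)

static unsigned int flush_policy(unsigned int queue_flags,
				 unsigned int rq_flags,
				 unsigned int rq_sectors)
{
	unsigned int policy = 0;

	if (rq_sectors)			/* there is payload to write */
		policy |= REQ_FSEQ_DATA;

	if (queue_flags & REQ_FLUSH) {	/* device has a volatile cache */
		if (rq_flags & REQ_FLUSH)
			policy |= REQ_FSEQ_PREFLUSH;
		if (!(queue_flags & REQ_FUA) && (rq_flags & REQ_FUA))
			policy |= REQ_FSEQ_POSTFLUSH;	/* emulate FUA */
	}
	return policy;
}

int main(void)
{
	/* FUA write on a device with a cache but no native FUA:
	 * expect DATA | PREFLUSH | POSTFLUSH = 0x1c. */
	printf("policy = 0x%x\n",
	       flush_policy(REQ_FLUSH, REQ_FLUSH | REQ_FUA, 8));
	return 0;
}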
H A D | blk-cgroup.c | 99 /* alloc per-policy data and attach it to blkg */ blkg_alloc() 122 * This is internal version and shouldn't be used by policy 213 /* invoke per-policy init */ blkg_create() 500 * @pol: policy in question 505 * policy specified by @pol exists. @prfill is invoked with @sf, the 506 * policy data and @data and the matching queue lock held. If @show_total 539 * @pd: policy private data of interest 559 * @pd: policy private data of interest 593 * @pd: policy private data of interest 607 * @pd: policy private data of interest 623 * @pd: policy private data of interest 655 * @pd: policy private data of interest 696 * @pol: target policy 946 * blkcg_activate_policy - activate a blkcg policy on a request_queue 948 * @pol: blkcg policy to activate 956 * always enough for dereferencing policy data. 958 * The caller is responsible for synchronizing [de]activations and policy 1059 * blkcg_deactivate_policy - deactivate a blkcg policy on a request_queue 1061 * @pol: blkcg policy to deactivate 1079 /* if no policy is left, no need for blkgs - shoot them down */ blkcg_deactivate_policy() 1104 * blkcg_policy_register - register a blkcg policy 1105 * @pol: blkcg policy to register 1131 /* everything is in place, add intf files for the new policy */ blkcg_policy_register() 1143 * blkcg_policy_unregister - unregister a blkcg policy 1144 * @pol: blkcg policy to unregister
|
H A D | blk-cgroup.h | 23 /* Max limits for throttle policy */ 53 /* TODO: per-policy storage in blkcg */ 82 /* the blkg and policy id this per-policy data belongs to */ 86 /* used during policy activation */ 123 /* policy specific private data size */ 125 /* cgroup files for the policy */ 145 /* Blkio controller policy registration */ 209 * blkg_to_pdata - get policy private data 211 * @pol: policy of interest 222 * pdata_to_blkg - get blkg associated with policy private data 223 * @pd: policy private data of interest 225 * @pd is policy private data. Determine the blkg it's associated with.
|
/linux-4.1.27/drivers/clk/bcm/ |
H A D | clk-bcm21664.c | 30 /* no policy control */ 52 .policy = { 135 .policy = { 238 .policy = {
|
H A D | clk-kona-setup.c | 30 struct ccu_policy *ccu_policy = &ccu->policy; ccu_data_offsets_valid() 37 pr_err("%s: bad policy enable offset for %s " ccu_data_offsets_valid() 43 pr_err("%s: bad policy control offset for %s " ccu_data_offsets_valid() 82 struct bcm_clk_policy *policy; peri_clk_data_offsets_valid() local 100 policy = &peri->policy; peri_clk_data_offsets_valid() 101 if (policy_exists(policy)) { peri_clk_data_offsets_valid() 102 if (policy->offset > limit) { peri_clk_data_offsets_valid() 103 pr_err("%s: bad policy offset for %s (%u > %u)\n", peri_clk_data_offsets_valid() 104 __func__, name, policy->offset, limit); peri_clk_data_offsets_valid() 225 if (!bit_posn_valid(enable->bit, "policy enable", ccu_name)) ccu_policy_valid() 229 if (!bit_posn_valid(control->go_bit, "policy control GO", ccu_name)) ccu_policy_valid() 232 if (!bit_posn_valid(control->atl_bit, "policy control ATL", ccu_name)) ccu_policy_valid() 235 if (!bit_posn_valid(control->ac_bit, "policy control AC", ccu_name)) ccu_policy_valid() 241 static bool policy_valid(struct bcm_clk_policy *policy, const char *clock_name) policy_valid() argument 243 if (!bit_posn_valid(policy->bit, "policy", clock_name)) policy_valid() 405 struct bcm_clk_policy *policy; peri_clk_data_valid() local 427 policy = &peri->policy; peri_clk_data_valid() 428 if (policy_exists(policy) && !policy_valid(policy, name)) peri_clk_data_valid() 790 ccu_policy = &ccu->policy; ccu_data_valid()
|
H A D | clk-kona.h | 52 #define policy_exists(policy) ((policy)->offset != 0) 85 * CCU policy control for clocks. Clocks can be enabled or disabled 86 * based on the CCU policy in effect. One bit in each policy mask 87 * register (one per CCU policy) represents whether the clock is 88 * enabled when that policy is effect or not. The CCU policy engine 93 u32 offset; /* first policy mask register offset */ 391 struct bcm_clk_policy policy; member in struct:peri_clk_data 431 * CCU policy control. To enable software update of the policy 432 * tables the CCU policy engine must be stopped by setting the 482 struct ccu_policy policy; member in struct:ccu_data
|
H A D | clk-kona.c | 23 * "Normal", and "Turbo".) A lower policy number has lower power 24 * consumption, and policy 2 is the default. 214 struct bcm_policy_ctl *control = &ccu->policy.control; __ccu_policy_engine_start() 220 /* If we don't need to control policy for this CCU, we're done. */ __ccu_policy_engine_start() 230 pr_err("%s: ccu %s policy engine wouldn't go idle\n", __ccu_policy_engine_start() 260 pr_err("%s: ccu %s policy engine never started\n", __ccu_policy_engine_start() 268 struct bcm_lvm_en *enable = &ccu->policy.enable; __ccu_policy_engine_stop() 273 /* If we don't need to control policy for this CCU, we're done. */ __ccu_policy_engine_stop() 282 pr_err("%s: ccu %s policy engine already stopped\n", __ccu_policy_engine_stop() 293 pr_err("%s: ccu %s policy engine never stopped\n", __ccu_policy_engine_stop() 301 * can be disabled or enabled based on which policy is currently in 302 * effect. Such clocks have a bit in a "policy mask" register for 303 * each policy indicating whether the clock is enabled for that 304 * policy or not. The bit position for a clock is the same for all 308 static bool policy_init(struct ccu_data *ccu, struct bcm_clk_policy *policy) policy_init() argument 315 if (!policy_exists(policy)) policy_init() 319 * We need to stop the CCU policy engine to allow update policy_init() 320 * of our policy bits. policy_init() 323 pr_err("%s: unable to stop CCU %s policy engine\n", policy_init() 329 * For now, if a clock defines its policy bit we just mark policy_init() 332 offset = policy->offset; policy_init() 333 mask = (u32)1 << policy->bit; policy_init() 343 /* We're done updating; fire up the policy engine again. */ policy_init() 346 pr_err("%s: unable to restart CCU %s policy engine\n", policy_init() 1194 if (!policy_init(ccu, &peri->policy)) { __peri_clk_init() 1195 pr_err("%s: error initializing policy for %s\n", __peri_clk_init()
|
/linux-4.1.27/arch/mips/include/asm/mach-loongson/ |
H A D | loongson_hwmon.h | 28 #define STEP_SPEED_POLICY 1 /* use up/down arrays to describe policy */
|
/linux-4.1.27/security/selinux/ss/ |
H A D | mls_types.h | 2 * Type definitions for the multi-level security (MLS) policy.
|
H A D | policydb.h | 2 * A policy database (policydb) specifies the 3 * configuration data for the security policy. 15 * Added conditional policy language extensions 158 * policy source. This is not used by the kernel policy but allows 227 /* The policy database */ 292 /* length of this policy when it was loaded */
|
H A D | mls.h | 2 * Multi-level security (MLS) policy operations.
|
H A D | context.h | 4 * by the security policy. Security contexts are 7 * with an understanding of the security policy.
|
H A D | conditional.h | 49 * A cond node represents a conditional block in a policy. It
|
/linux-4.1.27/tools/power/cpupower/bench/ |
H A D | config.h | 23 /* default scheduling policy SCHED_OTHER */
|
/linux-4.1.27/drivers/staging/lustre/lustre/ldlm/ |
H A D | ldlm_plain.c | 65 /* No policy for plain locks */ ldlm_plain_policy_wire_to_local() 71 /* No policy for plain locks */ ldlm_plain_policy_local_to_wire()
|
H A D | ldlm_lock.c | 93 * Converts lock policy from local format to on the wire lock_desc format 107 * Converts lock policy from on the wire lock_desc format to local format 945 * the policy group */ list_for_each() 959 /* go to next policy group within mode group */ list_for_each() 963 } /* loop over policy groups within the mode group */ list_for_each() 966 * new policy group is started */ list_for_each() 979 * new mode group and new policy group are started */ 1076 ldlm_policy_data_t *policy, search_queue() 1115 policy->l_extent.start || list_for_each() 1116 lock->l_policy_data.l_extent.end < policy->l_extent.end)) list_for_each() 1121 lock->l_policy_data.l_extent.gid != policy->l_extent.gid) list_for_each() 1128 policy->l_inodebits.bits) != list_for_each() 1129 policy->l_inodebits.bits)) list_for_each() 1227 ldlm_policy_data_t *policy, ldlm_mode_t mode, ldlm_lock_match() 1253 lock = search_queue(&res->lr_granted, &mode, policy, old_lock, ldlm_lock_match() 1263 lock = search_queue(&res->lr_converting, &mode, policy, old_lock, ldlm_lock_match() 1269 lock = search_queue(&res->lr_waiting, &mode, policy, old_lock, ldlm_lock_match() 1324 res_id->name[2] : policy->l_extent.start, ldlm_lock_match() 1326 res_id->name[3] : policy->l_extent.end); ldlm_lock_match() 1345 res_id->name[2] : policy->l_extent.start, ldlm_lock_match() 1347 res_id->name[3] : policy->l_extent.end); ldlm_lock_match() 1553 * If namespace has intent policy sent and the lock has LDLM_FL_HAS_INTENT flag 1554 * set, skip all the enqueueing and delegate lock processing to intent policy 2148 * mode below in ->policy() ldlm_lock_convert() 1074 search_queue(struct list_head *queue, ldlm_mode_t *mode, ldlm_policy_data_t *policy, struct ldlm_lock *old_lock, __u64 flags, int unref) search_queue() argument 1225 ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags, const struct ldlm_res_id *res_id, ldlm_type_t type, ldlm_policy_data_t *policy, ldlm_mode_t mode, struct lustre_handle *lockh, int unref) ldlm_lock_match() argument
|
H A D | ldlm_request.c | 397 ldlm_type_t type, ldlm_policy_data_t *policy, ldlm_cli_enqueue_local() 435 if (policy != NULL) ldlm_cli_enqueue_local() 436 lock->l_policy_data = *policy; ldlm_cli_enqueue_local() 440 lock->l_req_extent = policy->l_extent; ldlm_cli_enqueue_local() 442 err = ldlm_lock_enqueue(ns, &lock, policy, flags); ldlm_cli_enqueue_local() 446 if (policy != NULL) ldlm_cli_enqueue_local() 447 *policy = lock->l_policy_data; ldlm_cli_enqueue_local() 637 "client-side enqueue, new policy data"); ldlm_cli_enqueue_fini() 850 ldlm_policy_data_t const *policy, __u64 *flags, ldlm_cli_enqueue() 887 if (policy != NULL) ldlm_cli_enqueue() 888 lock->l_policy_data = *policy; ldlm_cli_enqueue() 891 lock->l_req_extent = policy->l_extent; ldlm_cli_enqueue() 948 policy->l_extent.end == OBD_OBJECT_EOF)); ldlm_cli_enqueue() 959 err = ldlm_cli_enqueue_fini(exp, req, einfo->ei_type, policy ? 1 : 0, ldlm_cli_enqueue() 1436 * Callback function for LRU-resize policy. Decides whether to keep 1475 * Callback function for proc used policy. Makes decision whether to keep 1495 * Callback function for aged policy. Makes decision whether to keep \a lock in 1516 * Callback function for default policy. Makes decision whether to keep \a lock 1578 * flags & LDLM_CANCEL_LRUR - use LRU resize policy (SLV from server) to 1585 * memory pressure policy function; 1587 * flags & LDLM_CANCEL_AGED - cancel \a count locks according to "aged policy". 1647 /* Pass the lock through the policy filter and see if it ldlm_prepare_lru_list() 1650 * Even for shrinker policy we stop scanning if ldlm_prepare_lru_list() 1656 * That is, for shrinker policy we drop only ldlm_prepare_lru_list() 1765 * given policy, mode. GET the found locks and add them into the \a cancels 1770 ldlm_policy_data_t *policy, ldlm_cancel_resource_local() 1797 /* If policy is given and this is IBITS lock, add to list only ldlm_cancel_resource_local() 1798 * those locks that match by policy. 
*/ ldlm_cancel_resource_local() 1799 if (policy && (lock->l_resource->lr_type == LDLM_IBITS) && ldlm_cancel_resource_local() 1801 policy->l_inodebits.bits)) ldlm_cancel_resource_local() 1883 ldlm_policy_data_t *policy, ldlm_cli_cancel_unused_resource() 1901 count = ldlm_cancel_resource_local(res, &cancels, policy, mode, ldlm_cli_cancel_unused_resource() 2250 * because the LDLM_CANCEL_NO_WAIT policy doesn't use the ldlm_cancel_unused_locks_for_replay() 395 ldlm_cli_enqueue_local(struct ldlm_namespace *ns, const struct ldlm_res_id *res_id, ldlm_type_t type, ldlm_policy_data_t *policy, ldlm_mode_t mode, __u64 *flags, ldlm_blocking_callback blocking, ldlm_completion_callback completion, ldlm_glimpse_callback glimpse, void *data, __u32 lvb_len, enum lvb_type lvb_type, const __u64 *client_cookie, struct lustre_handle *lockh) ldlm_cli_enqueue_local() argument 847 ldlm_cli_enqueue(struct obd_export *exp, struct ptlrpc_request **reqp, struct ldlm_enqueue_info *einfo, const struct ldlm_res_id *res_id, ldlm_policy_data_t const *policy, __u64 *flags, void *lvb, __u32 lvb_len, enum lvb_type lvb_type, struct lustre_handle *lockh, int async) ldlm_cli_enqueue() argument 1768 ldlm_cancel_resource_local(struct ldlm_resource *res, struct list_head *cancels, ldlm_policy_data_t *policy, ldlm_mode_t mode, __u64 lock_flags, ldlm_cancel_flags_t cancel_flags, void *opaque) ldlm_cancel_resource_local() argument 1881 ldlm_cli_cancel_unused_resource(struct ldlm_namespace *ns, const struct ldlm_res_id *res_id, ldlm_policy_data_t *policy, ldlm_mode_t mode, ldlm_cancel_flags_t flags, void *opaque) ldlm_cli_cancel_unused_resource() argument
|
/linux-4.1.27/include/linux/netfilter/ |
H A D | nfnetlink.h | 20 const struct nla_policy *policy; /* netlink attribute policy */ member in struct:nfnl_callback
|
/linux-4.1.27/include/linux/mfd/wm8350/ |
H A D | supply.h | 113 /* charger state machine policy - set in machine driver */ 129 struct wm8350_charger_policy *policy; member in struct:wm8350_power
|
/linux-4.1.27/drivers/acpi/ |
H A D | processor_thermal.c | 80 struct cpufreq_policy policy; cpu_has_cpufreq() local 81 if (!acpi_thermal_cpufreq_is_init || cpufreq_get_policy(&policy, cpu)) cpu_has_cpufreq() 89 struct cpufreq_policy *policy = data; acpi_thermal_cpufreq_notifier() local 96 policy->cpuinfo.max_freq * acpi_thermal_cpufreq_notifier() 97 (100 - reduction_pctg(policy->cpu) * 20) acpi_thermal_cpufreq_notifier() 100 cpufreq_verify_within_limits(policy, 0, max_freq); acpi_thermal_cpufreq_notifier()
|
/linux-4.1.27/drivers/thermal/ |
H A D | cpu_cooling.c | 171 * cpufreq_thermal_notifier - notifier callback for cpufreq policy change. 176 * Callback to hijack the notification on cpufreq policy transition. 177 * Every time there is a change in policy, we will intercept and 178 * update the cpufreq policy with thermal constraints. 185 struct cpufreq_policy *policy = data; cpufreq_thermal_notifier() local 194 if (!cpumask_test_cpu(policy->cpu, cpufreq_thermal_notifier() 200 if (policy->max != max_freq) cpufreq_thermal_notifier() 201 cpufreq_verify_within_limits(policy, 0, max_freq); cpufreq_thermal_notifier() 290 /* Notifier for cpufreq policy change */ 313 * Normally this should be same as cpufreq policy->related_cpus.
|
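Both files above use the same pattern: register a CPUFREQ_POLICY_NOTIFIER and, on CPUFREQ_ADJUST, narrow the policy with cpufreq_verify_within_limits(). A kernel-style sketch of that pattern (4.1-era API; the notifier name and the fixed 1 GHz cap are hypothetical):

#include <linux/cpufreq.h>
#include <linux/module.h>

static unsigned int cap_khz = 1000000;	/* assumed thermal cap: 1 GHz */

static int demo_thermal_notifier(struct notifier_block *nb,
				 unsigned long event, void *data)
{
	struct cpufreq_policy *policy = data;

	if (event != CPUFREQ_ADJUST)
		return NOTIFY_DONE;

	/* Clamp policy->max to the cap whenever the policy is re-evaluated. */
	cpufreq_verify_within_limits(policy, 0, cap_khz);
	return NOTIFY_OK;
}

static struct notifier_block demo_nb = {
	.notifier_call = demo_thermal_notifier,
};

static int __init demo_init(void)
{
	return cpufreq_register_notifier(&demo_nb, CPUFREQ_POLICY_NOTIFIER);
}

static void __exit demo_exit(void)
{
	cpufreq_unregister_notifier(&demo_nb, CPUFREQ_POLICY_NOTIFIER);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");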
/linux-4.1.27/net/ieee802154/ |
H A D | nl802154.c | 195 /* policy for the attributes */ 893 .policy = nl802154_policy, 902 .policy = nl802154_policy, 910 .policy = nl802154_policy, 918 .policy = nl802154_policy, 926 .policy = nl802154_policy, 934 .policy = nl802154_policy, 942 .policy = nl802154_policy, 950 .policy = nl802154_policy, 958 .policy = nl802154_policy, 966 .policy = nl802154_policy, 974 .policy = nl802154_policy, 982 .policy = nl802154_policy,
|
H A D | ieee802154.h | 23 .policy = ieee802154_policy, \ 32 .policy = ieee802154_policy, \
|
/linux-4.1.27/include/net/ |
H A D | rtnetlink.h | 32 * @policy: Netlink policy for device specific attribute validation 60 const struct nla_policy *policy; member in struct:rtnl_link_ops
|
H A D | netlink.h | 159 * Standard attribute types to specify validation policy 184 * struct nla_policy - attribute validation policy 237 const struct nla_policy *policy); 239 int len, const struct nla_policy *policy); 364 * @policy: validation policy 370 const struct nla_policy *policy) nlmsg_parse() 376 nlmsg_attrlen(nlh, hdrlen), policy); nlmsg_parse() 399 * @policy: validation policy 403 const struct nla_policy *policy) nlmsg_validate() 409 nlmsg_attrlen(nlh, hdrlen), maxtype, policy); nlmsg_validate() 729 * @policy: validation policy 735 const struct nla_policy *policy) nla_parse_nested() 737 return nla_parse(tb, maxtype, nla_data(nla), nla_len(nla), policy); nla_parse_nested() 1201 * @policy: validation policy 1204 * specified policy. Attributes with a type exceeding maxtype will be 1210 const struct nla_policy *policy) nla_validate_nested() 1212 return nla_validate(nla_data(start), nla_len(start), maxtype, policy); nla_validate_nested() 368 nlmsg_parse(const struct nlmsghdr *nlh, int hdrlen, struct nlattr *tb[], int maxtype, const struct nla_policy *policy) nlmsg_parse() argument 401 nlmsg_validate(const struct nlmsghdr *nlh, int hdrlen, int maxtype, const struct nla_policy *policy) nlmsg_validate() argument 733 nla_parse_nested(struct nlattr *tb[], int maxtype, const struct nlattr *nla, const struct nla_policy *policy) nla_parse_nested() argument 1209 nla_validate_nested(const struct nlattr *start, int maxtype, const struct nla_policy *policy) nla_validate_nested() argument
|
H A D | genetlink.h | 115 * @policy: attribute validation policy 122 const struct nla_policy *policy; member in struct:genl_ops 217 * @policy: validation policy 222 const struct nla_policy *policy) genlmsg_parse() 225 policy); genlmsg_parse() 219 genlmsg_parse(const struct nlmsghdr *nlh, const struct genl_family *family, struct nlattr *tb[], int maxtype, const struct nla_policy *policy) genlmsg_parse() argument
|
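Every genl_ops entry pairs a command with a doit handler and an nla_policy, and genetlink validates the incoming attributes against .policy before invoking the handler; that is why the nl802154, nfc, l2tp and netlabel entries in this listing simply set .policy on each op. A kernel-style sketch of registering such a family (4.1-era API with GENL_ID_GENERATE; the family name, command and attribute are made up):

#include <linux/module.h>
#include <net/genetlink.h>

#define DEMO_ATTR_VALUE	1
#define DEMO_ATTR_MAX	1
#define DEMO_CMD_SET	1

static const struct nla_policy demo_policy[DEMO_ATTR_MAX + 1] = {
	[DEMO_ATTR_VALUE] = { .type = NLA_U32 },
};

static int demo_set_doit(struct sk_buff *skb, struct genl_info *info)
{
	/* info->attrs[] was already validated against demo_policy. */
	if (info->attrs[DEMO_ATTR_VALUE])
		pr_info("value=%u\n",
			nla_get_u32(info->attrs[DEMO_ATTR_VALUE]));
	return 0;
}

static struct genl_family demo_family = {
	.id	 = GENL_ID_GENERATE,
	.name	 = "demo_policy",
	.version = 1,
	.maxattr = DEMO_ATTR_MAX,
};

static const struct genl_ops demo_ops[] = {
	{
		.cmd	= DEMO_CMD_SET,
		.doit	= demo_set_doit,
		.policy	= demo_policy,	/* enforced before .doit runs */
	},
};

static int __init demo_init(void)
{
	return genl_register_family_with_ops(&demo_family, demo_ops);
}

static void __exit demo_exit(void)
{
	genl_unregister_family(&demo_family);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");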
H A D | vxlan.h | 25 * A = Indicates that the group policy has already been applied to 29 * [0] https://tools.ietf.org/html/draft-smith-vxlan-group-policy
|
H A D | fib_rules.h | 75 const struct nla_policy *policy; member in struct:fib_rules_ops
|
/linux-4.1.27/net/irda/ |
H A D | irnetlink.c | 138 .policy = irda_nl_policy, 144 .policy = irda_nl_policy,
|
/linux-4.1.27/net/nfc/ |
H A D | netlink.c | 1498 .policy = nfc_genl_policy, 1503 .policy = nfc_genl_policy, 1508 .policy = nfc_genl_policy, 1513 .policy = nfc_genl_policy, 1518 .policy = nfc_genl_policy, 1523 .policy = nfc_genl_policy, 1528 .policy = nfc_genl_policy, 1534 .policy = nfc_genl_policy, 1539 .policy = nfc_genl_policy, 1544 .policy = nfc_genl_policy, 1549 .policy = nfc_genl_policy, 1554 .policy = nfc_genl_policy, 1559 .policy = nfc_genl_policy, 1564 .policy = nfc_genl_policy, 1570 .policy = nfc_genl_policy, 1575 .policy = nfc_genl_policy, 1580 .policy = nfc_genl_policy,
|
/linux-4.1.27/drivers/staging/lustre/lustre/mdc/ |
H A D | mdc_locks.c | 156 ldlm_policy_data_t *policy, ldlm_mode_t mode, mdc_lock_match() 164 policy->l_inodebits.bits &= exp_connect_ibits(exp); mdc_lock_match() 166 &res_id, type, policy, mode, lockh, 0); mdc_lock_match() 172 ldlm_policy_data_t *policy, mdc_cancel_unused() 183 policy, mode, flags, opaque); mdc_cancel_unused() 793 ldlm_policy_data_t const *policy = &lookup_policy; mdc_enqueue() local 811 policy = &update_policy; mdc_enqueue() 813 policy = &layout_policy; mdc_enqueue() 815 policy = &getxattr_policy; mdc_enqueue() 825 policy as lmm, but lmmsize is 0 */ mdc_enqueue() 829 policy = (ldlm_policy_data_t *)lmm; mdc_enqueue() 835 policy = &update_policy; mdc_enqueue() 884 rc = ldlm_cli_enqueue(exp, &req, einfo, &res_id, policy, &flags, NULL, mdc_enqueue() 1040 ldlm_policy_data_t policy = lock->l_policy_data; mdc_finish_intent_lock() local 1052 LDLM_IBITS, &policy, LCK_NL, mdc_finish_intent_lock() 1075 ldlm_policy_data_t policy; mdc_revalidate_lock() local 1099 policy.l_inodebits.bits = MDS_INODELOCK_UPDATE | mdc_revalidate_lock() 1104 policy.l_inodebits.bits = MDS_INODELOCK_LAYOUT; mdc_revalidate_lock() 1107 policy.l_inodebits.bits = MDS_INODELOCK_LOOKUP; mdc_revalidate_lock() 1112 LDLM_IBITS, &policy, mdc_revalidate_lock() 1272 ldlm_policy_data_t policy = { mdc_intent_getattr_async() local 1295 rc = ldlm_cli_enqueue(exp, &req, einfo, &res_id, &policy, &flags, NULL, mdc_intent_getattr_async() 154 mdc_lock_match(struct obd_export *exp, __u64 flags, const struct lu_fid *fid, ldlm_type_t type, ldlm_policy_data_t *policy, ldlm_mode_t mode, struct lustre_handle *lockh) mdc_lock_match() argument 170 mdc_cancel_unused(struct obd_export *exp, const struct lu_fid *fid, ldlm_policy_data_t *policy, ldlm_mode_t mode, ldlm_cancel_flags_t flags, void *opaque) mdc_cancel_unused() argument
|
/linux-4.1.27/drivers/staging/fsl-mc/include/ |
H A D | dprc.h | 91 /* General Container allocation policy - Indicates that the new container is 94 * that this is a container's global policy, but the parent container may 164 * warning: Only the parent container is allowed to destroy a child policy 407 * dprc_set_res_quota() - Set allocation policy for a specific resource/object 415 * when quota is set to -1, the policy is the same as container's 416 * general policy. 418 * Allocation policy determines whether or not a container may allocate 419 * resources from its parent. Each container has a 'global' allocation policy 422 * This function sets allocation policy for a specific resource type. 423 * The default policy for all resource types matches the container's 'global' 424 * allocation policy. 428 * @warning Only the parent container is allowed to change a child policy. 437 * dprc_get_res_quota() - Gets the allocation policy of a specific 445 * when quota is set to -1, the policy is the same as container's 446 * general policy. 511 * According to the DPRC allocation policy, the assigned resources may be taken
|
/linux-4.1.27/drivers/net/wireless/ti/wl18xx/ |
H A D | tx.h | 40 * the rate policy.
|
/linux-4.1.27/include/net/netns/ |
H A D | xfrm.h | 37 * Main use is finding SA after policy selected tunnel or transport
|
/linux-4.1.27/net/ipv4/netfilter/ |
H A D | nft_masq_ipv4.c | 47 .policy = nft_masq_policy,
|
H A D | nft_redir_ipv4.c | 56 .policy = nft_redir_policy,
|
H A D | nft_reject_ipv4.c | 56 .policy = nft_reject_policy,
|
H A D | iptable_security.c | 5 * which need to be able to manage security policy in separate context
|
/linux-4.1.27/net/ipv6/netfilter/ |
H A D | nft_masq_ipv6.c | 47 .policy = nft_masq_policy,
|
H A D | nft_redir_ipv6.c | 56 .policy = nft_redir_policy,
|
H A D | nft_reject_ipv6.c | 57 .policy = nft_reject_policy,
|
H A D | ip6table_security.c | 5 * which need to be able to manage security policy in separate context
|
/linux-4.1.27/security/keys/encrypted-keys/ |
H A D | ecryptfs_format.c | 68 /* TODO: Make the hash parameterizable via policy */ ecryptfs_fill_auth_tok()
|
/linux-4.1.27/net/l2tp/ |
H A D | l2tp_netlink.c | 901 .policy = l2tp_nl_policy, 907 .policy = l2tp_nl_policy, 913 .policy = l2tp_nl_policy, 919 .policy = l2tp_nl_policy, 926 .policy = l2tp_nl_policy, 932 .policy = l2tp_nl_policy, 938 .policy = l2tp_nl_policy, 944 .policy = l2tp_nl_policy, 951 .policy = l2tp_nl_policy,
|
/linux-4.1.27/net/netlabel/ |
H A D | netlabel_mgmt.c | 69 /* NetLabel Netlink attribute policy */ 707 .policy = netlbl_mgmt_genl_policy, 714 .policy = netlbl_mgmt_genl_policy, 721 .policy = netlbl_mgmt_genl_policy, 728 .policy = netlbl_mgmt_genl_policy, 735 .policy = netlbl_mgmt_genl_policy, 742 .policy = netlbl_mgmt_genl_policy, 749 .policy = netlbl_mgmt_genl_policy, 756 .policy = netlbl_mgmt_genl_policy,
|
/linux-4.1.27/drivers/net/fddi/skfp/ |
H A D | pcmplc.c | 1436 int policy ; pc_tcode_actions() local 1440 policy = smc->mib.fddiSMTConnectionPolicy ; pc_tcode_actions() 1446 ((policy & POLICY_AA) && ne == TA) || pc_tcode_actions() 1447 ((policy & POLICY_AB) && ne == TB) || pc_tcode_actions() 1448 ((policy & POLICY_AS) && ne == TS) || pc_tcode_actions() 1449 ((policy & POLICY_AM) && ne == TM) ) pc_tcode_actions() 1454 ((policy & POLICY_BA) && ne == TA) || pc_tcode_actions() 1455 ((policy & POLICY_BB) && ne == TB) || pc_tcode_actions() 1456 ((policy & POLICY_BS) && ne == TS) || pc_tcode_actions() 1457 ((policy & POLICY_BM) && ne == TM) ) pc_tcode_actions() 1462 ((policy & POLICY_SA) && ne == TA) || pc_tcode_actions() 1463 ((policy & POLICY_SB) && ne == TB) || pc_tcode_actions() 1464 ((policy & POLICY_SS) && ne == TS) || pc_tcode_actions() 1465 ((policy & POLICY_SM) && ne == TM) ) pc_tcode_actions() 1470 ((policy & POLICY_MA) && ne == TA) || pc_tcode_actions() 1471 ((policy & POLICY_MB) && ne == TB) || pc_tcode_actions() 1472 ((policy & POLICY_MS) && ne == TS) || pc_tcode_actions() 1473 ((policy & POLICY_MM) && ne == TM) ) pc_tcode_actions()
|
/linux-4.1.27/arch/arm/mm/ |
H A D | mmu.c | 76 const char policy[16]; member in struct:cachepolicy 84 #define s2_policy(policy) policy 86 #define s2_policy(policy) 0 91 .policy = "uncached", 97 .policy = "buffered", 103 .policy = "writethrough", 109 .policy = "writeback", 115 .policy = "writealloc", 129 * the C code sets the page tables up with the same policy as the head 148 pr_err("ERROR: could not find cache policy\n"); init_default_cache_policy() 161 int len = strlen(cache_policies[i].policy); early_cachepolicy() 163 if (memcmp(p, cache_policies[i].policy, len) == 0) { early_cachepolicy() 170 pr_err("ERROR: unknown or unsupported cache policy\n"); early_cachepolicy() 181 cache_policies[cachepolicy].policy); early_cachepolicy() 412 pr_warn("Forcing write-allocate cache policy for SMP\n"); build_mem_type_table() 646 pr_info("Memory policy: %sData cache %s\n", build_mem_type_table() 647 ecc_mask ? "ECC enabled, " : "", cp->policy); build_mem_type_table()
|
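early_cachepolicy() resolves the cachepolicy= boot argument by prefix-matching it against the policy names in cache_policies[]. A standalone mirror of that lookup:

#include <stdio.h>
#include <string.h>

struct cachepolicy {
	const char policy[16];
};

static const struct cachepolicy cache_policies[] = {
	{ "uncached" }, { "buffered" }, { "writethrough" },
	{ "writeback" }, { "writealloc" },
};

static int find_policy(const char *p)
{
	size_t i;

	for (i = 0; i < sizeof(cache_policies) / sizeof(cache_policies[0]); i++) {
		size_t len = strlen(cache_policies[i].policy);

		if (memcmp(p, cache_policies[i].policy, len) == 0)
			return (int)i;
	}
	return -1;	/* unknown or unsupported cache policy */
}

int main(void)
{
	printf("writeback -> %d\n", find_policy("writeback"));
	printf("bogus     -> %d\n", find_policy("bogus"));
	return 0;
}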
/linux-4.1.27/kernel/sched/ |
H A D | core.c | 696 * FIFO realtime policy runs the highest priority task. Other runnable sched_can_stop_tick() 699 if (current->policy == SCHED_FIFO) sched_can_stop_tick() 706 if (current->policy == SCHED_RR) { sched_can_stop_tick() 795 if (p->policy == SCHED_IDLE) { set_load_weight() 1911 * Revert to default priority/policy on fork if requested. sched_fork() 1915 p->policy = SCHED_NORMAL; sched_fork() 2030 static int dl_overflow(struct task_struct *p, int policy, dl_overflow() argument 2037 u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0; dl_overflow() 2050 if (dl_policy(policy) && !task_has_dl_policy(p) && dl_overflow() 2054 } else if (dl_policy(policy) && task_has_dl_policy(p) && dl_overflow() 2059 } else if (!dl_policy(policy) && task_has_dl_policy(p)) { dl_overflow() 3243 * for the first time with its new policy. 3278 * sched_setparam() passes in -1 for its policy, to let the functions 3286 int policy = attr->sched_policy; __setscheduler_params() local 3288 if (policy == SETPARAM_POLICY) __setscheduler_params() 3289 policy = p->policy; __setscheduler_params() 3291 p->policy = policy; __setscheduler_params() 3293 if (dl_policy(policy)) __setscheduler_params() 3295 else if (fair_policy(policy)) __setscheduler_params() 3421 int new_effective_prio, policy = attr->sched_policy; __sched_setscheduler() local 3430 /* double check policy once rq lock held */ __sched_setscheduler() 3431 if (policy < 0) { __sched_setscheduler() 3433 policy = oldpolicy = p->policy; __sched_setscheduler() 3437 if (policy != SCHED_DEADLINE && __sched_setscheduler() 3438 policy != SCHED_FIFO && policy != SCHED_RR && __sched_setscheduler() 3439 policy != SCHED_NORMAL && policy != SCHED_BATCH && __sched_setscheduler() 3440 policy != SCHED_IDLE) __sched_setscheduler() 3455 if ((dl_policy(policy) && !__checkparam_dl(attr)) || __sched_setscheduler() 3456 (rt_policy(policy) != (attr->sched_priority != 0))) __sched_setscheduler() 3463 if (fair_policy(policy)) { __sched_setscheduler() 3469 if (rt_policy(policy)) { __sched_setscheduler() 3473 /* can't set/change the rt policy */ __sched_setscheduler() 3474 if (policy != p->policy && !rlim_rtprio) __sched_setscheduler() 3484 * Can't set/change SCHED_DEADLINE policy at all for now __sched_setscheduler() 3489 if (dl_policy(policy)) __sched_setscheduler() 3496 if (p->policy == SCHED_IDLE && policy != SCHED_IDLE) { __sched_setscheduler() 3520 * To be able to change p->policy safely, the appropriate __sched_setscheduler() 3526 * Changing the policy of the stop threads its a very bad idea __sched_setscheduler() 3537 if (unlikely(policy == p->policy)) { __sched_setscheduler() 3538 if (fair_policy(policy) && attr->sched_nice != task_nice(p)) __sched_setscheduler() 3540 if (rt_policy(policy) && attr->sched_priority != p->rt_priority) __sched_setscheduler() 3542 if (dl_policy(policy) && dl_param_changed(p, attr)) __sched_setscheduler() 3557 if (rt_bandwidth_enabled() && rt_policy(policy) && __sched_setscheduler() 3565 if (dl_bandwidth_enabled() && dl_policy(policy)) { __sched_setscheduler() 3582 /* recheck policy now with rq lock held */ __sched_setscheduler() 3583 if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) { __sched_setscheduler() 3584 policy = oldpolicy = -1; __sched_setscheduler() 3594 if ((dl_policy(policy) || dl_task(p)) && dl_overflow(p, policy, attr)) { __sched_setscheduler() 3644 static int _sched_setscheduler(struct task_struct *p, int policy, _sched_setscheduler() argument 3648 .sched_policy = policy, _sched_setscheduler() 3654 if ((policy != 
SETPARAM_POLICY) && (policy & SCHED_RESET_ON_FORK)) { _sched_setscheduler() 3656 policy &= ~SCHED_RESET_ON_FORK; _sched_setscheduler() 3657 attr.sched_policy = policy; _sched_setscheduler() 3663 * sched_setscheduler - change the scheduling policy and/or RT priority of a thread. 3665 * @policy: new policy. 3672 int sched_setscheduler(struct task_struct *p, int policy, sched_setscheduler() argument 3675 return _sched_setscheduler(p, policy, param, true); sched_setscheduler() 3686 * sched_setscheduler_nocheck - change the scheduling policy and/or RT priority of a thread from kernelspace. 3688 * @policy: new policy. 3698 int sched_setscheduler_nocheck(struct task_struct *p, int policy, sched_setscheduler_nocheck() argument 3701 return _sched_setscheduler(p, policy, param, false); sched_setscheduler_nocheck() 3705 do_sched_setscheduler(pid_t pid, int policy, struct sched_param __user *param) do_sched_setscheduler() argument 3720 retval = sched_setscheduler(p, policy, &lparam); do_sched_setscheduler() 3798 * sys_sched_setscheduler - set/change the scheduler policy and RT priority 3800 * @policy: new policy. 3805 SYSCALL_DEFINE3(sched_setscheduler, pid_t, pid, int, policy, SYSCALL_DEFINE3() 3808 /* negative values for policy are not valid */ SYSCALL_DEFINE3() 3809 if (policy < 0) SYSCALL_DEFINE3() 3812 return do_sched_setscheduler(pid, policy, param); SYSCALL_DEFINE3() 3861 * sys_sched_getscheduler - get the policy (scheduling class) of a thread 3864 * Return: On success, the policy of the thread. Otherwise, a negative error 3881 retval = p->policy SYSCALL_DEFINE1() 3997 attr.sched_policy = p->policy; SYSCALL_DEFINE4() 4415 * @policy: scheduling class. 4421 SYSCALL_DEFINE1(sched_get_priority_max, int, policy) SYSCALL_DEFINE1() 4425 switch (policy) { SYSCALL_DEFINE1() 4442 * @policy: scheduling class. 4448 SYSCALL_DEFINE1(sched_get_priority_min, int, policy) SYSCALL_DEFINE1() 4452 switch (policy) { SYSCALL_DEFINE1()
|
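__sched_setscheduler() above is the common back end for sched_setscheduler(2), sched_setparam(2) and sched_setattr(2). A minimal runnable example that moves the calling thread to SCHED_FIFO and back (needs root or CAP_SYS_NICE; priority 10 is an arbitrary value inside the queried range):

#include <sched.h>
#include <stdio.h>

int main(void)
{
	struct sched_param sp = { .sched_priority = 10 };

	if (sched_setscheduler(0, SCHED_FIFO, &sp) != 0) {
		perror("sched_setscheduler");
		return 1;
	}
	printf("policy now %d (SCHED_FIFO = %d), priority range %d..%d\n",
	       sched_getscheduler(0), SCHED_FIFO,
	       sched_get_priority_min(SCHED_FIFO),
	       sched_get_priority_max(SCHED_FIFO));

	/* Return to the default time-sharing policy. */
	sp.sched_priority = 0;
	if (sched_setscheduler(0, SCHED_OTHER, &sp) != 0)
		perror("sched_setscheduler(SCHED_OTHER)");
	return 0;
}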
H A D | auto_group.c | 87 * so we don't have to move tasks around upon policy change, autogroup_create() 90 * the policy change to proceed. autogroup_create()
|
/linux-4.1.27/include/scsi/fc/ |
H A D | fc_fc2.h | 112 #define ESB_ST_ERRP_BIT 24 /* LSB for error policy */ 113 #define ESB_ST_ERRP_MASK (3 << 24) /* mask for error policy */
|
/linux-4.1.27/arch/arm/mach-tegra/ |
H A D | sleep.S | 152 /* in LP2 idle (SDRAM active), set the CPU burst policy to PLLP */ 154 mov r0, #(2 << 28) @ burst policy = run mode
|
/linux-4.1.27/fs/f2fs/ |
H A D | segment.h | 484 * Sometimes f2fs may be better to drop out-of-place update policy. 485 * And, users can control the policy through sysfs entries. 511 unsigned int policy = SM_I(sbi)->ipu_policy; need_inplace_update() local 517 if (policy & (0x1 << F2FS_IPU_FORCE)) need_inplace_update() 519 if (policy & (0x1 << F2FS_IPU_SSR) && need_SSR(sbi)) need_inplace_update() 521 if (policy & (0x1 << F2FS_IPU_UTIL) && need_inplace_update() 524 if (policy & (0x1 << F2FS_IPU_SSR_UTIL) && need_SSR(sbi) && need_inplace_update() 529 if (policy & (0x1 << F2FS_IPU_FSYNC) && need_inplace_update()
|
/linux-4.1.27/net/hsr/ |
H A D | hsr_netlink.c | 108 .policy = hsr_policy, 117 /* attribute policy */ 452 .policy = hsr_genl_policy, 459 .policy = hsr_genl_policy,
|
/linux-4.1.27/net/mac80211/ |
H A D | agg-rx.c | 186 u8 dialog_token, u16 status, u16 policy, ieee80211_send_addba_resp() 220 capab = (u16)(policy << 1); /* bit 1 aggregation policy */ ieee80211_send_addba_resp() 257 * check if configuration can support the BA policy __ieee80211_start_rx_ba_session() 265 "AddBA Req with bad params from %pM on tid %u. policy %d, buffer size %d\n", __ieee80211_start_rx_ba_session() 185 ieee80211_send_addba_resp(struct ieee80211_sub_if_data *sdata, u8 *da, u16 tid, u8 dialog_token, u16 status, u16 policy, u16 buf_size, u16 timeout) ieee80211_send_addba_resp() argument
|
/linux-4.1.27/net/wimax/ |
H A D | stack.c | 74 * Authoritative source for the RE_STATE_CHANGE attribute policy 423 .policy = wimax_gnl_policy, 429 .policy = wimax_gnl_policy, 435 .policy = wimax_gnl_policy, 441 .policy = wimax_gnl_policy,
|