plr               642 arch/x86/kernel/cpu/resctrl/core.c 		if (d->plr)
plr               643 arch/x86/kernel/cpu/resctrl/core.c 			d->plr->d = NULL;
plr               293 arch/x86/kernel/cpu/resctrl/ctrlmondata.c 				rdtgrp->plr->r = r;
plr               294 arch/x86/kernel/cpu/resctrl/ctrlmondata.c 				rdtgrp->plr->d = d;
plr               295 arch/x86/kernel/cpu/resctrl/ctrlmondata.c 				rdtgrp->plr->cbm = d->new_ctrl;
plr               296 arch/x86/kernel/cpu/resctrl/ctrlmondata.c 				d->plr = rdtgrp->plr;
plr               474 arch/x86/kernel/cpu/resctrl/ctrlmondata.c 			if (!rdtgrp->plr->d) {
plr               480 arch/x86/kernel/cpu/resctrl/ctrlmondata.c 					   rdtgrp->plr->r->name,
plr               481 arch/x86/kernel/cpu/resctrl/ctrlmondata.c 					   rdtgrp->plr->d->id,
plr               482 arch/x86/kernel/cpu/resctrl/ctrlmondata.c 					   rdtgrp->plr->cbm);
plr               211 arch/x86/kernel/cpu/resctrl/internal.h 	struct pseudo_lock_region	*plr;
plr               330 arch/x86/kernel/cpu/resctrl/internal.h 	struct pseudo_lock_region	*plr;
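Both internal.h hits above declare a pointer to struct pseudo_lock_region: one in the resource group (used as rdtgrp->plr throughout this listing) and one in the cache domain (d->plr). As a reading aid, here is a sketch of that structure reconstructed only from the member accesses visible in these references; the field types and comments are assumptions, not the authoritative definition in internal.h:

/*
 * Sketch of struct pseudo_lock_region as implied by the accesses in
 * this listing; types are assumed, not copied from the kernel header.
 */
struct pseudo_lock_region {
	struct rdt_resource	*r;		/* cache resource being locked (plr->r) */
	struct rdt_domain	*d;		/* cache domain; cleared to NULL when the domain goes away */
	u32			cbm;		/* capacity bitmask of the locked region (plr->cbm) */
	wait_queue_head_t	lock_thread_wq;	/* woken when the locking/measuring thread finishes */
	int			thread_done;	/* completion flag tested against 1 */
	int			cpu;		/* CPU the locking thread is bound to (plr->cpu) */
	unsigned int		line_size;	/* coherency line size of the target cache level */
	unsigned int		size;		/* size in bytes of the locked region */
	void			*kmem;		/* kzalloc'd memory backing the region */
	unsigned int		minor;		/* minor number of the pseudo-lock character device */
	struct dentry		*debugfs_dir;	/* per-group debugfs directory */
	struct list_head	pm_reqs;	/* PM QoS requests constraining C-states */
};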
plr               156 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 		if (rdtgrp->plr && rdtgrp->plr->minor == minor) {
plr               174 arch/x86/kernel/cpu/resctrl/pseudo_lock.c static void pseudo_lock_cstates_relax(struct pseudo_lock_region *plr)
plr               178 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	list_for_each_entry_safe(pm_req, next, &plr->pm_reqs, list) {
plr               200 arch/x86/kernel/cpu/resctrl/pseudo_lock.c static int pseudo_lock_cstates_constrain(struct pseudo_lock_region *plr)
plr               206 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	for_each_cpu(cpu, &plr->d->cpu_mask) {
plr               224 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 		list_add(&pm_req->list, &plr->pm_reqs);
plr               230 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	pseudo_lock_cstates_relax(plr);
plr               243 arch/x86/kernel/cpu/resctrl/pseudo_lock.c static void pseudo_lock_region_clear(struct pseudo_lock_region *plr)
plr               245 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	plr->size = 0;
plr               246 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	plr->line_size = 0;
plr               247 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	kfree(plr->kmem);
plr               248 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	plr->kmem = NULL;
plr               249 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	plr->r = NULL;
plr               250 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	if (plr->d)
plr               251 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 		plr->d->plr = NULL;
plr               252 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	plr->d = NULL;
plr               253 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	plr->cbm = 0;
plr               254 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	plr->debugfs_dir = NULL;
plr               275 arch/x86/kernel/cpu/resctrl/pseudo_lock.c static int pseudo_lock_region_init(struct pseudo_lock_region *plr)
plr               282 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	plr->cpu = cpumask_first(&plr->d->cpu_mask);
plr               284 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	if (!cpu_online(plr->cpu)) {
plr               286 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 				    plr->cpu);
plr               291 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	ci = get_cpu_cacheinfo(plr->cpu);
plr               293 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	plr->size = rdtgroup_cbm_to_size(plr->r, plr->d, plr->cbm);
plr               296 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 		if (ci->info_list[i].level == plr->r->cache_level) {
plr               297 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 			plr->line_size = ci->info_list[i].coherency_line_size;
plr               305 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	pseudo_lock_region_clear(plr);
plr               322 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	struct pseudo_lock_region *plr;
plr               324 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	plr = kzalloc(sizeof(*plr), GFP_KERNEL);
plr               325 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	if (!plr)
plr               328 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	init_waitqueue_head(&plr->lock_thread_wq);
plr               329 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	INIT_LIST_HEAD(&plr->pm_reqs);
plr               330 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	rdtgrp->plr = plr;
plr               344 arch/x86/kernel/cpu/resctrl/pseudo_lock.c static int pseudo_lock_region_alloc(struct pseudo_lock_region *plr)
plr               348 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	ret = pseudo_lock_region_init(plr);
plr               356 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	if (plr->size > KMALLOC_MAX_SIZE) {
plr               362 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	plr->kmem = kzalloc(plr->size, GFP_KERNEL);
plr               363 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	if (!plr->kmem) {
plr               372 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	pseudo_lock_region_clear(plr);
plr               389 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	pseudo_lock_region_clear(rdtgrp->plr);
plr               390 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	kfree(rdtgrp->plr);
plr               391 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	rdtgrp->plr = NULL;
plr               416 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	struct pseudo_lock_region *plr = rdtgrp->plr;
plr               465 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	mem_r = plr->kmem;
plr               466 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	size = plr->size;
plr               467 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	line_size = plr->line_size;
plr               516 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	plr->thread_done = 1;
plr               517 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	wake_up_interruptible(&plr->lock_thread_wq);
plr               798 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	if (d->plr) {
plr               799 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 		cbm_len = d->plr->r->cache.cbm_len;
plr               800 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 		cbm_b = d->plr->cbm;
plr               836 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 			if (d_i->plr)
plr               869 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	struct pseudo_lock_region *plr = _plr;
plr               879 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	mem_r = READ_ONCE(plr->kmem);
plr               885 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	for (i = 0; i < plr->size; i += 32) {
plr               896 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	plr->thread_done = 1;
plr               897 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	wake_up_interruptible(&plr->lock_thread_wq);
plr               933 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 				struct pseudo_lock_region *plr,
plr               945 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	miss_event = perf_event_create_kernel_counter(miss_attr, plr->cpu,
plr               950 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	hit_event = perf_event_create_kernel_counter(hit_attr, plr->cpu,
plr               981 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	line_size = READ_ONCE(plr->line_size);
plr               982 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	mem_r = READ_ONCE(plr->kmem);
plr               983 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	size = READ_ONCE(plr->size);
plr              1049 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	struct pseudo_lock_region *plr = _plr;
plr              1072 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	measure_residency_fn(&perf_miss_attr, &perf_hit_attr, plr, &counts);
plr              1080 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	plr->thread_done = 1;
plr              1081 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	wake_up_interruptible(&plr->lock_thread_wq);
plr              1087 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	struct pseudo_lock_region *plr = _plr;
plr              1111 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	measure_residency_fn(&perf_miss_attr, &perf_hit_attr, plr, &counts);
plr              1136 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	plr->thread_done = 1;
plr              1137 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	wake_up_interruptible(&plr->lock_thread_wq);
plr              1153 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	struct pseudo_lock_region *plr = rdtgrp->plr;
plr              1166 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	if (!plr->d) {
plr              1171 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	plr->thread_done = 0;
plr              1172 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	cpu = cpumask_first(&plr->d->cpu_mask);
plr              1178 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	plr->cpu = cpu;
plr              1181 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 		thread = kthread_create_on_node(measure_cycles_lat_fn, plr,
plr              1186 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 		thread = kthread_create_on_node(measure_l2_residency, plr,
plr              1191 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 		thread = kthread_create_on_node(measure_l3_residency, plr,
plr              1205 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	ret = wait_event_interruptible(plr->lock_thread_wq,
plr              1206 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 				       plr->thread_done == 1);
plr              1273 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	struct pseudo_lock_region *plr = rdtgrp->plr;
plr              1279 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	ret = pseudo_lock_region_alloc(plr);
plr              1283 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	ret = pseudo_lock_cstates_constrain(plr);
plr              1289 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	plr->thread_done = 0;
plr              1292 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 					cpu_to_node(plr->cpu),
plr              1293 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 					"pseudo_lock/%u", plr->cpu);
plr              1300 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	kthread_bind(thread, plr->cpu);
plr              1303 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	ret = wait_event_interruptible(plr->lock_thread_wq,
plr              1304 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 				       plr->thread_done == 1);
plr              1336 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 		plr->debugfs_dir = debugfs_create_dir(rdtgrp->kn->name,
plr              1338 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 		if (!IS_ERR_OR_NULL(plr->debugfs_dir))
plr              1340 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 					    plr->debugfs_dir, rdtgrp,
plr              1363 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	plr->minor = new_minor;
plr              1376 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	debugfs_remove_recursive(plr->debugfs_dir);
plr              1379 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	pseudo_lock_cstates_relax(plr);
plr              1381 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	pseudo_lock_region_clear(plr);
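The pseudo_lock.c hits immediately above trace the pseudo-lock create path (rdtgroup_pseudo_lock_create()): the region is allocated, C-states are constrained, and a kthread bound to plr->cpu performs the locking while the caller sleeps on plr->lock_thread_wq. A condensed sketch of that hand-off follows; error unwinding is elided, and the thread body pseudo_lock_fn() with its rdtgrp argument is taken from the kernel source of this era rather than from the excerpt itself:

	/*
	 * Condensed sketch of the kthread hand-off used by the create
	 * (and, with a different thread body, the measure) path above.
	 */
	plr->thread_done = 0;
	thread = kthread_create_on_node(pseudo_lock_fn, rdtgrp,
					cpu_to_node(plr->cpu),
					"pseudo_lock/%u", plr->cpu);
	if (IS_ERR(thread))
		return PTR_ERR(thread);	/* real code unwinds C-states and the region here */
	kthread_bind(thread, plr->cpu);
	wake_up_process(thread);

	/* Sleep until the thread sets plr->thread_done and wakes the queue. */
	ret = wait_event_interruptible(plr->lock_thread_wq,
				       plr->thread_done == 1);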
plr              1402 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	struct pseudo_lock_region *plr = rdtgrp->plr;
plr              1413 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	pseudo_lock_cstates_relax(plr);
plr              1414 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	debugfs_remove_recursive(rdtgrp->plr->debugfs_dir);
plr              1415 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	device_destroy(pseudo_lock_class, MKDEV(pseudo_lock_major, plr->minor));
plr              1416 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	pseudo_lock_minor_release(plr->minor);
plr              1475 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	struct pseudo_lock_region *plr;
plr              1489 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	plr = rdtgrp->plr;
plr              1491 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	if (!plr->d) {
plr              1502 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	if (!cpumask_subset(current->cpus_ptr, &plr->d->cpu_mask)) {
plr              1507 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	physical = __pa(plr->kmem) >> PAGE_SHIFT;
plr              1508 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	psize = plr->size - off;
plr              1510 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	if (off > plr->size) {
plr              1529 arch/x86/kernel/cpu/resctrl/pseudo_lock.c 	memset(plr->kmem + off, 0, vsize);
plr               272 arch/x86/kernel/cpu/resctrl/rdtgroup.c 			if (!rdtgrp->plr->d) {
plr               277 arch/x86/kernel/cpu/resctrl/rdtgroup.c 				mask = &rdtgrp->plr->d->cpu_mask;
plr               846 arch/x86/kernel/cpu/resctrl/rdtgroup.c 			pseudo_locked = dom->plr ? dom->plr->cbm : 0;
plr              1302 arch/x86/kernel/cpu/resctrl/rdtgroup.c 		if (!rdtgrp->plr->d) {
plr              1308 arch/x86/kernel/cpu/resctrl/rdtgroup.c 				   rdtgrp->plr->r->name);
plr              1309 arch/x86/kernel/cpu/resctrl/rdtgroup.c 			size = rdtgroup_cbm_to_size(rdtgrp->plr->r,
plr              1310 arch/x86/kernel/cpu/resctrl/rdtgroup.c 						    rdtgrp->plr->d,
plr              1311 arch/x86/kernel/cpu/resctrl/rdtgroup.c 						    rdtgrp->plr->cbm);
plr              1312 arch/x86/kernel/cpu/resctrl/rdtgroup.c 			seq_printf(s, "%d=%u\n", rdtgrp->plr->d->id, size);
plr              2572 arch/x86/kernel/cpu/resctrl/rdtgroup.c 	if (d->plr && d->plr->cbm > 0)
plr              2573 arch/x86/kernel/cpu/resctrl/rdtgroup.c 		used_b |= d->plr->cbm;
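In the rdtgroup.c hits above, a domain's plr pointer doubles as the marker for pseudo-locked capacity bits (dom->plr ? dom->plr->cbm : 0, and used_b |= d->plr->cbm). A minimal, hypothetical helper capturing that pattern; the function name is invented for illustration and does not exist in the kernel tree:

/*
 * Hypothetical helper: a domain's pseudo-locked capacity bits are the
 * CBM recorded in its pseudo-lock region, or 0 when the domain has no
 * pseudo-locked region.
 */
static u32 domain_pseudo_locked_cbm(struct rdt_domain *d)
{
	return d->plr ? d->plr->cbm : 0;
}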
plr               189 drivers/gpio/gpio-adnp.c 		u8 ddr, plr, ier, isr;
plr               197 drivers/gpio/gpio-adnp.c 		err = adnp_read(adnp, GPIO_PLR(adnp) + i, &plr);
plr               221 drivers/gpio/gpio-adnp.c 			if (plr & BIT(j))