box 114 arch/x86/events/intel/uncore.c u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event)
box 123 arch/x86/events/intel/uncore.c void uncore_mmio_exit_box(struct intel_uncore_box *box)
box 125 arch/x86/events/intel/uncore.c if (box->io_addr)
box 126 arch/x86/events/intel/uncore.c iounmap(box->io_addr);
box 129 arch/x86/events/intel/uncore.c u64 uncore_mmio_read_counter(struct intel_uncore_box *box,
box 132 arch/x86/events/intel/uncore.c if (!box->io_addr)
box 135 arch/x86/events/intel/uncore.c return readq(box->io_addr + event->hw.event_base);
box 142 arch/x86/events/intel/uncore.c uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
box 156 arch/x86/events/intel/uncore.c (!uncore_box_is_fake(box) && reg1->alloc))
box 159 arch/x86/events/intel/uncore.c er = &box->shared_regs[reg1->idx];
box 171 arch/x86/events/intel/uncore.c if (!uncore_box_is_fake(box))
box 179 arch/x86/events/intel/uncore.c void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
box 192 arch/x86/events/intel/uncore.c if (uncore_box_is_fake(box) || !reg1->alloc)
box 195 arch/x86/events/intel/uncore.c er = &box->shared_regs[reg1->idx];
box 200 arch/x86/events/intel/uncore.c u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx)
box 206 arch/x86/events/intel/uncore.c er = &box->shared_regs[idx];
box 215 arch/x86/events/intel/uncore.c static void uncore_assign_hw_event(struct intel_uncore_box *box,
box 221 arch/x86/events/intel/uncore.c hwc->last_tag = ++box->tags[idx];
box 224 arch/x86/events/intel/uncore.c hwc->event_base = uncore_fixed_ctr(box);
box 225 arch/x86/events/intel/uncore.c hwc->config_base = uncore_fixed_ctl(box);
box 229 arch/x86/events/intel/uncore.c hwc->config_base = uncore_event_ctl(box, hwc->idx);
box 230 arch/x86/events/intel/uncore.c hwc->event_base = uncore_perf_ctr(box, hwc->idx);
box 233 arch/x86/events/intel/uncore.c void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event)
box 239 arch/x86/events/intel/uncore.c shift = 64 - uncore_freerunning_bits(box, event);
box 241 arch/x86/events/intel/uncore.c shift = 64 - uncore_fixed_ctr_bits(box);
box 243 arch/x86/events/intel/uncore.c shift = 64 - uncore_perf_ctr_bits(box);
box 248 arch/x86/events/intel/uncore.c new_count = uncore_read_counter(box, event);
box 265 arch/x86/events/intel/uncore.c struct intel_uncore_box *box;
box 270 arch/x86/events/intel/uncore.c box = container_of(hrtimer, struct intel_uncore_box, hrtimer);
box 271 arch/x86/events/intel/uncore.c if (!box->n_active || box->cpu != smp_processor_id())
box 283 arch/x86/events/intel/uncore.c list_for_each_entry(event, &box->active_list, active_entry) {
box 284 arch/x86/events/intel/uncore.c uncore_perf_event_update(box, event);
box 287 arch/x86/events/intel/uncore.c for_each_set_bit(bit, box->active_mask, UNCORE_PMC_IDX_MAX)
box 288 arch/x86/events/intel/uncore.c uncore_perf_event_update(box, box->events[bit]);
box 292 arch/x86/events/intel/uncore.c hrtimer_forward_now(hrtimer, ns_to_ktime(box->hrtimer_duration));
box 296 arch/x86/events/intel/uncore.c void uncore_pmu_start_hrtimer(struct intel_uncore_box *box)
box 298 arch/x86/events/intel/uncore.c hrtimer_start(&box->hrtimer, ns_to_ktime(box->hrtimer_duration),
box 302 arch/x86/events/intel/uncore.c void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box)
box 304 arch/x86/events/intel/uncore.c hrtimer_cancel(&box->hrtimer);
box 307 arch/x86/events/intel/uncore.c static void uncore_pmu_init_hrtimer(struct intel_uncore_box *box)
box 309 arch/x86/events/intel/uncore.c hrtimer_init(&box->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
box 310 arch/x86/events/intel/uncore.c box->hrtimer.function = uncore_pmu_hrtimer;
box 317 arch/x86/events/intel/uncore.c struct intel_uncore_box *box;
box 319 arch/x86/events/intel/uncore.c size = sizeof(*box) + numshared * sizeof(struct intel_uncore_extra_reg);
box 321 arch/x86/events/intel/uncore.c box = kzalloc_node(size, GFP_KERNEL, node);
box 322 arch/x86/events/intel/uncore.c if (!box)
box 326 arch/x86/events/intel/uncore.c raw_spin_lock_init(&box->shared_regs[i].lock);
box 328 arch/x86/events/intel/uncore.c uncore_pmu_init_hrtimer(box);
box 329 arch/x86/events/intel/uncore.c box->cpu = -1;
box 330 arch/x86/events/intel/uncore.c box->pci_phys_id = -1;
box 331 arch/x86/events/intel/uncore.c box->dieid = -1;
box 334 arch/x86/events/intel/uncore.c box->hrtimer_duration = UNCORE_PMU_HRTIMER_INTERVAL;
box 336 arch/x86/events/intel/uncore.c INIT_LIST_HEAD(&box->active_list);
box 338 arch/x86/events/intel/uncore.c return box;
box 347 arch/x86/events/intel/uncore.c static bool is_box_event(struct intel_uncore_box *box, struct perf_event *event)
box 349 arch/x86/events/intel/uncore.c return &box->pmu->pmu == event->pmu;
box 353 arch/x86/events/intel/uncore.c uncore_collect_events(struct intel_uncore_box *box, struct perf_event *leader,
box 359 arch/x86/events/intel/uncore.c max_count = box->pmu->type->num_counters;
box 360 arch/x86/events/intel/uncore.c if (box->pmu->type->fixed_ctl)
box 363 arch/x86/events/intel/uncore.c if (box->n_events >= max_count)
box 366 arch/x86/events/intel/uncore.c n = box->n_events;
box 368 arch/x86/events/intel/uncore.c if (is_box_event(box, leader)) {
box 369 arch/x86/events/intel/uncore.c box->event_list[n] = leader;
box 377 arch/x86/events/intel/uncore.c if (!is_box_event(box, event) ||
box 384 arch/x86/events/intel/uncore.c box->event_list[n] = event;
box 391 arch/x86/events/intel/uncore.c uncore_get_event_constraint(struct intel_uncore_box *box, struct perf_event *event)
box 393 arch/x86/events/intel/uncore.c struct intel_uncore_type *type = box->pmu->type;
box 397 arch/x86/events/intel/uncore.c c = type->ops->get_constraint(box, event);
box 415 arch/x86/events/intel/uncore.c static void uncore_put_event_constraint(struct intel_uncore_box *box,
box 418 arch/x86/events/intel/uncore.c if (box->pmu->type->ops->put_constraint)
box 419 arch/x86/events/intel/uncore.c box->pmu->type->ops->put_constraint(box, event);
box 422 arch/x86/events/intel/uncore.c static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int n)
box 432 arch/x86/events/intel/uncore.c c = uncore_get_event_constraint(box, box->event_list[i]);
box 433 arch/x86/events/intel/uncore.c box->event_constraint[i] = c;
box 440 arch/x86/events/intel/uncore.c hwc = &box->event_list[i]->hw;
box 441 arch/x86/events/intel/uncore.c c = box->event_constraint[i];
box 461 arch/x86/events/intel/uncore.c ret = perf_assign_events(box->event_constraint, n,
box 466 arch/x86/events/intel/uncore.c uncore_put_event_constraint(box, box->event_list[i]);
box 473 arch/x86/events/intel/uncore.c struct intel_uncore_box *box = uncore_event_to_box(event);
box 486 arch/x86/events/intel/uncore.c list_add_tail(&event->active_entry, &box->active_list);
box 488 arch/x86/events/intel/uncore.c uncore_read_counter(box, event));
box 489 arch/x86/events/intel/uncore.c if (box->n_active++ == 0)
box 490 arch/x86/events/intel/uncore.c uncore_pmu_start_hrtimer(box);
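The uncore_perf_event_update() and hrtimer hits above go together: uncore counters raise no overflow interrupt, so a per-box hrtimer polls every active event before a narrow counter can wrap unnoticed. A minimal sketch of the wrap-safe delta computation those update hits outline (the standalone helper and its name are illustrative; the shift idiom follows uncore.c):

/*
 * Counters are ctr_bits wide (see the uncore_*_ctr_bits() hits above),
 * so the unimplemented high bits are shifted out before subtracting;
 * the arithmetic right shift then turns a wrapped reading back into a
 * small positive delta. u64/s64 come from <linux/types.h>.
 */
static u64 sketch_counter_delta(u64 prev, u64 cur, int ctr_bits)
{
	int shift = 64 - ctr_bits;
	s64 delta = (cur << shift) - (prev << shift);

	return (u64)(delta >> shift);
}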
box 498 arch/x86/events/intel/uncore.c box->events[idx] = event;
box 499 arch/x86/events/intel/uncore.c box->n_active++;
box 500 arch/x86/events/intel/uncore.c __set_bit(idx, box->active_mask);
box 502 arch/x86/events/intel/uncore.c local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
box 503 arch/x86/events/intel/uncore.c uncore_enable_event(box, event);
box 505 arch/x86/events/intel/uncore.c if (box->n_active == 1)
box 506 arch/x86/events/intel/uncore.c uncore_pmu_start_hrtimer(box);
box 511 arch/x86/events/intel/uncore.c struct intel_uncore_box *box = uncore_event_to_box(event);
box 517 arch/x86/events/intel/uncore.c if (--box->n_active == 0)
box 518 arch/x86/events/intel/uncore.c uncore_pmu_cancel_hrtimer(box);
box 519 arch/x86/events/intel/uncore.c uncore_perf_event_update(box, event);
box 523 arch/x86/events/intel/uncore.c if (__test_and_clear_bit(hwc->idx, box->active_mask)) {
box 524 arch/x86/events/intel/uncore.c uncore_disable_event(box, event);
box 525 arch/x86/events/intel/uncore.c box->n_active--;
box 526 arch/x86/events/intel/uncore.c box->events[hwc->idx] = NULL;
box 530 arch/x86/events/intel/uncore.c if (box->n_active == 0)
box 531 arch/x86/events/intel/uncore.c uncore_pmu_cancel_hrtimer(box);
box 539 arch/x86/events/intel/uncore.c uncore_perf_event_update(box, event);
box 546 arch/x86/events/intel/uncore.c struct intel_uncore_box *box = uncore_event_to_box(event);
box 551 arch/x86/events/intel/uncore.c if (!box)
box 565 arch/x86/events/intel/uncore.c ret = n = uncore_collect_events(box, event, false);
box 573 arch/x86/events/intel/uncore.c ret = uncore_assign_events(box, assign, n);
box 578 arch/x86/events/intel/uncore.c for (i = 0; i < box->n_events; i++) {
box 579 arch/x86/events/intel/uncore.c event = box->event_list[i];
box 583 arch/x86/events/intel/uncore.c hwc->last_tag == box->tags[assign[i]])
box 597 arch/x86/events/intel/uncore.c event = box->event_list[i];
box 601 arch/x86/events/intel/uncore.c hwc->last_tag != box->tags[assign[i]]
box 602 arch/x86/events/intel/uncore.c uncore_assign_hw_event(box, event, assign[i]);
box 603 arch/x86/events/intel/uncore.c else if (i < box->n_events)
box 611 arch/x86/events/intel/uncore.c box->n_events = n;
box 618 arch/x86/events/intel/uncore.c struct intel_uncore_box *box = uncore_event_to_box(event);
box 631 arch/x86/events/intel/uncore.c for (i = 0; i < box->n_events; i++) {
box 632 arch/x86/events/intel/uncore.c if (event == box->event_list[i]) {
box 633 arch/x86/events/intel/uncore.c uncore_put_event_constraint(box, event);
box 635 arch/x86/events/intel/uncore.c for (++i; i < box->n_events; i++)
box 636 arch/x86/events/intel/uncore.c box->event_list[i - 1] = box->event_list[i];
box 638 arch/x86/events/intel/uncore.c --box->n_events;
box 649 arch/x86/events/intel/uncore.c struct intel_uncore_box *box = uncore_event_to_box(event);
box 650 arch/x86/events/intel/uncore.c uncore_perf_event_update(box, event);
box 699 arch/x86/events/intel/uncore.c struct intel_uncore_box *box;
box 721 arch/x86/events/intel/uncore.c box = uncore_pmu_to_box(pmu, event->cpu);
box 722 arch/x86/events/intel/uncore.c if (!box || box->cpu < 0)
box 724 arch/x86/events/intel/uncore.c event->cpu = box->cpu;
box 725 arch/x86/events/intel/uncore.c event->pmu_private = box;
box 749 arch/x86/events/intel/uncore.c if (!check_valid_freerunning_event(box, event))
box 758 arch/x86/events/intel/uncore.c event->hw.event_base = uncore_freerunning_counter(box, event);
box 763 arch/x86/events/intel/uncore.c ret = pmu->type->ops->hw_config(box, event);
box 780 arch/x86/events/intel/uncore.c struct intel_uncore_box *box;
box 786 arch/x86/events/intel/uncore.c box = uncore_pmu_to_box(uncore_pmu, smp_processor_id());
box 787 arch/x86/events/intel/uncore.c if (!box)
box 791 arch/x86/events/intel/uncore.c uncore_pmu->type->ops->enable_box(box);
box 797 arch/x86/events/intel/uncore.c struct intel_uncore_box *box;
box 803 arch/x86/events/intel/uncore.c box = uncore_pmu_to_box(uncore_pmu, smp_processor_id());
box 804 arch/x86/events/intel/uncore.c if (!box)
box 808 arch/x86/events/intel/uncore.c uncore_pmu->type->ops->disable_box(box);
box 987 arch/x86/events/intel/uncore.c struct intel_uncore_box *box;
box 1044 arch/x86/events/intel/uncore.c box = uncore_alloc_box(type, NUMA_NO_NODE);
box 1045 arch/x86/events/intel/uncore.c if (!box)
box 1053 arch/x86/events/intel/uncore.c atomic_inc(&box->refcnt);
box 1054 arch/x86/events/intel/uncore.c box->pci_phys_id = phys_id;
box 1055 arch/x86/events/intel/uncore.c box->dieid = die;
box 1056 arch/x86/events/intel/uncore.c box->pci_dev = pdev;
box 1057 arch/x86/events/intel/uncore.c box->pmu = pmu;
box 1058 arch/x86/events/intel/uncore.c uncore_box_init(box);
box 1059 arch/x86/events/intel/uncore.c pci_set_drvdata(pdev, box);
box 1061 arch/x86/events/intel/uncore.c pmu->boxes[die] = box;
box 1070 arch/x86/events/intel/uncore.c uncore_box_exit(box);
box 1071 arch/x86/events/intel/uncore.c kfree(box);
box 1078 arch/x86/events/intel/uncore.c struct intel_uncore_box *box;
box 1084 arch/x86/events/intel/uncore.c box = pci_get_drvdata(pdev);
box 1085 arch/x86/events/intel/uncore.c if (!box) {
box 1098 arch/x86/events/intel/uncore.c pmu = box->pmu;
box 1099 arch/x86/events/intel/uncore.c if (WARN_ON_ONCE(phys_id != box->pci_phys_id))
box 1103 arch/x86/events/intel/uncore.c pmu->boxes[box->dieid] = NULL;
box 1106 arch/x86/events/intel/uncore.c uncore_box_exit(box);
box 1107 arch/x86/events/intel/uncore.c kfree(box);
box 1161 arch/x86/events/intel/uncore.c struct intel_uncore_box *box;
box 1166 arch/x86/events/intel/uncore.c box = pmu->boxes[die];
box 1167 arch/x86/events/intel/uncore.c if (!box)
box 1171 arch/x86/events/intel/uncore.c WARN_ON_ONCE(box->cpu != -1);
box 1172 arch/x86/events/intel/uncore.c box->cpu = new_cpu;
box 1176 arch/x86/events/intel/uncore.c WARN_ON_ONCE(box->cpu != old_cpu);
box 1177 arch/x86/events/intel/uncore.c box->cpu = -1;
box 1181 arch/x86/events/intel/uncore.c uncore_pmu_cancel_hrtimer(box);
box 1183 arch/x86/events/intel/uncore.c box->cpu = new_cpu;
box 1198 arch/x86/events/intel/uncore.c struct intel_uncore_box *box;
box 1205 arch/x86/events/intel/uncore.c box = pmu->boxes[id];
box 1206 arch/x86/events/intel/uncore.c if (box && atomic_dec_return(&box->refcnt) == 0)
box 1207 arch/x86/events/intel/uncore.c uncore_box_exit(box);
box 1243 arch/x86/events/intel/uncore.c struct intel_uncore_box *box, *tmp;
box 1256 arch/x86/events/intel/uncore.c box = uncore_alloc_box(type, cpu_to_node(cpu));
box 1257 arch/x86/events/intel/uncore.c if (!box)
box 1259 arch/x86/events/intel/uncore.c box->pmu = pmu;
box 1260 arch/x86/events/intel/uncore.c box->dieid = die;
box 1261 arch/x86/events/intel/uncore.c list_add(&box->active_list, &allocated);
box 1265 arch/x86/events/intel/uncore.c list_for_each_entry_safe(box, tmp, &allocated, active_list) {
box 1266 arch/x86/events/intel/uncore.c list_del_init(&box->active_list);
box 1267 arch/x86/events/intel/uncore.c box->pmu->boxes[die] = box;
box 1272 arch/x86/events/intel/uncore.c list_for_each_entry_safe(box, tmp, &allocated, active_list) {
box 1273 arch/x86/events/intel/uncore.c list_del_init(&box->active_list);
box 1274 arch/x86/events/intel/uncore.c kfree(box);
box 1284 arch/x86/events/intel/uncore.c struct intel_uncore_box *box;
box 1295 arch/x86/events/intel/uncore.c box = pmu->boxes[id];
box 1296 arch/x86/events/intel/uncore.c if (box && atomic_inc_return(&box->refcnt) == 1)
box 1297 arch/x86/events/intel/uncore.c uncore_box_init(box);
box 199 arch/x86/events/intel/uncore.h unsigned int uncore_mmio_box_ctl(struct intel_uncore_box *box)
box 201 arch/x86/events/intel/uncore.h return box->pmu->type->box_ctl +
box 202 arch/x86/events/intel/uncore.h box->pmu->type->mmio_offset * box->pmu->pmu_idx;
box 205 arch/x86/events/intel/uncore.h static inline unsigned uncore_pci_box_ctl(struct intel_uncore_box *box)
box 207 arch/x86/events/intel/uncore.h return box->pmu->type->box_ctl;
box 210 arch/x86/events/intel/uncore.h static inline unsigned uncore_pci_fixed_ctl(struct intel_uncore_box *box)
box 212 arch/x86/events/intel/uncore.h return box->pmu->type->fixed_ctl;
box 215 arch/x86/events/intel/uncore.h static inline unsigned uncore_pci_fixed_ctr(struct intel_uncore_box *box)
box 217 arch/x86/events/intel/uncore.h return box->pmu->type->fixed_ctr;
box 221 arch/x86/events/intel/uncore.h unsigned uncore_pci_event_ctl(struct intel_uncore_box *box, int idx)
box 223 arch/x86/events/intel/uncore.h if (test_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags))
box 224 arch/x86/events/intel/uncore.h return idx * 8 + box->pmu->type->event_ctl;
box 226 arch/x86/events/intel/uncore.h return idx * 4 + box->pmu->type->event_ctl;
box 230 arch/x86/events/intel/uncore.h unsigned uncore_pci_perf_ctr(struct intel_uncore_box *box, int idx)
box 232 arch/x86/events/intel/uncore.h return idx * 8 + box->pmu->type->perf_ctr;
box 235 arch/x86/events/intel/uncore.h static inline unsigned uncore_msr_box_offset(struct intel_uncore_box *box)
box 237 arch/x86/events/intel/uncore.h struct intel_uncore_pmu *pmu = box->pmu;
box 243 arch/x86/events/intel/uncore.h static inline unsigned uncore_msr_box_ctl(struct intel_uncore_box *box)
box 245 arch/x86/events/intel/uncore.h if (!box->pmu->type->box_ctl)
box 247 arch/x86/events/intel/uncore.h return box->pmu->type->box_ctl + uncore_msr_box_offset(box);
box 250 arch/x86/events/intel/uncore.h static inline unsigned uncore_msr_fixed_ctl(struct intel_uncore_box *box)
box 252 arch/x86/events/intel/uncore.h if (!box->pmu->type->fixed_ctl)
box 254 arch/x86/events/intel/uncore.h return box->pmu->type->fixed_ctl + uncore_msr_box_offset(box);
box 257 arch/x86/events/intel/uncore.h static inline unsigned uncore_msr_fixed_ctr(struct intel_uncore_box *box)
box 259 arch/x86/events/intel/uncore.h return box->pmu->type->fixed_ctr + uncore_msr_box_offset(box);
box 304 arch/x86/events/intel/uncore.h unsigned int uncore_freerunning_counter(struct intel_uncore_box *box,
box 309 arch/x86/events/intel/uncore.h struct intel_uncore_pmu *pmu = box->pmu;
box 317 arch/x86/events/intel/uncore.h unsigned uncore_msr_event_ctl(struct intel_uncore_box *box, int idx)
box 319 arch/x86/events/intel/uncore.h if (test_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags)) {
box 321 arch/x86/events/intel/uncore.h (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx);
box 323 arch/x86/events/intel/uncore.h return box->pmu->type->event_ctl +
box 324 arch/x86/events/intel/uncore.h (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
box 325 arch/x86/events/intel/uncore.h uncore_msr_box_offset(box);
box 330 arch/x86/events/intel/uncore.h unsigned uncore_msr_perf_ctr(struct intel_uncore_box *box, int idx)
box 332 arch/x86/events/intel/uncore.h if (test_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags)) {
box 334 arch/x86/events/intel/uncore.h (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx);
box 336 arch/x86/events/intel/uncore.h return box->pmu->type->perf_ctr +
box 337 arch/x86/events/intel/uncore.h (box->pmu->type->pair_ctr_ctl ? 2 * idx : idx) +
box 338 arch/x86/events/intel/uncore.h uncore_msr_box_offset(box);
box 343 arch/x86/events/intel/uncore.h unsigned uncore_fixed_ctl(struct intel_uncore_box *box)
box 345 arch/x86/events/intel/uncore.h if (box->pci_dev || box->io_addr)
box 346 arch/x86/events/intel/uncore.h return uncore_pci_fixed_ctl(box);
box 348 arch/x86/events/intel/uncore.h return uncore_msr_fixed_ctl(box);
box 352 arch/x86/events/intel/uncore.h unsigned uncore_fixed_ctr(struct intel_uncore_box *box)
box 354 arch/x86/events/intel/uncore.h if (box->pci_dev || box->io_addr)
box 355 arch/x86/events/intel/uncore.h return uncore_pci_fixed_ctr(box);
box 357 arch/x86/events/intel/uncore.h return uncore_msr_fixed_ctr(box);
box 361 arch/x86/events/intel/uncore.h unsigned uncore_event_ctl(struct intel_uncore_box *box, int idx)
box 363 arch/x86/events/intel/uncore.h if (box->pci_dev || box->io_addr)
box 364 arch/x86/events/intel/uncore.h return uncore_pci_event_ctl(box, idx);
box 366 arch/x86/events/intel/uncore.h return uncore_msr_event_ctl(box, idx);
box 370 arch/x86/events/intel/uncore.h unsigned uncore_perf_ctr(struct intel_uncore_box *box, int idx)
box 372 arch/x86/events/intel/uncore.h if (box->pci_dev || box->io_addr)
box 373 arch/x86/events/intel/uncore.h return uncore_pci_perf_ctr(box, idx);
box 375 arch/x86/events/intel/uncore.h return uncore_msr_perf_ctr(box, idx);
box 378 arch/x86/events/intel/uncore.h static inline int uncore_perf_ctr_bits(struct intel_uncore_box *box)
box 380 arch/x86/events/intel/uncore.h return box->pmu->type->perf_ctr_bits;
box 383 arch/x86/events/intel/uncore.h static inline int uncore_fixed_ctr_bits(struct intel_uncore_box *box)
box 385 arch/x86/events/intel/uncore.h return box->pmu->type->fixed_ctr_bits;
box 389 arch/x86/events/intel/uncore.h unsigned int uncore_freerunning_bits(struct intel_uncore_box *box,
box 394 arch/x86/events/intel/uncore.h return box->pmu->type->freerunning[type].bits;
box 397 arch/x86/events/intel/uncore.h static inline int uncore_num_freerunning(struct intel_uncore_box *box,
box 402 arch/x86/events/intel/uncore.h return box->pmu->type->freerunning[type].num_counters;
box 405 arch/x86/events/intel/uncore.h static inline int uncore_num_freerunning_types(struct intel_uncore_box *box,
box 408 arch/x86/events/intel/uncore.h return box->pmu->type->num_freerunning_types;
box 411 arch/x86/events/intel/uncore.h static inline bool check_valid_freerunning_event(struct intel_uncore_box *box,
box 417 arch/x86/events/intel/uncore.h return (type < uncore_num_freerunning_types(box, event)) &&
box 418 arch/x86/events/intel/uncore.h (idx < uncore_num_freerunning(box, event));
box 421 arch/x86/events/intel/uncore.h static inline int uncore_num_counters(struct intel_uncore_box *box)
box 423 arch/x86/events/intel/uncore.h return box->pmu->type->num_counters;
box 435 arch/x86/events/intel/uncore.h static inline int uncore_freerunning_hw_config(struct intel_uncore_box *box,
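The uncore_msr_*() helpers above reduce to one piece of address arithmetic: a per-type base register, a stride that doubles when control and counter registers are interleaved (pair_ctr_ctl), and a per-box offset scaled by the PMU index. A simplified sketch under those assumptions (the helper name is illustrative, and uncore_msr_box_offset() has per-type quirks this glosses over; field names follow the hits above):

static unsigned int sketch_msr_event_ctl(struct intel_uncore_box *box, int idx)
{
	struct intel_uncore_type *type = box->pmu->type;

	/* paired layouts put a counter register between control registers */
	return type->event_ctl +
	       (type->pair_ctr_ctl ? 2 * idx : idx) +
	       type->msr_offset * box->pmu->pmu_idx;	/* per-box offset */
}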
box 444 arch/x86/events/intel/uncore.h static inline void uncore_disable_event(struct intel_uncore_box *box,
box 447 arch/x86/events/intel/uncore.h box->pmu->type->ops->disable_event(box, event);
box 450 arch/x86/events/intel/uncore.h static inline void uncore_enable_event(struct intel_uncore_box *box,
box 453 arch/x86/events/intel/uncore.h box->pmu->type->ops->enable_event(box, event);
box 456 arch/x86/events/intel/uncore.h static inline u64 uncore_read_counter(struct intel_uncore_box *box,
box 459 arch/x86/events/intel/uncore.h return box->pmu->type->ops->read_counter(box, event);
box 462 arch/x86/events/intel/uncore.h static inline void uncore_box_init(struct intel_uncore_box *box)
box 464 arch/x86/events/intel/uncore.h if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
box 465 arch/x86/events/intel/uncore.h if (box->pmu->type->ops->init_box)
box 466 arch/x86/events/intel/uncore.h box->pmu->type->ops->init_box(box);
box 470 arch/x86/events/intel/uncore.h static inline void uncore_box_exit(struct intel_uncore_box *box)
box 472 arch/x86/events/intel/uncore.h if (test_and_clear_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
box 473 arch/x86/events/intel/uncore.h if (box->pmu->type->ops->exit_box)
box 474 arch/x86/events/intel/uncore.h box->pmu->type->ops->exit_box(box);
box 478 arch/x86/events/intel/uncore.h static inline bool uncore_box_is_fake(struct intel_uncore_box *box)
box 480 arch/x86/events/intel/uncore.h return (box->dieid < 0);
box 494 arch/x86/events/intel/uncore.h u64 uncore_msr_read_counter(struct intel_uncore_box *box, struct perf_event *event);
box 495 arch/x86/events/intel/uncore.h void uncore_mmio_exit_box(struct intel_uncore_box *box);
box 496 arch/x86/events/intel/uncore.h u64 uncore_mmio_read_counter(struct intel_uncore_box *box,
box 498 arch/x86/events/intel/uncore.h void uncore_pmu_start_hrtimer(struct intel_uncore_box *box);
box 499 arch/x86/events/intel/uncore.h void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box);
box 505 arch/x86/events/intel/uncore.h void uncore_perf_event_update(struct intel_uncore_box *box, struct perf_event *event);
box 507 arch/x86/events/intel/uncore.h uncore_get_constraint(struct intel_uncore_box *box, struct perf_event *event);
box 508 arch/x86/events/intel/uncore.h void uncore_put_constraint(struct intel_uncore_box *box, struct perf_event *event);
box 509 arch/x86/events/intel/uncore.h u64 uncore_shared_reg_config(struct intel_uncore_box *box, int idx);
box 200 arch/x86/events/intel/uncore_nhmex.c static void nhmex_uncore_msr_init_box(struct intel_uncore_box *box)
box 205 arch/x86/events/intel/uncore_nhmex.c static void nhmex_uncore_msr_exit_box(struct intel_uncore_box *box)
box 210 arch/x86/events/intel/uncore_nhmex.c static void nhmex_uncore_msr_disable_box(struct intel_uncore_box *box)
box 212 arch/x86/events/intel/uncore_nhmex.c unsigned msr = uncore_msr_box_ctl(box);
box 217 arch/x86/events/intel/uncore_nhmex.c config &= ~((1ULL << uncore_num_counters(box)) - 1);
box 219 arch/x86/events/intel/uncore_nhmex.c if (uncore_msr_fixed_ctl(box))
box 225 arch/x86/events/intel/uncore_nhmex.c static void nhmex_uncore_msr_enable_box(struct intel_uncore_box *box)
box 227 arch/x86/events/intel/uncore_nhmex.c unsigned msr = uncore_msr_box_ctl(box);
box 232 arch/x86/events/intel/uncore_nhmex.c config |= (1ULL << uncore_num_counters(box)) - 1;
box 234 arch/x86/events/intel/uncore_nhmex.c if (uncore_msr_fixed_ctl(box))
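The nhmex_uncore_msr_{disable,enable}_box() hits above show the global-control idiom shared by the MSR-based boxes: the box control register carries one enable bit per counter, so the low uncore_num_counters() bits are cleared or set wholesale. A sketch of the disable side (the function name is illustrative; the read-modify-write follows the listing):

static void sketch_msr_disable_box(struct intel_uncore_box *box)
{
	unsigned int msr = uncore_msr_box_ctl(box);
	u64 config;

	if (msr) {
		rdmsrl(msr, config);
		config &= ~((1ULL << uncore_num_counters(box)) - 1);
		wrmsrl(msr, config);	/* enable_box ORs the same mask in */
	}
}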
box 240 arch/x86/events/intel/uncore_nhmex.c static void nhmex_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
box 245 arch/x86/events/intel/uncore_nhmex.c static void nhmex_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
box 251 arch/x86/events/intel/uncore_nhmex.c else if (box->pmu->type->event_mask & NHMEX_PMON_CTL_EN_BIT0)
box 350 arch/x86/events/intel/uncore_nhmex.c static int nhmex_bbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
box 367 arch/x86/events/intel/uncore_nhmex.c if (box->pmu->pmu_idx == 0)
box 377 arch/x86/events/intel/uncore_nhmex.c static void nhmex_bbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
box 441 arch/x86/events/intel/uncore_nhmex.c static int nhmex_sbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
box 452 arch/x86/events/intel/uncore_nhmex.c if (box->pmu->pmu_idx == 0)
box 462 arch/x86/events/intel/uncore_nhmex.c static void nhmex_sbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
box 553 arch/x86/events/intel/uncore_nhmex.c static bool nhmex_mbox_get_shared_reg(struct intel_uncore_box *box, int idx, u64 config)
box 561 arch/x86/events/intel/uncore_nhmex.c er = &box->shared_regs[idx];
box 586 arch/x86/events/intel/uncore_nhmex.c er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
box 614 arch/x86/events/intel/uncore_nhmex.c static void nhmex_mbox_put_shared_reg(struct intel_uncore_box *box, int idx)
box 619 arch/x86/events/intel/uncore_nhmex.c er = &box->shared_regs[idx];
box 625 arch/x86/events/intel/uncore_nhmex.c er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
box 669 arch/x86/events/intel/uncore_nhmex.c nhmex_mbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
box 680 arch/x86/events/intel/uncore_nhmex.c if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
box 686 arch/x86/events/intel/uncore_nhmex.c if (!nhmex_mbox_get_shared_reg(box, idx[i],
box 694 arch/x86/events/intel/uncore_nhmex.c (uncore_box_is_fake(box) || !reg2->alloc) &&
box 695 arch/x86/events/intel/uncore_nhmex.c !nhmex_mbox_get_shared_reg(box, reg2->idx, reg2->config))
box 704 arch/x86/events/intel/uncore_nhmex.c if (!uncore_box_is_fake(box)) {
box 732 arch/x86/events/intel/uncore_nhmex.c nhmex_mbox_put_shared_reg(box, idx[0]);
box 734 arch/x86/events/intel/uncore_nhmex.c nhmex_mbox_put_shared_reg(box, idx[1]);
box 738 arch/x86/events/intel/uncore_nhmex.c static void nhmex_mbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
box 743 arch/x86/events/intel/uncore_nhmex.c if (uncore_box_is_fake(box))
box 747 arch/x86/events/intel/uncore_nhmex.c nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 0, 8));
box 749 arch/x86/events/intel/uncore_nhmex.c nhmex_mbox_put_shared_reg(box, __BITS_VALUE(reg1->idx, 1, 8));
box 753 arch/x86/events/intel/uncore_nhmex.c nhmex_mbox_put_shared_reg(box, reg2->idx);
box 765 arch/x86/events/intel/uncore_nhmex.c static int nhmex_mbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
box 767 arch/x86/events/intel/uncore_nhmex.c struct intel_uncore_type *type = box->pmu->type;
box 784 arch/x86/events/intel/uncore_nhmex.c msr = er->msr + type->msr_offset * box->pmu->pmu_idx;
box 811 arch/x86/events/intel/uncore_nhmex.c if (box->pmu->pmu_idx == 0)
box 819 arch/x86/events/intel/uncore_nhmex.c static u64 nhmex_mbox_shared_reg_config(struct intel_uncore_box *box, int idx)
box 826 arch/x86/events/intel/uncore_nhmex.c return box->shared_regs[idx].config;
box 828 arch/x86/events/intel/uncore_nhmex.c er = &box->shared_regs[EXTRA_REG_NHMEX_M_ZDP_CTL_FVC];
box 835 arch/x86/events/intel/uncore_nhmex.c static void nhmex_mbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
box 845 arch/x86/events/intel/uncore_nhmex.c nhmex_mbox_shared_reg_config(box, idx));
box 849 arch/x86/events/intel/uncore_nhmex.c nhmex_mbox_shared_reg_config(box, idx));
box 944 arch/x86/events/intel/uncore_nhmex.c static void nhmex_rbox_alter_er(struct intel_uncore_box *box, struct perf_event *event)
box 978 arch/x86/events/intel/uncore_nhmex.c nhmex_rbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
box 989 arch/x86/events/intel/uncore_nhmex.c if (!uncore_box_is_fake(box) && reg1->alloc)
box 1001 arch/x86/events/intel/uncore_nhmex.c er = &box->shared_regs[er_idx];
box 1052 arch/x86/events/intel/uncore_nhmex.c if (!uncore_box_is_fake(box)) {
box 1054 arch/x86/events/intel/uncore_nhmex.c nhmex_rbox_alter_er(box, event);
box 1062 arch/x86/events/intel/uncore_nhmex.c static void nhmex_rbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
box 1068 arch/x86/events/intel/uncore_nhmex.c if (uncore_box_is_fake(box) || !reg1->alloc)
box 1077 arch/x86/events/intel/uncore_nhmex.c er = &box->shared_regs[er_idx];
box 1086 arch/x86/events/intel/uncore_nhmex.c static int nhmex_rbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
box 1111 arch/x86/events/intel/uncore_nhmex.c static void nhmex_rbox_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
box 1119 arch/x86/events/intel/uncore_nhmex.c port = idx / 6 + box->pmu->pmu_idx * 4;
box 1131 arch/x86/events/intel/uncore_nhmex.c uncore_shared_reg_config(box, 2 + (idx / 6) * 5));
box 121 arch/x86/events/intel/uncore_snb.c static void snb_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
box 131 arch/x86/events/intel/uncore_snb.c static void snb_uncore_msr_disable_event(struct intel_uncore_box *box, struct perf_event *event)
box 136 arch/x86/events/intel/uncore_snb.c static void snb_uncore_msr_init_box(struct intel_uncore_box *box)
box 138 arch/x86/events/intel/uncore_snb.c if (box->pmu->pmu_idx == 0) {
box 144 arch/x86/events/intel/uncore_snb.c static void snb_uncore_msr_enable_box(struct intel_uncore_box *box)
box 150 arch/x86/events/intel/uncore_snb.c static void snb_uncore_msr_exit_box(struct intel_uncore_box *box)
box 152 arch/x86/events/intel/uncore_snb.c if (box->pmu->pmu_idx == 0)
box 235 arch/x86/events/intel/uncore_snb.c static void skl_uncore_msr_init_box(struct intel_uncore_box *box)
box 237 arch/x86/events/intel/uncore_snb.c if (box->pmu->pmu_idx == 0) {
box 243 arch/x86/events/intel/uncore_snb.c if (box->pmu->pmu_idx == 7)
box 244 arch/x86/events/intel/uncore_snb.c __set_bit(UNCORE_BOX_FLAG_CFL8_CBOX_MSR_OFFS, &box->flags);
box 247 arch/x86/events/intel/uncore_snb.c static void skl_uncore_msr_enable_box(struct intel_uncore_box *box)
box 253 arch/x86/events/intel/uncore_snb.c static void skl_uncore_msr_exit_box(struct intel_uncore_box *box)
box 255 arch/x86/events/intel/uncore_snb.c if (box->pmu->pmu_idx == 0)
box 411 arch/x86/events/intel/uncore_snb.c static void snb_uncore_imc_init_box(struct intel_uncore_box *box)
box 413 arch/x86/events/intel/uncore_snb.c struct pci_dev *pdev = box->pci_dev;
box 428 arch/x86/events/intel/uncore_snb.c box->io_addr = ioremap(addr, SNB_UNCORE_PCI_IMC_MAP_SIZE);
box 429 arch/x86/events/intel/uncore_snb.c box->hrtimer_duration = UNCORE_SNB_IMC_HRTIMER_INTERVAL;
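snb_uncore_imc_init_box() above is the MMIO flavour of a box: init_box ioremaps the counter window once, and uncore_mmio_read_counter() (first block of hits in this listing) is then a plain readq off io_addr, guarded against a failed mapping. A sketch combining the two (BAR discovery and map size are elided; the helper name is illustrative):

static u64 sketch_mmio_read_counter(struct intel_uncore_box *box,
				    struct perf_event *event)
{
	/* init_box may have failed to ioremap; treat the counter as dead */
	if (!box->io_addr)
		return 0;

	return readq(box->io_addr + event->hw.event_base);
}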
box 432 arch/x86/events/intel/uncore_snb.c static void snb_uncore_imc_enable_box(struct intel_uncore_box *box)
box 435 arch/x86/events/intel/uncore_snb.c static void snb_uncore_imc_disable_box(struct intel_uncore_box *box)
box 438 arch/x86/events/intel/uncore_snb.c static void snb_uncore_imc_enable_event(struct intel_uncore_box *box, struct perf_event *event)
box 441 arch/x86/events/intel/uncore_snb.c static void snb_uncore_imc_disable_event(struct intel_uncore_box *box, struct perf_event *event)
box 451 arch/x86/events/intel/uncore_snb.c struct intel_uncore_box *box;
box 483 arch/x86/events/intel/uncore_snb.c box = uncore_pmu_to_box(pmu, event->cpu);
box 484 arch/x86/events/intel/uncore_snb.c if (!box || box->cpu < 0)
box 487 arch/x86/events/intel/uncore_snb.c event->cpu = box->cpu;
box 488 arch/x86/events/intel/uncore_snb.c event->pmu_private = box;
box 524 arch/x86/events/intel/uncore_snb.c static int snb_uncore_imc_hw_config(struct intel_uncore_box *box, struct perf_event *event)
box 923 arch/x86/events/intel/uncore_snb.c static void nhm_uncore_msr_disable_box(struct intel_uncore_box *box)
box 928 arch/x86/events/intel/uncore_snb.c static void nhm_uncore_msr_enable_box(struct intel_uncore_box *box)
box 933 arch/x86/events/intel/uncore_snb.c static void nhm_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
box 463 arch/x86/events/intel/uncore_snbep.c static void snbep_uncore_pci_disable_box(struct intel_uncore_box *box)
box 465 arch/x86/events/intel/uncore_snbep.c struct pci_dev *pdev = box->pci_dev;
box 466 arch/x86/events/intel/uncore_snbep.c int box_ctl = uncore_pci_box_ctl(box);
box 475 arch/x86/events/intel/uncore_snbep.c static void snbep_uncore_pci_enable_box(struct intel_uncore_box *box)
box 477 arch/x86/events/intel/uncore_snbep.c struct pci_dev *pdev = box->pci_dev;
box 478 arch/x86/events/intel/uncore_snbep.c int box_ctl = uncore_pci_box_ctl(box);
box 487 arch/x86/events/intel/uncore_snbep.c static void snbep_uncore_pci_enable_event(struct intel_uncore_box *box, struct perf_event *event)
box 489 arch/x86/events/intel/uncore_snbep.c struct pci_dev *pdev = box->pci_dev;
box 495 arch/x86/events/intel/uncore_snbep.c static void snbep_uncore_pci_disable_event(struct intel_uncore_box *box, struct perf_event *event)
box 497 arch/x86/events/intel/uncore_snbep.c struct pci_dev *pdev = box->pci_dev;
box 503 arch/x86/events/intel/uncore_snbep.c static u64 snbep_uncore_pci_read_counter(struct intel_uncore_box *box, struct perf_event *event)
box 505 arch/x86/events/intel/uncore_snbep.c struct pci_dev *pdev = box->pci_dev;
box 515 arch/x86/events/intel/uncore_snbep.c static void snbep_uncore_pci_init_box(struct intel_uncore_box *box)
box 517 arch/x86/events/intel/uncore_snbep.c struct pci_dev *pdev = box->pci_dev;
box 518 arch/x86/events/intel/uncore_snbep.c int box_ctl = uncore_pci_box_ctl(box);
box 523 arch/x86/events/intel/uncore_snbep.c static void snbep_uncore_msr_disable_box(struct intel_uncore_box *box)
box 528 arch/x86/events/intel/uncore_snbep.c msr = uncore_msr_box_ctl(box);
box 536 arch/x86/events/intel/uncore_snbep.c static void snbep_uncore_msr_enable_box(struct intel_uncore_box *box)
box 541 arch/x86/events/intel/uncore_snbep.c msr = uncore_msr_box_ctl(box);
box 549 arch/x86/events/intel/uncore_snbep.c static void snbep_uncore_msr_enable_event(struct intel_uncore_box *box, struct perf_event *event)
box 555 arch/x86/events/intel/uncore_snbep.c wrmsrl(reg1->reg, uncore_shared_reg_config(box, 0));
box 560 arch/x86/events/intel/uncore_snbep.c static void snbep_uncore_msr_disable_event(struct intel_uncore_box *box,
box 568 arch/x86/events/intel/uncore_snbep.c static void snbep_uncore_msr_init_box(struct intel_uncore_box *box)
box 570 arch/x86/events/intel/uncore_snbep.c unsigned msr = uncore_msr_box_ctl(box);
box 841 arch/x86/events/intel/uncore_snbep.c static void snbep_cbox_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
box 844 arch/x86/events/intel/uncore_snbep.c struct intel_uncore_extra_reg *er = &box->shared_regs[0];
box 847 arch/x86/events/intel/uncore_snbep.c if (uncore_box_is_fake(box))
box 858 arch/x86/events/intel/uncore_snbep.c __snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event,
box 862 arch/x86/events/intel/uncore_snbep.c struct intel_uncore_extra_reg *er = &box->shared_regs[0];
box 874 arch/x86/events/intel/uncore_snbep.c if (!uncore_box_is_fake(box) && (reg1->alloc & (0x1 << i)))
box 892 arch/x86/events/intel/uncore_snbep.c if (!uncore_box_is_fake(box))
box 921 arch/x86/events/intel/uncore_snbep.c snbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
box 923 arch/x86/events/intel/uncore_snbep.c return __snbep_cbox_get_constraint(box, event, snbep_cbox_filter_mask);
box 926 arch/x86/events/intel/uncore_snbep.c static int snbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
box 940 arch/x86/events/intel/uncore_snbep.c SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
box 990 arch/x86/events/intel/uncore_snbep.c snbep_pcu_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
box 993 arch/x86/events/intel/uncore_snbep.c struct intel_uncore_extra_reg *er = &box->shared_regs[0];
box 1000 arch/x86/events/intel/uncore_snbep.c (!uncore_box_is_fake(box) && reg1->alloc))
box 1023 arch/x86/events/intel/uncore_snbep.c if (!uncore_box_is_fake(box)) {
box 1031 arch/x86/events/intel/uncore_snbep.c static void snbep_pcu_put_constraint(struct intel_uncore_box *box, struct perf_event *event)
box 1034 arch/x86/events/intel/uncore_snbep.c struct intel_uncore_extra_reg *er = &box->shared_regs[0];
box 1036 arch/x86/events/intel/uncore_snbep.c if (uncore_box_is_fake(box) || !reg1->alloc)
box 1043 arch/x86/events/intel/uncore_snbep.c static int snbep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
box 1099 arch/x86/events/intel/uncore_snbep.c static int snbep_qpi_hw_config(struct intel_uncore_box *box, struct perf_event *event)
box 1115 arch/x86/events/intel/uncore_snbep.c static void snbep_qpi_enable_event(struct intel_uncore_box *box, struct perf_event *event)
box 1117 arch/x86/events/intel/uncore_snbep.c struct pci_dev *pdev = box->pci_dev;
box 1123 arch/x86/events/intel/uncore_snbep.c int idx = box->pmu->pmu_idx + SNBEP_PCI_QPI_PORT0_FILTER;
box 1124 arch/x86/events/intel/uncore_snbep.c int die = box->dieid;
box 1383 arch/x86/events/intel/uncore_snbep.c static void ivbep_uncore_msr_init_box(struct intel_uncore_box *box)
box 1385 arch/x86/events/intel/uncore_snbep.c unsigned msr = uncore_msr_box_ctl(box);
box 1390 arch/x86/events/intel/uncore_snbep.c static void ivbep_uncore_pci_init_box(struct intel_uncore_box *box)
box 1392 arch/x86/events/intel/uncore_snbep.c struct pci_dev *pdev = box->pci_dev;
box 1605 arch/x86/events/intel/uncore_snbep.c ivbep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
box 1607 arch/x86/events/intel/uncore_snbep.c return __snbep_cbox_get_constraint(box, event, ivbep_cbox_filter_mask);
box 1610 arch/x86/events/intel/uncore_snbep.c static int ivbep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
box 1624 arch/x86/events/intel/uncore_snbep.c SNBEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
box 1631 arch/x86/events/intel/uncore_snbep.c static void ivbep_cbox_enable_event(struct intel_uncore_box *box, struct perf_event *event)
box 1637 arch/x86/events/intel/uncore_snbep.c u64 filter = uncore_shared_reg_config(box, 0);
box 1732 arch/x86/events/intel/uncore_snbep.c static void ivbep_uncore_irp_enable_event(struct intel_uncore_box *box, struct perf_event *event)
box 1734 arch/x86/events/intel/uncore_snbep.c struct pci_dev *pdev = box->pci_dev;
box 1741 arch/x86/events/intel/uncore_snbep.c static void ivbep_uncore_irp_disable_event(struct intel_uncore_box *box, struct perf_event *event)
box 1743 arch/x86/events/intel/uncore_snbep.c struct pci_dev *pdev = box->pci_dev;
box 1749 arch/x86/events/intel/uncore_snbep.c static u64 ivbep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
box 1751 arch/x86/events/intel/uncore_snbep.c struct pci_dev *pdev = box->pci_dev;
box 2032 arch/x86/events/intel/uncore_snbep.c knl_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
box 2034 arch/x86/events/intel/uncore_snbep.c return __snbep_cbox_get_constraint(box, event, knl_cha_filter_mask);
box 2037 arch/x86/events/intel/uncore_snbep.c static int knl_cha_hw_config(struct intel_uncore_box *box,
box 2052 arch/x86/events/intel/uncore_snbep.c KNL_CHA_MSR_OFFSET * box->pmu->pmu_idx;
box 2063 arch/x86/events/intel/uncore_snbep.c static void hswep_cbox_enable_event(struct intel_uncore_box *box,
box 2137 arch/x86/events/intel/uncore_snbep.c static void knl_uncore_imc_enable_box(struct intel_uncore_box *box)
box 2139 arch/x86/events/intel/uncore_snbep.c struct pci_dev *pdev = box->pci_dev;
box 2140 arch/x86/events/intel/uncore_snbep.c int box_ctl = uncore_pci_box_ctl(box);
box 2145 arch/x86/events/intel/uncore_snbep.c static void knl_uncore_imc_enable_event(struct intel_uncore_box *box,
box 2148 arch/x86/events/intel/uncore_snbep.c struct pci_dev *pdev = box->pci_dev;
box 2460 arch/x86/events/intel/uncore_snbep.c static int hswep_ubox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
box 2588 arch/x86/events/intel/uncore_snbep.c hswep_cbox_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
box 2590 arch/x86/events/intel/uncore_snbep.c return __snbep_cbox_get_constraint(box, event, hswep_cbox_filter_mask);
box 2593 arch/x86/events/intel/uncore_snbep.c static int hswep_cbox_hw_config(struct intel_uncore_box *box, struct perf_event *event)
box 2607 arch/x86/events/intel/uncore_snbep.c HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
box 2614 arch/x86/events/intel/uncore_snbep.c static void hswep_cbox_enable_event(struct intel_uncore_box *box,
box 2621 arch/x86/events/intel/uncore_snbep.c u64 filter = uncore_shared_reg_config(box, 0);
box 2660 arch/x86/events/intel/uncore_snbep.c static void hswep_uncore_sbox_msr_init_box(struct intel_uncore_box *box)
box 2662 arch/x86/events/intel/uncore_snbep.c unsigned msr = uncore_msr_box_ctl(box);
box 2710 arch/x86/events/intel/uncore_snbep.c static int hswep_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
box 2806 arch/x86/events/intel/uncore_snbep.c static u64 hswep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
box 2808 arch/x86/events/intel/uncore_snbep.c struct pci_dev *pdev = box->pci_dev;
box 3481 arch/x86/events/intel/uncore_snbep.c skx_cha_get_constraint(struct intel_uncore_box *box, struct perf_event *event)
box 3483 arch/x86/events/intel/uncore_snbep.c return __snbep_cbox_get_constraint(box, event, skx_cha_filter_mask);
box 3486 arch/x86/events/intel/uncore_snbep.c static int skx_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
box 3500 arch/x86/events/intel/uncore_snbep.c HSWEP_CBO_MSR_OFFSET * box->pmu->pmu_idx;
box 3561 arch/x86/events/intel/uncore_snbep.c static void skx_iio_enable_event(struct intel_uncore_box *box,
box 3815 arch/x86/events/intel/uncore_snbep.c static void skx_upi_uncore_pci_init_box(struct intel_uncore_box *box)
box 3817 arch/x86/events/intel/uncore_snbep.c struct pci_dev *pdev = box->pci_dev;
box 3819 arch/x86/events/intel/uncore_snbep.c __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
box 3846 arch/x86/events/intel/uncore_snbep.c static void skx_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
box 3848 arch/x86/events/intel/uncore_snbep.c struct pci_dev *pdev = box->pci_dev;
box 3850 arch/x86/events/intel/uncore_snbep.c __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
box 4067 arch/x86/events/intel/uncore_snbep.c static int snr_cha_hw_config(struct intel_uncore_box *box, struct perf_event *event)
box 4072 arch/x86/events/intel/uncore_snbep.c box->pmu->type->msr_offset * box->pmu->pmu_idx;
box 4079 arch/x86/events/intel/uncore_snbep.c static void snr_cha_enable_event(struct intel_uncore_box *box,
box 4175 arch/x86/events/intel/uncore_snbep.c static int snr_pcu_hw_config(struct intel_uncore_box *box, struct perf_event *event)
box 4280 arch/x86/events/intel/uncore_snbep.c static void snr_m2m_uncore_pci_init_box(struct intel_uncore_box *box)
box 4282 arch/x86/events/intel/uncore_snbep.c struct pci_dev *pdev = box->pci_dev;
box 4283 arch/x86/events/intel/uncore_snbep.c int box_ctl = uncore_pci_box_ctl(box);
box 4285 arch/x86/events/intel/uncore_snbep.c __set_bit(UNCORE_BOX_FLAG_CTL_OFFS8, &box->flags);
box 4383 arch/x86/events/intel/uncore_snbep.c static void snr_uncore_mmio_init_box(struct intel_uncore_box *box)
box 4385 arch/x86/events/intel/uncore_snbep.c struct pci_dev *pdev = snr_uncore_get_mc_dev(box->dieid);
box 4386 arch/x86/events/intel/uncore_snbep.c unsigned int box_ctl = uncore_mmio_box_ctl(box);
box 4401 arch/x86/events/intel/uncore_snbep.c box->io_addr = ioremap(addr, SNR_IMC_MMIO_SIZE);
box 4402 arch/x86/events/intel/uncore_snbep.c if (!box->io_addr)
box 4405 arch/x86/events/intel/uncore_snbep.c writel(IVBEP_PMON_BOX_CTL_INT, box->io_addr);
box 4408 arch/x86/events/intel/uncore_snbep.c static void snr_uncore_mmio_disable_box(struct intel_uncore_box *box)
box 4412 arch/x86/events/intel/uncore_snbep.c if (!box->io_addr)
box 4415 arch/x86/events/intel/uncore_snbep.c config = readl(box->io_addr);
box 4417 arch/x86/events/intel/uncore_snbep.c writel(config, box->io_addr);
box 4420 arch/x86/events/intel/uncore_snbep.c static void snr_uncore_mmio_enable_box(struct intel_uncore_box *box)
box 4424 arch/x86/events/intel/uncore_snbep.c if (!box->io_addr)
box 4427 arch/x86/events/intel/uncore_snbep.c config = readl(box->io_addr);
box 4429 arch/x86/events/intel/uncore_snbep.c writel(config, box->io_addr);
box 4432 arch/x86/events/intel/uncore_snbep.c static void snr_uncore_mmio_enable_event(struct intel_uncore_box *box,
box 4437 arch/x86/events/intel/uncore_snbep.c if (!box->io_addr)
box 4441 arch/x86/events/intel/uncore_snbep.c box->io_addr + hwc->config_base);
box 4444 arch/x86/events/intel/uncore_snbep.c static void snr_uncore_mmio_disable_event(struct intel_uncore_box *box,
box 4449 arch/x86/events/intel/uncore_snbep.c if (!box->io_addr)
box 4452 arch/x86/events/intel/uncore_snbep.c writel(hwc->config, box->io_addr + hwc->config_base);
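The snr_uncore_mmio_* hits close the arch/x86 section with the write side of the MMIO scheme: control registers share the ioremapped window with the counters, so programming an event is a writel at hwc->config_base, again guarded by io_addr. A sketch of the disable path (per the hits above, enable differs only in the config value written; the helper name is illustrative):

static void sketch_mmio_disable_event(struct intel_uncore_box *box,
				      struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!box->io_addr)
		return;

	writel(hwc->config, box->io_addr + hwc->config_base);
}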
box 719 drivers/gpu/drm/i810/i810_dma.c struct drm_clip_rect *box = sarea_priv->boxes;
box 758 drivers/gpu/drm/i810/i810_dma.c OUT_RING(box[i].x1 | (box[i].y1 << 16));
box 759 drivers/gpu/drm/i810/i810_dma.c OUT_RING((box[i].x2 -
box 760 drivers/gpu/drm/i810/i810_dma.c 1) | ((box[i].y2 - 1) << 16));
box 42 drivers/gpu/drm/mga/mga_state.c struct drm_clip_rect *box)
box 60 drivers/gpu/drm/mga/mga_state.c MGA_CXBNDRY, ((box->x2 - 1) << 16) | box->x1,
box 61 drivers/gpu/drm/mga/mga_state.c MGA_YTOP, box->y1 * pitch, MGA_YBOT, (box->y2 - 1) * pitch);
box 500 drivers/gpu/drm/mga/mga_state.c struct drm_clip_rect *box = &pbox[i];
box 501 drivers/gpu/drm/mga/mga_state.c u32 height = box->y2 - box->y1;
box 504 drivers/gpu/drm/mga/mga_state.c box->x1, box->y1, box->x2, box->y2);
box 511 drivers/gpu/drm/mga/mga_state.c MGA_YDSTLEN, (box->y1 << 16) | height,
box 512 drivers/gpu/drm/mga/mga_state.c MGA_FXBNDRY, (box->x2 << 16) | box->x1);
box 527 drivers/gpu/drm/mga/mga_state.c MGA_YDSTLEN, (box->y1 << 16) | height,
box 528 drivers/gpu/drm/mga/mga_state.c MGA_FXBNDRY, (box->x2 << 16) | box->x1);
box 543 drivers/gpu/drm/mga/mga_state.c MGA_YDSTLEN, (box->y1 << 16) | height,
box 544 drivers/gpu/drm/mga/mga_state.c MGA_FXBNDRY, (box->x2 << 16) | box->x1);
box 598 drivers/gpu/drm/mga/mga_state.c struct drm_clip_rect *box = &pbox[i];
box 599 drivers/gpu/drm/mga/mga_state.c u32 height = box->y2 - box->y1;
box 600 drivers/gpu/drm/mga/mga_state.c u32 start = box->y1 * dev_priv->front_pitch;
box 603 drivers/gpu/drm/mga/mga_state.c box->x1, box->y1, box->x2, box->y2);
box 605 drivers/gpu/drm/mga/mga_state.c DMA_BLOCK(MGA_AR0, start + box->x2 - 1,
box 606 drivers/gpu/drm/mga/mga_state.c MGA_AR3, start + box->x1,
box 607 drivers/gpu/drm/mga/mga_state.c MGA_FXBNDRY, ((box->x2 - 1) << 16) | box->x1,
box 608 drivers/gpu/drm/mga/mga_state.c MGA_YDSTLEN + MGA_EXEC, (box->y1 << 16) | height);
box 308 drivers/gpu/drm/virtio/virtgpu_drv.h struct virtio_gpu_box *box,
box 314 drivers/gpu/drm/virtio/virtgpu_drv.h struct virtio_gpu_box *box,
box 369 drivers/gpu/drm/virtio/virtgpu_ioctl.c struct virtio_gpu_box box;
box 388 drivers/gpu/drm/virtio/virtgpu_ioctl.c convert_to_hw_box(&box, &args->box);
box 398 drivers/gpu/drm/virtio/virtgpu_ioctl.c &box, fence);
box 420 drivers/gpu/drm/virtio/virtgpu_ioctl.c struct virtio_gpu_box box;
box 438 drivers/gpu/drm/virtio/virtgpu_ioctl.c convert_to_hw_box(&box, &args->box);
box 442 drivers/gpu/drm/virtio/virtgpu_ioctl.c box.w, box.h, box.x, box.y, NULL);
box 452 drivers/gpu/drm/virtio/virtgpu_ioctl.c args->level, &box, fence);
box 894 drivers/gpu/drm/virtio/virtgpu_vq.c struct virtio_gpu_box *box,
box 912 drivers/gpu/drm/virtio/virtgpu_vq.c cmd_p->box = *box;
box 922 drivers/gpu/drm/virtio/virtgpu_vq.c struct virtio_gpu_box *box,
box 934 drivers/gpu/drm/virtio/virtgpu_vq.c cmd_p->box = *box;
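The convert_to_hw_box() hits above mark the uapi boundary in virtio-gpu: drm_virtgpu_3d_box arrives CPU-endian from the ioctl, while the virtio_gpu_box copied into the command (cmd_p->box = *box) must be little-endian on the ring. A sketch of the conversion, assuming the field-for-field x/y/z/w/h/d layout of the uapi headers listed further down (the helper name is illustrative):

static void sketch_convert_to_hw_box(struct virtio_gpu_box *dst,
				     const struct drm_virtgpu_3d_box *src)
{
	dst->x = cpu_to_le32(src->x);
	dst->y = cpu_to_le32(src->y);
	dst->z = cpu_to_le32(src->z);
	dst->w = cpu_to_le32(src->w);	/* extent of the transfer region */
	dst->h = cpu_to_le32(src->h);
	dst->d = cpu_to_le32(src->d);
}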
box 501 drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h SVGA3dCopyBox box;
box 1786 drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h SVGA3dBox box;
box 1841 drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h SVGA3dBox box;
box 1885 drivers/gpu/drm/vmwgfx/device_include/svga3d_cmd.h SVGA3dBox box;
box 618 drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h SVGA3dCopyBox box;
box 737 drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h SVGA3dBox box;
box 815 drivers/gpu/drm/vmwgfx/device_include/svga3d_dx.h SVGA3dCopyBox box;
box 152 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c SVGA3dCopyBox *box;
box 178 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c box = (SVGA3dCopyBox *)&cmd[1];
box 183 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c box->x != 0 || box->y != 0 || box->z != 0 ||
box 184 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c box->srcx != 0 || box->srcy != 0 || box->srcz != 0 ||
box 185 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c box->d != 1 || box_count != 1) {
box 191 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c box->srcx, box->srcy, box->srcz,
box 192 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c box->x, box->y, box->z,
box 193 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c box->w, box->h, box->d, box_count,
box 213 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c if (box->w == 64 && cmd->dma.guest.pitch == 64*4) {
box 217 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c for (i = 0; i < box->h; i++)
box 220 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c box->w * 4);
box 2605 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c SVGA3dBox *box;
box 2617 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c box = &cmd->body.box;
box 2631 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c box->x = clips->x1;
box 2632 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c box->y = clips->y1;
box 2633 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c box->z = 0;
box 2634 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c box->w = clips->x2 - clips->x1;
box 2635 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c box->h = clips->y2 - clips->y1;
box 2636 drivers/gpu/drm/vmwgfx/vmwgfx_kms.c box->d = 1;
box 1227 drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c struct SVGA3dCopyBox *box = cmd;
box 1229 drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c box->srcx = fb_x;
box 1230 drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c box->srcy = fb_y;
box 1231 drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c box->srcz = 0;
box 1232 drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c box->x = clip->x1;
box 1233 drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c box->y = clip->y1;
box 1234 drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c box->z = 0;
box 1235 drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c box->w = drm_rect_width(clip);
box 1236 drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c box->h = drm_rect_height(clip);
box 1237 drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c box->d = 1;
box 1239 drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c return sizeof(*box);
box 1324 drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c SVGA3dBox *box = &cmd_img->body.box;
box 1332 drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c box->x = diff.rect.x1;
box 1333 drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c box->y = diff.rect.y1;
box 1334 drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c box->z = 0;
box 1335 drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c box->w = drm_rect_width(&diff.rect);
box 1336 drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c box->h = drm_rect_height(&diff.rect);
box 1337 drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c box->d = 1;
box 1453 drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c SVGA3dBox *box = &cmd_update->body.box;
box 1461 drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c box->x = clip.x1;
box 1462 drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c box->y = clip.y1;
box 1463 drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c box->z = 0;
box 1464 drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c box->w = drm_rect_width(&clip);
box 1465 drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c box->h = drm_rect_height(&clip);
box 1466 drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c box->d = 1;
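The vmwgfx hits repeat one conversion: a 2-D drm_rect clip becomes a 3-D SVGA3dBox with z pinned to 0 and depth pinned to 1. A sketch of that recurring fill (the standalone helper is illustrative; vmwgfx_stdu.c open-codes it at each call site):

static void sketch_clip_to_box(SVGA3dBox *box, const struct drm_rect *clip)
{
	box->x = clip->x1;
	box->y = clip->y1;
	box->z = 0;				/* 2-D surface: single slice */
	box->w = drm_rect_width(clip);
	box->h = drm_rect_height(clip);
	box->d = 1;
}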
box 1500 drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c struct SVGA3dCopyBox *box = cmd;
box 1502 drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c box->srcx = fb_x;
box 1503 drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c box->srcy = fb_y;
box 1504 drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c box->srcz = 0;
box 1505 drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c box->x = clip->x1;
box 1506 drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c box->y = clip->y1;
box 1507 drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c box->z = 0;
box 1508 drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c box->w = drm_rect_width(clip);
box 1509 drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c box->h = drm_rect_height(clip);
box 1510 drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c box->d = 1;
box 1512 drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c return sizeof(*box);
box 89 drivers/net/wireless/realtek/rtw88/fw.c u8 box;
box 102 drivers/net/wireless/realtek/rtw88/fw.c box = rtwdev->h2c.last_box_num;
box 103 drivers/net/wireless/realtek/rtw88/fw.c switch (box) {
box 128 drivers/net/wireless/realtek/rtw88/fw.c } while ((box_state >> box) & 0x1 && --h2c_wait > 0);
box 805 drivers/scsi/hpsa.c u8 box;
box 844 drivers/scsi/hpsa.c box = hdev->box[i];
box 857 drivers/scsi/hpsa.c if (box == 0 || box == 0xFF) {
box 866 drivers/scsi/hpsa.c box, bay, active);
box 868 drivers/scsi/hpsa.c } else if (box != 0 && box != 0xFF) {
box 871 drivers/scsi/hpsa.c box, active);
box 3496 drivers/scsi/hpsa.c encl_dev->box[id_phys->active_path_number] = bssbp->phys_box_on_port;
box 4198 drivers/scsi/hpsa.c memcpy(&this_device->box,
box 4200 drivers/scsi/hpsa.c sizeof(this_device->box));
box 89 drivers/scsi/hpsa.h u8 box[8];
box 936 drivers/scsi/smartpqi/smartpqi.h u8 box[8];
box 1363 drivers/scsi/smartpqi/smartpqi_init.c memcpy(&device->box,
box 1365 drivers/scsi/smartpqi/smartpqi_init.c sizeof(device->box));
box 1688 drivers/scsi/smartpqi/smartpqi_init.c memcpy(existing_device->box, new_device->box,
box 1689 drivers/scsi/smartpqi/smartpqi_init.c sizeof(existing_device->box));
box 6309 drivers/scsi/smartpqi/smartpqi_init.c u8 box;
box 6360 drivers/scsi/smartpqi/smartpqi_init.c box = device->box[i];
box 6361 drivers/scsi/smartpqi/smartpqi_init.c if (box != 0 && box != 0xFF)
box 6364 drivers/scsi/smartpqi/smartpqi_init.c "BOX: %hhu ", box);
box 117 include/uapi/drm/virtgpu_drm.h struct drm_virtgpu_3d_box box;
box 124 include/uapi/drm/virtgpu_drm.h struct drm_virtgpu_3d_box box;
box 220 include/uapi/linux/virtio_gpu.h struct virtio_gpu_box box;
box 98 lib/packing.c int plogical_first_u8, plogical_last_u8, box;
box 131 lib/packing.c for (box = plogical_first_u8; box >= plogical_last_u8; box--) {
box 143 lib/packing.c if (box == plogical_first_u8)
box 147 lib/packing.c if (box == plogical_last_u8)
box 159 lib/packing.c proj_start_bit = ((box * 8) + box_start_bit) - endbit;
box 160 lib/packing.c proj_end_bit = ((box * 8) + box_end_bit) - endbit;
box 169 lib/packing.c box_addr = pbuflen - box - 1;
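In lib/packing.c the "box" is a byte of the packed buffer: the loop above walks from the logical first byte to the last, projecting the requested bit field onto each one and applying the endianness quirks when computing box_addr. A hedged usage sketch, assuming the packing() entry point declared in <linux/packing.h> (value and bit positions are illustrative):

#include <linux/packing.h>

/* Pack the value 0x2a into bits 47..32 of an 8-byte buffer. */
static int sketch_pack_field(void *buf, size_t buflen)
{
	u64 val = 0x2a;

	return packing(buf, &val, 47, 32, buflen, PACK, 0 /* no quirks */);
}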
box 207 scripts/kconfig/lxdialog/dialog.h void draw_box(WINDOW * win, int y, int x, int height, int width, chtype box,
box 26 scripts/kconfig/lxdialog/textbox.c static void refresh_text_box(WINDOW *dialog, WINDOW *box, int boxh, int boxw,
box 30 scripts/kconfig/lxdialog/textbox.c print_page(box, boxh, boxw, update_text, data);
box 49 scripts/kconfig/lxdialog/textbox.c WINDOW *dialog, *box;
box 99 scripts/kconfig/lxdialog/textbox.c box = subwin(dialog, boxh, boxw, y + 1, x + 1);
box 100 scripts/kconfig/lxdialog/textbox.c wattrset(box, dlg.dialog.atr);
box 101 scripts/kconfig/lxdialog/textbox.c wbkgdset(box, dlg.dialog.atr & A_COLOR);
box 103 scripts/kconfig/lxdialog/textbox.c keypad(box, TRUE);
box 124 scripts/kconfig/lxdialog/textbox.c attr_clear(box, boxh, boxw, dlg.dialog.atr);
box 125 scripts/kconfig/lxdialog/textbox.c refresh_text_box(dialog, box, boxh, boxw, cur_y, cur_x, update_text,
box 144 scripts/kconfig/lxdialog/textbox.c refresh_text_box(dialog, box, boxh, boxw,
box 156 scripts/kconfig/lxdialog/textbox.c refresh_text_box(dialog, box, boxh, boxw, cur_y,
box 166 scripts/kconfig/lxdialog/textbox.c refresh_text_box(dialog, box, boxh, boxw, cur_y,
box 176 scripts/kconfig/lxdialog/textbox.c refresh_text_box(dialog, box, boxh, boxw, cur_y,
box 186 scripts/kconfig/lxdialog/textbox.c refresh_text_box(dialog, box, boxh, boxw, cur_y,
box 196 scripts/kconfig/lxdialog/textbox.c refresh_text_box(dialog, box, boxh, boxw, cur_y,
box 212 scripts/kconfig/lxdialog/textbox.c refresh_text_box(dialog, box, boxh, boxw, cur_y,
box 223 scripts/kconfig/lxdialog/textbox.c refresh_text_box(dialog, box, boxh, boxw, cur_y,
box 232 scripts/kconfig/lxdialog/textbox.c delwin(box);
box 245 scripts/kconfig/lxdialog/textbox.c delwin(box);
box 465 scripts/kconfig/lxdialog/util.c chtype box, chtype border)
box 478 scripts/kconfig/lxdialog/util.c waddch(win, box | ACS_URCORNER);
box 480 scripts/kconfig/lxdialog/util.c waddch(win, box | ACS_LRCORNER);
box 484 scripts/kconfig/lxdialog/util.c waddch(win, box | ACS_HLINE);
box 488 scripts/kconfig/lxdialog/util.c waddch(win, box | ACS_VLINE);
box 490 scripts/kconfig/lxdialog/util.c waddch(win, box | ' ');
box 965 scripts/kconfig/nconf.c box(main_window, 0, 0);
box 301 scripts/kconfig/nconf.gui.c box(win, 0, 0);
box 411 scripts/kconfig/nconf.gui.c box(win, 0, 0);
box 595 scripts/kconfig/nconf.gui.c box(win, 0, 0);
box 68 sound/pci/ice1712/hoontech.c static void snd_ice1712_stdsp24_box_channel(struct snd_ice1712 *ice, int box, int chn, int activate)
box 75 sound/pci/ice1712/hoontech.c ICE1712_STDSP24_0_BOX(spec->boxbits, box);
box 118 sound/pci/ice1712/hoontech.c static void snd_ice1712_stdsp24_box_midi(struct snd_ice1712 *ice, int box, int master)
box 125 sound/pci/ice1712/hoontech.c ICE1712_STDSP24_0_BOX(spec->boxbits, box);
box 158 sound/pci/ice1712/hoontech.c int box, chn;
box 234 sound/pci/ice1712/hoontech.c for (box = 0; box < 4; box++) {
box 235 sound/pci/ice1712/hoontech.c if (spec->boxconfig[box] & ICE1712_STDSP24_BOX_MIDI2)
box 238 sound/pci/ice1712/hoontech.c snd_ice1712_stdsp24_box_channel(ice, box, chn,
box 239 sound/pci/ice1712/hoontech.c (spec->boxconfig[box] & (1 << chn)) ? 1 : 0);
box 240 sound/pci/ice1712/hoontech.c if (spec->boxconfig[box] & ICE1712_STDSP24_BOX_MIDI1)
box 241 sound/pci/ice1712/hoontech.c snd_ice1712_stdsp24_box_midi(ice, box, 1);
box 291 tools/thermal/tmon/tui.c box(w, 0, 0);
box 492 tools/thermal/tmon/tui.c box(cooling_device_window, 10, 0);
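draw_box() above is the lxdialog primitive behind these frames; its two chtype arguments carry the attribute for the box interior and for its border, OR-ed into each character as the util.c hits show. A usage sketch in the style of the textbox code (the dlg attribute fields are assumed from the lxdialog colour table and are illustrative):

#include <curses.h>
#include "dialog.h"

/* Frame a dialog window using the lxdialog colour table. */
static void sketch_frame_window(WINDOW *win, int height, int width)
{
	draw_box(win, 0, 0, height, width,
		 dlg.dialog.atr, dlg.border.atr);
}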