Lines matching refs: bcp
128 struct bau_control *bcp; in set_bau_on() local
136 bcp = &per_cpu(bau_control, cpu); in set_bau_on()
137 bcp->nobau = 0; in set_bau_on()
147 struct bau_control *bcp; in set_bau_off() local
151 bcp = &per_cpu(bau_control, cpu); in set_bau_off()
152 bcp->nobau = 1; in set_bau_off()
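
The two groups above are the enable/disable toggles: set_bau_on() and set_bau_off() walk every CPU's bau_control and flip its nobau flag (the listing appears to come from the Linux UV BAU TLB-shootdown driver). A minimal userspace sketch of that per-CPU pattern, with a flat array standing in for the kernel's per_cpu() accessor and NR_CPUS_MODEL as an invented size:

    /* Minimal sketch, not kernel code: a flat array models per_cpu(bau_control, cpu). */
    #include <stdio.h>

    #define NR_CPUS_MODEL 8                     /* invented CPU count */

    struct bau_control_model {
            int nobau;                          /* 1 = this CPU bypasses the BAU */
    };

    static struct bau_control_model bau_ctl[NR_CPUS_MODEL];

    static void model_set_bau(int nobau)        /* mirrors set_bau_on()/set_bau_off() */
    {
            int cpu;

            for (cpu = 0; cpu < NR_CPUS_MODEL; cpu++)
                    bau_ctl[cpu].nobau = nobau; /* bcp->nobau = 0 or 1, per CPU */
    }

    int main(void)
    {
            model_set_bau(1);                   /* like set_bau_off() */
            model_set_bau(0);                   /* like set_bau_on()  */
            printf("cpu0 nobau=%d\n", bau_ctl[0].nobau);
            return 0;
    }
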
195 static void reply_to_message(struct msg_desc *mdp, struct bau_control *bcp, in reply_to_message() argument
214 struct bau_control *bcp) in bau_process_retry_msg() argument
222 struct ptc_stats *stat = bcp->statp; in bau_process_retry_msg()
270 static void bau_process_message(struct msg_desc *mdp, struct bau_control *bcp, in bau_process_message() argument
276 struct ptc_stats *stat = bcp->statp; in bau_process_message()
278 struct bau_control *smaster = bcp->socket_master; in bau_process_message()
298 if (msg->msg_type == MSG_RETRY && bcp == bcp->uvhub_master) in bau_process_message()
299 bau_process_retry_msg(mdp, bcp); in bau_process_message()
310 if (socket_ack_count == bcp->cpus_in_socket) { in bau_process_message()
320 if (msg_ack_count == bcp->cpus_in_uvhub) { in bau_process_message()
325 reply_to_message(mdp, bcp, do_acknowledge); in bau_process_message()
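
bau_process_message() accounts for acknowledgements in two levels, as lines 310 and 320 show: each receiving CPU bumps a per-socket counter; when that reaches cpus_in_socket the socket's share is added to the hub-wide count, and when the hub-wide count reaches cpus_in_uvhub the message is answered via reply_to_message(). A schematic sketch of that accounting, ignoring the atomics the kernel uses and with invented type names:

    /* Sketch of the socket-then-hub acknowledge accounting; the kernel does
     * the increments with atomic_short operations, omitted here. */
    struct model_socket {
            int acks;                   /* CPUs in this socket that handled the msg */
    };

    struct model_msg {
            int acknowledge_count;      /* hub-wide total, cf. msg->acknowledge_count */
    };

    struct model_ctl {
            int cpus_in_socket;
            int cpus_in_uvhub;
    };

    /* Returns 1 when every CPU on the hub has handled the message, i.e. when
     * reply_to_message() would acknowledge it back to the sender. */
    static int model_process_message(struct model_msg *msg, struct model_socket *sock,
                                     struct model_ctl *bcp)
    {
            if (++sock->acks == bcp->cpus_in_socket) {
                    /* last CPU of this socket: credit the whole socket to the hub */
                    msg->acknowledge_count += sock->acks;
                    if (msg->acknowledge_count == bcp->cpus_in_uvhub)
                            return 1;
            }
            return 0;
    }
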
361 struct bau_control *bcp = &per_cpu(bau_control, smp_processor_id()); in do_reset() local
364 struct ptc_stats *stat = bcp->statp; in do_reset()
373 for (msg = bcp->queue_first, i = 0; i < DEST_Q_SIZE; msg++, i++) { in do_reset()
407 static void reset_with_ipi(struct pnmask *distribution, struct bau_control *bcp) in reset_with_ipi() argument
412 int sender = bcp->cpu; in reset_with_ipi()
413 cpumask_t *mask = bcp->uvhub_master->cpumask; in reset_with_ipi()
414 struct bau_control *smaster = bcp->socket_master; in reset_with_ipi()
426 apnode = pnode + bcp->partition_base_pnode; in reset_with_ipi()
515 struct bau_control *bcp, long try) in uv1_wait_completion() argument
519 struct ptc_stats *stat = bcp->statp; in uv1_wait_completion()
543 if (cycles_2_us(ttm - bcp->send_message) < timeout_us) { in uv1_wait_completion()
544 bcp->conseccompletes = 0; in uv1_wait_completion()
548 bcp->conseccompletes = 0; in uv1_wait_completion()
558 bcp->conseccompletes++; in uv1_wait_completion()
583 int normal_busy(struct bau_control *bcp) in normal_busy() argument
585 int cpu = bcp->uvhub_cpu; in normal_busy()
600 int handle_uv2_busy(struct bau_control *bcp) in handle_uv2_busy() argument
602 struct ptc_stats *stat = bcp->statp; in handle_uv2_busy()
605 bcp->busy = 1; in handle_uv2_busy()
611 struct bau_control *bcp, long try) in uv2_3_wait_completion() argument
615 int desc = bcp->uvhub_cpu; in uv2_3_wait_completion()
617 struct ptc_stats *stat = bcp->statp; in uv2_3_wait_completion()
645 if (cycles_2_us(ttm - bcp->send_message) < timeout_us) { in uv2_3_wait_completion()
646 bcp->conseccompletes = 0; in uv2_3_wait_completion()
652 bcp->conseccompletes = 0; in uv2_3_wait_completion()
661 if ((ttm - bcp->send_message) > bcp->timeout_interval) in uv2_3_wait_completion()
662 return handle_uv2_busy(bcp); in uv2_3_wait_completion()
671 bcp->conseccompletes++; in uv2_3_wait_completion()
680 static int wait_completion(struct bau_desc *bau_desc, struct bau_control *bcp, long try) in wait_completion() argument
684 int desc = bcp->uvhub_cpu; in wait_completion()
694 if (bcp->uvhub_version == 1) in wait_completion()
695 return uv1_wait_completion(bau_desc, mmr_offset, right_shift, bcp, try); in wait_completion()
697 return uv2_3_wait_completion(bau_desc, mmr_offset, right_shift, bcp, try); in wait_completion()
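
wait_completion() is only a dispatcher: UV1 hubs poll through uv1_wait_completion(), UV2 and UV3 share uv2_3_wait_completion(), and both pollers zero conseccompletes on a retry and bump it on success (lines 544-558 and 646-671). A reduced sketch of that dispatch, with the streak bookkeeping folded into the dispatcher for brevity and the status codes invented:

    /* Sketch only: the real pollers read descriptor-status MMRs; here they are
     * stubs, the status codes are invented, and the conseccompletes streak is
     * kept in the dispatcher instead of inside each poller. */
    enum model_status { M_COMPLETE, M_RETRY_PLUGGED, M_RETRY_TIMEOUT, M_GIVEUP };

    struct model_bcp {
            int uvhub_version;          /* 1 = UV1, 2 = UV2, 3 = UV3 */
            int conseccompletes;
    };

    static enum model_status model_uv1_wait(struct model_bcp *bcp)   { return M_COMPLETE; }
    static enum model_status model_uv2_3_wait(struct model_bcp *bcp) { return M_COMPLETE; }

    static enum model_status model_wait_completion(struct model_bcp *bcp)
    {
            enum model_status st;

            if (bcp->uvhub_version == 1)        /* UV1 has its own poller     */
                    st = model_uv1_wait(bcp);
            else                                /* UV2 and UV3 share a poller */
                    st = model_uv2_3_wait(bcp);

            if (st == M_COMPLETE)
                    bcp->conseccompletes++;     /* cf. bcp->conseccompletes++  */
            else
                    bcp->conseccompletes = 0;   /* any retry breaks the streak */
            return st;
    }
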
706 struct bau_control *bcp, in destination_plugged() argument
709 udelay(bcp->plugged_delay); in destination_plugged()
710 bcp->plugged_tries++; in destination_plugged()
712 if (bcp->plugged_tries >= bcp->plugsb4reset) { in destination_plugged()
713 bcp->plugged_tries = 0; in destination_plugged()
718 reset_with_ipi(&bau_desc->distribution, bcp); in destination_plugged()
723 bcp->ipi_attempts++; in destination_plugged()
729 struct bau_control *bcp, struct bau_control *hmaster, in destination_timeout() argument
733 bcp->timeout_tries++; in destination_timeout()
734 if (bcp->timeout_tries >= bcp->timeoutsb4reset) { in destination_timeout()
735 bcp->timeout_tries = 0; in destination_timeout()
740 reset_with_ipi(&bau_desc->distribution, bcp); in destination_timeout()
745 bcp->ipi_attempts++; in destination_timeout()
754 static void disable_for_period(struct bau_control *bcp, struct ptc_stats *stat) in disable_for_period() argument
761 hmaster = bcp->uvhub_master; in disable_for_period()
763 if (!bcp->baudisabled) { in disable_for_period()
771 tm1 + bcp->disabled_period; in disable_for_period()
778 static void count_max_concurr(int stat, struct bau_control *bcp, in count_max_concurr() argument
781 bcp->plugged_tries = 0; in count_max_concurr()
782 bcp->timeout_tries = 0; in count_max_concurr()
785 if (bcp->conseccompletes <= bcp->complete_threshold) in count_max_concurr()
793 struct bau_control *bcp, struct ptc_stats *stat, in record_send_stats() argument
803 bcp->period_requests++; in record_send_stats()
804 bcp->period_time += elapsed; in record_send_stats()
806 (bcp->period_requests > bcp->cong_reps) && in record_send_stats()
807 ((bcp->period_time / bcp->period_requests) > in record_send_stats()
810 disable_for_period(bcp, stat); in record_send_stats()
820 if (get_cycles() > bcp->period_end) in record_send_stats()
821 bcp->period_giveups = 0; in record_send_stats()
822 bcp->period_giveups++; in record_send_stats()
823 if (bcp->period_giveups == 1) in record_send_stats()
824 bcp->period_end = get_cycles() + bcp->disabled_period; in record_send_stats()
825 if (bcp->period_giveups > bcp->giveup_limit) { in record_send_stats()
826 disable_for_period(bcp, stat); in record_send_stats()
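
record_send_stats() holds the congestion heuristic: it accumulates period_requests and period_time, and once more than cong_reps requests have averaged above the congested threshold it calls disable_for_period(); the giveup branch likewise counts period_giveups inside a disabled_period window against giveup_limit. A compact model of the averaging check, with congested_cycles standing in for the kernel's threshold tunable:

    /* Sketch of the congestion check; the struct and congested_cycles are
     * stand-ins, not the kernel's definitions. */
    struct model_stats_bcp {
            unsigned long period_requests;      /* requests in the current window   */
            unsigned long period_time;          /* cycles spent on those requests   */
            unsigned long cong_reps;            /* minimum samples before judging   */
            unsigned long congested_cycles;     /* "too slow" threshold per request */
            int baudisabled;
    };

    static void model_record_send(struct model_stats_bcp *bcp, unsigned long elapsed)
    {
            bcp->period_requests++;
            bcp->period_time += elapsed;

            if (bcp->period_requests > bcp->cong_reps &&
                bcp->period_time / bcp->period_requests > bcp->congested_cycles)
                    bcp->baudisabled = 1;       /* cf. disable_for_period(bcp, stat) */
    }
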
854 struct bau_control *bcp, struct bau_control *hmaster, in handle_cmplt() argument
858 destination_plugged(bau_desc, bcp, hmaster, stat); in handle_cmplt()
860 destination_timeout(bau_desc, bcp, hmaster, stat); in handle_cmplt()
873 int uv_flush_send_and_wait(struct cpumask *flush_mask, struct bau_control *bcp, in uv_flush_send_and_wait() argument
883 struct ptc_stats *stat = bcp->statp; in uv_flush_send_and_wait()
884 struct bau_control *hmaster = bcp->uvhub_master; in uv_flush_send_and_wait()
888 if (bcp->uvhub_version == 1) { in uv_flush_send_and_wait()
909 seq_number = bcp->message_number++; in uv_flush_send_and_wait()
922 index = (1UL << AS_PUSH_SHIFT) | bcp->uvhub_cpu; in uv_flush_send_and_wait()
923 bcp->send_message = get_cycles(); in uv_flush_send_and_wait()
928 completion_stat = wait_completion(bau_desc, bcp, try); in uv_flush_send_and_wait()
930 handle_cmplt(completion_stat, bau_desc, bcp, hmaster, stat); in uv_flush_send_and_wait()
932 if (bcp->ipi_attempts >= bcp->ipi_reset_limit) { in uv_flush_send_and_wait()
933 bcp->ipi_attempts = 0; in uv_flush_send_and_wait()
944 count_max_concurr(completion_stat, bcp, hmaster); in uv_flush_send_and_wait()
951 record_send_stats(time1, time2, bcp, stat, completion_stat, try); in uv_flush_send_and_wait()
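
uv_flush_send_and_wait() is the sender's retry loop: stamp send_message, fire the activation, wait_completion(), let handle_cmplt() treat plugged or timed-out destinations, and give up once ipi_attempts hits ipi_reset_limit; count_max_concurr() and record_send_stats() then update the statistics. A control-flow sketch with invented status codes and stubbed helpers:

    /* Control-flow sketch only; the helpers are stubs and the status codes are
     * invented, not the kernel's FLUSH_* constants. */
    enum m_stat { M_OK, M_PLUGGED, M_TIMEOUT, M_GIVE_UP };

    struct m_bcp {
            int ipi_attempts;
            int ipi_reset_limit;
            unsigned long send_message;         /* cycle stamp of the last send */
    };

    static unsigned long m_get_cycles(void)                       { return 0; }
    static enum m_stat m_wait_completion(struct m_bcp *bcp)       { return M_OK; }
    static void m_handle_cmplt(enum m_stat st, struct m_bcp *bcp) { }

    static int model_send_and_wait(struct m_bcp *bcp)
    {
            enum m_stat st;

            do {
                    bcp->send_message = m_get_cycles();  /* cf. bcp->send_message = get_cycles() */
                    st = m_wait_completion(bcp);
                    m_handle_cmplt(st, bcp);             /* plugged/timeout handling     */
                    if (bcp->ipi_attempts >= bcp->ipi_reset_limit) {
                            bcp->ipi_attempts = 0;       /* too many IPI resets: give up */
                            st = M_GIVE_UP;
                            break;
                    }
            } while (st == M_PLUGGED || st == M_TIMEOUT);

            return st == M_GIVE_UP;     /* non-zero tells the caller to fall back */
    }
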
964 static int check_enable(struct bau_control *bcp, struct ptc_stats *stat) in check_enable() argument
970 hmaster = bcp->uvhub_master; in check_enable()
972 if (bcp->baudisabled && (get_cycles() >= bcp->set_bau_on_time)) { in check_enable()
1024 static int set_distrib_bits(struct cpumask *flush_mask, struct bau_control *bcp, in set_distrib_bits() argument
1039 hpp = &bcp->socket_master->thp[cpu]; in set_distrib_bits()
1040 pnode = hpp->pnode - bcp->partition_base_pnode; in set_distrib_bits()
1043 if (hpp->uvhub == bcp->uvhub) in set_distrib_bits()
1090 struct bau_control *bcp; in uv_flush_tlb_others() local
1094 bcp = &per_cpu(bau_control, cpu); in uv_flush_tlb_others()
1096 if (bcp->nobau) in uv_flush_tlb_others()
1099 stat = bcp->statp; in uv_flush_tlb_others()
1102 if (bcp->busy) { in uv_flush_tlb_others()
1105 status = ((descriptor_status >> (bcp->uvhub_cpu * in uv_flush_tlb_others()
1109 bcp->busy = 0; in uv_flush_tlb_others()
1113 if (bcp->baudisabled) { in uv_flush_tlb_others()
1114 if (check_enable(bcp, stat)) { in uv_flush_tlb_others()
1132 bau_desc = bcp->descriptor_base; in uv_flush_tlb_others()
1133 bau_desc += (ITEMS_PER_DESC * bcp->uvhub_cpu); in uv_flush_tlb_others()
1135 if (set_distrib_bits(flush_mask, bcp, bau_desc, &locals, &remotes)) in uv_flush_tlb_others()
1149 if (!uv_flush_send_and_wait(flush_mask, bcp, bau_desc)) in uv_flush_tlb_others()
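
uv_flush_tlb_others() ties the send side together: fetch this CPU's bau_control, fall back to the conventional IPI flush when nobau or a disable period is in force, select this CPU's descriptor at descriptor_base + ITEMS_PER_DESC * uvhub_cpu, build the distribution with set_distrib_bits(), and hand off to uv_flush_send_and_wait(). A decision-path sketch in which a non-zero return stands in for "caller must flush by IPI" (the kernel returns a cpumask or NULL instead):

    /* Decision-path sketch; every type and helper below is a stand-in. */
    struct m_desc {
            int dummy;                          /* one activation descriptor */
    };

    struct m_flush_bcp {
            int nobau;
            int baudisabled;
            int uvhub_cpu;
            struct m_desc *descriptor_base;
    };

    #define M_ITEMS_PER_DESC 8                  /* placeholder for ITEMS_PER_DESC */

    static int m_check_enable(struct m_flush_bcp *bcp) { return 0; } /* non-zero: still disabled   */
    static int m_set_distrib_bits(struct m_desc *desc) { return 0; } /* non-zero: nothing to send  */
    static int m_send_and_wait(struct m_flush_bcp *bcp, struct m_desc *desc) { return 0; }

    /* Returns non-zero when the caller must do a conventional IPI flush itself;
     * the kernel expresses this by returning a cpumask instead. */
    static int model_flush_others(struct m_flush_bcp *bcp)
    {
            struct m_desc *bau_desc;

            if (bcp->nobau)
                    return 1;                   /* BAU bypassed for this CPU     */
            if (bcp->baudisabled && m_check_enable(bcp))
                    return 1;                   /* still inside a disable period */

            bau_desc = bcp->descriptor_base + M_ITEMS_PER_DESC * bcp->uvhub_cpu;
            if (m_set_distrib_bits(bau_desc))
                    return 0;                   /* no remote CPUs: nothing for the BAU to do */

            return m_send_and_wait(bcp, bau_desc);  /* 0 = hardware completed the flush */
    }
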
1160 struct bau_control *bcp) in find_another_by_swack() argument
1165 if (msg_next > bcp->queue_last) in find_another_by_swack()
1166 msg_next = bcp->queue_first; in find_another_by_swack()
1172 if (msg_next > bcp->queue_last) in find_another_by_swack()
1173 msg_next = bcp->queue_first; in find_another_by_swack()
1183 void process_uv2_message(struct msg_desc *mdp, struct bau_control *bcp) in process_uv2_message() argument
1203 other_msg = find_another_by_swack(msg, bcp); in process_uv2_message()
1209 bau_process_message(mdp, bcp, 0); in process_uv2_message()
1223 bau_process_message(mdp, bcp, 1); in process_uv2_message()
1247 struct bau_control *bcp; in uv_bau_message_interrupt() local
1254 bcp = &per_cpu(bau_control, smp_processor_id()); in uv_bau_message_interrupt()
1255 stat = bcp->statp; in uv_bau_message_interrupt()
1257 msgdesc.queue_first = bcp->queue_first; in uv_bau_message_interrupt()
1258 msgdesc.queue_last = bcp->queue_last; in uv_bau_message_interrupt()
1260 msg = bcp->bau_msg_head; in uv_bau_message_interrupt()
1266 if (bcp->uvhub_version == 2) in uv_bau_message_interrupt()
1267 process_uv2_message(&msgdesc, bcp); in uv_bau_message_interrupt()
1270 bau_process_message(&msgdesc, bcp, 1); in uv_bau_message_interrupt()
1275 bcp->bau_msg_head = msg; in uv_bau_message_interrupt()
1365 struct bau_control *bcp; in ptc_seq_show() local
1384 bcp = &per_cpu(bau_control, cpu); in ptc_seq_show()
1385 if (bcp->nobau) { in ptc_seq_show()
1389 stat = bcp->statp; in ptc_seq_show()
1393 cpu, bcp->nobau, stat->s_requestor, in ptc_seq_show()
1524 static int parse_tunables_write(struct bau_control *bcp, char *instr, in parse_tunables_write() argument
1558 if (val < 1 || val > bcp->cpus_in_uvhub) { in parse_tunables_write()
1589 struct bau_control *bcp; in tunables_write() local
1599 bcp = &per_cpu(bau_control, cpu); in tunables_write()
1600 ret = parse_tunables_write(bcp, instr, count); in tunables_write()
1606 bcp = &per_cpu(bau_control, cpu); in tunables_write()
1607 bcp->max_concurr = max_concurr; in tunables_write()
1608 bcp->max_concurr_const = max_concurr; in tunables_write()
1609 bcp->plugged_delay = plugged_delay; in tunables_write()
1610 bcp->plugsb4reset = plugsb4reset; in tunables_write()
1611 bcp->timeoutsb4reset = timeoutsb4reset; in tunables_write()
1612 bcp->ipi_reset_limit = ipi_reset_limit; in tunables_write()
1613 bcp->complete_threshold = complete_threshold; in tunables_write()
1614 bcp->cong_response_us = congested_respns_us; in tunables_write()
1615 bcp->cong_reps = congested_reps; in tunables_write()
1616 bcp->disabled_period = sec_2_cycles(disabled_period); in tunables_write()
1617 bcp->giveup_limit = giveup_limit; in tunables_write()
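
tunables_write() validates the new values against one CPU's bau_control in parse_tunables_write() (which, for example, bounds a value by cpus_in_uvhub at line 1558) and then copies the accepted globals into every CPU's bau_control, field by field as listed above; init_per_cpu_tunables() further down installs the same fields at boot. A sketch of the broadcast step, reduced to two representative tunables:

    /* Sketch of the "validate once, copy to every CPU" pattern; only two of the
     * tunables are modelled and a flat array replaces per_cpu(). */
    #define M_NCPUS 8                           /* invented CPU count */

    struct m_tune_bcp {
            int max_concurr;
            int plugged_delay;
    };

    static struct m_tune_bcp m_bau[M_NCPUS];

    /* Globals as left behind by the (not shown) parser, cf. max_concurr etc. */
    static int g_max_concurr = 2;
    static int g_plugged_delay = 10;

    static void model_tunables_broadcast(void)
    {
            int cpu;

            for (cpu = 0; cpu < M_NCPUS; cpu++) {
                    m_bau[cpu].max_concurr   = g_max_concurr;   /* bcp->max_concurr = max_concurr     */
                    m_bau[cpu].plugged_delay = g_plugged_delay; /* bcp->plugged_delay = plugged_delay */
            }
    }
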
1701 struct bau_control *bcp; in activation_descriptor_init() local
1761 bcp = &per_cpu(bau_control, cpu); in activation_descriptor_init()
1762 bcp->descriptor_base = bau_desc; in activation_descriptor_init()
1783 struct bau_control *bcp; in pq_init() local
1797 bcp = &per_cpu(bau_control, cpu); in pq_init()
1798 bcp->queue_first = pqp; in pq_init()
1799 bcp->bau_msg_head = pqp; in pq_init()
1800 bcp->queue_last = pqp + (DEST_Q_SIZE - 1); in pq_init()
1883 struct bau_control *bcp; in init_per_cpu_tunables() local
1886 bcp = &per_cpu(bau_control, cpu); in init_per_cpu_tunables()
1887 bcp->baudisabled = 0; in init_per_cpu_tunables()
1889 bcp->nobau = 1; in init_per_cpu_tunables()
1890 bcp->statp = &per_cpu(ptcstats, cpu); in init_per_cpu_tunables()
1892 bcp->timeout_interval = usec_2_cycles(2*timeout_us); in init_per_cpu_tunables()
1893 bcp->max_concurr = max_concurr; in init_per_cpu_tunables()
1894 bcp->max_concurr_const = max_concurr; in init_per_cpu_tunables()
1895 bcp->plugged_delay = plugged_delay; in init_per_cpu_tunables()
1896 bcp->plugsb4reset = plugsb4reset; in init_per_cpu_tunables()
1897 bcp->timeoutsb4reset = timeoutsb4reset; in init_per_cpu_tunables()
1898 bcp->ipi_reset_limit = ipi_reset_limit; in init_per_cpu_tunables()
1899 bcp->complete_threshold = complete_threshold; in init_per_cpu_tunables()
1900 bcp->cong_response_us = congested_respns_us; in init_per_cpu_tunables()
1901 bcp->cong_reps = congested_reps; in init_per_cpu_tunables()
1902 bcp->disabled_period = sec_2_cycles(disabled_period); in init_per_cpu_tunables()
1903 bcp->giveup_limit = giveup_limit; in init_per_cpu_tunables()
1904 spin_lock_init(&bcp->queue_lock); in init_per_cpu_tunables()
1905 spin_lock_init(&bcp->uvhub_lock); in init_per_cpu_tunables()
1906 spin_lock_init(&bcp->disable_lock); in init_per_cpu_tunables()
1921 struct bau_control *bcp; in get_cpu_topology() local
1926 bcp = &per_cpu(bau_control, cpu); in get_cpu_topology()
1928 memset(bcp, 0, sizeof(struct bau_control)); in get_cpu_topology()
1938 bcp->osnode = cpu_to_node(cpu); in get_cpu_topology()
1939 bcp->partition_base_pnode = base_pnode; in get_cpu_topology()
1951 socket = bcp->osnode & 1; in get_cpu_topology()
2002 struct bau_control *bcp; in scan_sock() local
2006 bcp = &per_cpu(bau_control, cpu); in scan_sock()
2007 bcp->cpu = cpu; in scan_sock()
2009 *smasterp = bcp; in scan_sock()
2011 *hmasterp = bcp; in scan_sock()
2013 bcp->cpus_in_uvhub = bdp->num_cpus; in scan_sock()
2014 bcp->cpus_in_socket = sdp->num_cpus; in scan_sock()
2015 bcp->socket_master = *smasterp; in scan_sock()
2016 bcp->uvhub = bdp->uvhub; in scan_sock()
2018 bcp->uvhub_version = 1; in scan_sock()
2020 bcp->uvhub_version = 2; in scan_sock()
2022 bcp->uvhub_version = 3; in scan_sock()
2027 bcp->uvhub_master = *hmasterp; in scan_sock()
2028 bcp->uvhub_cpu = uv_cpu_hub_info(cpu)->blade_processor_id; in scan_sock()
2029 if (bcp->uvhub_cpu >= MAX_CPUS_PER_UVHUB) { in scan_sock()
2031 bcp->uvhub_cpu); in scan_sock()
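
scan_sock() builds the control hierarchy: the first CPU seen on a socket becomes socket_master, the first on the hub becomes uvhub_master, and every bcp records both pointers along with cpus_in_socket, cpus_in_uvhub and the hub's uvhub_version; a uvhub_cpu at or above MAX_CPUS_PER_UVHUB is rejected. A sketch of the master election over one socket, with stand-in types:

    /* Sketch of the per-socket master election; the socket/hub descriptor
     * types and the calling convention are stand-ins. */
    struct m_scan_bcp {
            int cpu;
            int cpus_in_socket;
            int cpus_in_uvhub;
            struct m_scan_bcp *socket_master;
            struct m_scan_bcp *uvhub_master;
    };

    /* Called once per socket with that socket's CPUs; *hmasterp persists across
     * the sockets of one hub, so the hub's first CPU remains uvhub_master. */
    static void model_scan_sock(struct m_scan_bcp **cpus, int cpus_in_socket,
                                int cpus_in_hub, struct m_scan_bcp **hmasterp)
    {
            struct m_scan_bcp *smaster = NULL;
            int i;

            for (i = 0; i < cpus_in_socket; i++) {
                    struct m_scan_bcp *bcp = cpus[i];

                    if (!smaster)
                            smaster = bcp;      /* first CPU of the socket    */
                    if (!*hmasterp)
                            *hmasterp = bcp;    /* first CPU of the whole hub */

                    bcp->cpus_in_socket = cpus_in_socket;
                    bcp->cpus_in_uvhub  = cpus_in_hub;
                    bcp->socket_master  = smaster;
                    bcp->uvhub_master   = *hmasterp;
            }
    }
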