hsr               159 arch/arm/include/asm/kvm_emulate.h 	return vcpu->arch.fault.hsr;
hsr               164 arch/arm/include/asm/kvm_emulate.h 	u32 hsr = kvm_vcpu_get_hsr(vcpu);
hsr               166 arch/arm/include/asm/kvm_emulate.h 	if (hsr & HSR_CV)
hsr               167 arch/arm/include/asm/kvm_emulate.h 		return (hsr & HSR_COND) >> HSR_COND_SHIFT;
hsr                93 arch/arm/include/asm/kvm_host.h 	u32 hsr;		/* Hyp Syndrome Register */
hsr                82 arch/arm/kvm/handle_exit.c 	u32 hsr = kvm_vcpu_get_hsr(vcpu);
hsr                85 arch/arm/kvm/handle_exit.c 		      hsr);
hsr                98 arch/arm/kvm/hyp/switch.c 	u32 hsr = read_sysreg(HSR);
hsr                99 arch/arm/kvm/hyp/switch.c 	u8 ec = hsr >> HSR_EC_SHIFT;
hsr               102 arch/arm/kvm/hyp/switch.c 	vcpu->arch.fault.hsr = hsr;
hsr               123 arch/arm/kvm/hyp/switch.c 	if (!(hsr & HSR_DABT_S1PTW) && (hsr & HSR_FSC_TYPE) == FSC_PERM) {
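
The ARM KVM hits above all treat hsr as the saved Hyp Syndrome Register: hyp/switch.c reads it with read_sysreg(HSR) and stashes it in vcpu->arch.fault.hsr, and the emulation and exit-handling code then decodes bit fields out of it (exception class, condition code, fault status). A stand-alone sketch of that shift-and-mask style follows; the field positions used (EC in bits [31:26], CV in bit 24, COND in bits [23:20]) follow the architectural HSR layout the listed macros appear to encode, but the constant and function names are local to the sketch, not the kernel's.

/*
 * Stand-alone illustration of the shift-and-mask decoding used on hsr in
 * kvm_emulate.h and hyp/switch.c above. Bit positions follow the ARM
 * HSR/ESR layout (EC in [31:26], CV in bit 24, COND in [23:20]); the
 * constant and function names are local to this sketch.
 */
#include <stdint.h>
#include <stdio.h>

#define EC_SHIFT   26
#define CV_BIT     (1u << 24)
#define COND_MASK  (0xfu << 20)
#define COND_SHIFT 20

static uint8_t syndrome_ec(uint32_t hsr)
{
	return hsr >> EC_SHIFT;            /* top six bits: exception class */
}

static int syndrome_cond(uint32_t hsr)
{
	if (hsr & CV_BIT)                  /* condition field only valid when CV is set */
		return (hsr & COND_MASK) >> COND_SHIFT;
	return -1;                         /* treat as unconditional */
}

int main(void)
{
	uint32_t hsr = 0x91830047;         /* arbitrary example syndrome value */

	printf("EC=0x%02x COND=%d\n", syndrome_ec(hsr), syndrome_cond(hsr));
	return 0;
}
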
hsr               143 arch/arm64/include/uapi/asm/kvm.h 	__u32 hsr;
hsr               122 arch/arm64/kvm/handle_exit.c 	u32 hsr = kvm_vcpu_get_hsr(vcpu);
hsr               126 arch/arm64/kvm/handle_exit.c 	run->debug.arch.hsr = hsr;
hsr               128 arch/arm64/kvm/handle_exit.c 	switch (ESR_ELx_EC(hsr)) {
hsr               139 arch/arm64/kvm/handle_exit.c 			__func__, (unsigned int) hsr);
hsr               149 arch/arm64/kvm/handle_exit.c 	u32 hsr = kvm_vcpu_get_hsr(vcpu);
hsr               152 arch/arm64/kvm/handle_exit.c 		      hsr, esr_get_class_string(hsr));
hsr               214 arch/arm64/kvm/handle_exit.c 	u32 hsr = kvm_vcpu_get_hsr(vcpu);
hsr               215 arch/arm64/kvm/handle_exit.c 	u8 hsr_ec = ESR_ELx_EC(hsr);
hsr              2125 arch/arm64/kvm/sys_regs.c 	u32 hsr = kvm_vcpu_get_hsr(vcpu);
hsr              2127 arch/arm64/kvm/sys_regs.c 	int Rt2 = (hsr >> 10) & 0x1f;
hsr              2131 arch/arm64/kvm/sys_regs.c 	params.CRm = (hsr >> 1) & 0xf;
hsr              2132 arch/arm64/kvm/sys_regs.c 	params.is_write = ((hsr & 1) == 0);
hsr              2135 arch/arm64/kvm/sys_regs.c 	params.Op1 = (hsr >> 16) & 0xf;
hsr              2182 arch/arm64/kvm/sys_regs.c 	u32 hsr = kvm_vcpu_get_hsr(vcpu);
hsr              2187 arch/arm64/kvm/sys_regs.c 	params.CRm = (hsr >> 1) & 0xf;
hsr              2189 arch/arm64/kvm/sys_regs.c 	params.is_write = ((hsr & 1) == 0);
hsr              2190 arch/arm64/kvm/sys_regs.c 	params.CRn = (hsr >> 10) & 0xf;
hsr              2192 arch/arm64/kvm/sys_regs.c 	params.Op1 = (hsr >> 14) & 0x7;
hsr              2193 arch/arm64/kvm/sys_regs.c 	params.Op2 = (hsr >> 17) & 0x7;
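
In sys_regs.c the same syndrome word is unpacked into a parameter block for trapped coprocessor accesses: the two routines above pull CRm, CRn, Op1, Op2 and the access direction out of fixed bit positions, with different layouts for 64-bit and 32-bit accesses (hence the differing shifts). The fragment below replays the 32-bit decode as a stand-alone function; the shift and mask values are copied from the lines above, while the struct and function names exist only for this sketch.

/*
 * Stand-alone replay of the 32-bit coprocessor-trap decode shown in the
 * second sys_regs.c block above. Shift/mask values are copied from those
 * lines; everything else is local to this sketch.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct cp_params {
	uint8_t Op1, Op2, CRn, CRm;
	bool is_write;
};

static struct cp_params decode_cp32(uint32_t hsr)
{
	struct cp_params p;

	p.CRm      = (hsr >> 1) & 0xf;     /* matches params.CRm above  */
	p.is_write = (hsr & 1) == 0;       /* bit 0 clear => guest write */
	p.CRn      = (hsr >> 10) & 0xf;
	p.Op1      = (hsr >> 14) & 0x7;
	p.Op2      = (hsr >> 17) & 0x7;
	return p;
}

int main(void)
{
	struct cp_params p = decode_cp32(0x03b6c041);   /* arbitrary example */

	printf("Op1=%u Op2=%u CRn=%u CRm=%u %s\n",
	       p.Op1, p.Op2, p.CRn, p.CRm, p.is_write ? "write" : "read");
	return 0;
}
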
hsr               142 arch/arm64/kvm/trace.h 	TP_PROTO(unsigned long hsr),
hsr               143 arch/arm64/kvm/trace.h 	TP_ARGS(hsr),
hsr               146 arch/arm64/kvm/trace.h 		__field(unsigned long,	hsr)
hsr               150 arch/arm64/kvm/trace.h 		__entry->hsr = hsr;
hsr               153 arch/arm64/kvm/trace.h 	TP_printk("HSR 0x%08lx", __entry->hsr)
hsr               438 drivers/gpu/drm/amd/display/dc/calcs/calcs_logger.h 		DC_LOG_BANDWIDTH_CALCS("	[bw_fixed] hsr[%d]:%d", i, bw_fixed_to_int(data->hsr[i]));
hsr               470 drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c 			data->hsr[i] = data->hsr_after_stereo;
hsr               506 drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c 			if (bw_neq(data->hsr[i], bw_int_to_fixed(1))) {
hsr               507 drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c 				if (bw_mtn(data->hsr[i], bw_int_to_fixed(4))) {
hsr               511 drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c 					if (bw_mtn(data->hsr[i], data->h_taps[i])) {
hsr               515 drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c 						if (dceip->pre_downscaler_enabled == 1 && bw_mtn(data->hsr[i], bw_int_to_fixed(1)) && bw_leq(data->hsr[i], bw_ceil2(bw_div(data->h_taps[i], bw_int_to_fixed(4)), bw_int_to_fixed(1)))) {
hsr               541 drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c 			if ((dceip->pre_downscaler_enabled && bw_mtn(data->hsr[i], bw_int_to_fixed(1)))) {
hsr               542 drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c 				data->source_width_in_lb = bw_div(data->source_width_pixels[i], data->hsr[i]);
hsr               842 drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c 				data->horizontal_blank_and_chunk_granularity_factor[i] = bw_div(data->h_total[i], (bw_div((bw_add(data->h_total[i], bw_div((bw_sub(data->source_width_pixels[i], bw_int_to_fixed(dceip->chunk_width))), data->hsr[i]))), bw_int_to_fixed(2))));
hsr              1183 drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c 			data->active_time[i] = bw_div(bw_div(data->source_width_rounded_up_to_chunks[i], data->hsr[i]), data->pixel_rate[i]);
hsr              1243 drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c 						if (dceip->pre_downscaler_enabled && bw_mtn(data->hsr[i], bw_int_to_fixed(1))) {
hsr              1247 drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c 							data->scaler_limits_factor = bw_max3(bw_int_to_fixed(1), bw_ceil2(bw_div(data->h_taps[i], bw_int_to_fixed(4)), bw_int_to_fixed(1)), bw_mul(data->hsr[i], bw_max2(bw_div(data->v_taps[i], data->v_scaler_efficiency), bw_int_to_fixed(1))));
hsr              1443 drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c 				if ((i == j || data->display_synchronization_enabled) && (data->enable[j] == 1 && bw_equ(data->source_width_rounded_up_to_chunks[i], data->source_width_rounded_up_to_chunks[j]) && bw_equ(data->source_height_rounded_up_to_chunks[i], data->source_height_rounded_up_to_chunks[j]) && bw_equ(data->vsr[i], data->vsr[j]) && bw_equ(data->hsr[i], data->hsr[j]) && bw_equ(data->pixel_rate[i], data->pixel_rate[j]))) {
hsr              1694 drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c 			if (dceip->pre_downscaler_enabled && bw_mtn(data->hsr[i], bw_int_to_fixed(1))) {
hsr              1698 drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c 				data->scaler_limits_factor = bw_max3(bw_int_to_fixed(1), bw_ceil2(bw_div(data->h_taps[i], bw_int_to_fixed(4)), bw_int_to_fixed(1)), bw_mul(data->hsr[i], bw_max2(bw_div(data->v_taps[i], data->v_scaler_efficiency), bw_int_to_fixed(1))));
hsr               402 drivers/gpu/drm/amd/display/dc/inc/dce_calcs.h 	struct bw_fixed hsr[maximum_number_of_surfaces];
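
The drivers/gpu/drm/amd/display hits are an unrelated hsr: a per-surface bw_fixed value (see the array in dce_calcs.h) that reads as a horizontal scale ratio and is fed through fixed-point helpers such as bw_div, bw_mtn and bw_ceil2 in the bandwidth formulas. As a rough illustration of that style, the sketch below builds a toy 16.16 fixed-point type with compare and divide helpers; the helper semantics (mtn as "more than", leq as "less or equal") are inferred from how the calls are used above, and none of the names belong to the driver.

/*
 * Toy fixed-point helpers in the style of the bw_fixed arithmetic used by
 * dce_calcs.c above. 16.16 format and all names are local to this sketch;
 * only the usage pattern (compare, divide, range-check a scale ratio)
 * mirrors what the listed lines do with data->hsr[i].
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef struct { int64_t v; } fx;            /* 16.16 fixed point */

static fx fx_from_int(int i)      { return (fx){ (int64_t)i << 16 }; }
static fx fx_div(fx a, fx b)      { return (fx){ (a.v << 16) / b.v }; }
static bool fx_mtn(fx a, fx b)    { return a.v > b.v; }   /* "more than" */
static bool fx_leq(fx a, fx b)    { return a.v <= b.v; }
static double fx_to_double(fx a)  { return (double)a.v / 65536.0; }

int main(void)
{
	fx src_width = fx_from_int(3840);
	fx hsr       = fx_div(src_width, fx_from_int(1920));  /* 2:1 downscale */

	/* Same shape as the pre-downscaler checks in the listing:
	 * only treat the surface as downscaled when hsr > 1. */
	if (fx_mtn(hsr, fx_from_int(1)) && fx_leq(hsr, fx_from_int(4)))
		printf("hsr=%.2f: within pre-downscaler range\n", fx_to_double(hsr));
	return 0;
}
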
hsr                65 net/hsr/hsr_device.c 	hsr_for_each_port(master->hsr, port)
hsr                83 net/hsr/hsr_device.c 	struct hsr_priv *hsr;
hsr                85 net/hsr/hsr_device.c 	hsr = netdev_priv(hsr_dev);
hsr                89 net/hsr/hsr_device.c 		hsr->announce_count = 0;
hsr                90 net/hsr/hsr_device.c 		mod_timer(&hsr->announce_timer,
hsr                96 net/hsr/hsr_device.c 		del_timer(&hsr->announce_timer);
hsr                99 net/hsr/hsr_device.c void hsr_check_carrier_and_operstate(struct hsr_priv *hsr)
hsr               105 net/hsr/hsr_device.c 	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
hsr               115 net/hsr/hsr_device.c int hsr_get_max_mtu(struct hsr_priv *hsr)
hsr               122 net/hsr/hsr_device.c 	hsr_for_each_port(hsr, port)
hsr               134 net/hsr/hsr_device.c 	struct hsr_priv *hsr;
hsr               137 net/hsr/hsr_device.c 	hsr = netdev_priv(dev);
hsr               138 net/hsr/hsr_device.c 	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
hsr               140 net/hsr/hsr_device.c 	if (new_mtu > hsr_get_max_mtu(hsr)) {
hsr               153 net/hsr/hsr_device.c 	struct hsr_priv *hsr;
hsr               157 net/hsr/hsr_device.c 	hsr = netdev_priv(dev);
hsr               161 net/hsr/hsr_device.c 	hsr_for_each_port(hsr, port) {
hsr               192 net/hsr/hsr_device.c static netdev_features_t hsr_features_recompute(struct hsr_priv *hsr,
hsr               208 net/hsr/hsr_device.c 	hsr_for_each_port(hsr, port)
hsr               219 net/hsr/hsr_device.c 	struct hsr_priv *hsr = netdev_priv(dev);
hsr               221 net/hsr/hsr_device.c 	return hsr_features_recompute(hsr, features);
hsr               226 net/hsr/hsr_device.c 	struct hsr_priv *hsr = netdev_priv(dev);
hsr               229 net/hsr/hsr_device.c 	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
hsr               271 net/hsr/hsr_device.c 			    master->hsr->sup_multicast_addr,
hsr               289 net/hsr/hsr_device.c 	spin_lock_irqsave(&master->hsr->seqnr_lock, irqflags);
hsr               291 net/hsr/hsr_device.c 		hsr_stag->sequence_nr = htons(master->hsr->sup_sequence_nr);
hsr               292 net/hsr/hsr_device.c 		hsr_tag->sequence_nr = htons(master->hsr->sequence_nr);
hsr               293 net/hsr/hsr_device.c 		master->hsr->sup_sequence_nr++;
hsr               294 net/hsr/hsr_device.c 		master->hsr->sequence_nr++;
hsr               296 net/hsr/hsr_device.c 		hsr_stag->sequence_nr = htons(master->hsr->sequence_nr);
hsr               297 net/hsr/hsr_device.c 		master->hsr->sequence_nr++;
hsr               299 net/hsr/hsr_device.c 	spin_unlock_irqrestore(&master->hsr->seqnr_lock, irqflags);
hsr               325 net/hsr/hsr_device.c 	struct hsr_priv *hsr;
hsr               329 net/hsr/hsr_device.c 	hsr = from_timer(hsr, t, announce_timer);
hsr               332 net/hsr/hsr_device.c 	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
hsr               334 net/hsr/hsr_device.c 	if (hsr->announce_count < 3 && hsr->prot_version == 0) {
hsr               336 net/hsr/hsr_device.c 					   hsr->prot_version);
hsr               337 net/hsr/hsr_device.c 		hsr->announce_count++;
hsr               342 net/hsr/hsr_device.c 					   hsr->prot_version);
hsr               348 net/hsr/hsr_device.c 		mod_timer(&hsr->announce_timer, jiffies + interval);
hsr               359 net/hsr/hsr_device.c 	struct hsr_priv *hsr;
hsr               363 net/hsr/hsr_device.c 	hsr = netdev_priv(hsr_dev);
hsr               365 net/hsr/hsr_device.c 	hsr_debugfs_term(hsr);
hsr               367 net/hsr/hsr_device.c 	list_for_each_entry_safe(port, tmp, &hsr->ports, port_list)
hsr               370 net/hsr/hsr_device.c 	del_timer_sync(&hsr->prune_timer);
hsr               371 net/hsr/hsr_device.c 	del_timer_sync(&hsr->announce_timer);
hsr               373 net/hsr/hsr_device.c 	hsr_del_self_node(hsr);
hsr               374 net/hsr/hsr_device.c 	hsr_del_nodes(&hsr->node_db);
hsr               436 net/hsr/hsr_device.c 	struct hsr_priv *hsr;
hsr               441 net/hsr/hsr_device.c 	hsr = netdev_priv(hsr_dev);
hsr               442 net/hsr/hsr_device.c 	INIT_LIST_HEAD(&hsr->ports);
hsr               443 net/hsr/hsr_device.c 	INIT_LIST_HEAD(&hsr->node_db);
hsr               444 net/hsr/hsr_device.c 	INIT_LIST_HEAD(&hsr->self_node_db);
hsr               445 net/hsr/hsr_device.c 	spin_lock_init(&hsr->list_lock);
hsr               450 net/hsr/hsr_device.c 	res = hsr_create_self_node(hsr, hsr_dev->dev_addr,
hsr               455 net/hsr/hsr_device.c 	spin_lock_init(&hsr->seqnr_lock);
hsr               457 net/hsr/hsr_device.c 	hsr->sequence_nr = HSR_SEQNR_START;
hsr               458 net/hsr/hsr_device.c 	hsr->sup_sequence_nr = HSR_SUP_SEQNR_START;
hsr               460 net/hsr/hsr_device.c 	timer_setup(&hsr->announce_timer, hsr_announce, 0);
hsr               461 net/hsr/hsr_device.c 	timer_setup(&hsr->prune_timer, hsr_prune_nodes, 0);
hsr               463 net/hsr/hsr_device.c 	ether_addr_copy(hsr->sup_multicast_addr, def_multicast_addr);
hsr               464 net/hsr/hsr_device.c 	hsr->sup_multicast_addr[ETH_ALEN - 1] = multicast_spec;
hsr               466 net/hsr/hsr_device.c 	hsr->prot_version = protocol_version;
hsr               481 net/hsr/hsr_device.c 	res = hsr_add_port(hsr, hsr_dev, HSR_PT_MASTER);
hsr               489 net/hsr/hsr_device.c 	res = hsr_add_port(hsr, slave[0], HSR_PT_SLAVE_A);
hsr               493 net/hsr/hsr_device.c 	res = hsr_add_port(hsr, slave[1], HSR_PT_SLAVE_B);
hsr               497 net/hsr/hsr_device.c 	hsr_debugfs_init(hsr, hsr_dev);
hsr               498 net/hsr/hsr_device.c 	mod_timer(&hsr->prune_timer, jiffies + msecs_to_jiffies(PRUNE_PERIOD));
hsr               505 net/hsr/hsr_device.c 	list_for_each_entry_safe(port, tmp, &hsr->ports, port_list)
hsr               508 net/hsr/hsr_device.c 	hsr_del_self_node(hsr);
hsr                17 net/hsr/hsr_device.h void hsr_check_carrier_and_operstate(struct hsr_priv *hsr);
hsr                19 net/hsr/hsr_device.h int hsr_get_max_mtu(struct hsr_priv *hsr);
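
From here on the listing is net/hsr, where hsr is a struct hsr_priv * for a High-availability Seamless Redundancy device. hsr_device.c covers the master device: carrier/operstate checks, the announce timer, and frame and supervision sequence numbers incremented under seqnr_lock, with ports attached via hsr_add_port during setup. The user-space sketch below mirrors only the locking pattern around the two sequence counters; a pthread mutex stands in for the kernel spinlock and the fields only loosely follow struct hsr_priv.

/*
 * User-space analogue of the seqnr_lock pattern in hsr_device.c above:
 * two increasing counters (data frames vs. supervision frames) updated
 * under one lock. A pthread mutex stands in for the kernel spinlock and
 * the field names only loosely follow struct hsr_priv.
 */
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct hsr_like {
	pthread_mutex_t seqnr_lock;
	uint16_t sequence_nr;        /* tagged into forwarded data frames */
	uint16_t sup_sequence_nr;    /* tagged into supervision frames    */
};

static uint16_t next_seqnr(struct hsr_like *h, int supervision)
{
	uint16_t nr;

	pthread_mutex_lock(&h->seqnr_lock);
	if (supervision)
		nr = h->sup_sequence_nr++;
	else
		nr = h->sequence_nr++;
	pthread_mutex_unlock(&h->seqnr_lock);
	return nr;
}

int main(void)
{
	struct hsr_like h = { .sequence_nr = 0, .sup_sequence_nr = 0 };

	pthread_mutex_init(&h.seqnr_lock, NULL);
	printf("data seq %u, supervision seq %u\n",
	       (unsigned)next_seqnr(&h, 0), (unsigned)next_seqnr(&h, 1));
	return 0;
}
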
hsr                45 net/hsr/hsr_forward.c static bool is_supervision_frame(struct hsr_priv *hsr, struct sk_buff *skb)
hsr                56 net/hsr/hsr_forward.c 			      hsr->sup_multicast_addr))
hsr                67 net/hsr/hsr_forward.c 		if (hsr_V1_hdr->hsr.encap_proto != htons(ETH_P_PRP))
hsr               175 net/hsr/hsr_forward.c 	hsr_fill_tag(skb, frame, port, port->hsr->prot_version);
hsr               247 net/hsr/hsr_forward.c 	hsr_for_each_port(frame->port_rcv->hsr, port) {
hsr               289 net/hsr/hsr_forward.c static void check_local_dest(struct hsr_priv *hsr, struct sk_buff *skb,
hsr               292 net/hsr/hsr_forward.c 	if (hsr_addr_is_self(hsr, eth_hdr(skb)->h_dest)) {
hsr               314 net/hsr/hsr_forward.c 	frame->is_supervision = is_supervision_frame(port->hsr, skb);
hsr               335 net/hsr/hsr_forward.c 		spin_lock_irqsave(&port->hsr->seqnr_lock, irqflags);
hsr               336 net/hsr/hsr_forward.c 		frame->sequence_nr = port->hsr->sequence_nr;
hsr               337 net/hsr/hsr_forward.c 		port->hsr->sequence_nr++;
hsr               338 net/hsr/hsr_forward.c 		spin_unlock_irqrestore(&port->hsr->seqnr_lock, irqflags);
hsr               342 net/hsr/hsr_forward.c 	check_local_dest(port->hsr, skb, frame);
hsr                41 net/hsr/hsr_framereg.c bool hsr_addr_is_self(struct hsr_priv *hsr, unsigned char *addr)
hsr                45 net/hsr/hsr_framereg.c 	node = list_first_or_null_rcu(&hsr->self_node_db, struct hsr_node,
hsr                78 net/hsr/hsr_framereg.c int hsr_create_self_node(struct hsr_priv *hsr,
hsr                82 net/hsr/hsr_framereg.c 	struct list_head *self_node_db = &hsr->self_node_db;
hsr                92 net/hsr/hsr_framereg.c 	spin_lock_bh(&hsr->list_lock);
hsr                97 net/hsr/hsr_framereg.c 		spin_unlock_bh(&hsr->list_lock);
hsr               101 net/hsr/hsr_framereg.c 		spin_unlock_bh(&hsr->list_lock);
hsr               107 net/hsr/hsr_framereg.c void hsr_del_self_node(struct hsr_priv *hsr)
hsr               109 net/hsr/hsr_framereg.c 	struct list_head *self_node_db = &hsr->self_node_db;
hsr               112 net/hsr/hsr_framereg.c 	spin_lock_bh(&hsr->list_lock);
hsr               118 net/hsr/hsr_framereg.c 	spin_unlock_bh(&hsr->list_lock);
hsr               134 net/hsr/hsr_framereg.c static struct hsr_node *hsr_add_node(struct hsr_priv *hsr,
hsr               158 net/hsr/hsr_framereg.c 	spin_lock_bh(&hsr->list_lock);
hsr               166 net/hsr/hsr_framereg.c 	spin_unlock_bh(&hsr->list_lock);
hsr               169 net/hsr/hsr_framereg.c 	spin_unlock_bh(&hsr->list_lock);
hsr               179 net/hsr/hsr_framereg.c 	struct list_head *node_db = &port->hsr->node_db;
hsr               180 net/hsr/hsr_framereg.c 	struct hsr_priv *hsr = port->hsr;
hsr               214 net/hsr/hsr_framereg.c 	return hsr_add_node(hsr, node_db, ethhdr->h_source, seq_out);
hsr               224 net/hsr/hsr_framereg.c 	struct hsr_priv *hsr = port_rcv->hsr;
hsr               246 net/hsr/hsr_framereg.c 	node_db = &port_rcv->hsr->node_db;
hsr               250 net/hsr/hsr_framereg.c 		node_real = hsr_add_node(hsr, node_db, hsr_sp->macaddress_A,
hsr               271 net/hsr/hsr_framereg.c 	spin_lock_bh(&hsr->list_lock);
hsr               273 net/hsr/hsr_framereg.c 	spin_unlock_bh(&hsr->list_lock);
hsr               318 net/hsr/hsr_framereg.c 	node_dst = find_node_by_addr_A(&port->hsr->node_db,
hsr               362 net/hsr/hsr_framereg.c static struct hsr_port *get_late_port(struct hsr_priv *hsr,
hsr               366 net/hsr/hsr_framereg.c 		return hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
hsr               368 net/hsr/hsr_framereg.c 		return hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
hsr               373 net/hsr/hsr_framereg.c 		return hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
hsr               377 net/hsr/hsr_framereg.c 		return hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
hsr               387 net/hsr/hsr_framereg.c 	struct hsr_priv *hsr = from_timer(hsr, t, prune_timer);
hsr               394 net/hsr/hsr_framereg.c 	spin_lock_bh(&hsr->list_lock);
hsr               395 net/hsr/hsr_framereg.c 	list_for_each_entry_safe(node, tmp, &hsr->node_db, mac_list) {
hsr               401 net/hsr/hsr_framereg.c 		if (hsr_addr_is_self(hsr, node->macaddress_A))
hsr               428 net/hsr/hsr_framereg.c 			port = get_late_port(hsr, node);
hsr               430 net/hsr/hsr_framereg.c 				hsr_nl_ringerror(hsr, node->macaddress_A, port);
hsr               437 net/hsr/hsr_framereg.c 			hsr_nl_nodedown(hsr, node->macaddress_A);
hsr               443 net/hsr/hsr_framereg.c 	spin_unlock_bh(&hsr->list_lock);
hsr               446 net/hsr/hsr_framereg.c 	mod_timer(&hsr->prune_timer,
hsr               450 net/hsr/hsr_framereg.c void *hsr_get_next_node(struct hsr_priv *hsr, void *_pos,
hsr               456 net/hsr/hsr_framereg.c 		node = list_first_or_null_rcu(&hsr->node_db,
hsr               464 net/hsr/hsr_framereg.c 	list_for_each_entry_continue_rcu(node, &hsr->node_db, mac_list) {
hsr               472 net/hsr/hsr_framereg.c int hsr_get_node_data(struct hsr_priv *hsr,
hsr               485 net/hsr/hsr_framereg.c 	node = find_node_by_addr_A(&hsr->node_db, addr);
hsr               516 net/hsr/hsr_framereg.c 		port = hsr_port_get_hsr(hsr, node->addr_B_port);
hsr                15 net/hsr/hsr_framereg.h void hsr_del_self_node(struct hsr_priv *hsr);
hsr                21 net/hsr/hsr_framereg.h bool hsr_addr_is_self(struct hsr_priv *hsr, unsigned char *addr);
hsr                34 net/hsr/hsr_framereg.h int hsr_create_self_node(struct hsr_priv *hsr,
hsr                38 net/hsr/hsr_framereg.h void *hsr_get_next_node(struct hsr_priv *hsr, void *_pos,
hsr                41 net/hsr/hsr_framereg.h int hsr_get_node_data(struct hsr_priv *hsr,
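
hsr_framereg.c keeps the node table behind the same hsr_priv: entries are added under list_lock, looked up by MAC address A, and hsr_prune_nodes() periodically walks node_db, reports nodes that have gone quiet and re-arms prune_timer. The sketch below reduces that to a timestamp-based prune over a plain array; the locking, RCU protection and netlink notifications of the real code are left out, the timeout is invented, and all names are local.

/*
 * Reduced illustration of the prune pass in hsr_framereg.c above: walk the
 * node table and drop entries not heard from within a timeout. An array and
 * wall-clock seconds replace the kernel's RCU list, jiffies and list_lock.
 */
#include <stdio.h>
#include <time.h>

#define MAX_NODES     8
#define NODE_FORGET_S 60               /* assumption: forget after 60s of silence */

struct node {
	unsigned char macaddress_A[6];
	time_t time_last_seen;
	int in_use;
};

static void prune_nodes(struct node *db, size_t n, time_t now)
{
	for (size_t i = 0; i < n; i++) {
		if (!db[i].in_use)
			continue;
		if (now - db[i].time_last_seen > NODE_FORGET_S) {
			/* the real code would also notify userspace here */
			db[i].in_use = 0;
			printf("pruned node %u\n", (unsigned)i);
		}
	}
}

int main(void)
{
	struct node db[MAX_NODES] = { 0 };
	time_t now = time(NULL);

	db[0].in_use = 1;
	db[0].time_last_seen = now - 120;   /* stale entry */
	db[1].in_use = 1;
	db[1].time_last_seen = now;         /* fresh entry */

	prune_nodes(db, MAX_NODES, now);
	return 0;
}
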
hsr                23 net/hsr/hsr_main.c 	struct hsr_priv *hsr;
hsr                32 net/hsr/hsr_main.c 		hsr = netdev_priv(dev);
hsr                33 net/hsr/hsr_main.c 		port = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
hsr                39 net/hsr/hsr_main.c 		hsr = port->hsr;
hsr                46 net/hsr/hsr_main.c 		hsr_check_carrier_and_operstate(hsr);
hsr                61 net/hsr/hsr_main.c 		master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
hsr                70 net/hsr/hsr_main.c 		port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
hsr                71 net/hsr/hsr_main.c 		res = hsr_create_self_node(hsr,
hsr                83 net/hsr/hsr_main.c 		mtu_max = hsr_get_max_mtu(port->hsr);
hsr                84 net/hsr/hsr_main.c 		master = hsr_port_get_hsr(port->hsr, HSR_PT_MASTER);
hsr               100 net/hsr/hsr_main.c struct hsr_port *hsr_port_get_hsr(struct hsr_priv *hsr, enum hsr_port_type pt)
hsr               104 net/hsr/hsr_main.c 	hsr_for_each_port(hsr, port)
hsr               133 net/hsr/hsr_main.h 	struct hsr_tag		hsr;
hsr               149 net/hsr/hsr_main.h 	struct hsr_priv		*hsr;
hsr               173 net/hsr/hsr_main.h #define hsr_for_each_port(hsr, port) \
hsr               174 net/hsr/hsr_main.h 	list_for_each_entry_rcu((port), &(hsr)->ports, port_list)
hsr               176 net/hsr/hsr_main.h struct hsr_port *hsr_port_get_hsr(struct hsr_priv *hsr, enum hsr_port_type pt);
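
hsr_main.h supplies the glue visible throughout the listing: each port keeps a back-pointer to its owning hsr_priv (the port->hsr uses above), and hsr_for_each_port() is an RCU walk over hsr->ports that hsr_port_get_hsr() and many call sites use to find a port by type. A plain singly linked stand-in for that lookup is sketched below; the RCU protection and the real structure layouts are omitted, so treat it as shape only.

/*
 * Shape-only stand-in for hsr_port_get_hsr(): walk the device's port list
 * and return the first port of the requested type. Plain pointers replace
 * the kernel's RCU-protected list; types and names are local to the sketch.
 */
#include <stddef.h>
#include <stdio.h>

enum port_type { PT_MASTER, PT_SLAVE_A, PT_SLAVE_B };

struct port {
	enum port_type type;
	struct port *next;
};

struct priv {
	struct port *ports;          /* head of the port list */
};

#define for_each_port(priv, p) \
	for ((p) = (priv)->ports; (p); (p) = (p)->next)

static struct port *port_get(struct priv *priv, enum port_type type)
{
	struct port *p;

	for_each_port(priv, p)
		if (p->type == type)
			return p;
	return NULL;
}

int main(void)
{
	struct port b = { PT_SLAVE_B, NULL };
	struct port a = { PT_SLAVE_A, &b };
	struct port m = { PT_MASTER, &a };
	struct priv priv = { &m };

	printf("found slave B: %s\n", port_get(&priv, PT_SLAVE_B) ? "yes" : "no");
	return 0;
}
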
hsr                80 net/hsr/hsr_netlink.c 	struct hsr_priv *hsr;
hsr                84 net/hsr/hsr_netlink.c 	hsr = netdev_priv(dev);
hsr                89 net/hsr/hsr_netlink.c 	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
hsr                97 net/hsr/hsr_netlink.c 	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
hsr               105 net/hsr/hsr_netlink.c 		    hsr->sup_multicast_addr) ||
hsr               106 net/hsr/hsr_netlink.c 	    nla_put_u16(skb, IFLA_HSR_SEQ_NR, hsr->sequence_nr))
hsr               146 net/hsr/hsr_netlink.c void hsr_nl_ringerror(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN],
hsr               181 net/hsr/hsr_netlink.c 	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
hsr               189 net/hsr/hsr_netlink.c void hsr_nl_nodedown(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN])
hsr               218 net/hsr/hsr_netlink.c 	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
hsr               240 net/hsr/hsr_netlink.c 	struct hsr_priv *hsr;
hsr               287 net/hsr/hsr_netlink.c 	hsr = netdev_priv(hsr_dev);
hsr               288 net/hsr/hsr_netlink.c 	res = hsr_get_node_data(hsr,
hsr               323 net/hsr/hsr_netlink.c 	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
hsr               336 net/hsr/hsr_netlink.c 	port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
hsr               372 net/hsr/hsr_netlink.c 	struct hsr_priv *hsr;
hsr               416 net/hsr/hsr_netlink.c 	hsr = netdev_priv(hsr_dev);
hsr               419 net/hsr/hsr_netlink.c 		pos = hsr_get_next_node(hsr, NULL, addr);
hsr               432 net/hsr/hsr_netlink.c 		pos = hsr_get_next_node(hsr, pos, addr);
hsr                21 net/hsr/hsr_netlink.h void hsr_nl_ringerror(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN],
hsr                23 net/hsr/hsr_netlink.h void hsr_nl_nodedown(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN]);
hsr                33 net/hsr/hsr_slave.c 	if (hsr_addr_is_self(port->hsr, eth_hdr(skb)->h_source)) {
hsr               128 net/hsr/hsr_slave.c int hsr_add_port(struct hsr_priv *hsr, struct net_device *dev,
hsr               140 net/hsr/hsr_slave.c 	port = hsr_port_get_hsr(hsr, type);
hsr               148 net/hsr/hsr_slave.c 	port->hsr = hsr;
hsr               158 net/hsr/hsr_slave.c 	list_add_tail_rcu(&port->port_list, &hsr->ports);
hsr               161 net/hsr/hsr_slave.c 	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
hsr               163 net/hsr/hsr_slave.c 	dev_set_mtu(master->dev, hsr_get_max_mtu(hsr));
hsr               174 net/hsr/hsr_slave.c 	struct hsr_priv *hsr;
hsr               177 net/hsr/hsr_slave.c 	hsr = port->hsr;
hsr               178 net/hsr/hsr_slave.c 	master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
hsr               184 net/hsr/hsr_slave.c 			dev_set_mtu(master->dev, hsr_get_max_mtu(hsr));
hsr                15 net/hsr/hsr_slave.h int hsr_add_port(struct hsr_priv *hsr, struct net_device *dev,
hsr               712 sound/pci/asihpi/hpi6205.c 	u32 hsr = 0;
hsr               714 sound/pci/asihpi/hpi6205.c 	hsr = ioread32(phw->prHSR);
hsr               715 sound/pci/asihpi/hpi6205.c 	if (hsr & C6205_HSR_INTSRC) {
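
The asihpi hit is unrelated to either of the above: hsr there looks like a host status register read through the mapped prHSR pointer and tested for an interrupt-source bit before the handler proceeds. The sketch below shows only that read-and-test pattern against a plain variable standing in for the mapped register; the bit position is invented for the example.

/*
 * Minimal read-and-test pattern in the style of the hpi6205.c lines above.
 * A volatile variable stands in for the ioread32()-mapped status register;
 * the INTSRC bit position here is invented for the example.
 */
#include <stdint.h>
#include <stdio.h>

#define FAKE_HSR_INTSRC  (1u << 0)    /* assumption: bit 0 flags "interrupt from device" */

static volatile uint32_t fake_status_reg = FAKE_HSR_INTSRC;

int main(void)
{
	uint32_t hsr = fake_status_reg;   /* the driver reads this via ioread32(phw->prHSR) */

	if (hsr & FAKE_HSR_INTSRC)
		printf("interrupt asserted by the device\n");
	return 0;
}
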
hsr               143 tools/arch/arm64/include/uapi/asm/kvm.h 	__u32 hsr;
hsr                53 virt/kvm/arm/trace.h 	TP_PROTO(unsigned long vcpu_pc, unsigned long hsr,
hsr                56 virt/kvm/arm/trace.h 	TP_ARGS(vcpu_pc, hsr, hxfar, ipa),
hsr                60 virt/kvm/arm/trace.h 		__field(	unsigned long,	hsr		)
hsr                67 virt/kvm/arm/trace.h 		__entry->hsr			= hsr;
hsr                73 virt/kvm/arm/trace.h 		  __entry->ipa, __entry->hsr,