inbound            92 arch/arm/common/mcpm_entry.c 	sync_cache_r(&c->inbound);
inbound            93 arch/arm/common/mcpm_entry.c 	if (c->inbound == INBOUND_COMING_UP)
inbound           437 arch/arm/common/mcpm_entry.c 		mcpm_sync.clusters[i].inbound = INBOUND_NOT_COMING_UP;
inbound           294 arch/arm/include/asm/mcpm.h 	s8 inbound __aligned(__CACHE_WRITEBACK_GRANULE);
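
The MCPM lines above show the shape of the mechanism: each cluster carries a byte-sized inbound state, padded out to its own cache-writeback granule, which the inbound CPU writes and the outbound CPU re-reads (via sync_cache_r()) before deciding whether it may power the cluster down. A minimal userspace sketch of that idea, with C11 atomics standing in for the cache maintenance and illustrative state values rather than the kernel's:

#include <stdatomic.h>
#include <stdio.h>

#define CACHE_GRANULE 64		/* stand-in for __CACHE_WRITEBACK_GRANULE */
#define INBOUND_NOT_COMING_UP 0		/* values illustrative, not the kernel's */
#define INBOUND_COMING_UP     1

struct cluster_sync {
	/* one state byte per cluster, alone in its cache granule */
	_Alignas(CACHE_GRANULE) _Atomic signed char inbound;
};

static struct cluster_sync clusters[2];

/* outbound side: abort power-down if an inbound CPU is already coming up */
static int outbound_may_power_down(struct cluster_sync *c)
{
	/* the kernel uses sync_cache_r() here; an acquire load models the
	 * "re-read the other side's latest write" intent */
	if (atomic_load_explicit(&c->inbound, memory_order_acquire) ==
	    INBOUND_COMING_UP)
		return 0;	/* abort */
	return 1;
}

int main(void)
{
	for (int i = 0; i < 2; i++)
		atomic_store(&clusters[i].inbound, INBOUND_NOT_COMING_UP);

	atomic_store(&clusters[1].inbound, INBOUND_COMING_UP);
	printf("cluster0 may power down: %d\n", outbound_may_power_down(&clusters[0]));
	printf("cluster1 may power down: %d\n", outbound_may_power_down(&clusters[1]));
	return 0;
}
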
inbound          4747 drivers/block/rbd.c 			     void *inbound,
inbound          4784 drivers/block/rbd.c 		memcpy(inbound, page_address(reply_page), inbound_size);
inbound            74 drivers/hv/channel.c 	hv_ringbuffer_cleanup(&channel->inbound);
inbound           146 drivers/hv/channel.c 	err = hv_ringbuffer_init(&newchannel->inbound,
inbound           233 drivers/hv/channel.c 	hv_ringbuffer_cleanup(&newchannel->inbound);
inbound           380 drivers/hv/connection.c 		if (likely(hv_end_read(&channel->inbound) == 0))
inbound           383 drivers/hv/connection.c 		hv_begin_read(&channel->inbound);
inbound           187 drivers/hv/ring_buffer.c 	mutex_init(&channel->inbound.ring_buffer_mutex);
inbound           396 drivers/hv/ring_buffer.c 	struct hv_ring_buffer_info *rbi = &channel->inbound;
inbound           420 drivers/hv/ring_buffer.c 	struct hv_ring_buffer_info *rbi = &channel->inbound;
inbound           467 drivers/hv/ring_buffer.c 	struct hv_ring_buffer_info *rbi = &channel->inbound;
inbound           427 drivers/hv/vmbus_drv.c 	struct hv_ring_buffer_debug_info inbound;
inbound           433 drivers/hv/vmbus_drv.c 	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
inbound           437 drivers/hv/vmbus_drv.c 	return sprintf(buf, "%d\n", inbound.current_interrupt_mask);
inbound           445 drivers/hv/vmbus_drv.c 	struct hv_ring_buffer_debug_info inbound;
inbound           451 drivers/hv/vmbus_drv.c 	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
inbound           455 drivers/hv/vmbus_drv.c 	return sprintf(buf, "%d\n", inbound.current_read_index);
inbound           463 drivers/hv/vmbus_drv.c 	struct hv_ring_buffer_debug_info inbound;
inbound           469 drivers/hv/vmbus_drv.c 	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
inbound           473 drivers/hv/vmbus_drv.c 	return sprintf(buf, "%d\n", inbound.current_write_index);
inbound           482 drivers/hv/vmbus_drv.c 	struct hv_ring_buffer_debug_info inbound;
inbound           488 drivers/hv/vmbus_drv.c 	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
inbound           492 drivers/hv/vmbus_drv.c 	return sprintf(buf, "%d\n", inbound.bytes_avail_toread);
inbound           501 drivers/hv/vmbus_drv.c 	struct hv_ring_buffer_debug_info inbound;
inbound           507 drivers/hv/vmbus_drv.c 	ret = hv_ringbuffer_get_debuginfo(&hv_dev->channel->inbound, &inbound);
inbound           511 drivers/hv/vmbus_drv.c 	return sprintf(buf, "%d\n", inbound.bytes_avail_towrite);
inbound          1226 drivers/hv/vmbus_drv.c 				hv_begin_read(&channel->inbound);
inbound          1578 drivers/hv/vmbus_drv.c 	struct hv_ring_buffer_info *rbi = &channel->inbound;
inbound          1595 drivers/hv/vmbus_drv.c 	struct hv_ring_buffer_info *rbi = &channel->inbound;
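
The vmbus_drv.c hits all follow one sysfs template: declare a local struct hv_ring_buffer_debug_info named inbound, fill it with hv_ringbuffer_get_debuginfo() from the channel's inbound ring, and print a single field. A self-contained sketch of that show()-one-field pattern, with a mocked-up debug-info getter (field names are taken from the sprintf() calls above; the struct layout and values are illustrative):

#include <stdio.h>

/* field names taken from the sysfs attributes above; layout is illustrative */
struct ring_debug_info {
	unsigned int current_interrupt_mask;
	unsigned int current_read_index;
	unsigned int current_write_index;
	unsigned int bytes_avail_toread;
	unsigned int bytes_avail_towrite;
};

/* stand-in for hv_ringbuffer_get_debuginfo(); returns 0 on success */
static int get_debuginfo(const void *ring, struct ring_debug_info *dbg)
{
	(void)ring;
	*dbg = (struct ring_debug_info){ 0, 128, 640, 512, 3584 };
	return 0;
}

/* the sysfs show() pattern: one attribute prints one debug-info field */
static int show_read_avail(char *buf, size_t len)
{
	struct ring_debug_info inbound;

	if (get_debuginfo(NULL, &inbound))
		return -1;
	return snprintf(buf, len, "%d\n", inbound.bytes_avail_toread);
}

int main(void)
{
	char buf[32];

	if (show_read_avail(buf, sizeof(buf)) > 0)
		fputs(buf, stdout);
	return 0;
}
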
inbound          1327 drivers/net/hyperv/netvsc.c 	    (ret || hv_end_read(&channel->inbound)) &&
inbound          1329 drivers/net/hyperv/netvsc.c 		hv_begin_read(&channel->inbound);
inbound          1344 drivers/net/hyperv/netvsc.c 	struct hv_ring_buffer_info *rbi = &channel->inbound;
inbound           166 drivers/net/hyperv/netvsc_drv.c 			aread = hv_get_bytes_to_read(&chn->inbound);
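
The connection.c, vmbus_drv.c and netvsc.c hits share a drain-and-rearm protocol on the inbound ring: hv_begin_read() masks ring interrupts before draining, hv_end_read() unmasks and reports whether data slipped in meanwhile, and a nonzero result forces another pass instead of sleeping. A standalone sketch of that loop; the function names are reused for readability, but the bodies are simplified stand-ins, not the kernel's ring-buffer code:

#include <stdbool.h>
#include <stdio.h>

struct ring {
	unsigned int write_index;	/* advanced by the "host" side */
	unsigned int read_index;	/* advanced by the consumer */
	bool interrupt_mask;
};

static void hv_begin_read(struct ring *r)
{
	r->interrupt_mask = true;	/* stop further ring interrupts */
}

/* unmask and report bytes that slipped in while we were draining */
static unsigned int hv_end_read(struct ring *r)
{
	r->interrupt_mask = false;
	return r->write_index - r->read_index;
}

static void drain(struct ring *r)
{
	r->read_index = r->write_index;	/* consume everything currently visible */
}

int main(void)
{
	struct ring r = { .write_index = 100 };
	int passes = 0;

	hv_begin_read(&r);
	for (;;) {
		drain(&r);
		if (passes++ == 0)
			r.write_index += 20;	/* data races in during the first drain */
		if (hv_end_read(&r) == 0)
			break;			/* nothing missed: wait for the next interrupt */
		hv_begin_read(&r);		/* missed data: mask again and re-drain */
	}
	printf("drained after %d passes, read_index=%u\n", passes, r.read_index);
	return 0;
}
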
inbound           107 drivers/net/ppp/ppp_async.c 			   int len, int inbound);
inbound           951 drivers/net/ppp/ppp_async.c 			   int len, int inbound)
inbound           967 drivers/net/ppp/ppp_async.c 	if (code == (inbound? CONFACK: CONFREQ)) {
inbound           976 drivers/net/ppp/ppp_async.c 		if (!inbound) {
inbound           987 drivers/net/ppp/ppp_async.c 	} else if (inbound)
inbound           998 drivers/net/ppp/ppp_async.c 			if (inbound)
inbound          1005 drivers/net/ppp/ppp_async.c 			if (inbound)
inbound           274 drivers/net/ppp/ppp_generic.c static void ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound);
inbound          2810 drivers/net/ppp/ppp_generic.c ppp_ccp_peek(struct ppp *ppp, struct sk_buff *skb, int inbound)
inbound          2829 drivers/net/ppp/ppp_generic.c 		if(inbound)
inbound          2857 drivers/net/ppp/ppp_generic.c 		if (inbound) {
inbound          2880 drivers/net/ppp/ppp_generic.c 		if (inbound) {
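
In both PPP files inbound is a direction flag for packets the code merely peeks at: async_lcp_peek() only cares about a CONFACK when the packet came in and a CONFREQ when it went out, and ppp_ccp_peek() uses the same flag to decide whether the transmit compressor (xstate) or the receive decompressor (rstate) is affected. A small standalone sketch of that direction selection, condensed from both call sites rather than copied from either, with illustrative constants and state bits:

#include <stdio.h>

enum { CONFREQ = 1, CONFACK = 2 };	/* CCP/LCP codes, values illustrative */
#define SC_COMP_RUN   0x1		/* transmit compressor running */
#define SC_DECOMP_RUN 0x2		/* receive decompressor running */

struct ppp_state {
	unsigned int xstate;	/* transmit side */
	unsigned int rstate;	/* receive side */
};

/* A ConfReq/ConfAck describes what its *sender* wants to receive, so an
 * inbound packet governs what we transmit, and vice versa. */
static void ccp_peek(struct ppp_state *ppp, int code, int inbound)
{
	if (code != (inbound ? CONFACK : CONFREQ))
		return;		/* not the packet that matters for this direction */

	if (inbound)
		ppp->xstate &= ~SC_COMP_RUN;	/* peer reconfigured what we send */
	else
		ppp->rstate &= ~SC_DECOMP_RUN;	/* we reconfigured what we receive */
}

int main(void)
{
	struct ppp_state ppp = { SC_COMP_RUN, SC_DECOMP_RUN };

	ccp_peek(&ppp, CONFACK, 1);	/* inbound ack: transmit state reset */
	ccp_peek(&ppp, CONFREQ, 0);	/* outbound request: receive state reset */
	printf("xstate=%#x rstate=%#x\n", ppp.xstate, ppp.rstate);
	return 0;
}
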
inbound           324 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(csw-inbound-dirty,			0x16),
inbound           468 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(cswlf-inbound-snoop-fifo-backpressure, 0x2a),
inbound           470 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(cswlf-inbound-gack-fifo-backpressure, 0x2c),
inbound           472 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(cswlf-inbound-data-fifo-backpressure, 0x2e),
inbound           473 drivers/perf/xgene_pmu.c 	XGENE_PMU_EVENT_ATTR(cswlf-inbound-req-backpressure,	0x2f),
inbound          9188 drivers/scsi/lpfc/lpfc_sli.c 	int inbound = 0; /* number of sg reply entries inbound from firmware */
inbound          9235 drivers/scsi/lpfc/lpfc_sli.c 					inbound++;
inbound          9237 drivers/scsi/lpfc/lpfc_sli.c 				if (inbound == 1)
inbound          19717 drivers/scsi/lpfc/lpfc_sli.c 	int inbound = 0; /* number of sg reply entries inbound from firmware */
inbound          19768 drivers/scsi/lpfc/lpfc_sli.c 					inbound++;
inbound          19770 drivers/scsi/lpfc/lpfc_sli.c 				if (inbound == 1)
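
Both lpfc_sli.c hunks just keep a running count of scatter-gather reply entries coming back inbound from the firmware and special-case the first one. A generic sketch of that first-entry bookkeeping; the entry layout and the action taken on the first reply are placeholders, not lpfc's:

#include <stdbool.h>
#include <stdio.h>

struct sg_entry {
	bool is_reply;		/* entry carries data inbound from the firmware */
	unsigned int len;
};

static unsigned int total_reply_len(const struct sg_entry *sg, int n)
{
	int inbound = 0;	/* number of sg reply entries seen so far */
	unsigned int total = 0;

	for (int i = 0; i < n; i++) {
		if (!sg[i].is_reply)
			continue;
		inbound++;
		if (inbound == 1) {
			/* first reply entry: placeholder for the special case,
			 * e.g. noting where the reply payload starts */
			printf("first inbound reply at slot %d\n", i);
		}
		total += sg[i].len;
	}
	return total;
}

int main(void)
{
	struct sg_entry sg[] = {
		{ false, 64 }, { true, 128 }, { true, 32 },
	};

	printf("inbound reply bytes: %u\n",
	       total_reply_len(sg, (int)(sizeof(sg) / sizeof(sg[0]))));
	return 0;
}
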
inbound           147 drivers/soc/qcom/smp2p.c 	struct list_head inbound;
inbound           202 drivers/soc/qcom/smp2p.c 		list_for_each_entry(entry, &smp2p->inbound, node) {
inbound           213 drivers/soc/qcom/smp2p.c 	list_for_each_entry(entry, &smp2p->inbound, node) {
inbound           455 drivers/soc/qcom/smp2p.c 	INIT_LIST_HEAD(&smp2p->inbound);
inbound           519 drivers/soc/qcom/smp2p.c 			list_add(&entry->node, &smp2p->inbound);
inbound           545 drivers/soc/qcom/smp2p.c 	list_for_each_entry(entry, &smp2p->inbound, node)
inbound           568 drivers/soc/qcom/smp2p.c 	list_for_each_entry(entry, &smp2p->inbound, node)
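
The smp2p hits are the standard intrusive-list pattern: a struct list_head inbound on the owner, initialised with INIT_LIST_HEAD(), populated with list_add() at probe time, and walked with list_for_each_entry() whenever the remote side signals. A self-contained userspace re-implementation of that shape (minimal list helpers in place of <linux/list.h>, placeholder entry fields, GNU typeof assumed):

#include <stddef.h>
#include <stdio.h>

/* minimal stand-ins for the kernel's list_head helpers */
struct list_head { struct list_head *next, *prev; };

#define INIT_LIST_HEAD(h) do { (h)->next = (h); (h)->prev = (h); } while (0)

static void list_add(struct list_head *new, struct list_head *head)
{
	new->next = head->next;
	new->prev = head;
	head->next->prev = new;
	head->next = new;
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

#define list_for_each_entry(pos, head, member)				\
	for (pos = container_of((head)->next, typeof(*pos), member);	\
	     &pos->member != (head);					\
	     pos = container_of(pos->member.next, typeof(*pos), member))

/* placeholder entry: one remote state word we watch for changes */
struct smp2p_entry {
	struct list_head node;
	const char *name;
	unsigned int last_value;
};

struct smp2p {
	struct list_head inbound;	/* entries owned by the remote side */
};

int main(void)
{
	struct smp2p smp2p;
	struct smp2p_entry a = { .name = "wdog" }, b = { .name = "shutdown" };
	struct smp2p_entry *entry;

	INIT_LIST_HEAD(&smp2p.inbound);
	list_add(&a.node, &smp2p.inbound);	/* done once at probe time */
	list_add(&b.node, &smp2p.inbound);

	/* the interrupt handler walks every inbound entry looking for changes */
	list_for_each_entry(entry, &smp2p.inbound, node)
		printf("checking inbound entry %s (last=%u)\n",
		       entry->name, entry->last_value);
	return 0;
}
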
inbound            84 drivers/uio/uio_hv_generic.c 	dev->channel->inbound.ring_buffer->interrupt_mask = !irq_state;
inbound            99 drivers/uio/uio_hv_generic.c 	chan->inbound.ring_buffer->interrupt_mask = 1;
inbound           168 drivers/uio/uio_hv_generic.c 	new_sc->inbound.ring_buffer->interrupt_mask = 1;
inbound           213 drivers/uio/uio_hv_generic.c 		dev->channel->inbound.ring_buffer->interrupt_mask = 1;
inbound           744 include/linux/hyperv.h 	struct hv_ring_buffer_info inbound;	/* receive from parent */
inbound            79 include/net/sctp/ulpevent.h 	__u16 inbound,
inbound           117 net/sctp/ulpevent.c 	__u16 inbound, struct sctp_chunk *chunk, gfp_t gfp)
inbound           213 net/sctp/ulpevent.c 	sac->sac_inbound_streams = inbound;
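
In the SCTP lines, inbound is simply the negotiated count of inbound streams copied into sac_inbound_streams of the SCTP_ASSOC_CHANGE notification handed to userspace. A tiny sketch of filling such a notification; the struct is trimmed to the two stream-count fields for illustration, whereas the real struct sctp_assoc_change carries more:

#include <stdint.h>
#include <stdio.h>

/* trimmed-down view of the association-change notification */
struct assoc_change {
	uint16_t sac_outbound_streams;
	uint16_t sac_inbound_streams;
};

static void fill_assoc_change(struct assoc_change *sac,
			      uint16_t outbound, uint16_t inbound)
{
	sac->sac_outbound_streams = outbound;	/* streams we may send on */
	sac->sac_inbound_streams = inbound;	/* streams the peer may send on */
}

int main(void)
{
	struct assoc_change sac;

	fill_assoc_change(&sac, 10, 5);
	printf("in=%u out=%u\n", sac.sac_inbound_streams, sac.sac_outbound_streams);
	return 0;
}
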
inbound           180 net/vmw_vsock/hyperv_transport.c 	u32 readable = hv_get_bytes_to_read(&chan->inbound);
inbound           188 net/vmw_vsock/hyperv_transport.c 	u32 readable = hv_get_bytes_to_read(&chan->inbound);
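
Finally, the hyperv_transport lines use hv_get_bytes_to_read() on the inbound ring to decide whether the socket is readable. For a single-producer ring with a read and a write index, the bytes available to read are (write - read) modulo the ring size, which is presumably the arithmetic behind that helper and behind the bytes_avail_toread figure in the debug info above. A standalone sketch of that calculation, with an illustrative struct layout and a generically named readability check:

#include <stdio.h>

struct ring_info {
	unsigned int size;			/* ring data size in bytes */
	unsigned int current_read_index;
	unsigned int current_write_index;
};

/* bytes the consumer may read: (write - read) modulo the ring size */
static unsigned int get_bytes_to_read(const struct ring_info *r)
{
	unsigned int w = r->current_write_index, rd = r->current_read_index;

	return w >= rd ? w - rd : r->size - (rd - w);
}

static int channel_readable(const struct ring_info *r)
{
	unsigned int readable = get_bytes_to_read(r);

	return readable > 0;	/* the socket poll path only cares if it is nonzero */
}

int main(void)
{
	struct ring_info r = { .size = 4096,
			       .current_read_index = 3500,
			       .current_write_index = 100 };

	printf("readable=%u has_data=%d\n",
	       get_bytes_to_read(&r), channel_readable(&r));
	return 0;
}
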