rcvd              505 drivers/infiniband/sw/siw/siw_cm.c 	int rcvd, to_rcv;
rcvd              508 drivers/infiniband/sw/siw/siw_cm.c 		rcvd = ksock_recv(s, (char *)hdr + cep->mpa.bytes_rcvd,
rcvd              511 drivers/infiniband/sw/siw/siw_cm.c 		if (rcvd <= 0)
rcvd              514 drivers/infiniband/sw/siw/siw_cm.c 		cep->mpa.bytes_rcvd += rcvd;
rcvd              539 drivers/infiniband/sw/siw/siw_cm.c 		rcvd = ksock_recv(s, (char *)&word, sizeof(word), MSG_DONTWAIT);
rcvd              540 drivers/infiniband/sw/siw/siw_cm.c 		if (rcvd == -EAGAIN)
rcvd              543 drivers/infiniband/sw/siw/siw_cm.c 		if (rcvd == 0) {
rcvd              547 drivers/infiniband/sw/siw/siw_cm.c 		if (rcvd < 0) {
rcvd              548 drivers/infiniband/sw/siw/siw_cm.c 			siw_dbg_cep(cep, "error: %d\n", rcvd);
rcvd              549 drivers/infiniband/sw/siw/siw_cm.c 			return rcvd;
rcvd              551 drivers/infiniband/sw/siw/siw_cm.c 		siw_dbg_cep(cep, "peer sent extra data: %d\n", rcvd);
rcvd              565 drivers/infiniband/sw/siw/siw_cm.c 	rcvd = ksock_recv(
rcvd              569 drivers/infiniband/sw/siw/siw_cm.c 	if (rcvd < 0)
rcvd              570 drivers/infiniband/sw/siw/siw_cm.c 		return rcvd;
rcvd              572 drivers/infiniband/sw/siw/siw_cm.c 	if (rcvd > to_rcv)
rcvd              575 drivers/infiniband/sw/siw/siw_cm.c 	cep->mpa.bytes_rcvd += rcvd;
rcvd              577 drivers/infiniband/sw/siw/siw_cm.c 	if (to_rcv == rcvd) {
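
The siw_cm.c hits above are the MPA request/reply receive path: a fixed-size header is accumulated into cep->mpa across possibly many partial ksock_recv() calls (tracked in bytes_rcvd), a non-blocking one-word probe then checks whether the peer sent data beyond the announced length, and the private data itself is read until to_rcv bytes have arrived. Below is a minimal userspace sketch of the same accumulate-until-complete pattern, assuming an ordinary non-blocking TCP socket; recv_header() and its parameters are illustrative only, not kernel API.

	#include <errno.h>
	#include <stddef.h>
	#include <sys/socket.h>
	#include <sys/types.h>

	/*
	 * Pull a fixed-size header off a non-blocking socket across partial
	 * reads. Returns 1 when the header is complete, 0 when the socket
	 * would block (call again on the next data-ready event), and a
	 * negative errno on error or premature close.
	 */
	static int recv_header(int fd, void *hdr, size_t hdr_len, size_t *bytes_rcvd)
	{
		while (*bytes_rcvd < hdr_len) {
			ssize_t rcvd = recv(fd, (char *)hdr + *bytes_rcvd,
					    hdr_len - *bytes_rcvd, MSG_DONTWAIT);

			if (rcvd < 0 && (errno == EAGAIN || errno == EWOULDBLOCK))
				return 0;
			if (rcvd < 0)
				return -errno;
			if (rcvd == 0)
				return -ECONNRESET;	/* peer closed mid-header */

			*bytes_rcvd += rcvd;
		}
		return 1;
	}

The caller keeps re-invoking such a helper from its data-ready handler until it reports completion, much as the CM code re-enters its receive routine on each socket event.
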
rcvd              286 drivers/mtd/nand/raw/atmel/nand-controller.c 	u32 sr, rcvd;
rcvd              291 drivers/mtd/nand/raw/atmel/nand-controller.c 	rcvd = sr & (nc->op.wait | ATMEL_HSMC_NFC_SR_ERRORS);
rcvd              294 drivers/mtd/nand/raw/atmel/nand-controller.c 	if (rcvd)
rcvd              295 drivers/mtd/nand/raw/atmel/nand-controller.c 		regmap_write(nc->base.smc, ATMEL_HSMC_NFC_IDR, rcvd);
rcvd              300 drivers/mtd/nand/raw/atmel/nand-controller.c 	return rcvd ? IRQ_HANDLED : IRQ_NONE;
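
The nand-controller.c hits are an interrupt handler for the HSMC NFC block: the status register is read, masked against the events the pending operation waits for plus the error bits, the sources that fired are disabled through the IDR register, and IRQ_HANDLED is returned only if something relevant was pending. A sketch of that shape for a regmap-backed device follows; the DEMO_* register offsets, the demo_nfc struct and the completion used to wake the waiter are hypothetical stand-ins, not the real driver's layout.

	#include <linux/bits.h>
	#include <linux/completion.h>
	#include <linux/interrupt.h>
	#include <linux/regmap.h>

	#define DEMO_NFC_SR	0x00		/* status register (hypothetical) */
	#define DEMO_NFC_IDR	0x08		/* interrupt disable register (hypothetical) */
	#define DEMO_NFC_ERRORS	GENMASK(7, 4)	/* error bits (hypothetical) */

	struct demo_nfc {
		struct regmap *regs;
		u32 wait_mask;			/* events the current op waits for */
		struct completion complete;
	};

	static irqreturn_t demo_nfc_interrupt(int irq, void *data)
	{
		struct demo_nfc *nfc = data;
		u32 sr, rcvd;

		regmap_read(nfc->regs, DEMO_NFC_SR, &sr);
		rcvd = sr & (nfc->wait_mask | DEMO_NFC_ERRORS);

		if (rcvd) {
			/* mask exactly the sources that fired and wake the waiter */
			regmap_write(nfc->regs, DEMO_NFC_IDR, rcvd);
			complete(&nfc->complete);
		}

		return rcvd ? IRQ_HANDLED : IRQ_NONE;
	}
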
rcvd             1862 drivers/net/ethernet/brocade/bna/bnad.c 	int rcvd = 0;
rcvd             1869 drivers/net/ethernet/brocade/bna/bnad.c 	rcvd = bnad_cq_process(bnad, rx_ctrl->ccb, budget);
rcvd             1870 drivers/net/ethernet/brocade/bna/bnad.c 	if (rcvd >= budget)
rcvd             1871 drivers/net/ethernet/brocade/bna/bnad.c 		return rcvd;
rcvd             1874 drivers/net/ethernet/brocade/bna/bnad.c 	napi_complete_done(napi, rcvd);
rcvd             1881 drivers/net/ethernet/brocade/bna/bnad.c 	return rcvd;
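
The bnad.c hits follow the standard NAPI poll contract: process at most budget completions, return the full count to stay in polling mode when the budget was exhausted, otherwise report the amount with napi_complete_done() and re-arm the interrupt. A sketch of that contract is below; demo_rx_ctrl, demo_process_cq() and demo_enable_irq() are hypothetical driver pieces, not bnad's own names.

	#include <linux/netdevice.h>

	struct demo_rx_ctrl {
		struct napi_struct napi;
		/* per-ring driver state ... */
	};

	/* hypothetical helpers a real driver would provide */
	int demo_process_cq(struct demo_rx_ctrl *rx, int budget);
	void demo_enable_irq(struct demo_rx_ctrl *rx);

	static int demo_napi_poll(struct napi_struct *napi, int budget)
	{
		struct demo_rx_ctrl *rx = container_of(napi, struct demo_rx_ctrl, napi);
		int rcvd;

		/* the processing helper never exceeds budget, so rcvd <= budget */
		rcvd = demo_process_cq(rx, budget);
		if (rcvd >= budget)
			return rcvd;	/* budget used up: stay in polling mode */

		/* all caught up: leave polling mode and unmask the device IRQ */
		napi_complete_done(napi, rcvd);
		demo_enable_irq(rx);

		return rcvd;
	}
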
rcvd             1703 drivers/tty/n_tty.c 	int room, n, rcvd = 0, overflow;
rcvd             1748 drivers/tty/n_tty.c 		rcvd += n;
rcvd             1765 drivers/tty/n_tty.c 	return rcvd;
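
The n_tty.c hits are the line discipline's receive-buffer accounting: each chunk copied in adds to rcvd, and the total is returned so the driver knows how many characters were actually accepted and can throttle the rest. A toy userspace sketch of that accept-what-fits accounting with a plain linear buffer; demo_rbuf and its fields are invented for illustration.

	#include <stddef.h>
	#include <string.h>

	struct demo_rbuf {
		char data[4096];
		size_t used;
	};

	/* Copy in as much of cp[0..count) as fits; return how much was taken. */
	static size_t demo_receive_buf(struct demo_rbuf *b, const char *cp, size_t count)
	{
		size_t rcvd = 0;
		size_t room = sizeof(b->data) - b->used;
		size_t n = count < room ? count : room;

		if (n) {
			memcpy(b->data + b->used, cp, n);
			b->used += n;
			rcvd += n;
		}
		/* anything beyond rcvd stays with the caller until there is room */
		return rcvd;
	}
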
rcvd             8556 fs/nfs/nfs4proc.c 	struct nfs4_channel_attrs *rcvd = &res->fc_attrs;
rcvd             8558 fs/nfs/nfs4proc.c 	if (rcvd->max_resp_sz > sent->max_resp_sz)
rcvd             8566 fs/nfs/nfs4proc.c 	if (rcvd->max_ops < sent->max_ops)
rcvd             8568 fs/nfs/nfs4proc.c 	if (rcvd->max_reqs == 0)
rcvd             8570 fs/nfs/nfs4proc.c 	if (rcvd->max_reqs > NFS4_MAX_SLOT_TABLE)
rcvd             8571 fs/nfs/nfs4proc.c 		rcvd->max_reqs = NFS4_MAX_SLOT_TABLE;
rcvd             8579 fs/nfs/nfs4proc.c 	struct nfs4_channel_attrs *rcvd = &res->bc_attrs;
rcvd             8583 fs/nfs/nfs4proc.c 	if (rcvd->max_rqst_sz > sent->max_rqst_sz)
rcvd             8585 fs/nfs/nfs4proc.c 	if (rcvd->max_resp_sz < sent->max_resp_sz)
rcvd             8587 fs/nfs/nfs4proc.c 	if (rcvd->max_resp_sz_cached > sent->max_resp_sz_cached)
rcvd             8589 fs/nfs/nfs4proc.c 	if (rcvd->max_ops > sent->max_ops)
rcvd             8591 fs/nfs/nfs4proc.c 	if (rcvd->max_reqs > sent->max_reqs)
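
The nfs4proc.c hits come from CREATE_SESSION reply validation: the channel attributes the server returned (rcvd) are compared against what the client requested (sent), the session is rejected when the server tries to exceed the client's limits, and values that merely exceed a local table size are clamped (NFS4_MAX_SLOT_TABLE for the fore-channel slot count). A stripped-down sketch of that verify-and-clamp shape for the fore channel follows; the struct, the field subset and the -EINVAL returns are assumptions, not the exact kernel code.

	#include <errno.h>

	#define DEMO_MAX_SLOTS 1024		/* stand-in for NFS4_MAX_SLOT_TABLE */

	struct demo_channel_attrs {
		unsigned int max_resp_sz;
		unsigned int max_ops;
		unsigned int max_reqs;
	};

	static int demo_verify_fore_channel(struct demo_channel_attrs *rcvd,
					    const struct demo_channel_attrs *sent)
	{
		/* the server may not grant larger replies than we can accept */
		if (rcvd->max_resp_sz > sent->max_resp_sz)
			return -EINVAL;
		/* ...nor fewer compound ops per request than we asked for */
		if (rcvd->max_ops < sent->max_ops)
			return -EINVAL;
		/* zero slots would make the session unusable */
		if (rcvd->max_reqs == 0)
			return -EINVAL;
		/* more slots than our table holds is acceptable: clamp locally */
		if (rcvd->max_reqs > DEMO_MAX_SLOTS)
			rcvd->max_reqs = DEMO_MAX_SLOTS;

		return 0;
	}

The back-channel hits apply the mirror-image comparisons (for example, rcvd->max_resp_sz may not be smaller than requested), since on that channel the server is the one issuing requests.
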
rcvd              491 samples/bpf/xdpsock_user.c 	unsigned int rcvd;
rcvd              504 samples/bpf/xdpsock_user.c 	rcvd = xsk_ring_cons__peek(&umem->cq, ndescs, &idx_cq);
rcvd              505 samples/bpf/xdpsock_user.c 	if (rcvd > 0) {
rcvd              509 samples/bpf/xdpsock_user.c 		ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
rcvd              510 samples/bpf/xdpsock_user.c 		while (ret != rcvd) {
rcvd              515 samples/bpf/xdpsock_user.c 			ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
rcvd              518 samples/bpf/xdpsock_user.c 		for (i = 0; i < rcvd; i++)
rcvd              522 samples/bpf/xdpsock_user.c 		xsk_ring_prod__submit(&xsk->umem->fq, rcvd);
rcvd              523 samples/bpf/xdpsock_user.c 		xsk_ring_cons__release(&xsk->umem->cq, rcvd);
rcvd              524 samples/bpf/xdpsock_user.c 		xsk->outstanding_tx -= rcvd;
rcvd              525 samples/bpf/xdpsock_user.c 		xsk->tx_npkts += rcvd;
rcvd              531 samples/bpf/xdpsock_user.c 	unsigned int rcvd;
rcvd              540 samples/bpf/xdpsock_user.c 	rcvd = xsk_ring_cons__peek(&xsk->umem->cq, BATCH_SIZE, &idx);
rcvd              541 samples/bpf/xdpsock_user.c 	if (rcvd > 0) {
rcvd              542 samples/bpf/xdpsock_user.c 		xsk_ring_cons__release(&xsk->umem->cq, rcvd);
rcvd              543 samples/bpf/xdpsock_user.c 		xsk->outstanding_tx -= rcvd;
rcvd              544 samples/bpf/xdpsock_user.c 		xsk->tx_npkts += rcvd;
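
The xdpsock_user.c hits up to this point are the TX-completion side of the AF_XDP sample: descriptors the kernel has finished sending are peeked from the umem completion ring, in the forwarding case the freed frame addresses are recycled onto the fill ring (retrying the reservation until all rcvd slots fit), and outstanding_tx / tx_npkts are adjusted by the same count; the TX-only variant skips the refill and simply releases the completion ring. Below is a condensed sketch using the libbpf/libxdp xsk ring helpers; BATCH_SIZE, the demo_* structs and the bare retry loop are simplifications of the sample, not its exact code.

	#include <bpf/xsk.h>		/* <xdp/xsk.h> when using libxdp */

	#define BATCH_SIZE 64

	struct demo_umem {
		struct xsk_ring_prod fq;	/* fill queue */
		struct xsk_ring_cons cq;	/* completion queue */
	};

	struct demo_xsk {
		struct demo_umem *umem;
		unsigned long outstanding_tx;
		unsigned long tx_npkts;
	};

	static void demo_complete_tx(struct demo_xsk *xsk)
	{
		unsigned int rcvd, i;
		__u32 idx_cq = 0, idx_fq = 0;

		if (!xsk->outstanding_tx)
			return;

		rcvd = xsk_ring_cons__peek(&xsk->umem->cq, BATCH_SIZE, &idx_cq);
		if (!rcvd)
			return;

		/* put every completed frame back on the fill ring; retry until
		 * all rcvd slots could be reserved (a real program would handle
		 * errors or wake the kernel here; kept as a bare spin for brevity) */
		while (xsk_ring_prod__reserve(&xsk->umem->fq, rcvd, &idx_fq) != rcvd)
			;

		for (i = 0; i < rcvd; i++)
			*xsk_ring_prod__fill_addr(&xsk->umem->fq, idx_fq++) =
				*xsk_ring_cons__comp_addr(&xsk->umem->cq, idx_cq++);

		xsk_ring_prod__submit(&xsk->umem->fq, rcvd);
		xsk_ring_cons__release(&xsk->umem->cq, rcvd);
		xsk->outstanding_tx -= rcvd;
		xsk->tx_npkts += rcvd;
	}
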
rcvd              550 samples/bpf/xdpsock_user.c 	unsigned int rcvd, i;
rcvd              554 samples/bpf/xdpsock_user.c 	rcvd = xsk_ring_cons__peek(&xsk->rx, BATCH_SIZE, &idx_rx);
rcvd              555 samples/bpf/xdpsock_user.c 	if (!rcvd) {
rcvd              561 samples/bpf/xdpsock_user.c 	ret = xsk_ring_prod__reserve(&xsk->umem->fq, rcvd, &idx_fq);
rcvd              562 samples/bpf/xdpsock_user.c 	while (ret != rcvd) {
rcvd              567 samples/bpf/xdpsock_user.c 		ret = xsk_ring_prod__reserve(&xsk->umem->fq, rcvd, &idx_fq);
rcvd              570 samples/bpf/xdpsock_user.c 	for (i = 0; i < rcvd; i++) {
rcvd              582 samples/bpf/xdpsock_user.c 	xsk_ring_prod__submit(&xsk->umem->fq, rcvd);
rcvd              583 samples/bpf/xdpsock_user.c 	xsk_ring_cons__release(&xsk->rx, rcvd);
rcvd              584 samples/bpf/xdpsock_user.c 	xsk->rx_npkts += rcvd;
rcvd              663 samples/bpf/xdpsock_user.c 	unsigned int rcvd, i;
rcvd              669 samples/bpf/xdpsock_user.c 	rcvd = xsk_ring_cons__peek(&xsk->rx, BATCH_SIZE, &idx_rx);
rcvd              670 samples/bpf/xdpsock_user.c 	if (!rcvd) {
rcvd              676 samples/bpf/xdpsock_user.c 	ret = xsk_ring_prod__reserve(&xsk->tx, rcvd, &idx_tx);
rcvd              677 samples/bpf/xdpsock_user.c 	while (ret != rcvd) {
rcvd              682 samples/bpf/xdpsock_user.c 		ret = xsk_ring_prod__reserve(&xsk->tx, rcvd, &idx_tx);
rcvd              685 samples/bpf/xdpsock_user.c 	for (i = 0; i < rcvd; i++) {
rcvd              700 samples/bpf/xdpsock_user.c 	xsk_ring_prod__submit(&xsk->tx, rcvd);
rcvd              701 samples/bpf/xdpsock_user.c 	xsk_ring_cons__release(&xsk->rx, rcvd);
rcvd              703 samples/bpf/xdpsock_user.c 	xsk->rx_npkts += rcvd;
rcvd              704 samples/bpf/xdpsock_user.c 	xsk->outstanding_tx += rcvd;
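
The remaining xdpsock_user.c hits are the two RX paths, and both run the same consume/produce handshake: peek up to BATCH_SIZE descriptors from the RX ring, reserve exactly rcvd slots on the outbound ring (the fill ring when dropping, the TX ring when forwarding), copy the frame addresses across, then submit and release the identical count so the rings stay balanced. A sketch of the forwarding flavour follows, reusing the <bpf/xsk.h> include, BATCH_SIZE and the bare retry-loop simplification from the previous block; the demo_fwd struct is again an invented stand-in.

	struct demo_fwd {
		struct xsk_ring_cons rx;
		struct xsk_ring_prod tx;
		unsigned long rx_npkts;
		unsigned long outstanding_tx;
	};

	static void demo_l2fwd(struct demo_fwd *xsk)
	{
		unsigned int rcvd, i;
		__u32 idx_rx = 0, idx_tx = 0;

		rcvd = xsk_ring_cons__peek(&xsk->rx, BATCH_SIZE, &idx_rx);
		if (!rcvd)
			return;		/* nothing arrived this pass */

		/* one TX slot per received frame, retried until all fit */
		while (xsk_ring_prod__reserve(&xsk->tx, rcvd, &idx_tx) != rcvd)
			;

		for (i = 0; i < rcvd; i++) {
			const struct xdp_desc *rx_desc =
				xsk_ring_cons__rx_desc(&xsk->rx, idx_rx++);
			struct xdp_desc *tx_desc =
				xsk_ring_prod__tx_desc(&xsk->tx, idx_tx++);

			/* hand the frame straight back out (the sample rewrites
			 * the Ethernet MAC addresses before doing this) */
			tx_desc->addr = rx_desc->addr;
			tx_desc->len = rx_desc->len;
		}

		xsk_ring_prod__submit(&xsk->tx, rcvd);
		xsk_ring_cons__release(&xsk->rx, rcvd);

		xsk->rx_npkts += rcvd;
		xsk->outstanding_tx += rcvd;
	}

The drop variant seen in the earlier hits is identical except that the addresses go back onto the fill ring instead of the TX ring, and only rx_npkts is advanced.
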