rings 320 drivers/block/xen-blkback/common.h struct xen_blkif_ring *rings;
rings 84 drivers/block/xen-blkback/xenbus.c if (!blkif->rings || !blkif->rings[0].irq || !blkif->vbd.bdev)
rings 110 drivers/block/xen-blkback/xenbus.c ring = &blkif->rings[i];
rings 124 drivers/block/xen-blkback/xenbus.c ring = &blkif->rings[i];
rings 134 drivers/block/xen-blkback/xenbus.c blkif->rings = kcalloc(blkif->nr_rings, sizeof(struct xen_blkif_ring),
rings 136 drivers/block/xen-blkback/xenbus.c if (!blkif->rings)
rings 140 drivers/block/xen-blkback/xenbus.c struct xen_blkif_ring *ring = &blkif->rings[r];
rings 252 drivers/block/xen-blkback/xenbus.c struct xen_blkif_ring *ring = &blkif->rings[r];
rings 316 drivers/block/xen-blkback/xenbus.c kfree(blkif->rings);
rings 317 drivers/block/xen-blkback/xenbus.c blkif->rings = NULL;
rings 361 drivers/block/xen-blkback/xenbus.c if (!blkif->rings) \
rings 365 drivers/block/xen-blkback/xenbus.c struct xen_blkif_ring *ring = &blkif->rings[i]; \
rings 1096 drivers/block/xen-blkback/xenbus.c return read_per_ring_refs(&blkif->rings[0], dev->otherend);
rings 1108 drivers/block/xen-blkback/xenbus.c err = read_per_ring_refs(&blkif->rings[i], xspath);
rings 51 drivers/crypto/inside-secure/safexcel.c for (i = 0; i < priv->config.rings; i++) {
rings 475 drivers/crypto/inside-secure/safexcel.c for (i = 0; i < priv->config.rings; i++) {
rings 523 drivers/crypto/inside-secure/safexcel.c for (i = 0; i < priv->config.rings; i++) {
rings 565 drivers/crypto/inside-secure/safexcel.c priv->config.pes, priv->config.rings);
rings 627 drivers/crypto/inside-secure/safexcel.c GENMASK(priv->config.rings - 1, 0),
rings 678 drivers/crypto/inside-secure/safexcel.c for (i = 0; i < priv->config.rings; i++) {
rings 704 drivers/crypto/inside-secure/safexcel.c for (i = 0; i < priv->config.rings; i++) {
rings 728 drivers/crypto/inside-secure/safexcel.c writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
rings 732 drivers/crypto/inside-secure/safexcel.c writel(EIP197_DxE_THR_CTRL_EN | GENMASK(priv->config.rings - 1, 0),
rings 1257 drivers/crypto/inside-secure/safexcel.c priv->config.rings = min_t(u32, val & GENMASK(3, 0), max_rings);
rings 1439 drivers/crypto/inside-secure/safexcel.c priv->config.rings + 1,
rings 1440 drivers/crypto/inside-secure/safexcel.c priv->config.rings + 1,
rings 1449 drivers/crypto/inside-secure/safexcel.c priv->ring = devm_kcalloc(dev, priv->config.rings,
rings 1455 drivers/crypto/inside-secure/safexcel.c for (i = 0; i < priv->config.rings; i++) {
rings 1535 drivers/crypto/inside-secure/safexcel.c for (i = 0; i < priv->config.rings; i++) {
rings 1630 drivers/crypto/inside-secure/safexcel.c for (i = 0; i < priv->config.rings; i++)
rings 1769 drivers/crypto/inside-secure/safexcel.c for (i = 0; i < priv->config.rings; i++)
rings 600 drivers/crypto/inside-secure/safexcel.h u32 rings;
rings 42 drivers/crypto/inside-secure/safexcel_ring.c return (atomic_inc_return(&priv->ring_used) % priv->config.rings);
rings 277 drivers/crypto/qat/qat_common/adf_transport.c ring = &bank->rings[ring_num];
rings 340 drivers/crypto/qat/qat_common/adf_transport.c adf_handle_response(&bank->rings[i]);
rings 413 drivers/crypto/qat/qat_common/adf_transport.c ring = &bank->rings[i];
rings 427 drivers/crypto/qat/qat_common/adf_transport.c tx_ring = &bank->rings[i - hw_data->tx_rx_gap];
rings 442 drivers/crypto/qat/qat_common/adf_transport.c ring = &bank->rings[i];
rings 516 drivers/crypto/qat/qat_common/adf_transport.c struct adf_etr_ring_data *ring = &bank->rings[i];
rings 208 drivers/crypto/qat/qat_common/adf_transport_debug.c struct adf_etr_ring_data *ring = &bank->rings[ring_id];
rings 76 drivers/crypto/qat/qat_common/adf_transport_internal.h struct adf_etr_ring_data rings[ADF_ETR_MAX_RINGS_PER_BANK];
rings 906 drivers/gpu/drm/amd/amdgpu/amdgpu.h struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
rings 124 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c struct amdgpu_ring *rings[AMDGPU_MAX_RINGS];
rings 131 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c rings[0] = &adev->gfx.gfx_ring[0];
rings 136 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c rings[j] = &adev->gfx.compute_ring[j];
rings 141 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c rings[j] = &adev->sdma.instance[j].ring;
rings 145 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c rings[0] = &adev->uvd.inst[0].ring;
rings 149 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c rings[0] = &adev->vce.ring[0];
rings 153 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c rings[0] = &adev->uvd.inst[0].ring_enc[0];
rings 160 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c rings[num_rings++] = &adev->vcn.inst[j].ring_dec;
rings 168 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c rings[num_rings++] = &adev->vcn.inst[j].ring_enc[k];
rings 175 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c rings[num_rings++] = &adev->vcn.inst[j].ring_jpeg;
rings 181 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c if (!rings[j]->adev)
rings 184 drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c rqs[num_rqs++] = &rings[j]->sched.sched_rq[priority];
rings 867 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c struct amdgpu_ring *ring = adev->rings[i];
rings 883 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c struct amdgpu_ring *ring = adev->rings[i];
rings 1030 drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c ring = adev->rings[val];
rings 3560 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c struct amdgpu_ring *ring = adev->rings[i];
rings 3845 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c struct amdgpu_ring *ring = tmp_adev->rings[i];
rings 3918 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c struct amdgpu_ring *ring = tmp_adev->rings[i];
rings 533 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c struct amdgpu_ring *ring = adev->rings[i];
rings 567 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c struct amdgpu_ring *ring = adev->rings[i];
rings 601 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c struct amdgpu_ring *ring = adev->rings[i];
rings 705 drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c struct amdgpu_ring *ring = adev->rings[i];
rings 358 drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c struct amdgpu_ring *ring = adev->rings[i];
rings 76 drivers/gpu/drm/amd/amdgpu/amdgpu_job.c (*job)->base.sched = &adev->rings[0]->sched;
rings 3003 drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c struct amdgpu_ring *ring = adev->rings[i];
rings 261 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c adev->rings[ring->idx] = ring;
rings 357 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c if (!(ring->adev) || !(ring->adev->rings[ring->idx]))
rings 376 drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c ring->adev->rings[ring->idx] = NULL;
rings 49 drivers/gpu/drm/amd/amdgpu/amdgpu_test.c if (adev->rings[i])
rings 50 drivers/gpu/drm/amd/amdgpu/amdgpu_test.c n -= adev->rings[i]->ring_size;
rings 966 drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c ring = adev->rings[i];
rings 558 drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c struct amdgpu_ring *ring = adev->rings[i];
rings 818 drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c ring = adev->rings[i];
rings 418 drivers/gpu/drm/i915/gvt/cmd_parser.c u16 rings;
rings 653 drivers/gpu/drm/i915/gvt/cmd_parser.c if (opcode == e->info->opcode && e->info->rings & BIT(ring_id))
rings 3031 drivers/gpu/drm/i915/gvt/cmd_parser.c unsigned int opcode, unsigned long rings)
rings 3036 drivers/gpu/drm/i915/gvt/cmd_parser.c for_each_set_bit(ring, &rings, I915_NUM_ENGINES) {
rings 3063 drivers/gpu/drm/i915/gvt/cmd_parser.c e->info->opcode, e->info->rings);
rings 3077 drivers/gpu/drm/i915/gvt/cmd_parser.c e->info->devices, e->info->rings);
rings 295 drivers/mailbox/bcm-flexrm-mailbox.c struct flexrm_ring *rings;
rings 941 drivers/mailbox/bcm-flexrm-mailbox.c ring = &mbox->rings[i];
rings 969 drivers/mailbox/bcm-flexrm-mailbox.c ring = &mbox->rings[i];
rings 1487 drivers/mailbox/bcm-flexrm-mailbox.c struct flexrm_ring *ring = &mbox->rings[desc->platform.msi_index];
rings 1548 drivers/mailbox/bcm-flexrm-mailbox.c mbox->rings = ring;
rings 1553 drivers/mailbox/bcm-flexrm-mailbox.c ring = &mbox->rings[index];
rings 1612 drivers/mailbox/bcm-flexrm-mailbox.c ring = &mbox->rings[desc->platform.msi_index];
rings 1647 drivers/mailbox/bcm-flexrm-mailbox.c mbox->controller.chans[index].con_priv = &mbox->rings[index];
rings 1780 drivers/net/ethernet/intel/ice/ice_lib.c ice_vsi_cfg_txqs(struct ice_vsi *vsi, struct ice_ring **rings, int offset)
rings 1800 drivers/net/ethernet/intel/ice/ice_lib.c err = ice_vsi_cfg_txq(vsi, rings[q_idx], i + offset,
rings 2248 drivers/net/ethernet/intel/ice/ice_lib.c u16 rel_vmvf_num, struct ice_ring **rings)
rings 2265 drivers/net/ethernet/intel/ice/ice_lib.c if (!rings || !rings[q_idx])
rings 2268 drivers/net/ethernet/intel/ice/ice_lib.c ice_fill_txq_meta(vsi, rings[q_idx], &txq_meta);
rings 2271 drivers/net/ethernet/intel/ice/ice_lib.c rings[q_idx], &txq_meta);
rings 103 drivers/net/ethernet/mellanox/mlx5/core/en/hv_vhca_stats.c block->rings = priv->max_nch;
rings 28 drivers/net/ethernet/mellanox/mlx5/core/lib/hv_vhca.h u16 rings;
rings 698 drivers/net/ethernet/neterion/s2io.c struct ring_info *ring = &mac_control->rings[i];
rings 718 drivers/net/ethernet/neterion/s2io.c struct ring_info *ring = &mac_control->rings[i];
rings 790 drivers/net/ethernet/neterion/s2io.c struct ring_info *ring = &mac_control->rings[i];
rings 934 drivers/net/ethernet/neterion/s2io.c struct ring_info *ring = &mac_control->rings[i];
rings 955 drivers/net/ethernet/neterion/s2io.c struct ring_info *ring = &mac_control->rings[i];
rings 2213 drivers/net/ethernet/neterion/s2io.c struct ring_info *ring = &mac_control->rings[i];
rings 2664 drivers/net/ethernet/neterion/s2io.c rxdp = mac_control->rings[ring_no].
rings 2696 drivers/net/ethernet/neterion/s2io.c mac_control->rings[ring_no].rx_bufs_left -= 1;
rings 2717 drivers/net/ethernet/neterion/s2io.c struct ring_info *ring = &mac_control->rings[i];
rings 2797 drivers/net/ethernet/neterion/s2io.c struct ring_info *ring = &mac_control->rings[i];
rings 2851 drivers/net/ethernet/neterion/s2io.c struct ring_info *ring = &mac_control->rings[i];
rings 2857 drivers/net/ethernet/neterion/s2io.c struct ring_info *ring = &mac_control->rings[i];
rings 3765 drivers/net/ethernet/neterion/s2io.c nic->s2io_entries[j+1].arg = &nic->mac_control.rings[j];
rings 4706 drivers/net/ethernet/neterion/s2io.c struct ring_info *ring = &mac_control->rings[i];
rings 4731 drivers/net/ethernet/neterion/s2io.c struct ring_info *ring = &mac_control->rings[i];
rings 6879 drivers/net/ethernet/neterion/s2io.c struct ring_info *ring = &mac_control->rings[i];
rings 7033 drivers/net/ethernet/neterion/s2io.c napi_disable(&sp->mac_control.rings[off].napi);
rings 7117 drivers/net/ethernet/neterion/s2io.c struct ring_info *ring = &mac_control->rings[i];
rings 7137 drivers/net/ethernet/neterion/s2io.c napi_enable(&sp->mac_control.rings[i].napi);
rings 7437 drivers/net/ethernet/neterion/s2io.c sp->mac_control.rings[ring_no].rx_bufs_left -= 1;
rings 7808 drivers/net/ethernet/neterion/s2io.c struct ring_info *ring = &mac_control->rings[i];
rings 7914 drivers/net/ethernet/neterion/s2io.c struct ring_info *ring = &mac_control->rings[i];
rings 807 drivers/net/ethernet/neterion/s2io.h struct ring_info rings[MAX_RX_RINGS];
rings 1490 drivers/net/ethernet/via/via-velocity.c vptr->tx.rings[i] = pool;
rings 1905 drivers/net/ethernet/via/via-velocity.c td = &(vptr->tx.rings[qnum][idx]);
rings 2541 drivers/net/ethernet/via/via-velocity.c td_ptr = &(vptr->tx.rings[qnum][index]);
rings 2603 drivers/net/ethernet/via/via-velocity.c td_ptr = &(vptr->tx.rings[qnum][prev]);
rings 1448 drivers/net/ethernet/via/via-velocity.h struct tx_desc *rings[TX_QUEUE_NO];
rings 1295 drivers/net/tap.c struct ptr_ring **rings;
rings 1299 drivers/net/tap.c rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL);
rings 1300 drivers/net/tap.c if (!rings)
rings 1304 drivers/net/tap.c rings[i++] = &q->ring;
rings 1306 drivers/net/tap.c ret = ptr_ring_resize_multiple(rings, n,
rings 1310 drivers/net/tap.c kfree(rings);
rings 3619 drivers/net/tun.c struct ptr_ring **rings;
rings 3623 drivers/net/tun.c rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL);
rings 3624 drivers/net/tun.c if (!rings)
rings 3629 drivers/net/tun.c rings[i] = &tfile->tx_ring;
rings 3632 drivers/net/tun.c rings[i++] = &tfile->tx_ring;
rings 3634 drivers/net/tun.c ret = ptr_ring_resize_multiple(rings, n,
rings 3638 drivers/net/tun.c kfree(rings);
rings 282 drivers/net/wireless/ath/ath10k/htt.h struct htt_rx_ring_setup_ring32 rings[0];
rings 287 drivers/net/wireless/ath/ath10k/htt.h struct htt_rx_ring_setup_ring64 rings[0];
rings 817 drivers/net/wireless/ath/ath10k/htt_tx.c ring = &cmd->rx_setup_32.rings[0];
rings 888 drivers/net/wireless/ath/ath10k/htt_tx.c ring = &cmd->rx_setup_64.rings[0];
rings 957 drivers/net/wireless/ath/ath10k/htt_tx.c ring = &cmd->rx_setup_32.rings[0];
rings 142 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c if (flow->rings[i] == NULL)
rings 160 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c flow->rings[i] = ring;
rings 172 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c ring = flow->rings[flowid];
rings 192 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c ring = flow->rings[flowid];
rings 201 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c if ((flow->rings[i]) && (i != flowid)) {
rings 202 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c ring = flow->rings[i];
rings 212 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c flow->rings[flowid]->blocked = blocked;
rings 236 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c ring = flow->rings[flowid];
rings 247 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c flow->rings[flowid] = NULL;
rings 264 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c ring = flow->rings[flowid];
rings 290 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c ring = flow->rings[flowid];
rings 311 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c ring = flow->rings[flowid];
rings 321 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c ring = flow->rings[flowid];
rings 336 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c ring = flow->rings[flowid];
rings 351 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c ring = flow->rings[flowid];
rings 372 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c flow->rings = kcalloc(nrofrings, sizeof(*flow->rings),
rings 374 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c if (!flow->rings) {
rings 393 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c if (flow->rings[flowid])
rings 403 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c kfree(flow->rings);
rings 420 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c if (flow->rings[flowid]->status != RING_OPEN)
rings 422 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c flow->rings[flowid]->status = RING_CLOSING;
rings 461 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c if (flow->rings[flowid]->status == RING_OPEN) {
rings 462 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.c flow->rings[flowid]->status = RING_CLOSING;
rings 41 drivers/net/wireless/broadcom/brcm80211/brcmfmac/flowring.h struct brcmf_flowring_ring **rings;
rings 1472 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c if (!msgbuf->flow->rings[i])
rings 1474 drivers/net/wireless/broadcom/brcm80211/brcmfmac/msgbuf.c ring = msgbuf->flow->rings[i];
rings 1125 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c struct brcmf_pcie_ringbuf *rings;
rings 1249 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c rings = kcalloc(max_flowrings, sizeof(*ring), GFP_KERNEL);
rings 1250 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c if (!rings)
rings 1256 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c ring = &rings[i];
rings 1271 drivers/net/wireless/broadcom/brcm80211/brcmfmac/pcie.c devinfo->shared.flowrings = rings;
rings 2975 drivers/usb/host/u132-hcd.c int rings = MAX_U132_RINGS;
rings 2982 drivers/usb/host/u132-hcd.c while (rings-- > 0) {
rings 2983 drivers/usb/host/u132-hcd.c struct u132_ring *ring = &u132->ring[rings];
rings 3005 drivers/usb/host/u132-hcd.c int rings = MAX_U132_RINGS;
rings 3016 drivers/usb/host/u132-hcd.c while (rings-- > 0) {
rings 3017 drivers/usb/host/u132-hcd.c struct u132_ring *ring = &u132->ring[rings];
rings 3019 drivers/usb/host/u132-hcd.c ring->number = rings + 1;
rings 227 fs/io_uring.c struct io_rings *rings;
rings 479 fs/io_uring.c struct io_rings *rings = ctx->rings;
rings 481 fs/io_uring.c if (ctx->cached_cq_tail != READ_ONCE(rings->cq.tail)) {
rings 483 fs/io_uring.c smp_store_release(&rings->cq.tail, ctx->cached_cq_tail);
rings 554 fs/io_uring.c struct io_rings *rings = ctx->rings;
rings 563 fs/io_uring.c if (tail - READ_ONCE(rings->cq.head) == rings->cq_ring_entries)
rings 567 fs/io_uring.c return &rings->cqes[tail & ctx->cq_mask];
rings 586 fs/io_uring.c WRITE_ONCE(ctx->rings->cq_overflow,
rings 747 fs/io_uring.c static unsigned io_cqring_events(struct io_rings *rings)
rings 751 fs/io_uring.c return READ_ONCE(rings->cq.tail) - READ_ONCE(rings->cq.head);
rings 756 fs/io_uring.c struct io_rings *rings = ctx->rings;
rings 759 fs/io_uring.c return smp_load_acquire(&rings->sq.tail) - ctx->cached_sq_head;
rings 910 fs/io_uring.c if (io_cqring_events(ctx->rings))
rings 2625 fs/io_uring.c struct io_rings *rings = ctx->rings;
rings 2627 fs/io_uring.c if (ctx->cached_sq_head != READ_ONCE(rings->sq.head)) {
rings 2633 fs/io_uring.c smp_store_release(&rings->sq.head, ctx->cached_sq_head);
rings 2647 fs/io_uring.c struct io_rings *rings = ctx->rings;
rings 2661 fs/io_uring.c if (head == smp_load_acquire(&rings->sq.tail))
rings 2676 fs/io_uring.c WRITE_ONCE(rings->sq_dropped, ctx->cached_sq_dropped);
rings 2823 fs/io_uring.c ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP;
rings 2838 fs/io_uring.c ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
rings 2843 fs/io_uring.c ctx->rings->sq_flags &= ~IORING_SQ_NEED_WAKEUP;
rings 2950 fs/io_uring.c return io_cqring_events(ctx->rings) >= iowq->to_wait ||
rings 2982 fs/io_uring.c struct io_rings *rings = ctx->rings;
rings 2985 fs/io_uring.c if (io_cqring_events(rings) >= min_events)
rings 3020 fs/io_uring.c return READ_ONCE(rings->cq.head) == READ_ONCE(rings->cq.tail) ? ret : 0;
rings 3358 fs/io_uring.c struct io_rings *rings;
rings 3361 fs/io_uring.c off = struct_size(rings, cqes, cq_entries);
rings 3638 fs/io_uring.c io_mem_free(ctx->rings);
rings 3662 fs/io_uring.c if (READ_ONCE(ctx->rings->sq.tail) - ctx->cached_sq_head !=
rings 3663 fs/io_uring.c ctx->rings->sq_ring_entries)
rings 3665 fs/io_uring.c if (READ_ONCE(ctx->rings->cq.head) != ctx->cached_cq_tail)
rings 3712 fs/io_uring.c ptr = ctx->rings;
rings 3803 fs/io_uring.c struct io_rings *rings;
rings 3810 fs/io_uring.c rings = io_mem_alloc(size);
rings 3811 fs/io_uring.c if (!rings)
rings 3814 fs/io_uring.c ctx->rings = rings;
rings 3815 fs/io_uring.c ctx->sq_array = (u32 *)((char *)rings + sq_array_offset);
rings 3816 fs/io_uring.c rings->sq_ring_mask = p->sq_entries - 1;
rings 3817 fs/io_uring.c rings->cq_ring_mask = p->cq_entries - 1;
rings 3818 fs/io_uring.c rings->sq_ring_entries = p->sq_entries;
rings 3819 fs/io_uring.c rings->cq_ring_entries = p->cq_entries;
rings 3820 fs/io_uring.c ctx->sq_mask = rings->sq_ring_mask;
rings 3821 fs/io_uring.c ctx->cq_mask = rings->cq_ring_mask;
rings 3822 fs/io_uring.c ctx->sq_entries = rings->sq_ring_entries;
rings 3823 fs/io_uring.c ctx->cq_entries = rings->cq_ring_entries;
rings 3827 fs/io_uring.c io_mem_free(ctx->rings);
rings 3828 fs/io_uring.c ctx->rings = NULL;
rings 3834 fs/io_uring.c io_mem_free(ctx->rings);
rings 3835 fs/io_uring.c ctx->rings = NULL;
rings 3950 fs/io_uring.c p->sq_off.array = (char *)ctx->sq_array - (char *)ctx->rings;
rings 618 include/linux/ptr_ring.h static inline int ptr_ring_resize_multiple(struct ptr_ring **rings,
rings 638 include/linux/ptr_ring.h spin_lock_irqsave(&(rings[i])->consumer_lock, flags);
rings 639 include/linux/ptr_ring.h spin_lock(&(rings[i])->producer_lock);
rings 640 include/linux/ptr_ring.h queues[i] = __ptr_ring_swap_queue(rings[i], queues[i],
rings 642 include/linux/ptr_ring.h spin_unlock(&(rings[i])->producer_lock);
rings 643 include/linux/ptr_ring.h spin_unlock_irqrestore(&(rings[i])->consumer_lock, flags);
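The drivers/net/tun.c and drivers/net/tap.c entries above, together with the include/linux/ptr_ring.h lines just listed, all belong to one batch-resize pattern: collect each queue's ptr_ring into a temporary pointer array, resize them all with a single ptr_ring_resize_multiple() call (which, per lines 638-639, takes each consumer lock before the matching producer lock), then free the array. A minimal sketch of such a caller; struct my_queue, my_queues_resize() and my_ptr_free() are hypothetical names for illustration, not kernel APIs:

#include <linux/ptr_ring.h>
#include <linux/slab.h>

/* Hypothetical per-queue wrapper; stands in for tun/tap's queue struct. */
struct my_queue {
	struct ptr_ring ring;
};

/* Destructor for entries still queued when a ring shrinks. */
static void my_ptr_free(void *ptr)
{
	kfree(ptr);
}

/*
 * Gather every queue's ptr_ring into a temporary array and resize them
 * all at once, mirroring the kmalloc_array()/ptr_ring_resize_multiple()/
 * kfree() bracketing visible at tun.c:3623-3638 and tap.c:1299-1310.
 */
static int my_queues_resize(struct my_queue *queues, unsigned int n,
			    unsigned int new_size)
{
	struct ptr_ring **rings;
	unsigned int i;
	int ret;

	rings = kmalloc_array(n, sizeof(*rings), GFP_KERNEL);
	if (!rings)
		return -ENOMEM;

	for (i = 0; i < n; i++)
		rings[i] = &queues[i].ring;

	ret = ptr_ring_resize_multiple(rings, n, new_size,
				       GFP_KERNEL, my_ptr_free);
	kfree(rings);
	return ret;
}

The include/linux/skb_array.h entries that follow show the same call reused: skb_array_resize_multiple() simply casts its skb_array array to struct ptr_ring ** and forwards to ptr_ring_resize_multiple().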
rings 201 include/linux/skb_array.h static inline int skb_array_resize_multiple(struct skb_array **rings,
rings 206 include/linux/skb_array.h return ptr_ring_resize_multiple((struct ptr_ring **)rings,
rings 81 net/9p/trans_xen.c struct xen_9pfs_dataring *rings;
rings 158 net/9p/trans_xen.c ring = &priv->rings[num];
rings 293 net/9p/trans_xen.c if (!priv->rings[i].intf)
rings 295 net/9p/trans_xen.c if (priv->rings[i].irq > 0)
rings 296 net/9p/trans_xen.c unbind_from_irqhandler(priv->rings[i].irq, priv->dev);
rings 297 net/9p/trans_xen.c if (priv->rings[i].data.in) {
rings 301 net/9p/trans_xen.c ref = priv->rings[i].intf->ref[j];
rings 304 net/9p/trans_xen.c free_pages((unsigned long)priv->rings[i].data.in,
rings 308 net/9p/trans_xen.c gnttab_end_foreign_access(priv->rings[i].ref, 0, 0);
rings 309 net/9p/trans_xen.c free_page((unsigned long)priv->rings[i].intf);
rings 311 net/9p/trans_xen.c kfree(priv->rings);
rings 416 net/9p/trans_xen.c priv->rings = kcalloc(priv->num_rings, sizeof(*priv->rings),
rings 418 net/9p/trans_xen.c if (!priv->rings) {
rings 424 net/9p/trans_xen.c priv->rings[i].priv = priv;
rings 425 net/9p/trans_xen.c ret = xen_9pfs_front_alloc_dataring(dev, &priv->rings[i]);
rings 449 net/9p/trans_xen.c priv->rings[i].ref);
rings 455 net/9p/trans_xen.c priv->rings[i].evtchn);
rings 221 tools/testing/selftests/net/psock_fanout.c static int sock_fanout_read(int fds[], char *rings[], const int expect[])
rings 225 tools/testing/selftests/net/psock_fanout.c ret[0] = sock_fanout_read_ring(fds[0], rings[0]);
rings 226 tools/testing/selftests/net/psock_fanout.c ret[1] = sock_fanout_read_ring(fds[1], rings[1]);
rings 348 tools/testing/selftests/net/psock_fanout.c char *rings[2];
rings 366 tools/testing/selftests/net/psock_fanout.c rings[0] = sock_fanout_open_ring(fds[0]);
rings 367 tools/testing/selftests/net/psock_fanout.c rings[1] = sock_fanout_open_ring(fds[1]);
rings 370 tools/testing/selftests/net/psock_fanout.c sock_fanout_read(fds, rings, expect0);
rings 375 tools/testing/selftests/net/psock_fanout.c ret = sock_fanout_read(fds, rings, expect1);
rings 380 tools/testing/selftests/net/psock_fanout.c ret |= sock_fanout_read(fds, rings, expect2);
rings 382 tools/testing/selftests/net/psock_fanout.c if (munmap(rings[1], RING_NUM_FRAMES * getpagesize()) ||
rings 383 tools/testing/selftests/net/psock_fanout.c munmap(rings[0], RING_NUM_FRAMES * getpagesize())) {
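The fs/io_uring.c entries listed earlier all revolve around one head/tail protocol: the ring masks are entries - 1 (so the entry counts must be powers of two), occupancy is tail - head in wrapping unsigned arithmetic (io_cqring_events() at line 751), and publication pairs smp_store_release() on the writer's index (line 483) with smp_load_acquire() or READ_ONCE() on the reader's side (lines 759, 2661). A condensed sketch of the CQ side under those assumptions; the struct layout and helper names below are simplified stand-ins, not the real io_uring ABI:

#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/barrier.h>

/* Simplified stand-in for the real io_rings/io_uring_cqe layout. */
struct my_cqe {
	u64 user_data;
	s32 res;
	u32 flags;
};

struct my_cq_ring {
	u32 head;		/* advanced by the consumer */
	u32 tail;		/* advanced by the producer */
	u32 ring_mask;		/* ring_entries - 1 */
	u32 ring_entries;	/* power of two */
	struct my_cqe cqes[];
};

/*
 * Producer side, as in the fs/io_uring.c lines 563/567 above: the ring
 * is full when tail has run a whole ring ahead of head, and the slot
 * index is simply "tail & mask" thanks to wrapping u32 arithmetic.
 */
static struct my_cqe *my_cq_get(struct my_cq_ring *r, u32 cached_tail)
{
	if (cached_tail - READ_ONCE(r->head) == r->ring_entries)
		return NULL;	/* full; the real code bumps cq_overflow */
	return &r->cqes[cached_tail & r->ring_mask];
}

/*
 * Publish: the release store orders the CQE writes before the tail
 * update, so a consumer that acquire-loads the tail sees complete
 * entries (the smp_store_release() at fs/io_uring.c line 483 above).
 */
static void my_cq_publish(struct my_cq_ring *r, u32 new_tail)
{
	smp_store_release(&r->tail, new_tail);
}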