ca                 25 arch/mips/include/asm/mach-rc32434/dma.h 	u32 ca;				/* Current Address. */
ca                146 arch/mips/include/asm/octeon/cvmx-npei-defs.h 		uint32_t ca:1;
ca                152 arch/mips/include/asm/octeon/cvmx-npei-defs.h 		uint32_t ca:1;
ca                123 arch/mips/include/asm/octeon/cvmx-pci-defs.h 		uint32_t ca:1;
ca                129 arch/mips/include/asm/octeon/cvmx-pci-defs.h 		uint32_t ca:1;
ca                 60 arch/mips/include/asm/octeon/cvmx-pemx-defs.h 		uint64_t ca:1;
ca                 66 arch/mips/include/asm/octeon/cvmx-pemx-defs.h 		uint64_t ca:1;
ca                641 arch/mips/pci/pci-octeon.c 			bar1_index.s.ca = 1;
ca                677 arch/mips/pci/pci-octeon.c 			bar1_index.s.ca = 1;
ca                927 arch/mips/pci/pcie-octeon.c 	bar1_index.s.ca = 1;	   /* Not Cached */
ca               1410 arch/mips/pci/pcie-octeon.c 	bar1_index.s.ca = 1;	   /* Not Cached */
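
The octeon hits above are all the same field: ca is a cache-attribute bit in the BAR1 index registers. The PCI/PCIe bring-up code sets it to 1, and the in-tree comment reads "Not Cached", i.e. inbound BAR1 accesses bypass the L2 cache. A minimal sketch of the union-of-bitfields idiom those cvmx-*-defs.h headers use; this field layout is illustrative only, not the real register map, which also carries big- and little-endian bitfield variants:

	union bar1_index {
		uint32_t u32;
		struct {
			uint32_t reserved:14;
			uint32_t addr_idx:14;	/* physical-address bits of the window */
			uint32_t ca:1;		/* 1 = do not allocate in L2 cache */
			uint32_t end_swp:2;	/* endian-swap mode */
			uint32_t addr_v:1;	/* entry valid */
		} s;
	};

	union bar1_index bar1_index = { .u32 = 0 };

	bar1_index.s.ca = 1;		/* Not Cached */
	bar1_index.s.addr_v = 1;
	/* ... the driver then writes bar1_index.u32 to the hardware register ... */
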
ca                256 arch/s390/kvm/gaccess.c 	unsigned long ca     : 1; /* Controlled-ASN Bit */
ca               1039 arch/sparc/include/asm/hypervisor.h unsigned long sun4v_ccb_info(unsigned long ca,
ca               1073 arch/sparc/include/asm/hypervisor.h unsigned long sun4v_ccb_kill(unsigned long ca,
ca                213 drivers/base/cpu.c 	struct cpu_attr *ca = container_of(attr, struct cpu_attr, attr);
ca                215 drivers/base/cpu.c 	return cpumap_print_to_pagebuf(true, buf, ca->map);
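
In drivers/base/cpu.c, ca is a struct cpu_attr recovered from its embedded device attribute with container_of(). A minimal self-contained sketch of that pattern; the simplified container_of and both struct bodies here are placeholders, not the real definitions:

	#include <stddef.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct device_attribute { const char *name; };

	struct cpu_attr {
		struct device_attribute attr;	/* embedded member */
		const unsigned long *map;	/* cpumask this attribute prints */
	};

	/* Recover the containing cpu_attr from a pointer to its attr member. */
	static const unsigned long *attr_to_map(struct device_attribute *attr)
	{
		struct cpu_attr *ca = container_of(attr, struct cpu_attr, attr);

		return ca->map;
	}
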
ca                349 drivers/clk/keystone/sci-clk.c 	const struct sci_clk *ca = a;
ca                352 drivers/clk/keystone/sci-clk.c 	if (ca->dev_id == cb->dev_id && ca->clk_id == cb->clk_id)
ca                354 drivers/clk/keystone/sci-clk.c 	if (ca->dev_id > cb->dev_id ||
ca                355 drivers/clk/keystone/sci-clk.c 	    (ca->dev_id == cb->dev_id && ca->clk_id > cb->clk_id))
ca                494 drivers/clk/keystone/sci-clk.c 	struct sci_clk *ca = container_of(a, struct sci_clk, node);
ca                497 drivers/clk/keystone/sci-clk.c 	return _cmp_sci_clk(ca, &cb);
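
The sci-clk hits are one three-way comparator keyed on (dev_id, clk_id); the later pair of hits wraps it for list sorting via container_of() on the list nodes. Reconstructed from the lines above; the field types and the final return -1 are assumptions:

	struct sci_clk {
		u16 dev_id;
		u8 clk_id;
	};

	static int cmp_sci_clk(const void *a, const void *b)
	{
		const struct sci_clk *ca = a;
		const struct sci_clk *cb = b;

		if (ca->dev_id == cb->dev_id && ca->clk_id == cb->clk_id)
			return 0;
		if (ca->dev_id > cb->dev_id ||
		    (ca->dev_id == cb->dev_id && ca->clk_id > cb->clk_id))
			return 1;
		return -1;
	}

With the clock array sorted by this comparator, lookup is a plain bsearch(&key, clks, num_clks, sizeof(*clks), cmp_sci_clk).
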
ca                285 drivers/dma/imx-sdma.c 	u32  ca;
ca                 80 drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c 	u8 ca;
ca                402 drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c 	u8 threshold, conf0, conf1, ca;
ca                431 drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c 	ca = default_hdmi_channel_config[runtime->channels - 2].ca;
ca                438 drivers/gpu/drm/bridge/synopsys/dw-hdmi-ahb-audio.c 	dw_hdmi_set_channel_allocation(dw->data.hdmi, ca);
ca                684 drivers/gpu/drm/bridge/synopsys/dw-hdmi.c void dw_hdmi_set_channel_allocation(struct dw_hdmi *hdmi, unsigned int ca)
ca                688 drivers/gpu/drm/bridge/synopsys/dw-hdmi.c 	hdmi_writeb(hdmi, ca, HDMI_FC_AUDICONF2);
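
In the dw-hdmi code, ca is the CEA-861 Channel Allocation byte: the AHB audio driver looks it up in a table indexed by channels - 2 (valid for 2..8 channels), and dw_hdmi_set_channel_allocation() writes it into the Audio InfoFrame configuration register HDMI_FC_AUDICONF2. A sketch of that lookup; apart from 0x00 (2 channels, FL/FR) the CA encodings are deliberately not reproduced here:

	static const struct {
		u8 ca;	/* CEA-861 Channel Allocation byte */
	} channel_config[] = {
		{ .ca = 0x00 },	/* 2 channels: FL, FR */
		/* entries for 3..8 channels elided; see the CEA-861 CA table */
	};

	u8 ca = channel_config[runtime->channels - 2].ca;

	dw_hdmi_set_channel_allocation(dw->data.hdmi, ca);
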
ca                360 drivers/infiniband/ulp/ipoib/ipoib.h 	struct ib_device *ca;
ca                535 drivers/infiniband/ulp/ipoib/ipoib.h int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req);
ca                589 drivers/infiniband/ulp/ipoib/ipoib.h int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca);
ca                 88 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);
ca                 91 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		ib_dma_unmap_page(priv->ca, mapping[i + 1], PAGE_SIZE, DMA_FROM_DEVICE);
ca                161 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	mapping[0] = ib_dma_map_single(priv->ca, skb->data, IPOIB_CM_HEAD_SIZE,
ca                163 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) {
ca                175 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		mapping[i + 1] = ib_dma_map_page(priv->ca, page,
ca                177 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		if (unlikely(ib_dma_mapping_error(priv->ca, mapping[i + 1])))
ca                186 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_CM_HEAD_SIZE, DMA_FROM_DEVICE);
ca                189 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		ib_dma_unmap_page(priv->ca, mapping[i], PAGE_SIZE, DMA_FROM_DEVICE);
ca                632 drivers/infiniband/ulp/ipoib/ipoib_cm.c 			ib_dma_sync_single_for_cpu(priv->ca, rx_ring[wr_id].mapping[0],
ca                635 drivers/infiniband/ulp/ipoib/ipoib_cm.c 			ib_dma_sync_single_for_device(priv->ca, rx_ring[wr_id].mapping[0],
ca                753 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) {
ca                880 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	priv->cm.id = ib_create_cm_id(priv->ca, ipoib_cm_rx_handler, dev);
ca                882 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		pr_warn("%s: failed to create CM ID\n", priv->ca->name);
ca                890 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		pr_warn("%s: failed to listen on ID 0x%llx\n", priv->ca->name,
ca               1074 drivers/infiniband/ulp/ipoib/ipoib_cm.c 		attr.cap.max_send_sge = min_t(u32, priv->ca->attrs.max_send_sge,
ca               1125 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	ret = ib_find_pkey(priv->ca, priv->port, priv->pkey, &qp_attr.pkey_index);
ca               1167 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	p->id = ib_create_cm_id(priv->ca, ipoib_cm_tx_handler, p);
ca               1571 drivers/infiniband/ulp/ipoib/ipoib_cm.c 			       priv->ca->name, PTR_ERR(priv->cm.srq));
ca               1606 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	ipoib_dbg(priv, "max_srq_sge=%d\n", priv->ca->attrs.max_srq_sge);
ca               1608 drivers/infiniband/ulp/ipoib/ipoib_cm.c 	max_srq_sge = min_t(int, IPOIB_CM_RX_SG, priv->ca->attrs.max_srq_sge);
ca                 66 drivers/infiniband/ulp/ipoib/ipoib_ethtool.c 	ib_get_device_fw_str(priv->ca, drvinfo->fw_version);
ca                 68 drivers/infiniband/ulp/ipoib/ipoib_ethtool.c 	strlcpy(drvinfo->bus_info, dev_name(priv->ca->dev.parent),
ca                190 drivers/infiniband/ulp/ipoib/ipoib_ethtool.c 	ret = ib_query_port(priv->ca, priv->port, &attr);
ca                 96 drivers/infiniband/ulp/ipoib/ipoib_ib.c 	ib_dma_unmap_single(priv->ca, mapping[0],
ca                142 drivers/infiniband/ulp/ipoib/ipoib_ib.c 	mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size,
ca                144 drivers/infiniband/ulp/ipoib/ipoib_ib.c 	if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0])))
ca                275 drivers/infiniband/ulp/ipoib/ipoib_ib.c int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req)
ca                283 drivers/infiniband/ulp/ipoib/ipoib_ib.c 		mapping[0] = ib_dma_map_single(ca, skb->data, skb_headlen(skb),
ca                285 drivers/infiniband/ulp/ipoib/ipoib_ib.c 		if (unlikely(ib_dma_mapping_error(ca, mapping[0])))
ca                294 drivers/infiniband/ulp/ipoib/ipoib_ib.c 		mapping[i + off] = ib_dma_map_page(ca,
ca                299 drivers/infiniband/ulp/ipoib/ipoib_ib.c 		if (unlikely(ib_dma_mapping_error(ca, mapping[i + off])))
ca                308 drivers/infiniband/ulp/ipoib/ipoib_ib.c 		ib_dma_unmap_page(ca, mapping[i - !off], skb_frag_size(frag), DMA_TO_DEVICE);
ca                312 drivers/infiniband/ulp/ipoib/ipoib_ib.c 		ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE);
ca                326 drivers/infiniband/ulp/ipoib/ipoib_ib.c 		ib_dma_unmap_single(priv->ca, mapping[0], skb_headlen(skb),
ca                335 drivers/infiniband/ulp/ipoib/ipoib_ib.c 		ib_dma_unmap_page(priv->ca, mapping[i + off],
ca                628 drivers/infiniband/ulp/ipoib/ipoib_ib.c 	if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) {
ca                931 drivers/infiniband/ulp/ipoib/ipoib_ib.c 	    ib_find_pkey(priv->ca, priv->port, priv->pkey,
ca               1022 drivers/infiniband/ulp/ipoib/ipoib_ib.c 	result = ib_query_pkey(priv->ca, priv->port, 0, &priv->pkey);
ca               1076 drivers/infiniband/ulp/ipoib/ipoib_ib.c 	if (rdma_query_gid(priv->ca, priv->port, 0, &gid0))
ca               1092 drivers/infiniband/ulp/ipoib/ipoib_ib.c 	err = ib_find_gid(priv->ca, &search_gid, &port, &index);
ca                768 drivers/infiniband/ulp/ipoib/ipoib_main.c 		if (!ib_init_ah_attr_from_path(priv->ca, priv->port,
ca                864 drivers/infiniband/ulp/ipoib/ipoib_main.c 	if (rdma_cap_opa_ah(priv->ca, priv->port))
ca                908 drivers/infiniband/ulp/ipoib/ipoib_main.c 		ib_sa_path_rec_get(&ipoib_sa_client, priv->ca, priv->port,
ca               1706 drivers/infiniband/ulp/ipoib/ipoib_main.c 			priv->ca->name, ipoib_sendq_size);
ca               1712 drivers/infiniband/ulp/ipoib/ipoib_main.c 	if (ipoib_transport_dev_init(dev, priv->ca)) {
ca               1714 drivers/infiniband/ulp/ipoib/ipoib_main.c 			priv->ca->name);
ca               1765 drivers/infiniband/ulp/ipoib/ipoib_main.c 	priv->pd = ib_alloc_pd(priv->ca, 0);
ca               1767 drivers/infiniband/ulp/ipoib/ipoib_main.c 		pr_warn("%s: failed to allocate PD\n", priv->ca->name);
ca               1843 drivers/infiniband/ulp/ipoib/ipoib_main.c 	priv->hca_caps = priv->ca->attrs.device_cap_flags;
ca               1861 drivers/infiniband/ulp/ipoib/ipoib_main.c 	result = ib_query_port(priv->ca, priv->port, &attr);
ca               1863 drivers/infiniband/ulp/ipoib/ipoib_main.c 		pr_warn("%s: ib_query_port %d failed\n", priv->ca->name,
ca               1869 drivers/infiniband/ulp/ipoib/ipoib_main.c 	result = ib_query_pkey(priv->ca, priv->port, 0, &priv->pkey);
ca               1872 drivers/infiniband/ulp/ipoib/ipoib_main.c 			priv->ca->name, priv->port, result);
ca               1876 drivers/infiniband/ulp/ipoib/ipoib_main.c 	result = rdma_query_gid(priv->ca, priv->port, 0, &priv->local_gid);
ca               1879 drivers/infiniband/ulp/ipoib/ipoib_main.c 			priv->ca->name, priv->port, result);
ca               1885 drivers/infiniband/ulp/ipoib/ipoib_main.c 	SET_NETDEV_DEV(priv->dev, priv->ca->dev.parent);
ca               1939 drivers/infiniband/ulp/ipoib/ipoib_main.c 			priv->ca->name, priv->dev->name, priv->port, rc);
ca               1995 drivers/infiniband/ulp/ipoib/ipoib_main.c 	return ib_set_vf_link_state(priv->ca, vf, priv->port, link_state);
ca               2004 drivers/infiniband/ulp/ipoib/ipoib_main.c 	err = ib_get_vf_config(priv->ca, vf, priv->port, ivf);
ca               2021 drivers/infiniband/ulp/ipoib/ipoib_main.c 	return ib_set_vf_guid(priv->ca, vf, priv->port, guid, type);
ca               2029 drivers/infiniband/ulp/ipoib/ipoib_main.c 	return ib_get_vf_stats(priv->ca, vf, priv->port, vf_stats);
ca               2158 drivers/infiniband/ulp/ipoib/ipoib_main.c 	priv->ca = hca;
ca               2458 drivers/infiniband/ulp/ipoib/ipoib_main.c 			      priv->ca, ipoib_event);
ca                265 drivers/infiniband/ulp/ipoib/ipoib_multicast.c 		ret = rn->attach_mcast(dev, priv->ca, &mcast->mcmember.mgid,
ca                278 drivers/infiniband/ulp/ipoib/ipoib_multicast.c 	av.type = rdma_ah_find_type(priv->ca, priv->port);
ca                332 drivers/infiniband/ulp/ipoib/ipoib_multicast.c 	if (ib_query_port(priv->ca, priv->port, &attr) ||
ca                345 drivers/infiniband/ulp/ipoib/ipoib_multicast.c 					       priv->ca, priv->port);
ca                548 drivers/infiniband/ulp/ipoib/ipoib_multicast.c 	multicast = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port,
ca                577 drivers/infiniband/ulp/ipoib/ipoib_multicast.c 	if (ib_query_port(priv->ca, priv->port, &port_attr)) {
ca                712 drivers/infiniband/ulp/ipoib/ipoib_multicast.c 		ret = rn->detach_mcast(dev, priv->ca, &mcast->mcmember.mgid,
ca                929 drivers/infiniband/ulp/ipoib/ipoib_multicast.c 			    !ib_sa_get_mcmember_rec(priv->ca, priv->port, &mgid, &rec)) {
ca                125 drivers/infiniband/ulp/ipoib/ipoib_netlink.c 	err = ipoib_intf_init(ppriv->ca, ppriv->port, dev->name, dev);
ca                 46 drivers/infiniband/ulp/ipoib/ipoib_verbs.c 	if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &pkey_index)) {
ca                143 drivers/infiniband/ulp/ipoib/ipoib_verbs.c int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
ca                150 drivers/infiniband/ulp/ipoib/ipoib_verbs.c 			.max_send_sge = min_t(u32, priv->ca->attrs.max_send_sge,
ca                177 drivers/infiniband/ulp/ipoib/ipoib_verbs.c 	cq_attr.comp_vector = req_vec % priv->ca->num_comp_vectors;
ca                178 drivers/infiniband/ulp/ipoib/ipoib_verbs.c 	priv->recv_cq = ib_create_cq(priv->ca, ipoib_ib_rx_completion, NULL,
ca                181 drivers/infiniband/ulp/ipoib/ipoib_verbs.c 		pr_warn("%s: failed to create receive CQ\n", ca->name);
ca                186 drivers/infiniband/ulp/ipoib/ipoib_verbs.c 	cq_attr.comp_vector = (req_vec + 1) % priv->ca->num_comp_vectors;
ca                187 drivers/infiniband/ulp/ipoib/ipoib_verbs.c 	priv->send_cq = ib_create_cq(priv->ca, ipoib_ib_tx_completion, NULL,
ca                190 drivers/infiniband/ulp/ipoib/ipoib_verbs.c 		pr_warn("%s: failed to create send CQ\n", ca->name);
ca                211 drivers/infiniband/ulp/ipoib/ipoib_verbs.c 		pr_warn("%s: failed to create QP\n", ca->name);
ca                188 drivers/infiniband/ulp/ipoib/ipoib_vlan.c 	ndev = ipoib_intf_alloc(ppriv->ca, ppriv->port, intf_name);
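
Throughout ipoib, priv->ca is the underlying struct ib_device (the Channel Adapter, hence the name). Most of the hits are one recurring pattern: map a buffer for DMA, validate the handle with ib_dma_mapping_error() before it is ever posted, and later unmap with the same device, size, and direction. A minimal sketch; the function is hypothetical, the ib_dma_* calls are the real API:

	static int example_dma_cycle(struct ib_device *ca, void *buf, size_t size)
	{
		u64 addr;

		addr = ib_dma_map_single(ca, buf, size, DMA_FROM_DEVICE);
		if (unlikely(ib_dma_mapping_error(ca, addr)))
			return -EIO;	/* never post an unchecked handle */

		/* ... post a work request carrying addr, wait for completion ... */

		ib_dma_unmap_single(ca, addr, size, DMA_FROM_DEVICE);
		return 0;
	}
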
ca                 76 drivers/md/bcache/alloc.c uint8_t bch_inc_gen(struct cache *ca, struct bucket *b)
ca                 80 drivers/md/bcache/alloc.c 	ca->set->need_gc = max(ca->set->need_gc, bucket_gc_gen(b));
ca                 81 drivers/md/bcache/alloc.c 	WARN_ON_ONCE(ca->set->need_gc > BUCKET_GC_GEN_MAX);
ca                 88 drivers/md/bcache/alloc.c 	struct cache *ca;
ca                107 drivers/md/bcache/alloc.c 	for_each_cache(ca, c, i)
ca                108 drivers/md/bcache/alloc.c 		for_each_bucket(b, ca)
ca                131 drivers/md/bcache/alloc.c bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b)
ca                133 drivers/md/bcache/alloc.c 	BUG_ON(!ca->set->gc_mark_valid);
ca                141 drivers/md/bcache/alloc.c void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
ca                143 drivers/md/bcache/alloc.c 	lockdep_assert_held(&ca->set->bucket_lock);
ca                147 drivers/md/bcache/alloc.c 		trace_bcache_invalidate(ca, b - ca->buckets);
ca                149 drivers/md/bcache/alloc.c 	bch_inc_gen(ca, b);
ca                154 drivers/md/bcache/alloc.c static void bch_invalidate_one_bucket(struct cache *ca, struct bucket *b)
ca                156 drivers/md/bcache/alloc.c 	__bch_invalidate_one_bucket(ca, b);
ca                158 drivers/md/bcache/alloc.c 	fifo_push(&ca->free_inc, b - ca->buckets);
ca                172 drivers/md/bcache/alloc.c 	unsigned int min_prio = (INITIAL_PRIO - ca->set->min_prio) / 8;	\
ca                174 drivers/md/bcache/alloc.c 	(b->prio - ca->set->min_prio + min_prio) * GC_SECTORS_USED(b);	\
ca                180 drivers/md/bcache/alloc.c static void invalidate_buckets_lru(struct cache *ca)
ca                185 drivers/md/bcache/alloc.c 	ca->heap.used = 0;
ca                187 drivers/md/bcache/alloc.c 	for_each_bucket(b, ca) {
ca                188 drivers/md/bcache/alloc.c 		if (!bch_can_invalidate_bucket(ca, b))
ca                191 drivers/md/bcache/alloc.c 		if (!heap_full(&ca->heap))
ca                192 drivers/md/bcache/alloc.c 			heap_add(&ca->heap, b, bucket_max_cmp);
ca                193 drivers/md/bcache/alloc.c 		else if (bucket_max_cmp(b, heap_peek(&ca->heap))) {
ca                194 drivers/md/bcache/alloc.c 			ca->heap.data[0] = b;
ca                195 drivers/md/bcache/alloc.c 			heap_sift(&ca->heap, 0, bucket_max_cmp);
ca                199 drivers/md/bcache/alloc.c 	for (i = ca->heap.used / 2 - 1; i >= 0; --i)
ca                200 drivers/md/bcache/alloc.c 		heap_sift(&ca->heap, i, bucket_min_cmp);
ca                202 drivers/md/bcache/alloc.c 	while (!fifo_full(&ca->free_inc)) {
ca                203 drivers/md/bcache/alloc.c 		if (!heap_pop(&ca->heap, b, bucket_min_cmp)) {
ca                208 drivers/md/bcache/alloc.c 			ca->invalidate_needs_gc = 1;
ca                209 drivers/md/bcache/alloc.c 			wake_up_gc(ca->set);
ca                213 drivers/md/bcache/alloc.c 		bch_invalidate_one_bucket(ca, b);
ca                217 drivers/md/bcache/alloc.c static void invalidate_buckets_fifo(struct cache *ca)
ca                222 drivers/md/bcache/alloc.c 	while (!fifo_full(&ca->free_inc)) {
ca                223 drivers/md/bcache/alloc.c 		if (ca->fifo_last_bucket <  ca->sb.first_bucket ||
ca                224 drivers/md/bcache/alloc.c 		    ca->fifo_last_bucket >= ca->sb.nbuckets)
ca                225 drivers/md/bcache/alloc.c 			ca->fifo_last_bucket = ca->sb.first_bucket;
ca                227 drivers/md/bcache/alloc.c 		b = ca->buckets + ca->fifo_last_bucket++;
ca                229 drivers/md/bcache/alloc.c 		if (bch_can_invalidate_bucket(ca, b))
ca                230 drivers/md/bcache/alloc.c 			bch_invalidate_one_bucket(ca, b);
ca                232 drivers/md/bcache/alloc.c 		if (++checked >= ca->sb.nbuckets) {
ca                233 drivers/md/bcache/alloc.c 			ca->invalidate_needs_gc = 1;
ca                234 drivers/md/bcache/alloc.c 			wake_up_gc(ca->set);
ca                240 drivers/md/bcache/alloc.c static void invalidate_buckets_random(struct cache *ca)
ca                245 drivers/md/bcache/alloc.c 	while (!fifo_full(&ca->free_inc)) {
ca                250 drivers/md/bcache/alloc.c 		n %= (size_t) (ca->sb.nbuckets - ca->sb.first_bucket);
ca                251 drivers/md/bcache/alloc.c 		n += ca->sb.first_bucket;
ca                253 drivers/md/bcache/alloc.c 		b = ca->buckets + n;
ca                255 drivers/md/bcache/alloc.c 		if (bch_can_invalidate_bucket(ca, b))
ca                256 drivers/md/bcache/alloc.c 			bch_invalidate_one_bucket(ca, b);
ca                258 drivers/md/bcache/alloc.c 		if (++checked >= ca->sb.nbuckets / 2) {
ca                259 drivers/md/bcache/alloc.c 			ca->invalidate_needs_gc = 1;
ca                260 drivers/md/bcache/alloc.c 			wake_up_gc(ca->set);
ca                266 drivers/md/bcache/alloc.c static void invalidate_buckets(struct cache *ca)
ca                268 drivers/md/bcache/alloc.c 	BUG_ON(ca->invalidate_needs_gc);
ca                270 drivers/md/bcache/alloc.c 	switch (CACHE_REPLACEMENT(&ca->sb)) {
ca                272 drivers/md/bcache/alloc.c 		invalidate_buckets_lru(ca);
ca                275 drivers/md/bcache/alloc.c 		invalidate_buckets_fifo(ca);
ca                278 drivers/md/bcache/alloc.c 		invalidate_buckets_random(ca);
ca                283 drivers/md/bcache/alloc.c #define allocator_wait(ca, cond)					\
ca                290 drivers/md/bcache/alloc.c 		mutex_unlock(&(ca)->set->bucket_lock);			\
ca                292 drivers/md/bcache/alloc.c 		    test_bit(CACHE_SET_IO_DISABLE, &ca->set->flags)) {	\
ca                298 drivers/md/bcache/alloc.c 		mutex_lock(&(ca)->set->bucket_lock);			\
ca                303 drivers/md/bcache/alloc.c static int bch_allocator_push(struct cache *ca, long bucket)
ca                308 drivers/md/bcache/alloc.c 	if (fifo_push(&ca->free[RESERVE_PRIO], bucket))
ca                312 drivers/md/bcache/alloc.c 		if (fifo_push(&ca->free[i], bucket))
ca                320 drivers/md/bcache/alloc.c 	struct cache *ca = arg;
ca                322 drivers/md/bcache/alloc.c 	mutex_lock(&ca->set->bucket_lock);
ca                333 drivers/md/bcache/alloc.c 			if (!fifo_pop(&ca->free_inc, bucket))
ca                336 drivers/md/bcache/alloc.c 			if (ca->discard) {
ca                337 drivers/md/bcache/alloc.c 				mutex_unlock(&ca->set->bucket_lock);
ca                338 drivers/md/bcache/alloc.c 				blkdev_issue_discard(ca->bdev,
ca                339 drivers/md/bcache/alloc.c 					bucket_to_sector(ca->set, bucket),
ca                340 drivers/md/bcache/alloc.c 					ca->sb.bucket_size, GFP_KERNEL, 0);
ca                341 drivers/md/bcache/alloc.c 				mutex_lock(&ca->set->bucket_lock);
ca                344 drivers/md/bcache/alloc.c 			allocator_wait(ca, bch_allocator_push(ca, bucket));
ca                345 drivers/md/bcache/alloc.c 			wake_up(&ca->set->btree_cache_wait);
ca                346 drivers/md/bcache/alloc.c 			wake_up(&ca->set->bucket_wait);
ca                356 drivers/md/bcache/alloc.c 		allocator_wait(ca, ca->set->gc_mark_valid &&
ca                357 drivers/md/bcache/alloc.c 			       !ca->invalidate_needs_gc);
ca                358 drivers/md/bcache/alloc.c 		invalidate_buckets(ca);
ca                364 drivers/md/bcache/alloc.c 		allocator_wait(ca, !atomic_read(&ca->set->prio_blocked));
ca                365 drivers/md/bcache/alloc.c 		if (CACHE_SYNC(&ca->set->sb)) {
ca                377 drivers/md/bcache/alloc.c 			if (!fifo_full(&ca->free_inc))
ca                380 drivers/md/bcache/alloc.c 			if (bch_prio_write(ca, false) < 0) {
ca                381 drivers/md/bcache/alloc.c 				ca->invalidate_needs_gc = 1;
ca                382 drivers/md/bcache/alloc.c 				wake_up_gc(ca->set);
ca                393 drivers/md/bcache/alloc.c long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait)
ca                401 drivers/md/bcache/alloc.c 	if (unlikely(test_bit(CACHE_SET_IO_DISABLE, &ca->set->flags)))
ca                405 drivers/md/bcache/alloc.c 	if (fifo_pop(&ca->free[RESERVE_NONE], r) ||
ca                406 drivers/md/bcache/alloc.c 	    fifo_pop(&ca->free[reserve], r))
ca                410 drivers/md/bcache/alloc.c 		trace_bcache_alloc_fail(ca, reserve);
ca                415 drivers/md/bcache/alloc.c 		prepare_to_wait(&ca->set->bucket_wait, &w,
ca                418 drivers/md/bcache/alloc.c 		mutex_unlock(&ca->set->bucket_lock);
ca                420 drivers/md/bcache/alloc.c 		mutex_lock(&ca->set->bucket_lock);
ca                421 drivers/md/bcache/alloc.c 	} while (!fifo_pop(&ca->free[RESERVE_NONE], r) &&
ca                422 drivers/md/bcache/alloc.c 		 !fifo_pop(&ca->free[reserve], r));
ca                424 drivers/md/bcache/alloc.c 	finish_wait(&ca->set->bucket_wait, &w);
ca                426 drivers/md/bcache/alloc.c 	if (ca->alloc_thread)
ca                427 drivers/md/bcache/alloc.c 		wake_up_process(ca->alloc_thread);
ca                429 drivers/md/bcache/alloc.c 	trace_bcache_alloc(ca, reserve);
ca                431 drivers/md/bcache/alloc.c 	if (expensive_debug_checks(ca->set)) {
ca                436 drivers/md/bcache/alloc.c 		for (iter = 0; iter < prio_buckets(ca) * 2; iter++)
ca                437 drivers/md/bcache/alloc.c 			BUG_ON(ca->prio_buckets[iter] == (uint64_t) r);
ca                440 drivers/md/bcache/alloc.c 			fifo_for_each(i, &ca->free[j], iter)
ca                442 drivers/md/bcache/alloc.c 		fifo_for_each(i, &ca->free_inc, iter)
ca                446 drivers/md/bcache/alloc.c 	b = ca->buckets + r;
ca                450 drivers/md/bcache/alloc.c 	SET_GC_SECTORS_USED(b, ca->sb.bucket_size);
ca                462 drivers/md/bcache/alloc.c 	if (ca->set->avail_nbuckets > 0) {
ca                463 drivers/md/bcache/alloc.c 		ca->set->avail_nbuckets--;
ca                464 drivers/md/bcache/alloc.c 		bch_update_bucket_in_use(ca->set, &ca->set->gc_stats);
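
The sleep in bch_bucket_alloc() (alloc.c 415-424 above) is the standard prepare_to_wait()/schedule()/finish_wait() idiom: drop bucket_lock across the sleep and re-check both fifos after every wakeup. Filling in the lines the hit list elides, on the assumption that the task state is TASK_UNINTERRUPTIBLE:

	DEFINE_WAIT(w);

	do {
		prepare_to_wait(&ca->set->bucket_wait, &w,
				TASK_UNINTERRUPTIBLE);

		mutex_unlock(&ca->set->bucket_lock);
		schedule();
		mutex_lock(&ca->set->bucket_lock);
	} while (!fifo_pop(&ca->free[RESERVE_NONE], r) &&
		 !fifo_pop(&ca->free[reserve], r));

	finish_wait(&ca->set->bucket_wait, &w);
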
ca                470 drivers/md/bcache/alloc.c void __bch_bucket_free(struct cache *ca, struct bucket *b)
ca                475 drivers/md/bcache/alloc.c 	if (ca->set->avail_nbuckets < ca->set->nbuckets) {
ca                476 drivers/md/bcache/alloc.c 		ca->set->avail_nbuckets++;
ca                477 drivers/md/bcache/alloc.c 		bch_update_bucket_in_use(ca->set, &ca->set->gc_stats);
ca                507 drivers/md/bcache/alloc.c 		struct cache *ca = c->cache_by_alloc[i];
ca                508 drivers/md/bcache/alloc.c 		long b = bch_bucket_alloc(ca, reserve, wait);
ca                513 drivers/md/bcache/alloc.c 		k->ptr[i] = MAKE_PTR(ca->buckets[b].gen,
ca                515 drivers/md/bcache/alloc.c 				ca->sb.nr_this_dev);
ca                734 drivers/md/bcache/alloc.c int bch_cache_allocator_start(struct cache *ca)
ca                737 drivers/md/bcache/alloc.c 					    ca, "bcache_allocator");
ca                741 drivers/md/bcache/alloc.c 	ca->alloc_thread = k;
ca                867 drivers/md/bcache/bcache.h #define for_each_cache(ca, cs, iter)					\
ca                868 drivers/md/bcache/bcache.h 	for (iter = 0; ca = cs->cache[iter], iter < (cs)->sb.nr_in_set; iter++)
ca                870 drivers/md/bcache/bcache.h #define for_each_bucket(b, ca)						\
ca                871 drivers/md/bcache/bcache.h 	for (b = (ca)->buckets + (ca)->sb.first_bucket;			\
ca                872 drivers/md/bcache/bcache.h 	     b < (ca)->buckets + (ca)->sb.nbuckets; b++)
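
Usage of the two iterators, matching the alloc.c hits at 107-108; note that for_each_cache() assigns ca through the comma operator before the bound test, so the body always sees the freshly loaded slot:

	struct cache *ca;
	struct bucket *b;
	unsigned int i;

	for_each_cache(ca, c, i)	/* c is the struct cache_set */
		for_each_bucket(b, ca)
			b->gen = 0;	/* illustrative per-bucket work */
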
ca                911 drivers/md/bcache/bcache.h 	struct cache *ca;
ca                914 drivers/md/bcache/bcache.h 	for_each_cache(ca, c, i)
ca                915 drivers/md/bcache/bcache.h 		wake_up_process(ca->alloc_thread);
ca                948 drivers/md/bcache/bcache.h void bch_count_io_errors(struct cache *ca, blk_status_t error,
ca                961 drivers/md/bcache/bcache.h uint8_t bch_inc_gen(struct cache *ca, struct bucket *b);
ca                964 drivers/md/bcache/bcache.h bool bch_can_invalidate_bucket(struct cache *ca, struct bucket *b);
ca                965 drivers/md/bcache/bcache.h void __bch_invalidate_one_bucket(struct cache *ca, struct bucket *b);
ca                967 drivers/md/bcache/bcache.h void __bch_bucket_free(struct cache *ca, struct bucket *b);
ca                970 drivers/md/bcache/bcache.h long bch_bucket_alloc(struct cache *ca, unsigned int reserve, bool wait);
ca                983 drivers/md/bcache/bcache.h int bch_prio_write(struct cache *ca, bool wait);
ca               1023 drivers/md/bcache/bcache.h int bch_cache_allocator_start(struct cache *ca);
ca               1215 drivers/md/bcache/btree.c 	struct cache *ca;
ca               1220 drivers/md/bcache/btree.c 	for_each_cache(ca, c, i)
ca               1221 drivers/md/bcache/btree.c 		if (fifo_used(&ca->free[RESERVE_BTREE]) < reserve) {
ca               1737 drivers/md/bcache/btree.c 	struct cache *ca;
ca               1749 drivers/md/bcache/btree.c 	for_each_cache(ca, c, i)
ca               1750 drivers/md/bcache/btree.c 		for_each_bucket(b, ca) {
ca               1764 drivers/md/bcache/btree.c 	struct cache *ca;
ca               1800 drivers/md/bcache/btree.c 	for_each_cache(ca, c, i) {
ca               1803 drivers/md/bcache/btree.c 		ca->invalidate_needs_gc = 0;
ca               1805 drivers/md/bcache/btree.c 		for (i = ca->sb.d; i < ca->sb.d + ca->sb.keys; i++)
ca               1806 drivers/md/bcache/btree.c 			SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);
ca               1808 drivers/md/bcache/btree.c 		for (i = ca->prio_buckets;
ca               1809 drivers/md/bcache/btree.c 		     i < ca->prio_buckets + prio_buckets(ca) * 2; i++)
ca               1810 drivers/md/bcache/btree.c 			SET_GC_MARK(ca->buckets + *i, GC_MARK_METADATA);
ca               1812 drivers/md/bcache/btree.c 		for_each_bucket(b, ca) {
ca               1874 drivers/md/bcache/btree.c 	struct cache *ca;
ca               1877 drivers/md/bcache/btree.c 	for_each_cache(ca, c, i)
ca               1878 drivers/md/bcache/btree.c 		if (ca->invalidate_needs_gc)
ca               1964 drivers/md/bcache/btree.c 	struct cache *ca;
ca               1981 drivers/md/bcache/btree.c 	for_each_cache(ca, c, i) {
ca               1982 drivers/md/bcache/btree.c 		for_each_bucket(b, ca) {
ca               1983 drivers/md/bcache/btree.c 			if (fifo_full(&ca->free[RESERVE_PRIO]) &&
ca               1984 drivers/md/bcache/btree.c 			    fifo_full(&ca->free[RESERVE_BTREE]))
ca               1987 drivers/md/bcache/btree.c 			if (bch_can_invalidate_bucket(ca, b) &&
ca               1989 drivers/md/bcache/btree.c 				__bch_invalidate_one_bucket(ca, b);
ca               1990 drivers/md/bcache/btree.c 				if (!fifo_push(&ca->free[RESERVE_PRIO],
ca               1991 drivers/md/bcache/btree.c 				   b - ca->buckets))
ca               1992 drivers/md/bcache/btree.c 					fifo_push(&ca->free[RESERVE_BTREE],
ca               1993 drivers/md/bcache/btree.c 						  b - ca->buckets);
ca                 53 drivers/md/bcache/extents.c 			struct cache *ca = PTR_CACHE(c, k, i);
ca                 58 drivers/md/bcache/extents.c 			    bucket <  ca->sb.first_bucket ||
ca                 59 drivers/md/bcache/extents.c 			    bucket >= ca->sb.nbuckets)
ca                 74 drivers/md/bcache/extents.c 			struct cache *ca = PTR_CACHE(c, k, i);
ca                 80 drivers/md/bcache/extents.c 			if (bucket <  ca->sb.first_bucket)
ca                 82 drivers/md/bcache/extents.c 			if (bucket >= ca->sb.nbuckets)
ca                 81 drivers/md/bcache/io.c void bch_count_io_errors(struct cache *ca,
ca                 91 drivers/md/bcache/io.c 	if (ca->set->error_decay) {
ca                 92 drivers/md/bcache/io.c 		unsigned int count = atomic_inc_return(&ca->io_count);
ca                 94 drivers/md/bcache/io.c 		while (count > ca->set->error_decay) {
ca                 97 drivers/md/bcache/io.c 			unsigned int new = count - ca->set->error_decay;
ca                104 drivers/md/bcache/io.c 			count = atomic_cmpxchg(&ca->io_count, old, new);
ca                109 drivers/md/bcache/io.c 				errors = atomic_read(&ca->io_errors);
ca                113 drivers/md/bcache/io.c 					errors = atomic_cmpxchg(&ca->io_errors,
ca                122 drivers/md/bcache/io.c 						    &ca->io_errors);
ca                125 drivers/md/bcache/io.c 		if (errors < ca->set->error_limit)
ca                127 drivers/md/bcache/io.c 			       ca->cache_dev_name, m,
ca                130 drivers/md/bcache/io.c 			bch_cache_set_error(ca->set,
ca                132 drivers/md/bcache/io.c 					    ca->cache_dev_name, m);
ca                140 drivers/md/bcache/io.c 	struct cache *ca = PTR_CACHE(c, &b->key, 0);
ca                163 drivers/md/bcache/io.c 	bch_count_io_errors(ca, error, is_read, m);
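
bch_count_io_errors() maintains a decaying error count without locks: atomic_inc_return() bumps io_count on every I/O, and once it passes error_decay one CPU wins the cmpxchg, subtracts the decay interval, and rescales io_errors; losers simply retry with the value cmpxchg handed back. A reduced sketch of that shape, with an illustrative decay factor standing in for the driver's own:

	static void count_io(atomic_t *io_count, atomic_t *io_errors,
			     unsigned int error_decay)
	{
		unsigned int count = atomic_inc_return(io_count);

		while (count > error_decay) {
			unsigned int old = count;

			count = atomic_cmpxchg(io_count, old, count - error_decay);
			if (count == old) {
				unsigned int e = atomic_read(io_errors);
				unsigned int prev;

				/* we won the race: decay the error count too */
				do {
					prev = e;
					e = atomic_cmpxchg(io_errors, prev,
							   prev - prev / 8);
				} while (e != prev);

				count = old - error_decay;
			}
		}
	}
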
ca                 35 drivers/md/bcache/journal.c static int journal_read_bucket(struct cache *ca, struct list_head *list,
ca                 38 drivers/md/bcache/journal.c 	struct journal_device *ja = &ca->journal;
ca                 42 drivers/md/bcache/journal.c 	struct jset *j, *data = ca->set->journal.w[0].data;
ca                 46 drivers/md/bcache/journal.c 	sector_t bucket = bucket_to_sector(ca->set, ca->sb.d[bucket_index]);
ca                 52 drivers/md/bcache/journal.c 	while (offset < ca->sb.bucket_size) {
ca                 53 drivers/md/bcache/journal.c reread:		left = ca->sb.bucket_size - offset;
ca                 58 drivers/md/bcache/journal.c 		bio_set_dev(bio, ca->bdev);
ca                 66 drivers/md/bcache/journal.c 		closure_bio_submit(ca->set, bio, &cl);
ca                 80 drivers/md/bcache/journal.c 			if (j->magic != jset_magic(&ca->sb)) {
ca                101 drivers/md/bcache/journal.c 			blocks = set_blocks(j, block_bytes(ca->set));
ca                162 drivers/md/bcache/journal.c 			offset	+= blocks * ca->sb.block_size;
ca                163 drivers/md/bcache/journal.c 			len	-= blocks * ca->sb.block_size;
ca                164 drivers/md/bcache/journal.c 			j = ((void *) j) + blocks * block_bytes(ca);
ca                175 drivers/md/bcache/journal.c 		ret = journal_read_bucket(ca, list, b);			\
ca                182 drivers/md/bcache/journal.c 	struct cache *ca;
ca                186 drivers/md/bcache/journal.c 	for_each_cache(ca, c, iter) {
ca                187 drivers/md/bcache/journal.c 		struct journal_device *ja = &ca->journal;
ca                193 drivers/md/bcache/journal.c 		pr_debug("%u journal buckets", ca->sb.njournal_buckets);
ca                199 drivers/md/bcache/journal.c 		for (i = 0; i < ca->sb.njournal_buckets; i++) {
ca                205 drivers/md/bcache/journal.c 			l = (i * 2654435769U) % ca->sb.njournal_buckets;
ca                220 drivers/md/bcache/journal.c 		for (l = find_first_zero_bit(bitmap, ca->sb.njournal_buckets);
ca                221 drivers/md/bcache/journal.c 		     l < ca->sb.njournal_buckets;
ca                222 drivers/md/bcache/journal.c 		     l = find_next_zero_bit(bitmap, ca->sb.njournal_buckets,
ca                228 drivers/md/bcache/journal.c 		if (l == ca->sb.njournal_buckets)
ca                235 drivers/md/bcache/journal.c 		r = find_next_bit(bitmap, ca->sb.njournal_buckets, l + 1);
ca                257 drivers/md/bcache/journal.c 			 m, ca->sb.njournal_buckets);
ca                262 drivers/md/bcache/journal.c 				l = ca->sb.njournal_buckets - 1;
ca                276 drivers/md/bcache/journal.c 		for (i = 0; i < ca->sb.njournal_buckets; i++)
ca                286 drivers/md/bcache/journal.c 					ca->sb.njournal_buckets;
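
One detail worth calling out in the journal-read hits: the constant at journal.c:205 is Fibonacci hashing, 2654435769 = floor(2^32 / golden ratio), so successive values of i probe the journal buckets in a well-spread pseudo-random order before the linear bitmap scan picks up whatever is left:

	/* Fibonacci-hash probe order over the journal buckets. */
	static unsigned int probe_order(unsigned int i, unsigned int nbuckets)
	{
		return (i * 2654435769U) % nbuckets;
	}
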
ca                348 drivers/md/bcache/journal.c 	struct cache *ca;
ca                351 drivers/md/bcache/journal.c 	for_each_cache(ca, s, i)
ca                352 drivers/md/bcache/journal.c 		if (ca->discard)
ca                582 drivers/md/bcache/journal.c 	struct cache *ca = container_of(ja, struct cache, journal);
ca                586 drivers/md/bcache/journal.c 	closure_wake_up(&ca->set->journal.wait);
ca                587 drivers/md/bcache/journal.c 	closure_put(&ca->set->cl);
ca                598 drivers/md/bcache/journal.c static void do_journal_discard(struct cache *ca)
ca                600 drivers/md/bcache/journal.c 	struct journal_device *ja = &ca->journal;
ca                603 drivers/md/bcache/journal.c 	if (!ca->discard) {
ca                614 drivers/md/bcache/journal.c 			ca->sb.njournal_buckets;
ca                627 drivers/md/bcache/journal.c 		bio->bi_iter.bi_sector	= bucket_to_sector(ca->set,
ca                628 drivers/md/bcache/journal.c 						ca->sb.d[ja->discard_idx]);
ca                629 drivers/md/bcache/journal.c 		bio_set_dev(bio, ca->bdev);
ca                630 drivers/md/bcache/journal.c 		bio->bi_iter.bi_size	= bucket_bytes(ca);
ca                633 drivers/md/bcache/journal.c 		closure_get(&ca->set->cl);
ca                642 drivers/md/bcache/journal.c 	struct cache *ca;
ca                656 drivers/md/bcache/journal.c 	for_each_cache(ca, c, iter) {
ca                657 drivers/md/bcache/journal.c 		struct journal_device *ja = &ca->journal;
ca                662 drivers/md/bcache/journal.c 				ca->sb.njournal_buckets;
ca                665 drivers/md/bcache/journal.c 	for_each_cache(ca, c, iter)
ca                666 drivers/md/bcache/journal.c 		do_journal_discard(ca);
ca                676 drivers/md/bcache/journal.c 	for_each_cache(ca, c, iter) {
ca                677 drivers/md/bcache/journal.c 		struct journal_device *ja = &ca->journal;
ca                678 drivers/md/bcache/journal.c 		unsigned int next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;
ca                686 drivers/md/bcache/journal.c 				  bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
ca                687 drivers/md/bcache/journal.c 				  ca->sb.nr_this_dev);
ca                759 drivers/md/bcache/journal.c 	struct cache *ca;
ca                789 drivers/md/bcache/journal.c 	for_each_cache(ca, c, i)
ca                790 drivers/md/bcache/journal.c 		w->data->prio_bucket[ca->sb.nr_this_dev] = ca->prio_buckets[0];
ca                798 drivers/md/bcache/journal.c 		ca = PTR_CACHE(c, k, i);
ca                799 drivers/md/bcache/journal.c 		bio = &ca->journal.bio;
ca                801 drivers/md/bcache/journal.c 		atomic_long_add(sectors, &ca->meta_sectors_written);
ca                805 drivers/md/bcache/journal.c 		bio_set_dev(bio, ca->bdev);
ca                819 drivers/md/bcache/journal.c 		ca->journal.seq[ca->journal.cur_idx] = w->data->seq;
ca                190 drivers/md/bcache/movinggc.c static unsigned int bucket_heap_top(struct cache *ca)
ca                194 drivers/md/bcache/movinggc.c 	return (b = heap_peek(&ca->heap)) ? GC_SECTORS_USED(b) : 0;
ca                199 drivers/md/bcache/movinggc.c 	struct cache *ca;
ca                208 drivers/md/bcache/movinggc.c 	for_each_cache(ca, c, i) {
ca                210 drivers/md/bcache/movinggc.c 		unsigned int reserve_sectors = ca->sb.bucket_size *
ca                211 drivers/md/bcache/movinggc.c 			     fifo_used(&ca->free[RESERVE_MOVINGGC]);
ca                213 drivers/md/bcache/movinggc.c 		ca->heap.used = 0;
ca                215 drivers/md/bcache/movinggc.c 		for_each_bucket(b, ca) {
ca                218 drivers/md/bcache/movinggc.c 			    GC_SECTORS_USED(b) == ca->sb.bucket_size ||
ca                222 drivers/md/bcache/movinggc.c 			if (!heap_full(&ca->heap)) {
ca                224 drivers/md/bcache/movinggc.c 				heap_add(&ca->heap, b, bucket_cmp);
ca                225 drivers/md/bcache/movinggc.c 			} else if (bucket_cmp(b, heap_peek(&ca->heap))) {
ca                226 drivers/md/bcache/movinggc.c 				sectors_to_move -= bucket_heap_top(ca);
ca                229 drivers/md/bcache/movinggc.c 				ca->heap.data[0] = b;
ca                230 drivers/md/bcache/movinggc.c 				heap_sift(&ca->heap, 0, bucket_cmp);
ca                235 drivers/md/bcache/movinggc.c 			heap_pop(&ca->heap, b, bucket_cmp);
ca                239 drivers/md/bcache/movinggc.c 		while (heap_pop(&ca->heap, b, bucket_cmp))
ca               1264 drivers/md/bcache/request.c 		struct cache *ca;
ca               1266 drivers/md/bcache/request.c 		for_each_cache(ca, d->c, i) {
ca               1267 drivers/md/bcache/request.c 			q = bdev_get_queue(ca->bdev);
ca               1374 drivers/md/bcache/request.c 	struct cache *ca;
ca               1378 drivers/md/bcache/request.c 	for_each_cache(ca, d->c, i) {
ca               1379 drivers/md/bcache/request.c 		q = bdev_get_queue(ca->bdev);
ca                273 drivers/md/bcache/super.c 	struct cache *ca = bio->bi_private;
ca                276 drivers/md/bcache/super.c 	bch_count_io_errors(ca, bio->bi_status, 0,
ca                278 drivers/md/bcache/super.c 	closure_put(&ca->set->sb_write);
ca                291 drivers/md/bcache/super.c 	struct cache *ca;
ca                299 drivers/md/bcache/super.c 	for_each_cache(ca, c, i) {
ca                300 drivers/md/bcache/super.c 		struct bio *bio = &ca->sb_bio;
ca                302 drivers/md/bcache/super.c 		ca->sb.version		= BCACHE_SB_VERSION_CDEV_WITH_UUID;
ca                303 drivers/md/bcache/super.c 		ca->sb.seq		= c->sb.seq;
ca                304 drivers/md/bcache/super.c 		ca->sb.last_mount	= c->sb.last_mount;
ca                306 drivers/md/bcache/super.c 		SET_CACHE_SYNC(&ca->sb, CACHE_SYNC(&c->sb));
ca                309 drivers/md/bcache/super.c 		bio_set_dev(bio, ca->bdev);
ca                311 drivers/md/bcache/super.c 		bio->bi_private = ca;
ca                314 drivers/md/bcache/super.c 		__write_super(&ca->sb, bio);
ca                425 drivers/md/bcache/super.c 	struct cache *ca;
ca                438 drivers/md/bcache/super.c 	ca = PTR_CACHE(c, &k.key, 0);
ca                439 drivers/md/bcache/super.c 	atomic_long_add(ca->sb.bucket_size, &ca->meta_sectors_written);
ca                504 drivers/md/bcache/super.c 	struct cache *ca = bio->bi_private;
ca                506 drivers/md/bcache/super.c 	cache_set_err_on(bio->bi_status, ca->set, "accessing priorities");
ca                507 drivers/md/bcache/super.c 	bch_bbio_free(bio, ca->set);
ca                508 drivers/md/bcache/super.c 	closure_put(&ca->prio);
ca                511 drivers/md/bcache/super.c static void prio_io(struct cache *ca, uint64_t bucket, int op,
ca                514 drivers/md/bcache/super.c 	struct closure *cl = &ca->prio;
ca                515 drivers/md/bcache/super.c 	struct bio *bio = bch_bbio_alloc(ca->set);
ca                519 drivers/md/bcache/super.c 	bio->bi_iter.bi_sector	= bucket * ca->sb.bucket_size;
ca                520 drivers/md/bcache/super.c 	bio_set_dev(bio, ca->bdev);
ca                521 drivers/md/bcache/super.c 	bio->bi_iter.bi_size	= bucket_bytes(ca);
ca                524 drivers/md/bcache/super.c 	bio->bi_private = ca;
ca                526 drivers/md/bcache/super.c 	bch_bio_map(bio, ca->disk_buckets);
ca                528 drivers/md/bcache/super.c 	closure_bio_submit(ca->set, bio, &ca->prio);
ca                532 drivers/md/bcache/super.c int bch_prio_write(struct cache *ca, bool wait)
ca                539 drivers/md/bcache/super.c 		 fifo_used(&ca->free[RESERVE_PRIO]),
ca                540 drivers/md/bcache/super.c 		 fifo_used(&ca->free[RESERVE_NONE]),
ca                541 drivers/md/bcache/super.c 		 fifo_used(&ca->free_inc));
ca                549 drivers/md/bcache/super.c 		size_t avail = fifo_used(&ca->free[RESERVE_PRIO]) +
ca                550 drivers/md/bcache/super.c 			       fifo_used(&ca->free[RESERVE_NONE]);
ca                551 drivers/md/bcache/super.c 		if (prio_buckets(ca) > avail)
ca                557 drivers/md/bcache/super.c 	lockdep_assert_held(&ca->set->bucket_lock);
ca                559 drivers/md/bcache/super.c 	ca->disk_buckets->seq++;
ca                561 drivers/md/bcache/super.c 	atomic_long_add(ca->sb.bucket_size * prio_buckets(ca),
ca                562 drivers/md/bcache/super.c 			&ca->meta_sectors_written);
ca                564 drivers/md/bcache/super.c 	for (i = prio_buckets(ca) - 1; i >= 0; --i) {
ca                566 drivers/md/bcache/super.c 		struct prio_set *p = ca->disk_buckets;
ca                568 drivers/md/bcache/super.c 		struct bucket_disk *end = d + prios_per_bucket(ca);
ca                570 drivers/md/bcache/super.c 		for (b = ca->buckets + i * prios_per_bucket(ca);
ca                571 drivers/md/bcache/super.c 		     b < ca->buckets + ca->sb.nbuckets && d < end;
ca                577 drivers/md/bcache/super.c 		p->next_bucket	= ca->prio_buckets[i + 1];
ca                578 drivers/md/bcache/super.c 		p->magic	= pset_magic(&ca->sb);
ca                579 drivers/md/bcache/super.c 		p->csum		= bch_crc64(&p->magic, bucket_bytes(ca) - 8);
ca                581 drivers/md/bcache/super.c 		bucket = bch_bucket_alloc(ca, RESERVE_PRIO, wait);
ca                584 drivers/md/bcache/super.c 		mutex_unlock(&ca->set->bucket_lock);
ca                585 drivers/md/bcache/super.c 		prio_io(ca, bucket, REQ_OP_WRITE, 0);
ca                586 drivers/md/bcache/super.c 		mutex_lock(&ca->set->bucket_lock);
ca                588 drivers/md/bcache/super.c 		ca->prio_buckets[i] = bucket;
ca                589 drivers/md/bcache/super.c 		atomic_dec_bug(&ca->buckets[bucket].pin);
ca                592 drivers/md/bcache/super.c 	mutex_unlock(&ca->set->bucket_lock);
ca                594 drivers/md/bcache/super.c 	bch_journal_meta(ca->set, &cl);
ca                597 drivers/md/bcache/super.c 	mutex_lock(&ca->set->bucket_lock);
ca                603 drivers/md/bcache/super.c 	for (i = 0; i < prio_buckets(ca); i++) {
ca                604 drivers/md/bcache/super.c 		if (ca->prio_last_buckets[i])
ca                605 drivers/md/bcache/super.c 			__bch_bucket_free(ca,
ca                606 drivers/md/bcache/super.c 				&ca->buckets[ca->prio_last_buckets[i]]);
ca                608 drivers/md/bcache/super.c 		ca->prio_last_buckets[i] = ca->prio_buckets[i];
ca                613 drivers/md/bcache/super.c static void prio_read(struct cache *ca, uint64_t bucket)
ca                615 drivers/md/bcache/super.c 	struct prio_set *p = ca->disk_buckets;
ca                616 drivers/md/bcache/super.c 	struct bucket_disk *d = p->data + prios_per_bucket(ca), *end = d;
ca                620 drivers/md/bcache/super.c 	for (b = ca->buckets;
ca                621 drivers/md/bcache/super.c 	     b < ca->buckets + ca->sb.nbuckets;
ca                624 drivers/md/bcache/super.c 			ca->prio_buckets[bucket_nr] = bucket;
ca                625 drivers/md/bcache/super.c 			ca->prio_last_buckets[bucket_nr] = bucket;
ca                628 drivers/md/bcache/super.c 			prio_io(ca, bucket, REQ_OP_READ, 0);
ca                631 drivers/md/bcache/super.c 			    bch_crc64(&p->magic, bucket_bytes(ca) - 8))
ca                634 drivers/md/bcache/super.c 			if (p->magic != pset_magic(&ca->sb))
ca                698 drivers/md/bcache/super.c 		struct cache *ca;
ca                703 drivers/md/bcache/super.c 		for_each_cache(ca, d->c, i)
ca                704 drivers/md/bcache/super.c 			bd_unlink_disk_holder(ca->bdev, d->disk);
ca                712 drivers/md/bcache/super.c 	struct cache *ca;
ca                715 drivers/md/bcache/super.c 	for_each_cache(ca, d->c, i)
ca                716 drivers/md/bcache/super.c 		bd_link_disk_holder(ca->bdev, d->disk);
ca               1573 drivers/md/bcache/super.c 	struct cache *ca;
ca               1583 drivers/md/bcache/super.c 	for_each_cache(ca, c, i)
ca               1584 drivers/md/bcache/super.c 		if (ca) {
ca               1585 drivers/md/bcache/super.c 			ca->set = NULL;
ca               1586 drivers/md/bcache/super.c 			c->cache[ca->sb.nr_this_dev] = NULL;
ca               1587 drivers/md/bcache/super.c 			kobject_put(&ca->kobj);
ca               1614 drivers/md/bcache/super.c 	struct cache *ca;
ca               1641 drivers/md/bcache/super.c 	for_each_cache(ca, c, i)
ca               1642 drivers/md/bcache/super.c 		if (ca->alloc_thread)
ca               1643 drivers/md/bcache/super.c 			kthread_stop(ca->alloc_thread);
ca               1850 drivers/md/bcache/super.c 	struct cache *ca;
ca               1858 drivers/md/bcache/super.c 	for_each_cache(ca, c, i)
ca               1859 drivers/md/bcache/super.c 		c->nbuckets += ca->sb.nbuckets;
ca               1879 drivers/md/bcache/super.c 		for_each_cache(ca, c, i)
ca               1880 drivers/md/bcache/super.c 			prio_read(ca, j->prio_bucket[ca->sb.nr_this_dev]);
ca               1941 drivers/md/bcache/super.c 		for_each_cache(ca, c, i)
ca               1942 drivers/md/bcache/super.c 			if (bch_cache_allocator_start(ca))
ca               1964 drivers/md/bcache/super.c 		for_each_cache(ca, c, i) {
ca               1967 drivers/md/bcache/super.c 			ca->sb.keys = clamp_t(int, ca->sb.nbuckets >> 7,
ca               1970 drivers/md/bcache/super.c 			for (j = 0; j < ca->sb.keys; j++)
ca               1971 drivers/md/bcache/super.c 				ca->sb.d[j] = ca->sb.first_bucket + j;
ca               1977 drivers/md/bcache/super.c 		for_each_cache(ca, c, i)
ca               1978 drivers/md/bcache/super.c 			if (bch_cache_allocator_start(ca))
ca               1982 drivers/md/bcache/super.c 		for_each_cache(ca, c, i)
ca               1983 drivers/md/bcache/super.c 			bch_prio_write(ca, true);
ca               2043 drivers/md/bcache/super.c static bool can_attach_cache(struct cache *ca, struct cache_set *c)
ca               2045 drivers/md/bcache/super.c 	return ca->sb.block_size	== c->sb.block_size &&
ca               2046 drivers/md/bcache/super.c 		ca->sb.bucket_size	== c->sb.bucket_size &&
ca               2047 drivers/md/bcache/super.c 		ca->sb.nr_in_set	== c->sb.nr_in_set;
ca               2050 drivers/md/bcache/super.c static const char *register_cache_set(struct cache *ca)
ca               2057 drivers/md/bcache/super.c 		if (!memcmp(c->sb.set_uuid, ca->sb.set_uuid, 16)) {
ca               2058 drivers/md/bcache/super.c 			if (c->cache[ca->sb.nr_this_dev])
ca               2061 drivers/md/bcache/super.c 			if (!can_attach_cache(ca, c))
ca               2064 drivers/md/bcache/super.c 			if (!CACHE_SYNC(&ca->sb))
ca               2070 drivers/md/bcache/super.c 	c = bch_cache_set_alloc(&ca->sb);
ca               2086 drivers/md/bcache/super.c 	sprintf(buf, "cache%i", ca->sb.nr_this_dev);
ca               2087 drivers/md/bcache/super.c 	if (sysfs_create_link(&ca->kobj, &c->kobj, "set") ||
ca               2088 drivers/md/bcache/super.c 	    sysfs_create_link(&c->kobj, &ca->kobj, buf))
ca               2091 drivers/md/bcache/super.c 	if (ca->sb.seq > c->sb.seq) {
ca               2092 drivers/md/bcache/super.c 		c->sb.version		= ca->sb.version;
ca               2093 drivers/md/bcache/super.c 		memcpy(c->sb.set_uuid, ca->sb.set_uuid, 16);
ca               2094 drivers/md/bcache/super.c 		c->sb.flags             = ca->sb.flags;
ca               2095 drivers/md/bcache/super.c 		c->sb.seq		= ca->sb.seq;
ca               2099 drivers/md/bcache/super.c 	kobject_get(&ca->kobj);
ca               2100 drivers/md/bcache/super.c 	ca->set = c;
ca               2101 drivers/md/bcache/super.c 	ca->set->cache[ca->sb.nr_this_dev] = ca;
ca               2102 drivers/md/bcache/super.c 	c->cache_by_alloc[c->caches_loaded++] = ca;
ca               2121 drivers/md/bcache/super.c 	struct cache *ca = container_of(kobj, struct cache, kobj);
ca               2124 drivers/md/bcache/super.c 	if (ca->set) {
ca               2125 drivers/md/bcache/super.c 		BUG_ON(ca->set->cache[ca->sb.nr_this_dev] != ca);
ca               2126 drivers/md/bcache/super.c 		ca->set->cache[ca->sb.nr_this_dev] = NULL;
ca               2129 drivers/md/bcache/super.c 	free_pages((unsigned long) ca->disk_buckets, ilog2(bucket_pages(ca)));
ca               2130 drivers/md/bcache/super.c 	kfree(ca->prio_buckets);
ca               2131 drivers/md/bcache/super.c 	vfree(ca->buckets);
ca               2133 drivers/md/bcache/super.c 	free_heap(&ca->heap);
ca               2134 drivers/md/bcache/super.c 	free_fifo(&ca->free_inc);
ca               2137 drivers/md/bcache/super.c 		free_fifo(&ca->free[i]);
ca               2139 drivers/md/bcache/super.c 	if (ca->sb_bio.bi_inline_vecs[0].bv_page)
ca               2140 drivers/md/bcache/super.c 		put_page(bio_first_page_all(&ca->sb_bio));
ca               2142 drivers/md/bcache/super.c 	if (!IS_ERR_OR_NULL(ca->bdev))
ca               2143 drivers/md/bcache/super.c 		blkdev_put(ca->bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
ca               2145 drivers/md/bcache/super.c 	kfree(ca);
ca               2149 drivers/md/bcache/super.c static int cache_alloc(struct cache *ca)
ca               2158 drivers/md/bcache/super.c 	kobject_init(&ca->kobj, &bch_cache_ktype);
ca               2160 drivers/md/bcache/super.c 	bio_init(&ca->journal.bio, ca->journal.bio.bi_inline_vecs, 8);
ca               2171 drivers/md/bcache/super.c 	btree_buckets = ca->sb.njournal_buckets ?: 8;
ca               2172 drivers/md/bcache/super.c 	free = roundup_pow_of_two(ca->sb.nbuckets) >> 10;
ca               2179 drivers/md/bcache/super.c 	if (!init_fifo(&ca->free[RESERVE_BTREE], btree_buckets,
ca               2185 drivers/md/bcache/super.c 	if (!init_fifo_exact(&ca->free[RESERVE_PRIO], prio_buckets(ca),
ca               2191 drivers/md/bcache/super.c 	if (!init_fifo(&ca->free[RESERVE_MOVINGGC], free, GFP_KERNEL)) {
ca               2196 drivers/md/bcache/super.c 	if (!init_fifo(&ca->free[RESERVE_NONE], free, GFP_KERNEL)) {
ca               2201 drivers/md/bcache/super.c 	if (!init_fifo(&ca->free_inc, free << 2, GFP_KERNEL)) {
ca               2206 drivers/md/bcache/super.c 	if (!init_heap(&ca->heap, free << 3, GFP_KERNEL)) {
ca               2211 drivers/md/bcache/super.c 	ca->buckets = vzalloc(array_size(sizeof(struct bucket),
ca               2212 drivers/md/bcache/super.c 			      ca->sb.nbuckets));
ca               2213 drivers/md/bcache/super.c 	if (!ca->buckets) {
ca               2218 drivers/md/bcache/super.c 	ca->prio_buckets = kzalloc(array3_size(sizeof(uint64_t),
ca               2219 drivers/md/bcache/super.c 				   prio_buckets(ca), 2),
ca               2221 drivers/md/bcache/super.c 	if (!ca->prio_buckets) {
ca               2226 drivers/md/bcache/super.c 	ca->disk_buckets = alloc_bucket_pages(GFP_KERNEL, ca);
ca               2227 drivers/md/bcache/super.c 	if (!ca->disk_buckets) {
ca               2232 drivers/md/bcache/super.c 	ca->prio_last_buckets = ca->prio_buckets + prio_buckets(ca);
ca               2234 drivers/md/bcache/super.c 	for_each_bucket(b, ca)
ca               2239 drivers/md/bcache/super.c 	kfree(ca->prio_buckets);
ca               2241 drivers/md/bcache/super.c 	vfree(ca->buckets);
ca               2243 drivers/md/bcache/super.c 	free_heap(&ca->heap);
ca               2245 drivers/md/bcache/super.c 	free_fifo(&ca->free_inc);
ca               2247 drivers/md/bcache/super.c 	free_fifo(&ca->free[RESERVE_NONE]);
ca               2249 drivers/md/bcache/super.c 	free_fifo(&ca->free[RESERVE_MOVINGGC]);
ca               2251 drivers/md/bcache/super.c 	free_fifo(&ca->free[RESERVE_PRIO]);
ca               2253 drivers/md/bcache/super.c 	free_fifo(&ca->free[RESERVE_BTREE]);
ca               2258 drivers/md/bcache/super.c 		pr_notice("error %s: %s", ca->cache_dev_name, err);
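
The tail of cache_alloc() (super.c 2239-2258 above) is the kernel's goto-unwind idiom: one label per allocation, and each failure jumps to the label that releases everything acquired so far, in reverse order. A generic sketch with hypothetical names:

	static int setup(struct foo *f)
	{
		if (!init_fifo(&f->a, 8, GFP_KERNEL))
			goto err_a;
		if (!init_fifo(&f->b, 8, GFP_KERNEL))
			goto err_b;
		f->buf = vzalloc(PAGE_SIZE);
		if (!f->buf)
			goto err_buf;
		return 0;

	err_buf:
		free_fifo(&f->b);
	err_b:
		free_fifo(&f->a);
	err_a:
		return -ENOMEM;
	}
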
ca               2263 drivers/md/bcache/super.c 				struct block_device *bdev, struct cache *ca)
ca               2268 drivers/md/bcache/super.c 	bdevname(bdev, ca->cache_dev_name);
ca               2269 drivers/md/bcache/super.c 	memcpy(&ca->sb, sb, sizeof(struct cache_sb));
ca               2270 drivers/md/bcache/super.c 	ca->bdev = bdev;
ca               2271 drivers/md/bcache/super.c 	ca->bdev->bd_holder = ca;
ca               2273 drivers/md/bcache/super.c 	bio_init(&ca->sb_bio, ca->sb_bio.bi_inline_vecs, 1);
ca               2274 drivers/md/bcache/super.c 	bio_first_bvec_all(&ca->sb_bio)->bv_page = sb_page;
ca               2278 drivers/md/bcache/super.c 		ca->discard = CACHE_DISCARD(&ca->sb);
ca               2280 drivers/md/bcache/super.c 	ret = cache_alloc(ca);
ca               2298 drivers/md/bcache/super.c 	if (kobject_add(&ca->kobj,
ca               2307 drivers/md/bcache/super.c 	err = register_cache_set(ca);
ca               2315 drivers/md/bcache/super.c 	pr_info("registered cache device %s", ca->cache_dev_name);
ca               2318 drivers/md/bcache/super.c 	kobject_put(&ca->kobj);
ca               2322 drivers/md/bcache/super.c 		pr_notice("error %s: %s", ca->cache_dev_name, err);
ca               2357 drivers/md/bcache/super.c 	struct cache *ca;
ca               2361 drivers/md/bcache/super.c 		for_each_cache(ca, c, i)
ca               2362 drivers/md/bcache/super.c 			if (ca->bdev == bdev)
ca               2449 drivers/md/bcache/super.c 		struct cache *ca = kzalloc(sizeof(*ca), GFP_KERNEL);
ca               2451 drivers/md/bcache/super.c 		if (!ca)
ca               2455 drivers/md/bcache/super.c 		if (register_cache(sb, sb_page, bdev, ca) != 0) {
ca                995 drivers/md/bcache/sysfs.c 	struct cache *ca = container_of(kobj, struct cache, kobj);
ca                997 drivers/md/bcache/sysfs.c 	sysfs_hprint(bucket_size,	bucket_bytes(ca));
ca                998 drivers/md/bcache/sysfs.c 	sysfs_hprint(block_size,	block_bytes(ca));
ca                999 drivers/md/bcache/sysfs.c 	sysfs_print(nbuckets,		ca->sb.nbuckets);
ca               1000 drivers/md/bcache/sysfs.c 	sysfs_print(discard,		ca->discard);
ca               1001 drivers/md/bcache/sysfs.c 	sysfs_hprint(written, atomic_long_read(&ca->sectors_written) << 9);
ca               1003 drivers/md/bcache/sysfs.c 		     atomic_long_read(&ca->btree_sectors_written) << 9);
ca               1005 drivers/md/bcache/sysfs.c 		     (atomic_long_read(&ca->meta_sectors_written) +
ca               1006 drivers/md/bcache/sysfs.c 		      atomic_long_read(&ca->btree_sectors_written)) << 9);
ca               1009 drivers/md/bcache/sysfs.c 		    atomic_read(&ca->io_errors) >> IO_ERROR_SHIFT);
ca               1014 drivers/md/bcache/sysfs.c 					       CACHE_REPLACEMENT(&ca->sb));
ca               1018 drivers/md/bcache/sysfs.c 		size_t n = ca->sb.nbuckets, i;
ca               1026 drivers/md/bcache/sysfs.c 						ca->sb.nbuckets));
ca               1030 drivers/md/bcache/sysfs.c 		mutex_lock(&ca->set->bucket_lock);
ca               1031 drivers/md/bcache/sysfs.c 		for_each_bucket(b, ca) {
ca               1042 drivers/md/bcache/sysfs.c 		for (i = ca->sb.first_bucket; i < n; i++)
ca               1043 drivers/md/bcache/sysfs.c 			p[i] = ca->buckets[i].prio;
ca               1044 drivers/md/bcache/sysfs.c 		mutex_unlock(&ca->set->bucket_lock);
ca               1076 drivers/md/bcache/sysfs.c 				unused * 100 / (size_t) ca->sb.nbuckets,
ca               1077 drivers/md/bcache/sysfs.c 				available * 100 / (size_t) ca->sb.nbuckets,
ca               1078 drivers/md/bcache/sysfs.c 				dirty * 100 / (size_t) ca->sb.nbuckets,
ca               1079 drivers/md/bcache/sysfs.c 				meta * 100 / (size_t) ca->sb.nbuckets, sum,
ca               1080 drivers/md/bcache/sysfs.c 				n * ca->sb.bucket_size / (ARRAY_SIZE(q) + 1));
ca               1098 drivers/md/bcache/sysfs.c 	struct cache *ca = container_of(kobj, struct cache, kobj);
ca               1108 drivers/md/bcache/sysfs.c 		if (blk_queue_discard(bdev_get_queue(ca->bdev)))
ca               1109 drivers/md/bcache/sysfs.c 			ca->discard = v;
ca               1111 drivers/md/bcache/sysfs.c 		if (v != CACHE_DISCARD(&ca->sb)) {
ca               1112 drivers/md/bcache/sysfs.c 			SET_CACHE_DISCARD(&ca->sb, v);
ca               1113 drivers/md/bcache/sysfs.c 			bcache_write_super(ca->set);
ca               1122 drivers/md/bcache/sysfs.c 		if ((unsigned int) v != CACHE_REPLACEMENT(&ca->sb)) {
ca               1123 drivers/md/bcache/sysfs.c 			mutex_lock(&ca->set->bucket_lock);
ca               1124 drivers/md/bcache/sysfs.c 			SET_CACHE_REPLACEMENT(&ca->sb, v);
ca               1125 drivers/md/bcache/sysfs.c 			mutex_unlock(&ca->set->bucket_lock);
ca               1127 drivers/md/bcache/sysfs.c 			bcache_write_super(ca->set);
ca               1132 drivers/md/bcache/sysfs.c 		atomic_long_set(&ca->sectors_written, 0);
ca               1133 drivers/md/bcache/sysfs.c 		atomic_long_set(&ca->btree_sectors_written, 0);
ca               1134 drivers/md/bcache/sysfs.c 		atomic_long_set(&ca->meta_sectors_written, 0);
ca               1135 drivers/md/bcache/sysfs.c 		atomic_set(&ca->io_count, 0);
ca               1136 drivers/md/bcache/sysfs.c 		atomic_set(&ca->io_errors, 0);
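
The sysfs.c entries recover the per-cache attribute handlers: container_of() maps the embedded kobject back to the struct cache (entries 995 and 1098), the show path prints counters such as sectors_written shifted left by 9 to convert sectors to bytes, and the store path clears them with atomic_long_set()/atomic_set() (entries 1132-1136). A generic sketch of that show/store shape; bcache itself wraps the printing in its own sysfs_print()/sysfs_hprint() macros, and everything here besides container_of and the atomics is a stand-in:

	#include <linux/atomic.h>
	#include <linux/kobject.h>
	#include <linux/sysfs.h>

	struct my_cache {
		struct kobject kobj;
		atomic_long_t sectors_written;
	};

	static ssize_t written_show(struct kobject *kobj,
				    struct kobj_attribute *attr, char *buf)
	{
		struct my_cache *ca = container_of(kobj, struct my_cache, kobj);

		/* sectors are 512 bytes, hence the << 9 in the listing */
		return scnprintf(buf, PAGE_SIZE, "%ld\n",
				 atomic_long_read(&ca->sectors_written) << 9);
	}

	static ssize_t written_store(struct kobject *kobj,
				     struct kobj_attribute *attr,
				     const char *buf, size_t count)
	{
		struct my_cache *ca = container_of(kobj, struct my_cache, kobj);

		atomic_long_set(&ca->sectors_written, 0);
		return count;
	}

	static struct kobj_attribute written_attr =
		__ATTR(written, 0644, written_show, written_store);
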
ca               2094 drivers/md/dm-cache-target.c static void destroy_cache_args(struct cache_args *ca)
ca               2096 drivers/md/dm-cache-target.c 	if (ca->metadata_dev)
ca               2097 drivers/md/dm-cache-target.c 		dm_put_device(ca->ti, ca->metadata_dev);
ca               2099 drivers/md/dm-cache-target.c 	if (ca->cache_dev)
ca               2100 drivers/md/dm-cache-target.c 		dm_put_device(ca->ti, ca->cache_dev);
ca               2102 drivers/md/dm-cache-target.c 	if (ca->origin_dev)
ca               2103 drivers/md/dm-cache-target.c 		dm_put_device(ca->ti, ca->origin_dev);
ca               2105 drivers/md/dm-cache-target.c 	kfree(ca);
ca               2118 drivers/md/dm-cache-target.c static int parse_metadata_dev(struct cache_args *ca, struct dm_arg_set *as,
ca               2128 drivers/md/dm-cache-target.c 	r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
ca               2129 drivers/md/dm-cache-target.c 			  &ca->metadata_dev);
ca               2135 drivers/md/dm-cache-target.c 	metadata_dev_size = get_dev_size(ca->metadata_dev);
ca               2138 drivers/md/dm-cache-target.c 		       bdevname(ca->metadata_dev->bdev, b), THIN_METADATA_MAX_SECTORS);
ca               2143 drivers/md/dm-cache-target.c static int parse_cache_dev(struct cache_args *ca, struct dm_arg_set *as,
ca               2151 drivers/md/dm-cache-target.c 	r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
ca               2152 drivers/md/dm-cache-target.c 			  &ca->cache_dev);
ca               2157 drivers/md/dm-cache-target.c 	ca->cache_sectors = get_dev_size(ca->cache_dev);
ca               2162 drivers/md/dm-cache-target.c static int parse_origin_dev(struct cache_args *ca, struct dm_arg_set *as,
ca               2170 drivers/md/dm-cache-target.c 	r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
ca               2171 drivers/md/dm-cache-target.c 			  &ca->origin_dev);
ca               2177 drivers/md/dm-cache-target.c 	ca->origin_sectors = get_dev_size(ca->origin_dev);
ca               2178 drivers/md/dm-cache-target.c 	if (ca->ti->len > ca->origin_sectors) {
ca               2186 drivers/md/dm-cache-target.c static int parse_block_size(struct cache_args *ca, struct dm_arg_set *as,
ca               2202 drivers/md/dm-cache-target.c 	if (block_size > ca->cache_sectors) {
ca               2207 drivers/md/dm-cache-target.c 	ca->block_size = block_size;
ca               2220 drivers/md/dm-cache-target.c static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
ca               2230 drivers/md/dm-cache-target.c 	struct cache_features *cf = &ca->features;
ca               2276 drivers/md/dm-cache-target.c static int parse_policy(struct cache_args *ca, struct dm_arg_set *as,
ca               2288 drivers/md/dm-cache-target.c 	ca->policy_name = dm_shift_arg(as);
ca               2290 drivers/md/dm-cache-target.c 	r = dm_read_arg_group(_args, as, &ca->policy_argc, error);
ca               2294 drivers/md/dm-cache-target.c 	ca->policy_argv = (const char **)as->argv;
ca               2295 drivers/md/dm-cache-target.c 	dm_consume_args(as, ca->policy_argc);
ca               2300 drivers/md/dm-cache-target.c static int parse_cache_args(struct cache_args *ca, int argc, char **argv,
ca               2309 drivers/md/dm-cache-target.c 	r = parse_metadata_dev(ca, &as, error);
ca               2313 drivers/md/dm-cache-target.c 	r = parse_cache_dev(ca, &as, error);
ca               2317 drivers/md/dm-cache-target.c 	r = parse_origin_dev(ca, &as, error);
ca               2321 drivers/md/dm-cache-target.c 	r = parse_block_size(ca, &as, error);
ca               2325 drivers/md/dm-cache-target.c 	r = parse_features(ca, &as, error);
ca               2329 drivers/md/dm-cache-target.c 	r = parse_policy(ca, &as, error);
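
parse_cache_args() (entry 2300) simply runs the per-argument helpers in table order, each consuming tokens from the shared struct dm_arg_set and acquiring its block device with dm_get_device(). The origin-device helper reconstructs cleanly from entries 2162-2186; the sketch below follows that shape, with the at_least_one_arg() guard and the error strings assumed rather than shown in the excerpt:

	#include <linux/device-mapper.h>

	static int parse_origin_dev(struct cache_args *ca, struct dm_arg_set *as,
				    char **error)
	{
		int r;

		/* guard helper assumed from context; bails out on an empty arg set */
		if (!at_least_one_arg(as, error))
			return -EINVAL;

		r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
				  &ca->origin_dev);
		if (r) {
			*error = "Error opening origin device";
			return r;
		}

		ca->origin_sectors = get_dev_size(ca->origin_dev);
		if (ca->ti->len > ca->origin_sectors) {
			*error = "Device size larger than cached device";
			return -EINVAL;
		}

		return 0;
	}
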
ca               2391 drivers/md/dm-cache-target.c static int create_cache_policy(struct cache *cache, struct cache_args *ca,
ca               2394 drivers/md/dm-cache-target.c 	struct dm_cache_policy *p = dm_cache_policy_create(ca->policy_name,
ca               2463 drivers/md/dm-cache-target.c static int cache_create(struct cache_args *ca, struct cache **result)
ca               2466 drivers/md/dm-cache-target.c 	char **error = &ca->ti->error;
ca               2468 drivers/md/dm-cache-target.c 	struct dm_target *ti = ca->ti;
ca               2471 drivers/md/dm-cache-target.c 	bool may_format = ca->features.mode == CM_WRITE;
ca               2477 drivers/md/dm-cache-target.c 	cache->ti = ca->ti;
ca               2487 drivers/md/dm-cache-target.c 	cache->features = ca->features;
ca               2498 drivers/md/dm-cache-target.c 	cache->metadata_dev = ca->metadata_dev;
ca               2499 drivers/md/dm-cache-target.c 	cache->origin_dev = ca->origin_dev;
ca               2500 drivers/md/dm-cache-target.c 	cache->cache_dev = ca->cache_dev;
ca               2502 drivers/md/dm-cache-target.c 	ca->metadata_dev = ca->origin_dev = ca->cache_dev = NULL;
ca               2504 drivers/md/dm-cache-target.c 	origin_blocks = cache->origin_sectors = ca->origin_sectors;
ca               2505 drivers/md/dm-cache-target.c 	origin_blocks = block_div(origin_blocks, ca->block_size);
ca               2508 drivers/md/dm-cache-target.c 	cache->sectors_per_block = ca->block_size;
ca               2514 drivers/md/dm-cache-target.c 	if (ca->block_size & (ca->block_size - 1)) {
ca               2515 drivers/md/dm-cache-target.c 		dm_block_t cache_size = ca->cache_sectors;
ca               2518 drivers/md/dm-cache-target.c 		cache_size = block_div(cache_size, ca->block_size);
ca               2521 drivers/md/dm-cache-target.c 		cache->sectors_per_block_shift = __ffs(ca->block_size);
ca               2522 drivers/md/dm-cache-target.c 		set_cache_size(cache, to_cblock(ca->cache_sectors >> cache->sectors_per_block_shift));
ca               2525 drivers/md/dm-cache-target.c 	r = create_cache_policy(cache, ca, error);
ca               2529 drivers/md/dm-cache-target.c 	cache->policy_nr_args = ca->policy_argc;
ca               2532 drivers/md/dm-cache-target.c 	r = set_config_values(cache, ca->policy_argc, ca->policy_argv);
ca               2539 drivers/md/dm-cache-target.c 				     ca->block_size, may_format,
ca               2541 drivers/md/dm-cache-target.c 				     ca->features.metadata_version);
ca               2689 drivers/md/dm-cache-target.c 	struct cache_args *ca;
ca               2692 drivers/md/dm-cache-target.c 	ca = kzalloc(sizeof(*ca), GFP_KERNEL);
ca               2693 drivers/md/dm-cache-target.c 	if (!ca) {
ca               2697 drivers/md/dm-cache-target.c 	ca->ti = ti;
ca               2699 drivers/md/dm-cache-target.c 	r = parse_cache_args(ca, argc, argv, &ti->error);
ca               2703 drivers/md/dm-cache-target.c 	r = cache_create(ca, &cache);
ca               2715 drivers/md/dm-cache-target.c 	destroy_cache_args(ca);
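
Taken together, the constructor entries (2689-2715) give the whole ctr flow: kzalloc a struct cache_args, record the target, parse the table, build the cache, and unconditionally free the args on the way out. cache_create() steals the three dm_dev pointers by NULLing them in ca (entry 2502), so the final destroy_cache_args() only puts devices that were never transferred. A sketch of that sequence; the ti->private assignment and the error label are assumptions, as the excerpt elides them:

	static int cache_ctr(struct dm_target *ti, unsigned int argc, char **argv)
	{
		int r;
		struct cache_args *ca;
		struct cache *cache = NULL;

		ca = kzalloc(sizeof(*ca), GFP_KERNEL);
		if (!ca) {
			ti->error = "Error allocating memory for cache";
			return -ENOMEM;
		}
		ca->ti = ti;

		r = parse_cache_args(ca, argc, argv, &ti->error);
		if (r)
			goto out;

		r = cache_create(ca, &cache);
		if (r)
			goto out;

		/* ... policy-hint loading elided ... */
		ti->private = cache;
	out:
		destroy_cache_args(ca);
		return r;
	}
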
ca                156 drivers/media/dvb-core/dvb_ca_en50221.c static void dvb_ca_private_free(struct dvb_ca_private *ca)
ca                160 drivers/media/dvb-core/dvb_ca_en50221.c 	dvb_free_device(ca->dvbdev);
ca                161 drivers/media/dvb-core/dvb_ca_en50221.c 	for (i = 0; i < ca->slot_count; i++)
ca                162 drivers/media/dvb-core/dvb_ca_en50221.c 		vfree(ca->slot_info[i].rx_buffer.data);
ca                164 drivers/media/dvb-core/dvb_ca_en50221.c 	kfree(ca->slot_info);
ca                165 drivers/media/dvb-core/dvb_ca_en50221.c 	kfree(ca);
ca                170 drivers/media/dvb-core/dvb_ca_en50221.c 	struct dvb_ca_private *ca;
ca                172 drivers/media/dvb-core/dvb_ca_en50221.c 	ca = container_of(ref, struct dvb_ca_private, refcount);
ca                173 drivers/media/dvb-core/dvb_ca_en50221.c 	dvb_ca_private_free(ca);
ca                176 drivers/media/dvb-core/dvb_ca_en50221.c static void dvb_ca_private_get(struct dvb_ca_private *ca)
ca                178 drivers/media/dvb-core/dvb_ca_en50221.c 	kref_get(&ca->refcount);
ca                181 drivers/media/dvb-core/dvb_ca_en50221.c static void dvb_ca_private_put(struct dvb_ca_private *ca)
ca                183 drivers/media/dvb-core/dvb_ca_en50221.c 	kref_put(&ca->refcount, dvb_ca_private_release);
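
Entries 156-183 give dvb_ca_private its complete kref lifecycle: kref_init() at allocation (entry 1857), dvb_ca_private_get() when a file handle opens the device (entry 1737), and dvb_ca_private_put() from the release paths, with dvb_ca_private_release() recovering the object via container_of() and freeing each slot's rx_buffer. The same pattern in miniature (my_obj names are stand-ins):

	#include <linux/kref.h>
	#include <linux/slab.h>

	struct my_obj {
		struct kref refcount;
		/* ... payload ... */
	};

	static void my_obj_release(struct kref *ref)
	{
		kfree(container_of(ref, struct my_obj, refcount));
	}

	static struct my_obj *my_obj_alloc(void)
	{
		struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

		if (obj)
			kref_init(&obj->refcount);	/* count starts at 1 */
		return obj;
	}

	static void my_obj_get(struct my_obj *obj)
	{
		kref_get(&obj->refcount);
	}

	static void my_obj_put(struct my_obj *obj)
	{
		kref_put(&obj->refcount, my_obj_release);	/* frees on last put */
	}
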
ca                186 drivers/media/dvb-core/dvb_ca_en50221.c static void dvb_ca_en50221_thread_wakeup(struct dvb_ca_private *ca);
ca                187 drivers/media/dvb-core/dvb_ca_en50221.c static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot,
ca                189 drivers/media/dvb-core/dvb_ca_en50221.c static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca, int slot,
ca                222 drivers/media/dvb-core/dvb_ca_en50221.c static int dvb_ca_en50221_check_camstatus(struct dvb_ca_private *ca, int slot)
ca                224 drivers/media/dvb-core/dvb_ca_en50221.c 	struct dvb_ca_slot *sl = &ca->slot_info[slot];
ca                230 drivers/media/dvb-core/dvb_ca_en50221.c 	if (ca->flags & DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE)
ca                234 drivers/media/dvb-core/dvb_ca_en50221.c 	slot_status = ca->pub->poll_slot_status(ca->pub, slot, ca->open);
ca                272 drivers/media/dvb-core/dvb_ca_en50221.c static int dvb_ca_en50221_wait_if_status(struct dvb_ca_private *ca, int slot,
ca                287 drivers/media/dvb-core/dvb_ca_en50221.c 		res = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_STATUS);
ca                320 drivers/media/dvb-core/dvb_ca_en50221.c static int dvb_ca_en50221_link_init(struct dvb_ca_private *ca, int slot)
ca                322 drivers/media/dvb-core/dvb_ca_en50221.c 	struct dvb_ca_slot *sl = &ca->slot_info[slot];
ca                339 drivers/media/dvb-core/dvb_ca_en50221.c 	ret = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_COMMAND,
ca                343 drivers/media/dvb-core/dvb_ca_en50221.c 	ret = dvb_ca_en50221_wait_if_status(ca, slot, STATUSREG_DA, HZ);
ca                346 drivers/media/dvb-core/dvb_ca_en50221.c 	ret = dvb_ca_en50221_read_data(ca, slot, buf, 2);
ca                349 drivers/media/dvb-core/dvb_ca_en50221.c 	ret = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_COMMAND, IRQEN);
ca                366 drivers/media/dvb-core/dvb_ca_en50221.c 	ret = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_COMMAND,
ca                370 drivers/media/dvb-core/dvb_ca_en50221.c 	ret = dvb_ca_en50221_wait_if_status(ca, slot, STATUSREG_FR, HZ / 10);
ca                373 drivers/media/dvb-core/dvb_ca_en50221.c 	ret = dvb_ca_en50221_write_data(ca, slot, buf, 2);
ca                376 drivers/media/dvb-core/dvb_ca_en50221.c 	ret = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_COMMAND, IRQEN);
ca                396 drivers/media/dvb-core/dvb_ca_en50221.c static int dvb_ca_en50221_read_tuple(struct dvb_ca_private *ca, int slot,
ca                406 drivers/media/dvb-core/dvb_ca_en50221.c 	_tuple_type = ca->pub->read_attribute_mem(ca->pub, slot, _address);
ca                416 drivers/media/dvb-core/dvb_ca_en50221.c 	_tuple_length = ca->pub->read_attribute_mem(ca->pub, slot,
ca                426 drivers/media/dvb-core/dvb_ca_en50221.c 		tuple[i] = ca->pub->read_attribute_mem(ca->pub, slot,
ca                450 drivers/media/dvb-core/dvb_ca_en50221.c static int dvb_ca_en50221_parse_attributes(struct dvb_ca_private *ca, int slot)
ca                467 drivers/media/dvb-core/dvb_ca_en50221.c 	status = dvb_ca_en50221_read_tuple(ca, slot, &address, &tuple_type,
ca                475 drivers/media/dvb-core/dvb_ca_en50221.c 	status = dvb_ca_en50221_read_tuple(ca, slot, &address, &tuple_type,
ca                483 drivers/media/dvb-core/dvb_ca_en50221.c 	status = dvb_ca_en50221_read_tuple(ca, slot, &address, &tuple_type,
ca                491 drivers/media/dvb-core/dvb_ca_en50221.c 	status = dvb_ca_en50221_read_tuple(ca, slot, &address, &tuple_type,
ca                503 drivers/media/dvb-core/dvb_ca_en50221.c 	status = dvb_ca_en50221_read_tuple(ca, slot, &address, &tuple_type,
ca                516 drivers/media/dvb-core/dvb_ca_en50221.c 	sl = &ca->slot_info[slot];
ca                531 drivers/media/dvb-core/dvb_ca_en50221.c 		       ca->dvbdev->adapter->num, dvb_str[8], dvb_str[9],
ca                538 drivers/media/dvb-core/dvb_ca_en50221.c 		status = dvb_ca_en50221_read_tuple(ca, slot, &address,
ca                595 drivers/media/dvb-core/dvb_ca_en50221.c static int dvb_ca_en50221_set_configoption(struct dvb_ca_private *ca, int slot)
ca                597 drivers/media/dvb-core/dvb_ca_en50221.c 	struct dvb_ca_slot *sl = &ca->slot_info[slot];
ca                603 drivers/media/dvb-core/dvb_ca_en50221.c 	ca->pub->write_attribute_mem(ca->pub, slot, sl->config_base,
ca                607 drivers/media/dvb-core/dvb_ca_en50221.c 	configoption = ca->pub->read_attribute_mem(ca->pub, slot,
ca                631 drivers/media/dvb-core/dvb_ca_en50221.c static int dvb_ca_en50221_read_data(struct dvb_ca_private *ca, int slot,
ca                634 drivers/media/dvb-core/dvb_ca_en50221.c 	struct dvb_ca_slot *sl = &ca->slot_info[slot];
ca                659 drivers/media/dvb-core/dvb_ca_en50221.c 	if (ca->pub->read_data &&
ca                662 drivers/media/dvb-core/dvb_ca_en50221.c 			status = ca->pub->read_data(ca->pub, slot, buf,
ca                665 drivers/media/dvb-core/dvb_ca_en50221.c 			status = ca->pub->read_data(ca->pub, slot, buf, ecount);
ca                673 drivers/media/dvb-core/dvb_ca_en50221.c 		status = ca->pub->read_cam_control(ca->pub, slot,
ca                684 drivers/media/dvb-core/dvb_ca_en50221.c 		status = ca->pub->read_cam_control(ca->pub, slot,
ca                689 drivers/media/dvb-core/dvb_ca_en50221.c 		status = ca->pub->read_cam_control(ca->pub, slot,
ca                699 drivers/media/dvb-core/dvb_ca_en50221.c 				       ca->dvbdev->adapter->num, bytes_read,
ca                707 drivers/media/dvb-core/dvb_ca_en50221.c 				       ca->dvbdev->adapter->num);
ca                715 drivers/media/dvb-core/dvb_ca_en50221.c 				       ca->dvbdev->adapter->num);
ca                724 drivers/media/dvb-core/dvb_ca_en50221.c 			status = ca->pub->read_cam_control(ca->pub, slot,
ca                734 drivers/media/dvb-core/dvb_ca_en50221.c 		status = ca->pub->read_cam_control(ca->pub, slot,
ca                764 drivers/media/dvb-core/dvb_ca_en50221.c 		wake_up_interruptible(&ca->wait_queue);
ca                784 drivers/media/dvb-core/dvb_ca_en50221.c static int dvb_ca_en50221_write_data(struct dvb_ca_private *ca, int slot,
ca                787 drivers/media/dvb-core/dvb_ca_en50221.c 	struct dvb_ca_slot *sl = &ca->slot_info[slot];
ca                797 drivers/media/dvb-core/dvb_ca_en50221.c 	if (ca->pub->write_data &&
ca                799 drivers/media/dvb-core/dvb_ca_en50221.c 		return ca->pub->write_data(ca->pub, slot, buf, bytes_write);
ca                807 drivers/media/dvb-core/dvb_ca_en50221.c 	status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_STATUS);
ca                812 drivers/media/dvb-core/dvb_ca_en50221.c 			dvb_ca_en50221_thread_wakeup(ca);
ca                819 drivers/media/dvb-core/dvb_ca_en50221.c 	status = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_COMMAND,
ca                825 drivers/media/dvb-core/dvb_ca_en50221.c 	status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_STATUS);
ca                845 drivers/media/dvb-core/dvb_ca_en50221.c 	status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_STATUS);
ca                851 drivers/media/dvb-core/dvb_ca_en50221.c 			dvb_ca_en50221_thread_wakeup(ca);
ca                858 drivers/media/dvb-core/dvb_ca_en50221.c 	status = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_SIZE_HIGH,
ca                862 drivers/media/dvb-core/dvb_ca_en50221.c 	status = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_SIZE_LOW,
ca                869 drivers/media/dvb-core/dvb_ca_en50221.c 		status = ca->pub->write_cam_control(ca->pub, slot, CTRLIF_DATA,
ca                876 drivers/media/dvb-core/dvb_ca_en50221.c 	status = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_STATUS);
ca                890 drivers/media/dvb-core/dvb_ca_en50221.c 	ca->pub->write_cam_control(ca->pub, slot, CTRLIF_COMMAND, IRQEN);
ca                905 drivers/media/dvb-core/dvb_ca_en50221.c static int dvb_ca_en50221_slot_shutdown(struct dvb_ca_private *ca, int slot)
ca                909 drivers/media/dvb-core/dvb_ca_en50221.c 	ca->pub->slot_shutdown(ca->pub, slot);
ca                910 drivers/media/dvb-core/dvb_ca_en50221.c 	ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_NONE;
ca                916 drivers/media/dvb-core/dvb_ca_en50221.c 	wake_up_interruptible(&ca->wait_queue);
ca                934 drivers/media/dvb-core/dvb_ca_en50221.c 	struct dvb_ca_private *ca = pubca->private;
ca                935 drivers/media/dvb-core/dvb_ca_en50221.c 	struct dvb_ca_slot *sl = &ca->slot_info[slot];
ca                950 drivers/media/dvb-core/dvb_ca_en50221.c 	dvb_ca_en50221_thread_wakeup(ca);
ca                962 drivers/media/dvb-core/dvb_ca_en50221.c 	struct dvb_ca_private *ca = pubca->private;
ca                963 drivers/media/dvb-core/dvb_ca_en50221.c 	struct dvb_ca_slot *sl = &ca->slot_info[slot];
ca                969 drivers/media/dvb-core/dvb_ca_en50221.c 		dvb_ca_en50221_thread_wakeup(ca);
ca                982 drivers/media/dvb-core/dvb_ca_en50221.c 	struct dvb_ca_private *ca = pubca->private;
ca                983 drivers/media/dvb-core/dvb_ca_en50221.c 	struct dvb_ca_slot *sl = &ca->slot_info[slot];
ca                990 drivers/media/dvb-core/dvb_ca_en50221.c 		flags = ca->pub->read_cam_control(pubca, slot, CTRLIF_STATUS);
ca                998 drivers/media/dvb-core/dvb_ca_en50221.c 		if (ca->open)
ca                999 drivers/media/dvb-core/dvb_ca_en50221.c 			dvb_ca_en50221_thread_wakeup(ca);
ca               1013 drivers/media/dvb-core/dvb_ca_en50221.c static void dvb_ca_en50221_thread_wakeup(struct dvb_ca_private *ca)
ca               1017 drivers/media/dvb-core/dvb_ca_en50221.c 	ca->wakeup = 1;
ca               1019 drivers/media/dvb-core/dvb_ca_en50221.c 	wake_up_process(ca->thread);
ca               1027 drivers/media/dvb-core/dvb_ca_en50221.c static void dvb_ca_en50221_thread_update_delay(struct dvb_ca_private *ca)
ca               1037 drivers/media/dvb-core/dvb_ca_en50221.c 	for (slot = 0; slot < ca->slot_count; slot++) {
ca               1038 drivers/media/dvb-core/dvb_ca_en50221.c 		struct dvb_ca_slot *sl = &ca->slot_info[slot];
ca               1044 drivers/media/dvb-core/dvb_ca_en50221.c 			if (!(ca->flags & DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE))
ca               1049 drivers/media/dvb-core/dvb_ca_en50221.c 			if (!(ca->flags & DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE))
ca               1063 drivers/media/dvb-core/dvb_ca_en50221.c 			if (!(ca->flags & DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE))
ca               1065 drivers/media/dvb-core/dvb_ca_en50221.c 			if (ca->open) {
ca               1067 drivers/media/dvb-core/dvb_ca_en50221.c 				    (!(ca->flags & DVB_CA_EN50221_FLAG_IRQ_DA)))
ca               1077 drivers/media/dvb-core/dvb_ca_en50221.c 	ca->delay = curdelay;
ca               1089 drivers/media/dvb-core/dvb_ca_en50221.c static int dvb_ca_en50221_poll_cam_gone(struct dvb_ca_private *ca, int slot)
ca               1098 drivers/media/dvb-core/dvb_ca_en50221.c 	if ((!(ca->flags & DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE)) &&
ca               1099 drivers/media/dvb-core/dvb_ca_en50221.c 	    (ca->pub->poll_slot_status)) {
ca               1100 drivers/media/dvb-core/dvb_ca_en50221.c 		status = ca->pub->poll_slot_status(ca->pub, slot, 0);
ca               1103 drivers/media/dvb-core/dvb_ca_en50221.c 			ca->slot_info[slot].slot_state = DVB_CA_SLOTSTATE_NONE;
ca               1104 drivers/media/dvb-core/dvb_ca_en50221.c 			dvb_ca_en50221_thread_update_delay(ca);
ca               1117 drivers/media/dvb-core/dvb_ca_en50221.c static void dvb_ca_en50221_thread_state_machine(struct dvb_ca_private *ca,
ca               1120 drivers/media/dvb-core/dvb_ca_en50221.c 	struct dvb_ca_slot *sl = &ca->slot_info[slot];
ca               1128 drivers/media/dvb-core/dvb_ca_en50221.c 	while (dvb_ca_en50221_check_camstatus(ca, slot)) {
ca               1131 drivers/media/dvb-core/dvb_ca_en50221.c 			dvb_ca_en50221_slot_shutdown(ca, slot);
ca               1138 drivers/media/dvb-core/dvb_ca_en50221.c 		dvb_ca_en50221_thread_update_delay(ca);
ca               1151 drivers/media/dvb-core/dvb_ca_en50221.c 		ca->pub->slot_reset(ca->pub, slot);
ca               1158 drivers/media/dvb-core/dvb_ca_en50221.c 			       ca->dvbdev->adapter->num);
ca               1160 drivers/media/dvb-core/dvb_ca_en50221.c 			dvb_ca_en50221_thread_update_delay(ca);
ca               1170 drivers/media/dvb-core/dvb_ca_en50221.c 		if (dvb_ca_en50221_parse_attributes(ca, slot) != 0) {
ca               1171 drivers/media/dvb-core/dvb_ca_en50221.c 			if (dvb_ca_en50221_poll_cam_gone(ca, slot))
ca               1175 drivers/media/dvb-core/dvb_ca_en50221.c 			       ca->dvbdev->adapter->num);
ca               1177 drivers/media/dvb-core/dvb_ca_en50221.c 			dvb_ca_en50221_thread_update_delay(ca);
ca               1180 drivers/media/dvb-core/dvb_ca_en50221.c 		if (dvb_ca_en50221_set_configoption(ca, slot) != 0) {
ca               1182 drivers/media/dvb-core/dvb_ca_en50221.c 			       ca->dvbdev->adapter->num);
ca               1184 drivers/media/dvb-core/dvb_ca_en50221.c 			dvb_ca_en50221_thread_update_delay(ca);
ca               1187 drivers/media/dvb-core/dvb_ca_en50221.c 		if (ca->pub->write_cam_control(ca->pub, slot,
ca               1191 drivers/media/dvb-core/dvb_ca_en50221.c 			       ca->dvbdev->adapter->num);
ca               1193 drivers/media/dvb-core/dvb_ca_en50221.c 			dvb_ca_en50221_thread_update_delay(ca);
ca               1200 drivers/media/dvb-core/dvb_ca_en50221.c 		ca->wakeup = 1;
ca               1206 drivers/media/dvb-core/dvb_ca_en50221.c 			       ca->dvbdev->adapter->num);
ca               1208 drivers/media/dvb-core/dvb_ca_en50221.c 			dvb_ca_en50221_thread_update_delay(ca);
ca               1212 drivers/media/dvb-core/dvb_ca_en50221.c 		flags = ca->pub->read_cam_control(ca->pub, slot, CTRLIF_STATUS);
ca               1215 drivers/media/dvb-core/dvb_ca_en50221.c 			ca->wakeup = 1;
ca               1220 drivers/media/dvb-core/dvb_ca_en50221.c 		if (dvb_ca_en50221_link_init(ca, slot) != 0) {
ca               1221 drivers/media/dvb-core/dvb_ca_en50221.c 			if (dvb_ca_en50221_poll_cam_gone(ca, slot))
ca               1225 drivers/media/dvb-core/dvb_ca_en50221.c 			       ca->dvbdev->adapter->num);
ca               1227 drivers/media/dvb-core/dvb_ca_en50221.c 			dvb_ca_en50221_thread_update_delay(ca);
ca               1235 drivers/media/dvb-core/dvb_ca_en50221.c 				       ca->dvbdev->adapter->num);
ca               1237 drivers/media/dvb-core/dvb_ca_en50221.c 				dvb_ca_en50221_thread_update_delay(ca);
ca               1244 drivers/media/dvb-core/dvb_ca_en50221.c 		ca->pub->slot_ts_enable(ca->pub, slot);
ca               1246 drivers/media/dvb-core/dvb_ca_en50221.c 		dvb_ca_en50221_thread_update_delay(ca);
ca               1248 drivers/media/dvb-core/dvb_ca_en50221.c 			ca->dvbdev->adapter->num);
ca               1252 drivers/media/dvb-core/dvb_ca_en50221.c 		if (!ca->open)
ca               1257 drivers/media/dvb-core/dvb_ca_en50221.c 		while (dvb_ca_en50221_read_data(ca, slot, NULL, 0) > 0) {
ca               1258 drivers/media/dvb-core/dvb_ca_en50221.c 			if (!ca->open)
ca               1265 drivers/media/dvb-core/dvb_ca_en50221.c 			if (dvb_ca_en50221_check_camstatus(ca, slot)) {
ca               1270 drivers/media/dvb-core/dvb_ca_en50221.c 				ca->wakeup = 1;
ca               1280 drivers/media/dvb-core/dvb_ca_en50221.c 				ca->wakeup = 1;
ca               1296 drivers/media/dvb-core/dvb_ca_en50221.c 	struct dvb_ca_private *ca = data;
ca               1302 drivers/media/dvb-core/dvb_ca_en50221.c 	dvb_ca_en50221_thread_update_delay(ca);
ca               1307 drivers/media/dvb-core/dvb_ca_en50221.c 		if (!ca->wakeup) {
ca               1309 drivers/media/dvb-core/dvb_ca_en50221.c 			schedule_timeout(ca->delay);
ca               1313 drivers/media/dvb-core/dvb_ca_en50221.c 		ca->wakeup = 0;
ca               1316 drivers/media/dvb-core/dvb_ca_en50221.c 		for (slot = 0; slot < ca->slot_count; slot++)
ca               1317 drivers/media/dvb-core/dvb_ca_en50221.c 			dvb_ca_en50221_thread_state_machine(ca, slot);
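
Entries 1296-1317 outline the monitor thread's main loop: sleep for the adaptive ca->delay unless ca->wakeup was raised, then clear the flag and run the state machine over every slot. The wakeup side (entries 1013-1019) sets the flag before wake_up_process(), and the thread is started with kthread_run() and stopped with kthread_stop() (entries 1899 and 1934). A sketch of both halves using the standard kthread idiom; my_ca is a stand-in for the private state:

	#include <linux/kthread.h>
	#include <linux/sched.h>

	static void my_thread_wakeup(struct my_ca *ca)
	{
		ca->wakeup = 1;
		smp_wmb();			/* flag must be visible before the wake */
		wake_up_process(ca->thread);
	}

	static int my_monitor_thread(void *data)
	{
		struct my_ca *ca = data;

		while (!kthread_should_stop()) {
			/* set state before testing the flag to avoid a lost wakeup */
			set_current_state(TASK_INTERRUPTIBLE);
			if (!ca->wakeup)
				schedule_timeout(ca->delay);	/* delay is in jiffies */
			else
				__set_current_state(TASK_RUNNING);

			if (kthread_should_stop())
				break;

			ca->wakeup = 0;
			/* ... service each slot's state machine ... */
		}
		return 0;
	}
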
ca               1340 drivers/media/dvb-core/dvb_ca_en50221.c 	struct dvb_ca_private *ca = dvbdev->priv;
ca               1346 drivers/media/dvb-core/dvb_ca_en50221.c 	if (mutex_lock_interruptible(&ca->ioctl_mutex))
ca               1351 drivers/media/dvb-core/dvb_ca_en50221.c 		for (slot = 0; slot < ca->slot_count; slot++) {
ca               1352 drivers/media/dvb-core/dvb_ca_en50221.c 			struct dvb_ca_slot *sl = &ca->slot_info[slot];
ca               1356 drivers/media/dvb-core/dvb_ca_en50221.c 				dvb_ca_en50221_slot_shutdown(ca, slot);
ca               1357 drivers/media/dvb-core/dvb_ca_en50221.c 				if (ca->flags & DVB_CA_EN50221_FLAG_IRQ_CAMCHANGE)
ca               1358 drivers/media/dvb-core/dvb_ca_en50221.c 					dvb_ca_en50221_camchange_irq(ca->pub,
ca               1364 drivers/media/dvb-core/dvb_ca_en50221.c 		ca->next_read_slot = 0;
ca               1365 drivers/media/dvb-core/dvb_ca_en50221.c 		dvb_ca_en50221_thread_wakeup(ca);
ca               1371 drivers/media/dvb-core/dvb_ca_en50221.c 		caps->slot_num = ca->slot_count;
ca               1383 drivers/media/dvb-core/dvb_ca_en50221.c 		if ((slot >= ca->slot_count) || (slot < 0)) {
ca               1390 drivers/media/dvb-core/dvb_ca_en50221.c 		sl = &ca->slot_info[slot];
ca               1406 drivers/media/dvb-core/dvb_ca_en50221.c 	mutex_unlock(&ca->ioctl_mutex);
ca               1440 drivers/media/dvb-core/dvb_ca_en50221.c 	struct dvb_ca_private *ca = dvbdev->priv;
ca               1467 drivers/media/dvb-core/dvb_ca_en50221.c 	if (slot >= ca->slot_count)
ca               1469 drivers/media/dvb-core/dvb_ca_en50221.c 	slot = array_index_nospec(slot, ca->slot_count);
ca               1470 drivers/media/dvb-core/dvb_ca_en50221.c 	sl = &ca->slot_info[slot];
ca               1507 drivers/media/dvb-core/dvb_ca_en50221.c 			status = dvb_ca_en50221_write_data(ca, slot, fragbuf,
ca               1535 drivers/media/dvb-core/dvb_ca_en50221.c static int dvb_ca_en50221_io_read_condition(struct dvb_ca_private *ca,
ca               1546 drivers/media/dvb-core/dvb_ca_en50221.c 	slot = ca->next_read_slot;
ca               1547 drivers/media/dvb-core/dvb_ca_en50221.c 	while ((slot_count < ca->slot_count) && (!found)) {
ca               1548 drivers/media/dvb-core/dvb_ca_en50221.c 		struct dvb_ca_slot *sl = &ca->slot_info[slot];
ca               1573 drivers/media/dvb-core/dvb_ca_en50221.c 		slot = (slot + 1) % ca->slot_count;
ca               1577 drivers/media/dvb-core/dvb_ca_en50221.c 	ca->next_read_slot = slot;
ca               1595 drivers/media/dvb-core/dvb_ca_en50221.c 	struct dvb_ca_private *ca = dvbdev->priv;
ca               1618 drivers/media/dvb-core/dvb_ca_en50221.c 	status = dvb_ca_en50221_io_read_condition(ca, &result, &slot);
ca               1625 drivers/media/dvb-core/dvb_ca_en50221.c 		status = wait_event_interruptible(ca->wait_queue,
ca               1627 drivers/media/dvb-core/dvb_ca_en50221.c 						  (ca, &result, &slot));
ca               1635 drivers/media/dvb-core/dvb_ca_en50221.c 	sl = &ca->slot_info[slot];
ca               1641 drivers/media/dvb-core/dvb_ca_en50221.c 			       ca->dvbdev->adapter->num);
ca               1703 drivers/media/dvb-core/dvb_ca_en50221.c 	struct dvb_ca_private *ca = dvbdev->priv;
ca               1709 drivers/media/dvb-core/dvb_ca_en50221.c 	if (!try_module_get(ca->pub->owner))
ca               1714 drivers/media/dvb-core/dvb_ca_en50221.c 		module_put(ca->pub->owner);
ca               1718 drivers/media/dvb-core/dvb_ca_en50221.c 	for (i = 0; i < ca->slot_count; i++) {
ca               1719 drivers/media/dvb-core/dvb_ca_en50221.c 		struct dvb_ca_slot *sl = &ca->slot_info[i];
ca               1733 drivers/media/dvb-core/dvb_ca_en50221.c 	ca->open = 1;
ca               1734 drivers/media/dvb-core/dvb_ca_en50221.c 	dvb_ca_en50221_thread_update_delay(ca);
ca               1735 drivers/media/dvb-core/dvb_ca_en50221.c 	dvb_ca_en50221_thread_wakeup(ca);
ca               1737 drivers/media/dvb-core/dvb_ca_en50221.c 	dvb_ca_private_get(ca);
ca               1753 drivers/media/dvb-core/dvb_ca_en50221.c 	struct dvb_ca_private *ca = dvbdev->priv;
ca               1759 drivers/media/dvb-core/dvb_ca_en50221.c 	ca->open = 0;
ca               1760 drivers/media/dvb-core/dvb_ca_en50221.c 	dvb_ca_en50221_thread_update_delay(ca);
ca               1764 drivers/media/dvb-core/dvb_ca_en50221.c 	module_put(ca->pub->owner);
ca               1766 drivers/media/dvb-core/dvb_ca_en50221.c 	dvb_ca_private_put(ca);
ca               1782 drivers/media/dvb-core/dvb_ca_en50221.c 	struct dvb_ca_private *ca = dvbdev->priv;
ca               1789 drivers/media/dvb-core/dvb_ca_en50221.c 	poll_wait(file, &ca->wait_queue, wait);
ca               1791 drivers/media/dvb-core/dvb_ca_en50221.c 	if (dvb_ca_en50221_io_read_condition(ca, &result, &slot) == 1)
ca               1798 drivers/media/dvb-core/dvb_ca_en50221.c 	if (dvb_ca_en50221_io_read_condition(ca, &result, &slot) == 1)
ca               1843 drivers/media/dvb-core/dvb_ca_en50221.c 	struct dvb_ca_private *ca = NULL;
ca               1852 drivers/media/dvb-core/dvb_ca_en50221.c 	ca = kzalloc(sizeof(*ca), GFP_KERNEL);
ca               1853 drivers/media/dvb-core/dvb_ca_en50221.c 	if (!ca) {
ca               1857 drivers/media/dvb-core/dvb_ca_en50221.c 	kref_init(&ca->refcount);
ca               1858 drivers/media/dvb-core/dvb_ca_en50221.c 	ca->pub = pubca;
ca               1859 drivers/media/dvb-core/dvb_ca_en50221.c 	ca->flags = flags;
ca               1860 drivers/media/dvb-core/dvb_ca_en50221.c 	ca->slot_count = slot_count;
ca               1861 drivers/media/dvb-core/dvb_ca_en50221.c 	ca->slot_info = kcalloc(slot_count, sizeof(struct dvb_ca_slot),
ca               1863 drivers/media/dvb-core/dvb_ca_en50221.c 	if (!ca->slot_info) {
ca               1867 drivers/media/dvb-core/dvb_ca_en50221.c 	init_waitqueue_head(&ca->wait_queue);
ca               1868 drivers/media/dvb-core/dvb_ca_en50221.c 	ca->open = 0;
ca               1869 drivers/media/dvb-core/dvb_ca_en50221.c 	ca->wakeup = 0;
ca               1870 drivers/media/dvb-core/dvb_ca_en50221.c 	ca->next_read_slot = 0;
ca               1871 drivers/media/dvb-core/dvb_ca_en50221.c 	pubca->private = ca;
ca               1874 drivers/media/dvb-core/dvb_ca_en50221.c 	ret = dvb_register_device(dvb_adapter, &ca->dvbdev, &dvbdev_ca, ca,
ca               1881 drivers/media/dvb-core/dvb_ca_en50221.c 		struct dvb_ca_slot *sl = &ca->slot_info[i];
ca               1890 drivers/media/dvb-core/dvb_ca_en50221.c 	mutex_init(&ca->ioctl_mutex);
ca               1899 drivers/media/dvb-core/dvb_ca_en50221.c 	ca->thread = kthread_run(dvb_ca_en50221_thread, ca, "kdvb-ca-%i:%i",
ca               1900 drivers/media/dvb-core/dvb_ca_en50221.c 				 ca->dvbdev->adapter->num, ca->dvbdev->id);
ca               1901 drivers/media/dvb-core/dvb_ca_en50221.c 	if (IS_ERR(ca->thread)) {
ca               1902 drivers/media/dvb-core/dvb_ca_en50221.c 		ret = PTR_ERR(ca->thread);
ca               1910 drivers/media/dvb-core/dvb_ca_en50221.c 	dvb_unregister_device(ca->dvbdev);
ca               1912 drivers/media/dvb-core/dvb_ca_en50221.c 	kfree(ca->slot_info);
ca               1914 drivers/media/dvb-core/dvb_ca_en50221.c 	kfree(ca);
ca               1928 drivers/media/dvb-core/dvb_ca_en50221.c 	struct dvb_ca_private *ca = pubca->private;
ca               1934 drivers/media/dvb-core/dvb_ca_en50221.c 	kthread_stop(ca->thread);
ca               1936 drivers/media/dvb-core/dvb_ca_en50221.c 	for (i = 0; i < ca->slot_count; i++)
ca               1937 drivers/media/dvb-core/dvb_ca_en50221.c 		dvb_ca_en50221_slot_shutdown(ca, i);
ca               1939 drivers/media/dvb-core/dvb_ca_en50221.c 	dvb_remove_device(ca->dvbdev);
ca               1940 drivers/media/dvb-core/dvb_ca_en50221.c 	dvb_ca_private_put(ca);
ca                618 drivers/media/dvb-core/dvbdev.c 	struct media_entity *demux = NULL, *ca = NULL;
ca                644 drivers/media/dvb-core/dvbdev.c 			ca = entity;
ca                734 drivers/media/dvb-core/dvbdev.c 	if (demux && ca) {
ca                735 drivers/media/dvb-core/dvbdev.c 		ret = media_create_pad_link(demux, 1, ca,
ca                767 drivers/media/dvb-core/dvbdev.c 		if (intf->type == MEDIA_INTF_T_DVB_CA && ca) {
ca                768 drivers/media/dvb-core/dvbdev.c 			link = media_create_intf_link(ca, intf,
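
In dvbdev.c the media-controller glue walks the adapter's registered entities, remembers the demux and CA entities, then wires them together: a pad link from demux pad 1 into the CA entity (entry 735) plus an interface link so the CA device node is reachable from the graph (entries 767-768). A hedged sketch of those two calls; the sink pad number and the link flags are assumptions, as the excerpt truncates both argument lists:

	#include <media/media-device.h>
	#include <media/media-entity.h>

	static int link_demux_to_ca(struct media_entity *demux,
				    struct media_entity *ca,
				    struct media_interface *intf)
	{
		struct media_link *link;
		int ret;

		/* demux pad 1 feeds the CA entity; sink pad 0 and flags assumed */
		ret = media_create_pad_link(demux, 1, ca, 0, MEDIA_LNK_FL_ENABLED);
		if (ret)
			return ret;

		/* expose the CA device node through its media interface */
		link = media_create_intf_link(ca, intf, MEDIA_LNK_FL_ENABLED);
		if (!link)
			return -ENOMEM;

		return 0;
	}
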
ca                 33 drivers/media/dvb-frontends/cxd2099.c static int read_data(struct dvb_ca_en50221 *ca, int slot, u8 *ebuf, int ecount);
ca                385 drivers/media/dvb-frontends/cxd2099.c static int read_attribute_mem(struct dvb_ca_en50221 *ca,
ca                388 drivers/media/dvb-frontends/cxd2099.c 	struct cxd *ci = ca->data;
ca                398 drivers/media/dvb-frontends/cxd2099.c static int write_attribute_mem(struct dvb_ca_en50221 *ca, int slot,
ca                401 drivers/media/dvb-frontends/cxd2099.c 	struct cxd *ci = ca->data;
ca                410 drivers/media/dvb-frontends/cxd2099.c static int read_cam_control(struct dvb_ca_en50221 *ca,
ca                413 drivers/media/dvb-frontends/cxd2099.c 	struct cxd *ci = ca->data;
ca                423 drivers/media/dvb-frontends/cxd2099.c static int write_cam_control(struct dvb_ca_en50221 *ca, int slot,
ca                426 drivers/media/dvb-frontends/cxd2099.c 	struct cxd *ci = ca->data;
ca                435 drivers/media/dvb-frontends/cxd2099.c static int slot_reset(struct dvb_ca_en50221 *ca, int slot)
ca                437 drivers/media/dvb-frontends/cxd2099.c 	struct cxd *ci = ca->data;
ca                440 drivers/media/dvb-frontends/cxd2099.c 		read_data(ca, slot, ci->rbuf, 0);
ca                464 drivers/media/dvb-frontends/cxd2099.c static int slot_shutdown(struct dvb_ca_en50221 *ca, int slot)
ca                466 drivers/media/dvb-frontends/cxd2099.c 	struct cxd *ci = ca->data;
ca                470 drivers/media/dvb-frontends/cxd2099.c 		read_data(ca, slot, ci->rbuf, 0);
ca                486 drivers/media/dvb-frontends/cxd2099.c static int slot_ts_enable(struct dvb_ca_en50221 *ca, int slot)
ca                488 drivers/media/dvb-frontends/cxd2099.c 	struct cxd *ci = ca->data;
ca                540 drivers/media/dvb-frontends/cxd2099.c static int poll_slot_status(struct dvb_ca_en50221 *ca, int slot, int open)
ca                542 drivers/media/dvb-frontends/cxd2099.c 	struct cxd *ci = ca->data;
ca                553 drivers/media/dvb-frontends/cxd2099.c static int read_data(struct dvb_ca_en50221 *ca, int slot, u8 *ebuf, int ecount)
ca                555 drivers/media/dvb-frontends/cxd2099.c 	struct cxd *ci = ca->data;
ca                582 drivers/media/dvb-frontends/cxd2099.c static int write_data(struct dvb_ca_en50221 *ca, int slot, u8 *ebuf, int ecount)
ca                584 drivers/media/dvb-frontends/cxd2099.c 	struct cxd *ci = ca->data;
ca                306 drivers/media/dvb-frontends/sp2.c 	s->ca.owner = THIS_MODULE;
ca                307 drivers/media/dvb-frontends/sp2.c 	s->ca.read_attribute_mem = sp2_ci_read_attribute_mem;
ca                308 drivers/media/dvb-frontends/sp2.c 	s->ca.write_attribute_mem = sp2_ci_write_attribute_mem;
ca                309 drivers/media/dvb-frontends/sp2.c 	s->ca.read_cam_control = sp2_ci_read_cam_control;
ca                310 drivers/media/dvb-frontends/sp2.c 	s->ca.write_cam_control = sp2_ci_write_cam_control;
ca                311 drivers/media/dvb-frontends/sp2.c 	s->ca.slot_reset = sp2_ci_slot_reset;
ca                312 drivers/media/dvb-frontends/sp2.c 	s->ca.slot_shutdown = sp2_ci_slot_shutdown;
ca                313 drivers/media/dvb-frontends/sp2.c 	s->ca.slot_ts_enable = sp2_ci_slot_ts_enable;
ca                314 drivers/media/dvb-frontends/sp2.c 	s->ca.poll_slot_status = sp2_ci_poll_slot_status;
ca                315 drivers/media/dvb-frontends/sp2.c 	s->ca.data = s;
ca                334 drivers/media/dvb-frontends/sp2.c 	ret = dvb_ca_en50221_init(s->dvb_adap, &s->ca, 0, 1);
ca                358 drivers/media/dvb-frontends/sp2.c 	if (!s->ca.data)
ca                361 drivers/media/dvb-frontends/sp2.c 	dvb_ca_en50221_release(&s->ca);
ca                 19 drivers/media/dvb-frontends/sp2_priv.h 	struct dvb_ca_en50221 ca;
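
The sp2 entries spell out the standard way a CI driver hands itself to the EN50221 core: embed a struct dvb_ca_en50221 in the driver state (sp2_priv.h entry 19), fill in every callback, point .data back at the state so the callbacks can recover it, register with dvb_ca_en50221_init() (entry 334, flags 0 and a single slot), and release with dvb_ca_en50221_release() once .data confirms the init succeeded. A condensed sketch of that registration; the my_ci_* callbacks stand in for implementations like those in cxd2099.c above:

	#include <media/dvb_ca_en50221.h>

	struct my_ci {
		struct dvb_ca_en50221 ca;
		struct dvb_adapter *dvb_adap;
	};

	static int my_ci_register(struct my_ci *s)
	{
		/* my_ci_* callback definitions elided; their signatures match
		 * the read_attribute_mem()/poll_slot_status() hooks shown above */
		s->ca.owner               = THIS_MODULE;
		s->ca.read_attribute_mem  = my_ci_read_attribute_mem;
		s->ca.write_attribute_mem = my_ci_write_attribute_mem;
		s->ca.read_cam_control    = my_ci_read_cam_control;
		s->ca.write_cam_control   = my_ci_write_cam_control;
		s->ca.slot_reset          = my_ci_slot_reset;
		s->ca.slot_shutdown       = my_ci_slot_shutdown;
		s->ca.slot_ts_enable      = my_ci_slot_ts_enable;
		s->ca.poll_slot_status    = my_ci_poll_slot_status;
		s->ca.data                = s;	/* recovered via ca->data in callbacks */

		/* flags = 0 (pure polling), slot_count = 1, as in sp2.c entry 334 */
		return dvb_ca_en50221_init(s->dvb_adap, &s->ca, 0, 1);
	}
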
ca                112 drivers/media/pci/cx23885/altera-ci.c 	struct dvb_ca_en50221 ca;
ca                504 drivers/media/pci/cx23885/altera-ci.c 			if (state->ca.data != NULL)
ca                505 drivers/media/pci/cx23885/altera-ci.c 				dvb_ca_en50221_release(&state->ca);
ca                748 drivers/media/pci/cx23885/altera-ci.c 	state->ca.owner = THIS_MODULE;
ca                749 drivers/media/pci/cx23885/altera-ci.c 	state->ca.read_attribute_mem = altera_ci_read_attribute_mem;
ca                750 drivers/media/pci/cx23885/altera-ci.c 	state->ca.write_attribute_mem = altera_ci_write_attribute_mem;
ca                751 drivers/media/pci/cx23885/altera-ci.c 	state->ca.read_cam_control = altera_ci_read_cam_ctl;
ca                752 drivers/media/pci/cx23885/altera-ci.c 	state->ca.write_cam_control = altera_ci_write_cam_ctl;
ca                753 drivers/media/pci/cx23885/altera-ci.c 	state->ca.slot_reset = altera_ci_slot_reset;
ca                754 drivers/media/pci/cx23885/altera-ci.c 	state->ca.slot_shutdown = altera_ci_slot_shutdown;
ca                755 drivers/media/pci/cx23885/altera-ci.c 	state->ca.slot_ts_enable = altera_ci_slot_ts_ctl;
ca                756 drivers/media/pci/cx23885/altera-ci.c 	state->ca.poll_slot_status = altera_poll_ci_slot_status;
ca                757 drivers/media/pci/cx23885/altera-ci.c 	state->ca.data = state;
ca                760 drivers/media/pci/cx23885/altera-ci.c 				   &state->ca,
ca                 69 drivers/media/pci/cx23885/cimax2.c 	struct dvb_ca_en50221 ca;
ca                285 drivers/media/pci/cx23885/cimax2.c 	dvb_ca_en50221_camready_irq(&state->ca, 0);
ca                343 drivers/media/pci/cx23885/cimax2.c 	dvb_ca_en50221_frda_irq(&state->ca, 0);
ca                473 drivers/media/pci/cx23885/cimax2.c 	state->ca.owner = THIS_MODULE;
ca                474 drivers/media/pci/cx23885/cimax2.c 	state->ca.read_attribute_mem = netup_ci_read_attribute_mem;
ca                475 drivers/media/pci/cx23885/cimax2.c 	state->ca.write_attribute_mem = netup_ci_write_attribute_mem;
ca                476 drivers/media/pci/cx23885/cimax2.c 	state->ca.read_cam_control = netup_ci_read_cam_ctl;
ca                477 drivers/media/pci/cx23885/cimax2.c 	state->ca.write_cam_control = netup_ci_write_cam_ctl;
ca                478 drivers/media/pci/cx23885/cimax2.c 	state->ca.slot_reset = netup_ci_slot_reset;
ca                479 drivers/media/pci/cx23885/cimax2.c 	state->ca.slot_shutdown = netup_ci_slot_shutdown;
ca                480 drivers/media/pci/cx23885/cimax2.c 	state->ca.slot_ts_enable = netup_ci_slot_ts_ctl;
ca                481 drivers/media/pci/cx23885/cimax2.c 	state->ca.poll_slot_status = netup_poll_ci_slot_status;
ca                482 drivers/media/pci/cx23885/cimax2.c 	state->ca.data = state;
ca                499 drivers/media/pci/cx23885/cimax2.c 				   &state->ca,
ca                528 drivers/media/pci/cx23885/cimax2.c 	if (NULL == state->ca.data)
ca                531 drivers/media/pci/cx23885/cimax2.c 	dvb_ca_en50221_release(&state->ca);
ca                 45 drivers/media/pci/ddbridge/ddbridge-ci.c static int read_attribute_mem(struct dvb_ca_en50221 *ca,
ca                 48 drivers/media/pci/ddbridge/ddbridge-ci.c 	struct ddb_ci *ci = ca->data;
ca                 60 drivers/media/pci/ddbridge/ddbridge-ci.c static int write_attribute_mem(struct dvb_ca_en50221 *ca, int slot,
ca                 63 drivers/media/pci/ddbridge/ddbridge-ci.c 	struct ddb_ci *ci = ca->data;
ca                 71 drivers/media/pci/ddbridge/ddbridge-ci.c static int read_cam_control(struct dvb_ca_en50221 *ca,
ca                 75 drivers/media/pci/ddbridge/ddbridge-ci.c 	struct ddb_ci *ci = ca->data;
ca                 92 drivers/media/pci/ddbridge/ddbridge-ci.c static int write_cam_control(struct dvb_ca_en50221 *ca, int slot,
ca                 95 drivers/media/pci/ddbridge/ddbridge-ci.c 	struct ddb_ci *ci = ca->data;
ca                103 drivers/media/pci/ddbridge/ddbridge-ci.c static int slot_reset(struct dvb_ca_en50221 *ca, int slot)
ca                105 drivers/media/pci/ddbridge/ddbridge-ci.c 	struct ddb_ci *ci = ca->data;
ca                120 drivers/media/pci/ddbridge/ddbridge-ci.c static int slot_shutdown(struct dvb_ca_en50221 *ca, int slot)
ca                122 drivers/media/pci/ddbridge/ddbridge-ci.c 	struct ddb_ci *ci = ca->data;
ca                129 drivers/media/pci/ddbridge/ddbridge-ci.c static int slot_ts_enable(struct dvb_ca_en50221 *ca, int slot)
ca                131 drivers/media/pci/ddbridge/ddbridge-ci.c 	struct ddb_ci *ci = ca->data;
ca                139 drivers/media/pci/ddbridge/ddbridge-ci.c static int poll_slot_status(struct dvb_ca_en50221 *ca, int slot, int open)
ca                141 drivers/media/pci/ddbridge/ddbridge-ci.c 	struct ddb_ci *ci = ca->data;
ca                189 drivers/media/pci/ddbridge/ddbridge-ci.c static int read_attribute_mem_xo2(struct dvb_ca_en50221 *ca,
ca                192 drivers/media/pci/ddbridge/ddbridge-ci.c 	struct ddb_ci *ci = ca->data;
ca                202 drivers/media/pci/ddbridge/ddbridge-ci.c static int write_attribute_mem_xo2(struct dvb_ca_en50221 *ca, int slot,
ca                205 drivers/media/pci/ddbridge/ddbridge-ci.c 	struct ddb_ci *ci = ca->data;
ca                212 drivers/media/pci/ddbridge/ddbridge-ci.c static int read_cam_control_xo2(struct dvb_ca_en50221 *ca,
ca                215 drivers/media/pci/ddbridge/ddbridge-ci.c 	struct ddb_ci *ci = ca->data;
ca                225 drivers/media/pci/ddbridge/ddbridge-ci.c static int write_cam_control_xo2(struct dvb_ca_en50221 *ca, int slot,
ca                228 drivers/media/pci/ddbridge/ddbridge-ci.c 	struct ddb_ci *ci = ca->data;
ca                235 drivers/media/pci/ddbridge/ddbridge-ci.c static int slot_reset_xo2(struct dvb_ca_en50221 *ca, int slot)
ca                237 drivers/media/pci/ddbridge/ddbridge-ci.c 	struct ddb_ci *ci = ca->data;
ca                249 drivers/media/pci/ddbridge/ddbridge-ci.c static int slot_shutdown_xo2(struct dvb_ca_en50221 *ca, int slot)
ca                251 drivers/media/pci/ddbridge/ddbridge-ci.c 	struct ddb_ci *ci = ca->data;
ca                259 drivers/media/pci/ddbridge/ddbridge-ci.c static int slot_ts_enable_xo2(struct dvb_ca_en50221 *ca, int slot)
ca                261 drivers/media/pci/ddbridge/ddbridge-ci.c 	struct ddb_ci *ci = ca->data;
ca                268 drivers/media/pci/ddbridge/ddbridge-ci.c static int poll_slot_status_xo2(struct dvb_ca_en50221 *ca, int slot, int open)
ca                270 drivers/media/pci/ddbridge/ddbridge-ci.c 	struct ddb_ci *ci = ca->data;
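
On the callback side, every ddbridge hook opens the same way: `struct ddb_ci *ci = ca->data;`, the mirror image of setting .data at registration. poll_slot_status is the simplest of the hooks; the sketch below shows its usual contract, returning 0 with no module present and the real DVB_CA_EN50221_POLL_* bits otherwise (the cam_present field is a hypothetical stand-in for driver state):

	static int my_poll_slot_status(struct dvb_ca_en50221 *ca, int slot, int open)
	{
		struct my_ci *ci = ca->data;

		if (!ci->cam_present)		/* hypothetical per-slot state */
			return 0;

		return DVB_CA_EN50221_POLL_CAM_PRESENT |
		       DVB_CA_EN50221_POLL_CAM_READY;
	}
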
ca                 60 drivers/media/pci/mantis/hopper_cards.c 	struct mantis_ca *ca;
ca                 67 drivers/media/pci/mantis/hopper_cards.c 	ca = mantis->mantis_ca;
ca                 92 drivers/media/pci/mantis/hopper_cards.c 		wake_up(&ca->hif_write_wq);
ca                 93 drivers/media/pci/mantis/hopper_cards.c 		schedule_work(&ca->hif_evm_work);
ca                 30 drivers/media/pci/mantis/mantis_ca.c 	struct mantis_ca *ca = en50221->data;
ca                 31 drivers/media/pci/mantis/mantis_ca.c 	struct mantis_pci *mantis = ca->ca_priv;
ca                 38 drivers/media/pci/mantis/mantis_ca.c 	return mantis_hif_read_mem(ca, addr);
ca                 43 drivers/media/pci/mantis/mantis_ca.c 	struct mantis_ca *ca = en50221->data;
ca                 44 drivers/media/pci/mantis/mantis_ca.c 	struct mantis_pci *mantis = ca->ca_priv;
ca                 51 drivers/media/pci/mantis/mantis_ca.c 	return mantis_hif_write_mem(ca, addr, data);
ca                 56 drivers/media/pci/mantis/mantis_ca.c 	struct mantis_ca *ca = en50221->data;
ca                 57 drivers/media/pci/mantis/mantis_ca.c 	struct mantis_pci *mantis = ca->ca_priv;
ca                 64 drivers/media/pci/mantis/mantis_ca.c 	return mantis_hif_read_iom(ca, addr);
ca                 69 drivers/media/pci/mantis/mantis_ca.c 	struct mantis_ca *ca = en50221->data;
ca                 70 drivers/media/pci/mantis/mantis_ca.c 	struct mantis_pci *mantis = ca->ca_priv;
ca                 77 drivers/media/pci/mantis/mantis_ca.c 	return mantis_hif_write_iom(ca, addr, data);
ca                 82 drivers/media/pci/mantis/mantis_ca.c 	struct mantis_ca *ca = en50221->data;
ca                 83 drivers/media/pci/mantis/mantis_ca.c 	struct mantis_pci *mantis = ca->ca_priv;
ca                 91 drivers/media/pci/mantis/mantis_ca.c 	dvb_ca_en50221_camready_irq(&ca->en50221, 0);
ca                 98 drivers/media/pci/mantis/mantis_ca.c 	struct mantis_ca *ca = en50221->data;
ca                 99 drivers/media/pci/mantis/mantis_ca.c 	struct mantis_pci *mantis = ca->ca_priv;
ca                108 drivers/media/pci/mantis/mantis_ca.c 	struct mantis_ca *ca = en50221->data;
ca                109 drivers/media/pci/mantis/mantis_ca.c 	struct mantis_pci *mantis = ca->ca_priv;
ca                119 drivers/media/pci/mantis/mantis_ca.c 	struct mantis_ca *ca = en50221->data;
ca                120 drivers/media/pci/mantis/mantis_ca.c 	struct mantis_pci *mantis = ca->ca_priv;
ca                124 drivers/media/pci/mantis/mantis_ca.c 	if (ca->slot_state == MODULE_INSERTED) {
ca                137 drivers/media/pci/mantis/mantis_ca.c 	struct mantis_ca *ca;
ca                141 drivers/media/pci/mantis/mantis_ca.c 	ca = kzalloc(sizeof(struct mantis_ca), GFP_KERNEL);
ca                142 drivers/media/pci/mantis/mantis_ca.c 	if (!ca) {
ca                148 drivers/media/pci/mantis/mantis_ca.c 	ca->ca_priv		= mantis;
ca                149 drivers/media/pci/mantis/mantis_ca.c 	mantis->mantis_ca	= ca;
ca                152 drivers/media/pci/mantis/mantis_ca.c 	ca->en50221.owner		= THIS_MODULE;
ca                153 drivers/media/pci/mantis/mantis_ca.c 	ca->en50221.read_attribute_mem	= mantis_ca_read_attr_mem;
ca                154 drivers/media/pci/mantis/mantis_ca.c 	ca->en50221.write_attribute_mem	= mantis_ca_write_attr_mem;
ca                155 drivers/media/pci/mantis/mantis_ca.c 	ca->en50221.read_cam_control	= mantis_ca_read_cam_ctl;
ca                156 drivers/media/pci/mantis/mantis_ca.c 	ca->en50221.write_cam_control	= mantis_ca_write_cam_ctl;
ca                157 drivers/media/pci/mantis/mantis_ca.c 	ca->en50221.slot_reset		= mantis_ca_slot_reset;
ca                158 drivers/media/pci/mantis/mantis_ca.c 	ca->en50221.slot_shutdown	= mantis_ca_slot_shutdown;
ca                159 drivers/media/pci/mantis/mantis_ca.c 	ca->en50221.slot_ts_enable	= mantis_ts_control;
ca                160 drivers/media/pci/mantis/mantis_ca.c 	ca->en50221.poll_slot_status	= mantis_slot_status;
ca                161 drivers/media/pci/mantis/mantis_ca.c 	ca->en50221.data		= ca;
ca                163 drivers/media/pci/mantis/mantis_ca.c 	mutex_init(&ca->ca_lock);
ca                165 drivers/media/pci/mantis/mantis_ca.c 	init_waitqueue_head(&ca->hif_data_wq);
ca                166 drivers/media/pci/mantis/mantis_ca.c 	init_waitqueue_head(&ca->hif_opdone_wq);
ca                167 drivers/media/pci/mantis/mantis_ca.c 	init_waitqueue_head(&ca->hif_write_wq);
ca                170 drivers/media/pci/mantis/mantis_ca.c 	result = dvb_ca_en50221_init(dvb_adapter, &ca->en50221, ca_flags, 1);
ca                176 drivers/media/pci/mantis/mantis_ca.c 	mantis_evmgr_init(ca);
ca                179 drivers/media/pci/mantis/mantis_ca.c 	kfree(ca);
ca                186 drivers/media/pci/mantis/mantis_ca.c 	struct mantis_ca *ca = mantis->mantis_ca;
ca                189 drivers/media/pci/mantis/mantis_ca.c 	if (!ca)
ca                192 drivers/media/pci/mantis/mantis_ca.c 	mantis_evmgr_exit(ca);
ca                194 drivers/media/pci/mantis/mantis_ca.c 	dvb_ca_en50221_release(&ca->en50221);
ca                196 drivers/media/pci/mantis/mantis_ca.c 	kfree(ca);
ca                 69 drivers/media/pci/mantis/mantis_cards.c 	struct mantis_ca *ca;
ca                 76 drivers/media/pci/mantis/mantis_cards.c 	ca = mantis->mantis_ca;
ca                101 drivers/media/pci/mantis/mantis_cards.c 		wake_up(&ca->hif_write_wq);
ca                102 drivers/media/pci/mantis/mantis_cards.c 		schedule_work(&ca->hif_evm_work);
ca                 29 drivers/media/pci/mantis/mantis_evm.c 	struct mantis_ca *ca = container_of(work, struct mantis_ca, hif_evm_work);
ca                 30 drivers/media/pci/mantis/mantis_evm.c 	struct mantis_pci *mantis = ca->ca_priv;
ca                 40 drivers/media/pci/mantis/mantis_evm.c 			mantis_event_cam_plugin(ca);
ca                 41 drivers/media/pci/mantis/mantis_evm.c 			dvb_ca_en50221_camchange_irq(&ca->en50221,
ca                 49 drivers/media/pci/mantis/mantis_evm.c 			mantis_event_cam_unplug(ca);
ca                 50 drivers/media/pci/mantis/mantis_evm.c 			dvb_ca_en50221_camchange_irq(&ca->en50221,
ca                 79 drivers/media/pci/mantis/mantis_evm.c 		ca->sbuf_status = MANTIS_SBUF_DATA_AVAIL;
ca                 80 drivers/media/pci/mantis/mantis_evm.c 		ca->hif_event = MANTIS_SBUF_OPDONE;
ca                 81 drivers/media/pci/mantis/mantis_evm.c 		wake_up(&ca->hif_opdone_wq);
ca                 85 drivers/media/pci/mantis/mantis_evm.c int mantis_evmgr_init(struct mantis_ca *ca)
ca                 87 drivers/media/pci/mantis/mantis_evm.c 	struct mantis_pci *mantis = ca->ca_priv;
ca                 90 drivers/media/pci/mantis/mantis_evm.c 	INIT_WORK(&ca->hif_evm_work, mantis_hifevm_work);
ca                 91 drivers/media/pci/mantis/mantis_evm.c 	mantis_pcmcia_init(ca);
ca                 92 drivers/media/pci/mantis/mantis_evm.c 	schedule_work(&ca->hif_evm_work);
ca                 93 drivers/media/pci/mantis/mantis_evm.c 	mantis_hif_init(ca);
ca                 97 drivers/media/pci/mantis/mantis_evm.c void mantis_evmgr_exit(struct mantis_ca *ca)
ca                 99 drivers/media/pci/mantis/mantis_evm.c 	struct mantis_pci *mantis = ca->ca_priv;
ca                102 drivers/media/pci/mantis/mantis_evm.c 	flush_work(&ca->hif_evm_work);
ca                103 drivers/media/pci/mantis/mantis_evm.c 	mantis_hif_exit(ca);
ca                104 drivers/media/pci/mantis/mantis_evm.c 	mantis_pcmcia_exit(ca);
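
mantis_evm.c is the deferred-work half of the Mantis event manager: the interrupt handlers in hopper_cards.c/mantis_cards.c schedule hif_evm_work, the handler translates hardware event bits into plug/unplug processing and dvb_ca_en50221_camchange_irq() notifications, and mantis_evmgr_exit() flushes the work before tearing down the HIF and PCMCIA state. The generic skeleton (my_* names assumed):

	#include <linux/workqueue.h>

	struct my_ca {
		struct work_struct evm_work;
	};

	static void my_evm_work(struct work_struct *work)
	{
		struct my_ca *ca = container_of(work, struct my_ca, evm_work);

		/* read the hardware event bits and notify the EN50221 core */
	}

	static void my_evmgr_init(struct my_ca *ca)
	{
		INIT_WORK(&ca->evm_work, my_evm_work);
		schedule_work(&ca->evm_work);	/* process any state pending at probe */
	}

	static void my_evmgr_exit(struct my_ca *ca)
	{
		flush_work(&ca->evm_work);	/* handler must finish before teardown */
	}
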
ca                 30 drivers/media/pci/mantis/mantis_hif.c static int mantis_hif_sbuf_opdone_wait(struct mantis_ca *ca)
ca                 32 drivers/media/pci/mantis/mantis_hif.c 	struct mantis_pci *mantis = ca->ca_priv;
ca                 35 drivers/media/pci/mantis/mantis_hif.c 	if (wait_event_timeout(ca->hif_opdone_wq,
ca                 36 drivers/media/pci/mantis/mantis_hif.c 			       ca->hif_event & MANTIS_SBUF_OPDONE,
ca                 43 drivers/media/pci/mantis/mantis_hif.c 	ca->hif_event &= ~MANTIS_SBUF_OPDONE;
ca                 47 drivers/media/pci/mantis/mantis_hif.c static int mantis_hif_write_wait(struct mantis_ca *ca)
ca                 49 drivers/media/pci/mantis/mantis_hif.c 	struct mantis_pci *mantis = ca->ca_priv;
ca                 53 drivers/media/pci/mantis/mantis_hif.c 	if (wait_event_timeout(ca->hif_write_wq,
ca                 77 drivers/media/pci/mantis/mantis_hif.c int mantis_hif_read_mem(struct mantis_ca *ca, u32 addr)
ca                 79 drivers/media/pci/mantis/mantis_hif.c 	struct mantis_pci *mantis = ca->ca_priv;
ca                 83 drivers/media/pci/mantis/mantis_hif.c 	mutex_lock(&ca->ca_lock);
ca                 94 drivers/media/pci/mantis/mantis_hif.c 	if (mantis_hif_sbuf_opdone_wait(ca) != 0) {
ca                 96 drivers/media/pci/mantis/mantis_hif.c 		mutex_unlock(&ca->ca_lock);
ca                100 drivers/media/pci/mantis/mantis_hif.c 	mutex_unlock(&ca->ca_lock);
ca                105 drivers/media/pci/mantis/mantis_hif.c int mantis_hif_write_mem(struct mantis_ca *ca, u32 addr, u8 data)
ca                107 drivers/media/pci/mantis/mantis_hif.c 	struct mantis_slot *slot = ca->slot;
ca                108 drivers/media/pci/mantis/mantis_hif.c 	struct mantis_pci *mantis = ca->ca_priv;
ca                112 drivers/media/pci/mantis/mantis_hif.c 	mutex_lock(&ca->ca_lock);
ca                123 drivers/media/pci/mantis/mantis_hif.c 	if (mantis_hif_write_wait(ca) != 0) {
ca                125 drivers/media/pci/mantis/mantis_hif.c 		mutex_unlock(&ca->ca_lock);
ca                129 drivers/media/pci/mantis/mantis_hif.c 	mutex_unlock(&ca->ca_lock);
ca                134 drivers/media/pci/mantis/mantis_hif.c int mantis_hif_read_iom(struct mantis_ca *ca, u32 addr)
ca                136 drivers/media/pci/mantis/mantis_hif.c 	struct mantis_pci *mantis = ca->ca_priv;
ca                140 drivers/media/pci/mantis/mantis_hif.c 	mutex_lock(&ca->ca_lock);
ca                151 drivers/media/pci/mantis/mantis_hif.c 	if (mantis_hif_sbuf_opdone_wait(ca) != 0) {
ca                153 drivers/media/pci/mantis/mantis_hif.c 		mutex_unlock(&ca->ca_lock);
ca                159 drivers/media/pci/mantis/mantis_hif.c 	mutex_unlock(&ca->ca_lock);
ca                164 drivers/media/pci/mantis/mantis_hif.c int mantis_hif_write_iom(struct mantis_ca *ca, u32 addr, u8 data)
ca                166 drivers/media/pci/mantis/mantis_hif.c 	struct mantis_pci *mantis = ca->ca_priv;
ca                170 drivers/media/pci/mantis/mantis_hif.c 	mutex_lock(&ca->ca_lock);
ca                180 drivers/media/pci/mantis/mantis_hif.c 	if (mantis_hif_write_wait(ca) != 0) {
ca                182 drivers/media/pci/mantis/mantis_hif.c 		mutex_unlock(&ca->ca_lock);
ca                186 drivers/media/pci/mantis/mantis_hif.c 	mutex_unlock(&ca->ca_lock);
ca                192 drivers/media/pci/mantis/mantis_hif.c int mantis_hif_init(struct mantis_ca *ca)
ca                194 drivers/media/pci/mantis/mantis_hif.c 	struct mantis_slot *slot = ca->slot;
ca                195 drivers/media/pci/mantis/mantis_hif.c 	struct mantis_pci *mantis = ca->ca_priv;
ca                201 drivers/media/pci/mantis/mantis_hif.c 	mutex_lock(&ca->ca_lock);
ca                211 drivers/media/pci/mantis/mantis_hif.c 	mutex_unlock(&ca->ca_lock);
ca                216 drivers/media/pci/mantis/mantis_hif.c void mantis_hif_exit(struct mantis_ca *ca)
ca                218 drivers/media/pci/mantis/mantis_hif.c 	struct mantis_pci *mantis = ca->ca_priv;
ca                222 drivers/media/pci/mantis/mantis_hif.c 	mutex_lock(&ca->ca_lock);
ca                226 drivers/media/pci/mantis/mantis_hif.c 	mutex_unlock(&ca->ca_lock);
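
The HIF helpers all share one shape: take ca_lock, program the host-interface registers, then block in a wait helper until the interrupt path flags completion, with wait_event_timeout() bounding the wait; mantis_evm.c entries 79-81 show the producer side setting hif_event and waking hif_opdone_wq. A sketch of that producer/consumer pair with the register programming abstracted away (MY_OPDONE and MY_TIMEOUT are assumptions):

	#include <linux/jiffies.h>
	#include <linux/mutex.h>
	#include <linux/wait.h>

	#define MY_OPDONE	0x01			/* assumed event bit */
	#define MY_TIMEOUT	msecs_to_jiffies(500)	/* assumed deadline */

	struct my_ca {
		struct mutex lock;
		wait_queue_head_t opdone_wq;
		u32 hif_event;
	};

	/* interrupt/deferred path: flag completion, then wake the waiter */
	static void my_op_done(struct my_ca *ca)
	{
		ca->hif_event |= MY_OPDONE;
		wake_up(&ca->opdone_wq);
	}

	static int my_hif_read(struct my_ca *ca, u32 addr, u8 *data)
	{
		int ret = 0;

		mutex_lock(&ca->lock);
		/* ... program addr into the HIF registers, start the read ... */
		if (!wait_event_timeout(ca->opdone_wq,
					ca->hif_event & MY_OPDONE, MY_TIMEOUT)) {
			ret = -ETIMEDOUT;	/* hardware never answered */
		} else {
			ca->hif_event &= ~MY_OPDONE;
			/* ... fetch *data from the data register ... */
		}
		mutex_unlock(&ca->lock);
		return ret;
	}
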
ca                 56 drivers/media/pci/mantis/mantis_link.h extern void mantis_event_cam_plugin(struct mantis_ca *ca);
ca                 57 drivers/media/pci/mantis/mantis_link.h extern void mantis_event_cam_unplug(struct mantis_ca *ca);
ca                 58 drivers/media/pci/mantis/mantis_link.h extern int mantis_pcmcia_init(struct mantis_ca *ca);
ca                 59 drivers/media/pci/mantis/mantis_link.h extern void mantis_pcmcia_exit(struct mantis_ca *ca);
ca                 60 drivers/media/pci/mantis/mantis_link.h extern int mantis_evmgr_init(struct mantis_ca *ca);
ca                 61 drivers/media/pci/mantis/mantis_link.h extern void mantis_evmgr_exit(struct mantis_ca *ca);
ca                 64 drivers/media/pci/mantis/mantis_link.h extern int mantis_hif_init(struct mantis_ca *ca);
ca                 65 drivers/media/pci/mantis/mantis_link.h extern void mantis_hif_exit(struct mantis_ca *ca);
ca                 66 drivers/media/pci/mantis/mantis_link.h extern int mantis_hif_read_mem(struct mantis_ca *ca, u32 addr);
ca                 67 drivers/media/pci/mantis/mantis_link.h extern int mantis_hif_write_mem(struct mantis_ca *ca, u32 addr, u8 data);
ca                 68 drivers/media/pci/mantis/mantis_link.h extern int mantis_hif_read_iom(struct mantis_ca *ca, u32 addr);
ca                 69 drivers/media/pci/mantis/mantis_link.h extern int mantis_hif_write_iom(struct mantis_ca *ca, u32 addr, u8 data);
ca                 30 drivers/media/pci/mantis/mantis_pcmcia.c void mantis_event_cam_plugin(struct mantis_ca *ca)
ca                 32 drivers/media/pci/mantis/mantis_pcmcia.c 	struct mantis_pci *mantis = ca->ca_priv;
ca                 36 drivers/media/pci/mantis/mantis_pcmcia.c 	if (ca->slot_state == MODULE_XTRACTED) {
ca                 45 drivers/media/pci/mantis/mantis_pcmcia.c 		ca->slot_state = MODULE_INSERTED;
ca                 54 drivers/media/pci/mantis/mantis_pcmcia.c void mantis_event_cam_unplug(struct mantis_ca *ca)
ca                 56 drivers/media/pci/mantis/mantis_pcmcia.c 	struct mantis_pci *mantis = ca->ca_priv;
ca                 60 drivers/media/pci/mantis/mantis_pcmcia.c 	if (ca->slot_state == MODULE_INSERTED) {
ca                 69 drivers/media/pci/mantis/mantis_pcmcia.c 		ca->slot_state = MODULE_XTRACTED;
ca                 74 drivers/media/pci/mantis/mantis_pcmcia.c int mantis_pcmcia_init(struct mantis_ca *ca)
ca                 76 drivers/media/pci/mantis/mantis_pcmcia.c 	struct mantis_pci *mantis = ca->ca_priv;
ca                 87 drivers/media/pci/mantis/mantis_pcmcia.c 		ca->slot_state = MODULE_INSERTED;
ca                 88 drivers/media/pci/mantis/mantis_pcmcia.c 		dvb_ca_en50221_camchange_irq(&ca->en50221,
ca                 94 drivers/media/pci/mantis/mantis_pcmcia.c 		ca->slot_state = MODULE_XTRACTED;
ca                 95 drivers/media/pci/mantis/mantis_pcmcia.c 		dvb_ca_en50221_camchange_irq(&ca->en50221,
ca                103 drivers/media/pci/mantis/mantis_pcmcia.c void mantis_pcmcia_exit(struct mantis_ca *ca)
ca                105 drivers/media/pci/mantis/mantis_pcmcia.c 	struct mantis_pci *mantis = ca->ca_priv;
ca                 92 drivers/media/pci/netup_unidvb/netup_unidvb.h 	struct dvb_ca_en50221		ca;
ca                202 drivers/media/pci/netup_unidvb/netup_unidvb_ci.c 	state->ca.owner = THIS_MODULE;
ca                203 drivers/media/pci/netup_unidvb/netup_unidvb_ci.c 	state->ca.read_attribute_mem = netup_unidvb_ci_read_attribute_mem;
ca                204 drivers/media/pci/netup_unidvb/netup_unidvb_ci.c 	state->ca.write_attribute_mem = netup_unidvb_ci_write_attribute_mem;
ca                205 drivers/media/pci/netup_unidvb/netup_unidvb_ci.c 	state->ca.read_cam_control = netup_unidvb_ci_read_cam_ctl;
ca                206 drivers/media/pci/netup_unidvb/netup_unidvb_ci.c 	state->ca.write_cam_control = netup_unidvb_ci_write_cam_ctl;
ca                207 drivers/media/pci/netup_unidvb/netup_unidvb_ci.c 	state->ca.slot_reset = netup_unidvb_ci_slot_reset;
ca                208 drivers/media/pci/netup_unidvb/netup_unidvb_ci.c 	state->ca.slot_shutdown = netup_unidvb_ci_slot_shutdown;
ca                209 drivers/media/pci/netup_unidvb/netup_unidvb_ci.c 	state->ca.slot_ts_enable = netup_unidvb_ci_slot_ts_ctl;
ca                210 drivers/media/pci/netup_unidvb/netup_unidvb_ci.c 	state->ca.poll_slot_status = netup_unidvb_poll_ci_slot_status;
ca                211 drivers/media/pci/netup_unidvb/netup_unidvb_ci.c 	state->ca.data = state;
ca                213 drivers/media/pci/netup_unidvb/netup_unidvb_ci.c 		&state->ca, 0, 1);
ca                237 drivers/media/pci/netup_unidvb/netup_unidvb_ci.c 	dvb_ca_en50221_release(&state->ca);
ca                 59 drivers/media/pci/ttpci/budget-av.c 	struct dvb_ca_en50221 ca;
ca                 63 drivers/media/pci/ttpci/budget-av.c static int ciintf_slot_shutdown(struct dvb_ca_en50221 *ca, int slot);
ca                123 drivers/media/pci/ttpci/budget-av.c static int ciintf_read_attribute_mem(struct dvb_ca_en50221 *ca, int slot, int address)
ca                125 drivers/media/pci/ttpci/budget-av.c 	struct budget_av *budget_av = (struct budget_av *) ca->data;
ca                136 drivers/media/pci/ttpci/budget-av.c 		ciintf_slot_shutdown(ca, slot);
ca                142 drivers/media/pci/ttpci/budget-av.c static int ciintf_write_attribute_mem(struct dvb_ca_en50221 *ca, int slot, int address, u8 value)
ca                144 drivers/media/pci/ttpci/budget-av.c 	struct budget_av *budget_av = (struct budget_av *) ca->data;
ca                155 drivers/media/pci/ttpci/budget-av.c 		ciintf_slot_shutdown(ca, slot);
ca                161 drivers/media/pci/ttpci/budget-av.c static int ciintf_read_cam_control(struct dvb_ca_en50221 *ca, int slot, u8 address)
ca                163 drivers/media/pci/ttpci/budget-av.c 	struct budget_av *budget_av = (struct budget_av *) ca->data;
ca                174 drivers/media/pci/ttpci/budget-av.c 		ciintf_slot_shutdown(ca, slot);
ca                181 drivers/media/pci/ttpci/budget-av.c static int ciintf_write_cam_control(struct dvb_ca_en50221 *ca, int slot, u8 address, u8 value)
ca                183 drivers/media/pci/ttpci/budget-av.c 	struct budget_av *budget_av = (struct budget_av *) ca->data;
ca                194 drivers/media/pci/ttpci/budget-av.c 		ciintf_slot_shutdown(ca, slot);
ca                200 drivers/media/pci/ttpci/budget-av.c static int ciintf_slot_reset(struct dvb_ca_en50221 *ca, int slot)
ca                202 drivers/media/pci/ttpci/budget-av.c 	struct budget_av *budget_av = (struct budget_av *) ca->data;
ca                229 drivers/media/pci/ttpci/budget-av.c static int ciintf_slot_shutdown(struct dvb_ca_en50221 *ca, int slot)
ca                231 drivers/media/pci/ttpci/budget-av.c 	struct budget_av *budget_av = (struct budget_av *) ca->data;
ca                245 drivers/media/pci/ttpci/budget-av.c static int ciintf_slot_ts_enable(struct dvb_ca_en50221 *ca, int slot)
ca                247 drivers/media/pci/ttpci/budget-av.c 	struct budget_av *budget_av = (struct budget_av *) ca->data;
ca                260 drivers/media/pci/ttpci/budget-av.c static int ciintf_poll_slot_status(struct dvb_ca_en50221 *ca, int slot, int open)
ca                262 drivers/media/pci/ttpci/budget-av.c 	struct budget_av *budget_av = (struct budget_av *) ca->data;
ca                297 drivers/media/pci/ttpci/budget-av.c 				ciintf_slot_shutdown(ca, slot);
ca                306 drivers/media/pci/ttpci/budget-av.c 		result = ciintf_read_attribute_mem(ca, slot, 0);
ca                327 drivers/media/pci/ttpci/budget-av.c 	memset(&budget_av->ca, 0, sizeof(struct dvb_ca_en50221));
ca                338 drivers/media/pci/ttpci/budget-av.c 	budget_av->ca.owner = THIS_MODULE;
ca                339 drivers/media/pci/ttpci/budget-av.c 	budget_av->ca.read_attribute_mem = ciintf_read_attribute_mem;
ca                340 drivers/media/pci/ttpci/budget-av.c 	budget_av->ca.write_attribute_mem = ciintf_write_attribute_mem;
ca                341 drivers/media/pci/ttpci/budget-av.c 	budget_av->ca.read_cam_control = ciintf_read_cam_control;
ca                342 drivers/media/pci/ttpci/budget-av.c 	budget_av->ca.write_cam_control = ciintf_write_cam_control;
ca                343 drivers/media/pci/ttpci/budget-av.c 	budget_av->ca.slot_reset = ciintf_slot_reset;
ca                344 drivers/media/pci/ttpci/budget-av.c 	budget_av->ca.slot_shutdown = ciintf_slot_shutdown;
ca                345 drivers/media/pci/ttpci/budget-av.c 	budget_av->ca.slot_ts_enable = ciintf_slot_ts_enable;
ca                346 drivers/media/pci/ttpci/budget-av.c 	budget_av->ca.poll_slot_status = ciintf_poll_slot_status;
ca                347 drivers/media/pci/ttpci/budget-av.c 	budget_av->ca.data = budget_av;
ca                352 drivers/media/pci/ttpci/budget-av.c 					  &budget_av->ca, 0, 1)) != 0) {
ca                375 drivers/media/pci/ttpci/budget-av.c 	dvb_ca_en50221_release(&budget_av->ca);
ca                 97 drivers/media/pci/ttpci/budget-ci.c 	struct dvb_ca_en50221 ca;
ca                252 drivers/media/pci/ttpci/budget-ci.c static int ciintf_read_attribute_mem(struct dvb_ca_en50221 *ca, int slot, int address)
ca                254 drivers/media/pci/ttpci/budget-ci.c 	struct budget_ci *budget_ci = (struct budget_ci *) ca->data;
ca                263 drivers/media/pci/ttpci/budget-ci.c static int ciintf_write_attribute_mem(struct dvb_ca_en50221 *ca, int slot, int address, u8 value)
ca                265 drivers/media/pci/ttpci/budget-ci.c 	struct budget_ci *budget_ci = (struct budget_ci *) ca->data;
ca                274 drivers/media/pci/ttpci/budget-ci.c static int ciintf_read_cam_control(struct dvb_ca_en50221 *ca, int slot, u8 address)
ca                276 drivers/media/pci/ttpci/budget-ci.c 	struct budget_ci *budget_ci = (struct budget_ci *) ca->data;
ca                285 drivers/media/pci/ttpci/budget-ci.c static int ciintf_write_cam_control(struct dvb_ca_en50221 *ca, int slot, u8 address, u8 value)
ca                287 drivers/media/pci/ttpci/budget-ci.c 	struct budget_ci *budget_ci = (struct budget_ci *) ca->data;
ca                296 drivers/media/pci/ttpci/budget-ci.c static int ciintf_slot_reset(struct dvb_ca_en50221 *ca, int slot)
ca                298 drivers/media/pci/ttpci/budget-ci.c 	struct budget_ci *budget_ci = (struct budget_ci *) ca->data;
ca                319 drivers/media/pci/ttpci/budget-ci.c static int ciintf_slot_shutdown(struct dvb_ca_en50221 *ca, int slot)
ca                321 drivers/media/pci/ttpci/budget-ci.c 	struct budget_ci *budget_ci = (struct budget_ci *) ca->data;
ca                332 drivers/media/pci/ttpci/budget-ci.c static int ciintf_slot_ts_enable(struct dvb_ca_en50221 *ca, int slot)
ca                334 drivers/media/pci/ttpci/budget-ci.c 	struct budget_ci *budget_ci = (struct budget_ci *) ca->data;
ca                371 drivers/media/pci/ttpci/budget-ci.c 			dvb_ca_en50221_camchange_irq(&budget_ci->ca, 0,
ca                377 drivers/media/pci/ttpci/budget-ci.c 			dvb_ca_en50221_camready_irq(&budget_ci->ca, 0);
ca                381 drivers/media/pci/ttpci/budget-ci.c 			dvb_ca_en50221_frda_irq(&budget_ci->ca, 0);
ca                394 drivers/media/pci/ttpci/budget-ci.c 			dvb_ca_en50221_camchange_irq(&budget_ci->ca, 0,
ca                400 drivers/media/pci/ttpci/budget-ci.c static int ciintf_poll_slot_status(struct dvb_ca_en50221 *ca, int slot, int open)
ca                402 drivers/media/pci/ttpci/budget-ci.c 	struct budget_ci *budget_ci = (struct budget_ci *) ca->data;
ca                419 drivers/media/pci/ttpci/budget-ci.c 			if (ciintf_read_attribute_mem(ca, slot, 0) == 0x1d) {
ca                445 drivers/media/pci/ttpci/budget-ci.c 	memset(&budget_ci->ca, 0, sizeof(struct dvb_ca_en50221));
ca                475 drivers/media/pci/ttpci/budget-ci.c 	budget_ci->ca.owner = THIS_MODULE;
ca                476 drivers/media/pci/ttpci/budget-ci.c 	budget_ci->ca.read_attribute_mem = ciintf_read_attribute_mem;
ca                477 drivers/media/pci/ttpci/budget-ci.c 	budget_ci->ca.write_attribute_mem = ciintf_write_attribute_mem;
ca                478 drivers/media/pci/ttpci/budget-ci.c 	budget_ci->ca.read_cam_control = ciintf_read_cam_control;
ca                479 drivers/media/pci/ttpci/budget-ci.c 	budget_ci->ca.write_cam_control = ciintf_write_cam_control;
ca                480 drivers/media/pci/ttpci/budget-ci.c 	budget_ci->ca.slot_reset = ciintf_slot_reset;
ca                481 drivers/media/pci/ttpci/budget-ci.c 	budget_ci->ca.slot_shutdown = ciintf_slot_shutdown;
ca                482 drivers/media/pci/ttpci/budget-ci.c 	budget_ci->ca.slot_ts_enable = ciintf_slot_ts_enable;
ca                483 drivers/media/pci/ttpci/budget-ci.c 	budget_ci->ca.poll_slot_status = ciintf_poll_slot_status;
ca                484 drivers/media/pci/ttpci/budget-ci.c 	budget_ci->ca.data = budget_ci;
ca                486 drivers/media/pci/ttpci/budget-ci.c 					  &budget_ci->ca,
ca                516 drivers/media/pci/ttpci/budget-ci.c 		dvb_ca_en50221_camchange_irq(&budget_ci->ca, 0, flags);
ca                547 drivers/media/pci/ttpci/budget-ci.c 	dvb_ca_en50221_release(&budget_ci->ca);
ca                 44 drivers/media/usb/dvb-usb-v2/az6007.c 	struct dvb_ca_en50221	ca;
ca                247 drivers/media/usb/dvb-usb-v2/az6007.c static int az6007_ci_read_attribute_mem(struct dvb_ca_en50221 *ca,
ca                251 drivers/media/usb/dvb-usb-v2/az6007.c 	struct dvb_usb_device *d = (struct dvb_usb_device *)ca->data;
ca                288 drivers/media/usb/dvb-usb-v2/az6007.c static int az6007_ci_write_attribute_mem(struct dvb_ca_en50221 *ca,
ca                293 drivers/media/usb/dvb-usb-v2/az6007.c 	struct dvb_usb_device *d = (struct dvb_usb_device *)ca->data;
ca                320 drivers/media/usb/dvb-usb-v2/az6007.c static int az6007_ci_read_cam_control(struct dvb_ca_en50221 *ca,
ca                324 drivers/media/usb/dvb-usb-v2/az6007.c 	struct dvb_usb_device *d = (struct dvb_usb_device *)ca->data;
ca                365 drivers/media/usb/dvb-usb-v2/az6007.c static int az6007_ci_write_cam_control(struct dvb_ca_en50221 *ca,
ca                370 drivers/media/usb/dvb-usb-v2/az6007.c 	struct dvb_usb_device *d = (struct dvb_usb_device *)ca->data;
ca                399 drivers/media/usb/dvb-usb-v2/az6007.c static int CI_CamReady(struct dvb_ca_en50221 *ca, int slot)
ca                401 drivers/media/usb/dvb-usb-v2/az6007.c 	struct dvb_usb_device *d = (struct dvb_usb_device *)ca->data;
ca                430 drivers/media/usb/dvb-usb-v2/az6007.c static int az6007_ci_slot_reset(struct dvb_ca_en50221 *ca, int slot)
ca                432 drivers/media/usb/dvb-usb-v2/az6007.c 	struct dvb_usb_device *d = (struct dvb_usb_device *)ca->data;
ca                469 drivers/media/usb/dvb-usb-v2/az6007.c 		if (CI_CamReady(ca, slot)) {
ca                481 drivers/media/usb/dvb-usb-v2/az6007.c static int az6007_ci_slot_shutdown(struct dvb_ca_en50221 *ca, int slot)
ca                486 drivers/media/usb/dvb-usb-v2/az6007.c static int az6007_ci_slot_ts_enable(struct dvb_ca_en50221 *ca, int slot)
ca                488 drivers/media/usb/dvb-usb-v2/az6007.c 	struct dvb_usb_device *d = (struct dvb_usb_device *)ca->data;
ca                515 drivers/media/usb/dvb-usb-v2/az6007.c static int az6007_ci_poll_slot_status(struct dvb_ca_en50221 *ca, int slot, int open)
ca                517 drivers/media/usb/dvb-usb-v2/az6007.c 	struct dvb_usb_device *d = (struct dvb_usb_device *)ca->data;
ca                567 drivers/media/usb/dvb-usb-v2/az6007.c 	if (NULL == state->ca.data)
ca                570 drivers/media/usb/dvb-usb-v2/az6007.c 	dvb_ca_en50221_release(&state->ca);
ca                572 drivers/media/usb/dvb-usb-v2/az6007.c 	memset(&state->ca, 0, sizeof(state->ca));
ca                585 drivers/media/usb/dvb-usb-v2/az6007.c 	state->ca.owner			= THIS_MODULE;
ca                586 drivers/media/usb/dvb-usb-v2/az6007.c 	state->ca.read_attribute_mem	= az6007_ci_read_attribute_mem;
ca                587 drivers/media/usb/dvb-usb-v2/az6007.c 	state->ca.write_attribute_mem	= az6007_ci_write_attribute_mem;
ca                588 drivers/media/usb/dvb-usb-v2/az6007.c 	state->ca.read_cam_control	= az6007_ci_read_cam_control;
ca                589 drivers/media/usb/dvb-usb-v2/az6007.c 	state->ca.write_cam_control	= az6007_ci_write_cam_control;
ca                590 drivers/media/usb/dvb-usb-v2/az6007.c 	state->ca.slot_reset		= az6007_ci_slot_reset;
ca                591 drivers/media/usb/dvb-usb-v2/az6007.c 	state->ca.slot_shutdown		= az6007_ci_slot_shutdown;
ca                592 drivers/media/usb/dvb-usb-v2/az6007.c 	state->ca.slot_ts_enable	= az6007_ci_slot_ts_enable;
ca                593 drivers/media/usb/dvb-usb-v2/az6007.c 	state->ca.poll_slot_status	= az6007_ci_poll_slot_status;
ca                594 drivers/media/usb/dvb-usb-v2/az6007.c 	state->ca.data			= d;
ca                597 drivers/media/usb/dvb-usb-v2/az6007.c 				  &state->ca,
ca                602 drivers/media/usb/dvb-usb-v2/az6007.c 		memset(&state->ca, 0, sizeof(state->ca));
ca                 26 drivers/media/usb/dvb-usb/az6027.c 	struct dvb_ca_en50221 ca;
ca                405 drivers/media/usb/dvb-usb/az6027.c static int az6027_ci_read_attribute_mem(struct dvb_ca_en50221 *ca,
ca                409 drivers/media/usb/dvb-usb/az6027.c 	struct dvb_usb_device *d = (struct dvb_usb_device *)ca->data;
ca                446 drivers/media/usb/dvb-usb/az6027.c static int az6027_ci_write_attribute_mem(struct dvb_ca_en50221 *ca,
ca                451 drivers/media/usb/dvb-usb/az6027.c 	struct dvb_usb_device *d = (struct dvb_usb_device *)ca->data;
ca                478 drivers/media/usb/dvb-usb/az6027.c static int az6027_ci_read_cam_control(struct dvb_ca_en50221 *ca,
ca                482 drivers/media/usb/dvb-usb/az6027.c 	struct dvb_usb_device *d = (struct dvb_usb_device *)ca->data;
ca                523 drivers/media/usb/dvb-usb/az6027.c static int az6027_ci_write_cam_control(struct dvb_ca_en50221 *ca,
ca                528 drivers/media/usb/dvb-usb/az6027.c 	struct dvb_usb_device *d = (struct dvb_usb_device *)ca->data;
ca                557 drivers/media/usb/dvb-usb/az6027.c static int CI_CamReady(struct dvb_ca_en50221 *ca, int slot)
ca                559 drivers/media/usb/dvb-usb/az6027.c 	struct dvb_usb_device *d = (struct dvb_usb_device *)ca->data;
ca                588 drivers/media/usb/dvb-usb/az6027.c static int az6027_ci_slot_reset(struct dvb_ca_en50221 *ca, int slot)
ca                590 drivers/media/usb/dvb-usb/az6027.c 	struct dvb_usb_device *d = (struct dvb_usb_device *)ca->data;
ca                627 drivers/media/usb/dvb-usb/az6027.c 		if (CI_CamReady(ca, slot)) {
ca                639 drivers/media/usb/dvb-usb/az6027.c static int az6027_ci_slot_shutdown(struct dvb_ca_en50221 *ca, int slot)
ca                644 drivers/media/usb/dvb-usb/az6027.c static int az6027_ci_slot_ts_enable(struct dvb_ca_en50221 *ca, int slot)
ca                646 drivers/media/usb/dvb-usb/az6027.c 	struct dvb_usb_device *d = (struct dvb_usb_device *)ca->data;
ca                673 drivers/media/usb/dvb-usb/az6027.c static int az6027_ci_poll_slot_status(struct dvb_ca_en50221 *ca, int slot, int open)
ca                675 drivers/media/usb/dvb-usb/az6027.c 	struct dvb_usb_device *d = (struct dvb_usb_device *)ca->data;
ca                725 drivers/media/usb/dvb-usb/az6027.c 	if (NULL == state->ca.data)
ca                728 drivers/media/usb/dvb-usb/az6027.c 	dvb_ca_en50221_release(&state->ca);
ca                730 drivers/media/usb/dvb-usb/az6027.c 	memset(&state->ca, 0, sizeof(state->ca));
ca                744 drivers/media/usb/dvb-usb/az6027.c 	state->ca.owner			= THIS_MODULE;
ca                745 drivers/media/usb/dvb-usb/az6027.c 	state->ca.read_attribute_mem	= az6027_ci_read_attribute_mem;
ca                746 drivers/media/usb/dvb-usb/az6027.c 	state->ca.write_attribute_mem	= az6027_ci_write_attribute_mem;
ca                747 drivers/media/usb/dvb-usb/az6027.c 	state->ca.read_cam_control	= az6027_ci_read_cam_control;
ca                748 drivers/media/usb/dvb-usb/az6027.c 	state->ca.write_cam_control	= az6027_ci_write_cam_control;
ca                749 drivers/media/usb/dvb-usb/az6027.c 	state->ca.slot_reset		= az6027_ci_slot_reset;
ca                750 drivers/media/usb/dvb-usb/az6027.c 	state->ca.slot_shutdown		= az6027_ci_slot_shutdown;
ca                751 drivers/media/usb/dvb-usb/az6027.c 	state->ca.slot_ts_enable	= az6027_ci_slot_ts_enable;
ca                752 drivers/media/usb/dvb-usb/az6027.c 	state->ca.poll_slot_status	= az6027_ci_poll_slot_status;
ca                753 drivers/media/usb/dvb-usb/az6027.c 	state->ca.data			= d;
ca                756 drivers/media/usb/dvb-usb/az6027.c 				  &state->ca,
ca                761 drivers/media/usb/dvb-usb/az6027.c 		memset(&state->ca, 0, sizeof(state->ca));
ca                 90 drivers/media/usb/dvb-usb/pctv452e.c 	struct dvb_ca_en50221 ca;
ca                148 drivers/media/usb/dvb-usb/pctv452e.c static int tt3650_ci_msg_locked(struct dvb_ca_en50221 *ca,
ca                152 drivers/media/usb/dvb-usb/pctv452e.c 	struct dvb_usb_device *d = (struct dvb_usb_device *)ca->data;
ca                163 drivers/media/usb/dvb-usb/pctv452e.c static int tt3650_ci_read_attribute_mem(struct dvb_ca_en50221 *ca,
ca                175 drivers/media/usb/dvb-usb/pctv452e.c 	ret = tt3650_ci_msg_locked(ca, TT3650_CMD_CI_RD_ATTR, buf, 2, 3);
ca                186 drivers/media/usb/dvb-usb/pctv452e.c static int tt3650_ci_write_attribute_mem(struct dvb_ca_en50221 *ca,
ca                201 drivers/media/usb/dvb-usb/pctv452e.c 	return tt3650_ci_msg_locked(ca, TT3650_CMD_CI_WR_ATTR, buf, 3, 3);
ca                204 drivers/media/usb/dvb-usb/pctv452e.c static int tt3650_ci_read_cam_control(struct dvb_ca_en50221 *ca,
ca                216 drivers/media/usb/dvb-usb/pctv452e.c 	ret = tt3650_ci_msg_locked(ca, TT3650_CMD_CI_RD_CTRL, buf, 1, 2);
ca                227 drivers/media/usb/dvb-usb/pctv452e.c static int tt3650_ci_write_cam_control(struct dvb_ca_en50221 *ca,
ca                243 drivers/media/usb/dvb-usb/pctv452e.c 	return tt3650_ci_msg_locked(ca, TT3650_CMD_CI_WR_CTRL, buf, 2, 2);
ca                246 drivers/media/usb/dvb-usb/pctv452e.c static int tt3650_ci_set_video_port(struct dvb_ca_en50221 *ca,
ca                261 drivers/media/usb/dvb-usb/pctv452e.c 	ret = tt3650_ci_msg_locked(ca, TT3650_CMD_CI_SET_VIDEO_PORT, buf, 1, 1);
ca                273 drivers/media/usb/dvb-usb/pctv452e.c static int tt3650_ci_slot_shutdown(struct dvb_ca_en50221 *ca, int slot)
ca                275 drivers/media/usb/dvb-usb/pctv452e.c 	return tt3650_ci_set_video_port(ca, slot, /* enable */ 0);
ca                278 drivers/media/usb/dvb-usb/pctv452e.c static int tt3650_ci_slot_ts_enable(struct dvb_ca_en50221 *ca, int slot)
ca                280 drivers/media/usb/dvb-usb/pctv452e.c 	return tt3650_ci_set_video_port(ca, slot, /* enable */ 1);
ca                283 drivers/media/usb/dvb-usb/pctv452e.c static int tt3650_ci_slot_reset(struct dvb_ca_en50221 *ca, int slot)
ca                285 drivers/media/usb/dvb-usb/pctv452e.c 	struct dvb_usb_device *d = (struct dvb_usb_device *)ca->data;
ca                323 drivers/media/usb/dvb-usb/pctv452e.c static int tt3650_ci_poll_slot_status(struct dvb_ca_en50221 *ca,
ca                333 drivers/media/usb/dvb-usb/pctv452e.c 	ret = tt3650_ci_msg_locked(ca, TT3650_CMD_CI_TEST, buf, 0, 1);
ca                358 drivers/media/usb/dvb-usb/pctv452e.c 	if (NULL == state->ca.data)
ca                362 drivers/media/usb/dvb-usb/pctv452e.c 	tt3650_ci_set_video_port(&state->ca, /* slot */ 0, /* enable */ 0);
ca                364 drivers/media/usb/dvb-usb/pctv452e.c 	dvb_ca_en50221_release(&state->ca);
ca                366 drivers/media/usb/dvb-usb/pctv452e.c 	memset(&state->ca, 0, sizeof(state->ca));
ca                379 drivers/media/usb/dvb-usb/pctv452e.c 	state->ca.owner = THIS_MODULE;
ca                380 drivers/media/usb/dvb-usb/pctv452e.c 	state->ca.read_attribute_mem = tt3650_ci_read_attribute_mem;
ca                381 drivers/media/usb/dvb-usb/pctv452e.c 	state->ca.write_attribute_mem = tt3650_ci_write_attribute_mem;
ca                382 drivers/media/usb/dvb-usb/pctv452e.c 	state->ca.read_cam_control = tt3650_ci_read_cam_control;
ca                383 drivers/media/usb/dvb-usb/pctv452e.c 	state->ca.write_cam_control = tt3650_ci_write_cam_control;
ca                384 drivers/media/usb/dvb-usb/pctv452e.c 	state->ca.slot_reset = tt3650_ci_slot_reset;
ca                385 drivers/media/usb/dvb-usb/pctv452e.c 	state->ca.slot_shutdown = tt3650_ci_slot_shutdown;
ca                386 drivers/media/usb/dvb-usb/pctv452e.c 	state->ca.slot_ts_enable = tt3650_ci_slot_ts_enable;
ca                387 drivers/media/usb/dvb-usb/pctv452e.c 	state->ca.poll_slot_status = tt3650_ci_poll_slot_status;
ca                388 drivers/media/usb/dvb-usb/pctv452e.c 	state->ca.data = d;
ca                391 drivers/media/usb/dvb-usb/pctv452e.c 				   &state->ca,
ca                396 drivers/media/usb/dvb-usb/pctv452e.c 		memset(&state->ca, 0, sizeof(state->ca));
ca                 65 drivers/media/usb/dvb-usb/ttusb2.c 	struct dvb_ca_en50221 ca;
ca                130 drivers/media/usb/dvb-usb/ttusb2.c static int tt3650_ci_msg_locked(struct dvb_ca_en50221 *ca, u8 cmd, u8 *data, unsigned int write_len, unsigned int read_len)
ca                132 drivers/media/usb/dvb-usb/ttusb2.c 	struct dvb_usb_device *d = ca->data;
ca                143 drivers/media/usb/dvb-usb/ttusb2.c static int tt3650_ci_read_attribute_mem(struct dvb_ca_en50221 *ca, int slot, int address)
ca                155 drivers/media/usb/dvb-usb/ttusb2.c 	ret = tt3650_ci_msg_locked(ca, TT3650_CMD_CI_RD_ATTR, buf, 2, 3);
ca                165 drivers/media/usb/dvb-usb/ttusb2.c static int tt3650_ci_write_attribute_mem(struct dvb_ca_en50221 *ca, int slot, int address, u8 value)
ca                178 drivers/media/usb/dvb-usb/ttusb2.c 	return tt3650_ci_msg_locked(ca, TT3650_CMD_CI_WR_ATTR, buf, 3, 3);
ca                181 drivers/media/usb/dvb-usb/ttusb2.c static int tt3650_ci_read_cam_control(struct dvb_ca_en50221 *ca, int slot, u8 address)
ca                191 drivers/media/usb/dvb-usb/ttusb2.c 	ret = tt3650_ci_msg_locked(ca, TT3650_CMD_CI_RD_CTRL, buf, 1, 2);
ca                201 drivers/media/usb/dvb-usb/ttusb2.c static int tt3650_ci_write_cam_control(struct dvb_ca_en50221 *ca, int slot, u8 address, u8 value)
ca                213 drivers/media/usb/dvb-usb/ttusb2.c 	return tt3650_ci_msg_locked(ca, TT3650_CMD_CI_WR_CTRL, buf, 2, 2);
ca                216 drivers/media/usb/dvb-usb/ttusb2.c static int tt3650_ci_set_video_port(struct dvb_ca_en50221 *ca, int slot, int enable)
ca                228 drivers/media/usb/dvb-usb/ttusb2.c 	ret = tt3650_ci_msg_locked(ca, TT3650_CMD_CI_SET_VIDEO_PORT, buf, 1, 1);
ca                240 drivers/media/usb/dvb-usb/ttusb2.c static int tt3650_ci_slot_shutdown(struct dvb_ca_en50221 *ca, int slot)
ca                242 drivers/media/usb/dvb-usb/ttusb2.c 	return tt3650_ci_set_video_port(ca, slot, 0);
ca                245 drivers/media/usb/dvb-usb/ttusb2.c static int tt3650_ci_slot_ts_enable(struct dvb_ca_en50221 *ca, int slot)
ca                247 drivers/media/usb/dvb-usb/ttusb2.c 	return tt3650_ci_set_video_port(ca, slot, 1);
ca                250 drivers/media/usb/dvb-usb/ttusb2.c static int tt3650_ci_slot_reset(struct dvb_ca_en50221 *ca, int slot)
ca                252 drivers/media/usb/dvb-usb/ttusb2.c 	struct dvb_usb_device *d = ca->data;
ca                292 drivers/media/usb/dvb-usb/ttusb2.c static int tt3650_ci_poll_slot_status(struct dvb_ca_en50221 *ca, int slot, int open)
ca                300 drivers/media/usb/dvb-usb/ttusb2.c 	ret = tt3650_ci_msg_locked(ca, TT3650_CMD_CI_TEST, buf, 0, 1);
ca                324 drivers/media/usb/dvb-usb/ttusb2.c 	if (NULL == state->ca.data)
ca                327 drivers/media/usb/dvb-usb/ttusb2.c 	dvb_ca_en50221_release(&state->ca);
ca                329 drivers/media/usb/dvb-usb/ttusb2.c 	memset(&state->ca, 0, sizeof(state->ca));
ca                342 drivers/media/usb/dvb-usb/ttusb2.c 	state->ca.owner = THIS_MODULE;
ca                343 drivers/media/usb/dvb-usb/ttusb2.c 	state->ca.read_attribute_mem = tt3650_ci_read_attribute_mem;
ca                344 drivers/media/usb/dvb-usb/ttusb2.c 	state->ca.write_attribute_mem = tt3650_ci_write_attribute_mem;
ca                345 drivers/media/usb/dvb-usb/ttusb2.c 	state->ca.read_cam_control = tt3650_ci_read_cam_control;
ca                346 drivers/media/usb/dvb-usb/ttusb2.c 	state->ca.write_cam_control = tt3650_ci_write_cam_control;
ca                347 drivers/media/usb/dvb-usb/ttusb2.c 	state->ca.slot_reset = tt3650_ci_slot_reset;
ca                348 drivers/media/usb/dvb-usb/ttusb2.c 	state->ca.slot_shutdown = tt3650_ci_slot_shutdown;
ca                349 drivers/media/usb/dvb-usb/ttusb2.c 	state->ca.slot_ts_enable = tt3650_ci_slot_ts_enable;
ca                350 drivers/media/usb/dvb-usb/ttusb2.c 	state->ca.poll_slot_status = tt3650_ci_poll_slot_status;
ca                351 drivers/media/usb/dvb-usb/ttusb2.c 	state->ca.data = d;
ca                354 drivers/media/usb/dvb-usb/ttusb2.c 				  &state->ca,
ca                359 drivers/media/usb/dvb-usb/ttusb2.c 		memset(&state->ca, 0, sizeof(state->ca));
ca                203 drivers/net/ethernet/i825xx/82596.c 	unsigned long ca;
ca                379 drivers/net/ethernet/i825xx/82596.c 		((struct i596_reg *) dev->base_addr)->ca = 1;
ca                169 drivers/net/ethernet/i825xx/lib82596.c 	u32            ca;
ca                329 drivers/net/ethernet/i825xx/lib82596.c 	void __iomem *ca;
ca                357 drivers/net/ethernet/i825xx/lib82596.c static inline void ca(struct net_device *dev);
ca                583 drivers/net/ethernet/i825xx/lib82596.c 	ca(dev);
ca                630 drivers/net/ethernet/i825xx/lib82596.c 	ca(dev);
ca                854 drivers/net/ethernet/i825xx/lib82596.c 	ca(dev);
ca                895 drivers/net/ethernet/i825xx/lib82596.c 		ca(dev);
ca                960 drivers/net/ethernet/i825xx/lib82596.c 		ca (dev);
ca               1291 drivers/net/ethernet/i825xx/lib82596.c 	ca(dev);
ca               1318 drivers/net/ethernet/i825xx/lib82596.c 	ca(dev);
ca                 54 drivers/net/ethernet/i825xx/sni_82596.c 	writel(0, lp->ca);
ca                 82 drivers/net/ethernet/i825xx/sni_82596.c 	struct  resource *res, *ca, *idprom, *options;
ca                 89 drivers/net/ethernet/i825xx/sni_82596.c 	ca = platform_get_resource(dev, IORESOURCE_MEM, 1);
ca                 92 drivers/net/ethernet/i825xx/sni_82596.c 	if (!res || !ca || !options || !idprom)
ca                 97 drivers/net/ethernet/i825xx/sni_82596.c 	ca_addr = ioremap_nocache(ca->start, 4);
ca                134 drivers/net/ethernet/i825xx/sni_82596.c 	lp->ca = ca_addr;
ca                158 drivers/net/ethernet/i825xx/sni_82596.c 	iounmap(lp->ca);
ca                 78 drivers/net/ethernet/intel/fm10k/fm10k_ethtool.c 	FM10K_STAT("ca", stats.ca.count),
ca               1430 drivers/net/ethernet/intel/fm10k/fm10k_pf.c 	u32 timeout, ur, ca, um, xec, vlan_drop, loopback_drop, nodesc_drop;
ca               1441 drivers/net/ethernet/intel/fm10k/fm10k_pf.c 		ca = fm10k_read_hw_stats_32b(hw, FM10K_STATS_CA, &stats->ca);
ca               1467 drivers/net/ethernet/intel/fm10k/fm10k_pf.c 		stats->ca.count += ca;
ca               1478 drivers/net/ethernet/intel/fm10k/fm10k_pf.c 	fm10k_update_hw_base_32b(&stats->ca, ca);
ca               1504 drivers/net/ethernet/intel/fm10k/fm10k_pf.c 	fm10k_unbind_hw_stats_32b(&stats->ca);
ca                433 drivers/net/ethernet/intel/fm10k/fm10k_type.h 	struct fm10k_hw_stat	ca;
ca                235 drivers/net/ethernet/korina.c 	td->ca = CPHYSADDR(skb->data);
ca                429 drivers/net/ethernet/korina.c 			rd->ca = CPHYSADDR(skb_new->data);
ca                431 drivers/net/ethernet/korina.c 			rd->ca = CPHYSADDR(skb->data);
ca                454 drivers/net/ethernet/korina.c 		rd->ca = CPHYSADDR(skb->data);
ca                586 drivers/net/ethernet/korina.c 		lp->td_ring[lp->tx_next_done].ca = 0;
ca                752 drivers/net/ethernet/korina.c 		lp->td_ring[i].ca = 0;
ca                768 drivers/net/ethernet/korina.c 		lp->rd_ring[i].ca = CPHYSADDR(skb->data);
ca               2480 drivers/net/ethernet/mellanox/mlxsw/reg.h MLXSW_ITEM32(reg, pefa, ca, 0x04, 24, 1);
ca               2491 drivers/net/ethernet/mellanox/mlxsw/reg.h static inline void mlxsw_reg_pefa_pack(char *payload, u32 index, bool ca,
ca               2496 drivers/net/ethernet/mellanox/mlxsw/reg.h 	mlxsw_reg_pefa_ca_set(payload, ca);
ca                  9 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c 				     char *enc_actions, bool is_first, bool ca)
ca                 26 drivers/net/ethernet/mellanox/mlxsw/spectrum_acl_flex_actions.c 	mlxsw_reg_pefa_pack(pefa_pl, kvdl_index, ca, enc_actions);
ca               1024 drivers/pcmcia/cistpl.c 	u_int len, ca, ha;
ca               1037 drivers/pcmcia/cistpl.c 		len = ca = ha = 0;
ca               1046 drivers/pcmcia/cistpl.c 			ca += *p << (j*8);
ca               1055 drivers/pcmcia/cistpl.c 		mem->win[i].card_addr = ca << 8;
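
In the cistpl.c lines above, the parser zeroes len, ca and ha, accumulates tuple bytes least-significant first (ca += *p << (j*8)), and finally scales the result (card_addr = ca << 8), i.e. the card address is stored in 256-byte units. A small self-contained sketch of that little-endian accumulation, assuming a two-byte field for illustration:

    #include <stdio.h>

    /* Accumulate an nbytes-wide little-endian field, byte by byte,
     * the way the CIS tuple parser builds ca and ha above. */
    static unsigned int le_accumulate(const unsigned char *p, int nbytes)
    {
        unsigned int v = 0;
        for (int j = 0; j < nbytes; j++)
            v += (unsigned int)p[j] << (j * 8);
        return v;
    }

    int main(void)
    {
        unsigned char tuple[] = { 0x34, 0x12 };    /* example bytes only */
        unsigned int ca = le_accumulate(tuple, 2); /* 0x1234 */
        printf("card_addr = 0x%x\n", ca << 8);     /* 256-byte units */
        return 0;
    }
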
ca                142 drivers/ras/cec.c static void do_spring_cleaning(struct ce_array *ca)
ca                146 drivers/ras/cec.c 	for (i = 0; i < ca->n; i++) {
ca                147 drivers/ras/cec.c 		u8 decay = DECAY(ca->array[i]);
ca                154 drivers/ras/cec.c 		ca->array[i] &= ~(DECAY_MASK << COUNT_BITS);
ca                155 drivers/ras/cec.c 		ca->array[i] |= (decay << COUNT_BITS);
ca                157 drivers/ras/cec.c 	ca->decay_count = 0;
ca                158 drivers/ras/cec.c 	ca->decays_done++;
ca                186 drivers/ras/cec.c static int __find_elem(struct ce_array *ca, u64 pfn, unsigned int *to)
ca                188 drivers/ras/cec.c 	int min = 0, max = ca->n - 1;
ca                194 drivers/ras/cec.c 		this_pfn = PFN(ca->array[i]);
ca                223 drivers/ras/cec.c static int find_elem(struct ce_array *ca, u64 pfn, unsigned int *to)
ca                227 drivers/ras/cec.c 	if (!ca->n) {
ca                231 drivers/ras/cec.c 	return __find_elem(ca, pfn, to);
ca                234 drivers/ras/cec.c static void del_elem(struct ce_array *ca, int idx)
ca                237 drivers/ras/cec.c 	if (ca->n - (idx + 1))
ca                238 drivers/ras/cec.c 		memmove((void *)&ca->array[idx],
ca                239 drivers/ras/cec.c 			(void *)&ca->array[idx + 1],
ca                240 drivers/ras/cec.c 			(ca->n - (idx + 1)) * sizeof(u64));
ca                242 drivers/ras/cec.c 	ca->n--;
ca                245 drivers/ras/cec.c static u64 del_lru_elem_unlocked(struct ce_array *ca)
ca                250 drivers/ras/cec.c 	for (i = 0; i < ca->n; i++) {
ca                251 drivers/ras/cec.c 		unsigned int this = FULL_COUNT(ca->array[i]);
ca                259 drivers/ras/cec.c 	del_elem(ca, min_idx);
ca                261 drivers/ras/cec.c 	return PFN(ca->array[min_idx]);
ca                270 drivers/ras/cec.c 	struct ce_array *ca = &ce_arr;
ca                273 drivers/ras/cec.c 	if (!ca->n)
ca                277 drivers/ras/cec.c 	pfn = del_lru_elem_unlocked(ca);
ca                283 drivers/ras/cec.c static bool sanity_check(struct ce_array *ca)
ca                289 drivers/ras/cec.c 	for (i = 0; i < ca->n; i++) {
ca                290 drivers/ras/cec.c 		u64 this = PFN(ca->array[i]);
ca                301 drivers/ras/cec.c 	pr_info("Sanity check dump:\n{ n: %d\n", ca->n);
ca                302 drivers/ras/cec.c 	for (i = 0; i < ca->n; i++) {
ca                303 drivers/ras/cec.c 		u64 this = PFN(ca->array[i]);
ca                305 drivers/ras/cec.c 		pr_info(" %03d: [%016llx|%03llx]\n", i, this, FULL_COUNT(ca->array[i]));
ca                314 drivers/ras/cec.c 	struct ce_array *ca = &ce_arr;
ca                327 drivers/ras/cec.c 	ca->ces_entered++;
ca                330 drivers/ras/cec.c 	if (ca->n == MAX_ELEMS)
ca                331 drivers/ras/cec.c 		WARN_ON(!del_lru_elem_unlocked(ca));
ca                333 drivers/ras/cec.c 	ret = find_elem(ca, pfn, &to);
ca                338 drivers/ras/cec.c 		memmove((void *)&ca->array[to + 1],
ca                339 drivers/ras/cec.c 			(void *)&ca->array[to],
ca                340 drivers/ras/cec.c 			(ca->n - to) * sizeof(u64));
ca                342 drivers/ras/cec.c 		ca->array[to] = pfn << PAGE_SHIFT;
ca                343 drivers/ras/cec.c 		ca->n++;
ca                347 drivers/ras/cec.c 	ca->array[to] |= DECAY_MASK << COUNT_BITS;
ca                348 drivers/ras/cec.c 	ca->array[to]++;
ca                351 drivers/ras/cec.c 	count = COUNT(ca->array[to]);
ca                353 drivers/ras/cec.c 		u64 pfn = ca->array[to] >> PAGE_SHIFT;
ca                361 drivers/ras/cec.c 			ca->pfns_poisoned++;
ca                364 drivers/ras/cec.c 		del_elem(ca, to);
ca                375 drivers/ras/cec.c 	ca->decay_count++;
ca                377 drivers/ras/cec.c 	if (ca->decay_count >= CLEAN_ELEMS)
ca                378 drivers/ras/cec.c 		do_spring_cleaning(ca);
ca                380 drivers/ras/cec.c 	WARN_ON_ONCE(sanity_check(ca));
ca                440 drivers/ras/cec.c 	struct ce_array *ca = &ce_arr;
ca                445 drivers/ras/cec.c 	seq_printf(m, "{ n: %d\n", ca->n);
ca                446 drivers/ras/cec.c 	for (i = 0; i < ca->n; i++) {
ca                447 drivers/ras/cec.c 		u64 this = PFN(ca->array[i]);
ca                450 drivers/ras/cec.c 			   i, this, bins[DECAY(ca->array[i])], COUNT(ca->array[i]));
ca                456 drivers/ras/cec.c 		   ca->ces_entered, ca->pfns_poisoned);
ca                458 drivers/ras/cec.c 	seq_printf(m, "Flags: 0x%x\n", ca->flags);
ca                461 drivers/ras/cec.c 	seq_printf(m, "Decays: %lld\n", ca->decays_done);
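
The drivers/ras/cec.c entries above pack everything about one corrected-error page into a single u64: the PFN in the bits at and above PAGE_SHIFT, a small decay field, and a count in the low COUNT_BITS bits (cec_add_elem stores pfn << PAGE_SHIFT, ORs in DECAY_MASK << COUNT_BITS, then increments). A userspace model of that layout; the 12-bit page shift and 3 decay bits are assumptions for illustration, and the exact decay-reduction step inside do_spring_cleaning() is not visible in the lines above, so a guarded decrement is assumed:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT_  12                        /* assumed */
    #define DECAY_BITS   3                         /* assumed */
    #define COUNT_BITS   (PAGE_SHIFT_ - DECAY_BITS)
    #define DECAY_MASK   ((1ULL << DECAY_BITS) - 1)

    #define PFN(e)       ((e) >> PAGE_SHIFT_)
    #define DECAY(e)     (((e) >> COUNT_BITS) & DECAY_MASK)
    #define COUNT(e)     ((unsigned int)((e) & ((1ULL << COUNT_BITS) - 1)))

    int main(void)
    {
        /* Insert a new element, as in cec_add_elem() above. */
        uint64_t e = 0xabcdULL << PAGE_SHIFT_;     /* pfn field */
        e |= DECAY_MASK << COUNT_BITS;             /* full decay credit */
        e++;                                       /* count one CE */

        /* Spring cleaning: read the decay field, reduce it, write it
         * back in place without disturbing pfn or count. */
        uint64_t decay = DECAY(e);
        if (decay)
            decay--;
        e &= ~(DECAY_MASK << COUNT_BITS);
        e |= decay << COUNT_BITS;

        printf("pfn=0x%llx decay=%llu count=%u\n",
               (unsigned long long)PFN(e),
               (unsigned long long)decay, COUNT(e));
        return 0;
    }
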
ca                166 drivers/sbus/char/oradax.c 	void *ca;		/* Completion Address */
ca                225 drivers/sbus/char/oradax.c static int dax_ccb_info(u64 ca, struct ccb_info_result *info);
ca                226 drivers/sbus/char/oradax.c static int dax_ccb_kill(u64 ca, u16 *kill_res);
ca                569 drivers/sbus/char/oradax.c 	unsigned long ca;
ca                591 drivers/sbus/char/oradax.c 	ca = ctx->ca_buf_ra + hdr.ca_offset;
ca                601 drivers/sbus/char/oradax.c 		ret = dax_ccb_kill(ca, &ctx->result.kill.action);
ca                621 drivers/sbus/char/oradax.c 		ret = dax_ccb_info(ca, &ctx->result.info);
ca                710 drivers/sbus/char/oradax.c static int dax_ccb_kill(u64 ca, u16 *kill_res)
ca                717 drivers/sbus/char/oradax.c 		dax_dbg("attempting kill on ca_ra 0x%llx", ca);
ca                718 drivers/sbus/char/oradax.c 		hv_ret = sun4v_ccb_kill(ca, kill_res);
ca                721 drivers/sbus/char/oradax.c 			dax_info_dbg("HV_EOK (ca_ra 0x%llx): %d", ca,
ca                725 drivers/sbus/char/oradax.c 			dax_dbg("%s (ca_ra 0x%llx)", err_str, ca);
ca                737 drivers/sbus/char/oradax.c static int dax_ccb_info(u64 ca, struct ccb_info_result *info)
ca                743 drivers/sbus/char/oradax.c 	dax_dbg("attempting info on ca_ra 0x%llx", ca);
ca                744 drivers/sbus/char/oradax.c 	hv_ret = sun4v_ccb_info(ca, info);
ca                747 drivers/sbus/char/oradax.c 		dax_info_dbg("HV_EOK (ca_ra 0x%llx): %d", ca, info->state);
ca                754 drivers/sbus/char/oradax.c 		dax_dbg("%s (ca_ra 0x%llx)", err_str, ca);
ca                838 drivers/sbus/char/oradax.c 		ccbp->ca = (void *)ctx->ca_buf_ra + ca_offset;
ca                669 drivers/video/fbdev/tridentfb.c 			       const struct fb_copyarea *ca)
ca                674 drivers/video/fbdev/tridentfb.c 		cfb_copyarea(info, ca);
ca                678 drivers/video/fbdev/tridentfb.c 	par->copy_rect(par, ca->sx, ca->sy, ca->dx, ca->dy,
ca                679 drivers/video/fbdev/tridentfb.c 		       ca->width, ca->height);
ca                163 fs/nfs/dns_resolve.c static int nfs_dns_match(struct cache_head *ca,
ca                169 fs/nfs/dns_resolve.c 	a = container_of(ca, struct nfs_dns_ent, h);
ca                141 fs/nfsd/nfs4idmap.c idtoname_match(struct cache_head *ca, struct cache_head *cb)
ca                143 fs/nfsd/nfs4idmap.c 	struct ent *a = container_of(ca, struct ent, h);
ca                312 fs/nfsd/nfs4idmap.c nametoid_match(struct cache_head *ca, struct cache_head *cb)
ca                314 fs/nfsd/nfs4idmap.c 	struct ent *a = container_of(ca, struct ent, h);
ca               1557 fs/nfsd/nfs4state.c static inline u32 slot_bytes(struct nfsd4_channel_attrs *ca)
ca               1561 fs/nfsd/nfs4state.c 	if (ca->maxresp_cached < NFSD_MIN_HDR_SEQ_SZ)
ca               1564 fs/nfsd/nfs4state.c 		size = ca->maxresp_cached - NFSD_MIN_HDR_SEQ_SZ;
ca               1573 fs/nfsd/nfs4state.c static u32 nfsd4_get_drc_mem(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
ca               1575 fs/nfsd/nfs4state.c 	u32 slotsize = slot_bytes(ca);
ca               1576 fs/nfsd/nfs4state.c 	u32 num = ca->maxreqs;
ca               1612 fs/nfsd/nfs4state.c static void nfsd4_put_drc_mem(struct nfsd4_channel_attrs *ca)
ca               1614 fs/nfsd/nfs4state.c 	int slotsize = slot_bytes(ca);
ca               1617 fs/nfsd/nfs4state.c 	nfsd_drc_mem_used -= slotsize * ca->maxreqs;
ca               3175 fs/nfsd/nfs4state.c static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs *ca, struct nfsd_net *nn)
ca               3179 fs/nfsd/nfs4state.c 	if (ca->maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ)
ca               3181 fs/nfsd/nfs4state.c 	if (ca->maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ)
ca               3183 fs/nfsd/nfs4state.c 	ca->headerpadsz = 0;
ca               3184 fs/nfsd/nfs4state.c 	ca->maxreq_sz = min_t(u32, ca->maxreq_sz, maxrpc);
ca               3185 fs/nfsd/nfs4state.c 	ca->maxresp_sz = min_t(u32, ca->maxresp_sz, maxrpc);
ca               3186 fs/nfsd/nfs4state.c 	ca->maxops = min_t(u32, ca->maxops, NFSD_MAX_OPS_PER_COMPOUND);
ca               3187 fs/nfsd/nfs4state.c 	ca->maxresp_cached = min_t(u32, ca->maxresp_cached,
ca               3189 fs/nfsd/nfs4state.c 	ca->maxreqs = min_t(u32, ca->maxreqs, NFSD_MAX_SLOTS_PER_SESSION);
ca               3200 fs/nfsd/nfs4state.c 	ca->maxreqs = nfsd4_get_drc_mem(ca, nn);
ca               3221 fs/nfsd/nfs4state.c static __be32 check_backchannel_attrs(struct nfsd4_channel_attrs *ca)
ca               3223 fs/nfsd/nfs4state.c 	ca->headerpadsz = 0;
ca               3225 fs/nfsd/nfs4state.c 	if (ca->maxreq_sz < NFSD_CB_MAX_REQ_SZ)
ca               3227 fs/nfsd/nfs4state.c 	if (ca->maxresp_sz < NFSD_CB_MAX_RESP_SZ)
ca               3229 fs/nfsd/nfs4state.c 	ca->maxresp_cached = 0;
ca               3230 fs/nfsd/nfs4state.c 	if (ca->maxops < 2)
ca                263 fs/ntfs/logfile.c 	LOG_CLIENT_RECORD *ca, *cr;
ca                269 fs/ntfs/logfile.c 	ca = (LOG_CLIENT_RECORD*)((u8*)ra +
ca                288 fs/ntfs/logfile.c 		cr = ca + idx;
ca                159 include/drm/bridge/dw_hdmi.h void dw_hdmi_set_channel_allocation(struct dw_hdmi *hdmi, unsigned int ca);
ca                 61 include/media/dvb_ca_en50221.h 	int (*read_attribute_mem)(struct dvb_ca_en50221 *ca,
ca                 63 include/media/dvb_ca_en50221.h 	int (*write_attribute_mem)(struct dvb_ca_en50221 *ca,
ca                 66 include/media/dvb_ca_en50221.h 	int (*read_cam_control)(struct dvb_ca_en50221 *ca,
ca                 68 include/media/dvb_ca_en50221.h 	int (*write_cam_control)(struct dvb_ca_en50221 *ca,
ca                 71 include/media/dvb_ca_en50221.h 	int (*read_data)(struct dvb_ca_en50221 *ca,
ca                 73 include/media/dvb_ca_en50221.h 	int (*write_data)(struct dvb_ca_en50221 *ca,
ca                 76 include/media/dvb_ca_en50221.h 	int (*slot_reset)(struct dvb_ca_en50221 *ca, int slot);
ca                 77 include/media/dvb_ca_en50221.h 	int (*slot_shutdown)(struct dvb_ca_en50221 *ca, int slot);
ca                 78 include/media/dvb_ca_en50221.h 	int (*slot_ts_enable)(struct dvb_ca_en50221 *ca, int slot);
ca                 80 include/media/dvb_ca_en50221.h 	int (*poll_slot_status)(struct dvb_ca_en50221 *ca, int slot, int open);
ca                115 include/media/dvb_ca_en50221.h void dvb_ca_en50221_frda_irq(struct dvb_ca_en50221 *ca, int slot);
ca                132 include/media/dvb_ca_en50221.h 			struct dvb_ca_en50221 *ca, int flags,
ca                140 include/media/dvb_ca_en50221.h void dvb_ca_en50221_release(struct dvb_ca_en50221 *ca);
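
Every CI driver in this listing (mantis, netup_unidvb, budget-av, budget-ci, az6007, az6027, pctv452e, ttusb2) fills this callback table the same way: zero the struct, set owner and the slot callbacks, point ca.data at its private state, and hand the struct to dvb_ca_en50221_init() with a flags value and slot count (0 and 1 in most of the USB drivers above). A compact userspace mock of that registration pattern, with the table reduced to two callbacks for brevity:

    #include <stdio.h>
    #include <string.h>

    /* Cut-down mock of struct dvb_ca_en50221; the real table also has
     * write callbacks, reset/shutdown/ts_enable and optional read_data/
     * write_data hooks, per the header entries above. */
    struct en50221_mock {
        int (*read_attribute_mem)(struct en50221_mock *ca, int slot, int address);
        int (*poll_slot_status)(struct en50221_mock *ca, int slot, int open);
        void *data;                                /* driver private pointer */
    };

    static int my_read_attr(struct en50221_mock *ca, int slot, int address)
    {
        (void)ca; (void)slot; (void)address;
        /* budget-ci above treats attribute byte 0 == 0x1d as "CAM present". */
        return 0x1d;
    }

    static int my_poll(struct en50221_mock *ca, int slot, int open)
    {
        /* Drivers recover their state via ca->data, as in
         * "struct budget_ci *budget_ci = (struct budget_ci *) ca->data;". */
        (void)ca; (void)slot; (void)open;
        return 0;
    }

    int main(void)
    {
        struct en50221_mock ca;
        memset(&ca, 0, sizeof(ca));                /* drivers memset first */
        ca.read_attribute_mem = my_read_attr;
        ca.poll_slot_status   = my_poll;
        ca.data = &ca;                             /* stand-in private data */
        printf("attr[0] = 0x%x\n", ca.read_attribute_mem(&ca, 0, 0));
        return 0;
    }
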
ca                 37 include/sound/hda_chmap.h 	int (*chmap_validate)(struct hdac_chmap *hchmap, int ca,
ca                 68 include/sound/hda_chmap.h int snd_hdac_get_active_channels(int ca);
ca                 70 include/sound/hda_chmap.h 		       hda_nid_t pin_nid, bool non_pcm, int ca,
ca                 74 include/sound/hda_chmap.h struct hdac_cea_channel_speaker_allocation *snd_hdac_get_ch_alloc_from_ca(int ca);
ca                428 include/trace/events/bcache.h 	TP_PROTO(struct cache *ca, size_t bucket),
ca                429 include/trace/events/bcache.h 	TP_ARGS(ca, bucket),
ca                438 include/trace/events/bcache.h 		__entry->dev		= ca->bdev->bd_dev;
ca                439 include/trace/events/bcache.h 		__entry->offset		= bucket << ca->set->bucket_bits;
ca                440 include/trace/events/bcache.h 		__entry->sectors	= GC_SECTORS_USED(&ca->buckets[bucket]);
ca                449 include/trace/events/bcache.h 	TP_PROTO(struct cache *ca, size_t bucket),
ca                450 include/trace/events/bcache.h 	TP_ARGS(ca, bucket),
ca                458 include/trace/events/bcache.h 		__entry->dev		= ca->bdev->bd_dev;
ca                459 include/trace/events/bcache.h 		__entry->offset		= bucket << ca->set->bucket_bits;
ca                467 include/trace/events/bcache.h 	TP_PROTO(struct cache *ca, unsigned reserve),
ca                468 include/trace/events/bcache.h 	TP_ARGS(ca, reserve),
ca                478 include/trace/events/bcache.h 		__entry->dev		= ca->bdev->bd_dev;
ca                479 include/trace/events/bcache.h 		__entry->free		= fifo_used(&ca->free[reserve]);
ca                480 include/trace/events/bcache.h 		__entry->free_inc	= fifo_used(&ca->free_inc);
ca                481 include/trace/events/bcache.h 		__entry->blocked	= atomic_read(&ca->set->prio_blocked);
ca                266 kernel/power/snapshot.c static void chain_init(struct chain_allocator *ca, gfp_t gfp_mask,
ca                269 kernel/power/snapshot.c 	ca->chain = NULL;
ca                270 kernel/power/snapshot.c 	ca->used_space = LINKED_PAGE_DATA_SIZE;
ca                271 kernel/power/snapshot.c 	ca->gfp_mask = gfp_mask;
ca                272 kernel/power/snapshot.c 	ca->safe_needed = safe_needed;
ca                275 kernel/power/snapshot.c static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
ca                279 kernel/power/snapshot.c 	if (LINKED_PAGE_DATA_SIZE - ca->used_space < size) {
ca                282 kernel/power/snapshot.c 		lp = ca->safe_needed ? __get_safe_page(ca->gfp_mask) :
ca                283 kernel/power/snapshot.c 					get_image_page(ca->gfp_mask, PG_ANY);
ca                287 kernel/power/snapshot.c 		lp->next = ca->chain;
ca                288 kernel/power/snapshot.c 		ca->chain = lp;
ca                289 kernel/power/snapshot.c 		ca->used_space = 0;
ca                291 kernel/power/snapshot.c 	ret = ca->chain->data + ca->used_space;
ca                292 kernel/power/snapshot.c 	ca->used_space += size;
ca                403 kernel/power/snapshot.c 					   struct chain_allocator *ca,
ca                408 kernel/power/snapshot.c 	node = chain_alloc(ca, sizeof(struct rtree_node));
ca                429 kernel/power/snapshot.c 			   int safe_needed, struct chain_allocator *ca)
ca                446 kernel/power/snapshot.c 		node = alloc_rtree_node(gfp_mask, safe_needed, ca,
ca                457 kernel/power/snapshot.c 	block = alloc_rtree_node(gfp_mask, safe_needed, ca, &zone->leaves);
ca                469 kernel/power/snapshot.c 			node = alloc_rtree_node(gfp_mask, safe_needed, ca,
ca                500 kernel/power/snapshot.c 						      struct chain_allocator *ca,
ca                509 kernel/power/snapshot.c 	zone  = chain_alloc(ca, sizeof(struct mem_zone_bm_rtree));
ca                520 kernel/power/snapshot.c 		if (add_rtree_block(zone, gfp_mask, safe_needed, ca)) {
ca                646 kernel/power/snapshot.c 	struct chain_allocator ca;
ca                651 kernel/power/snapshot.c 	chain_init(&ca, gfp_mask, safe_needed);
ca                661 kernel/power/snapshot.c 		zone = create_zone_bm_rtree(gfp_mask, safe_needed, &ca,
ca                670 kernel/power/snapshot.c 	bm->p_list = ca.chain;
ca                677 kernel/power/snapshot.c 	bm->p_list = ca.chain;
ca               2353 kernel/power/snapshot.c 				     struct chain_allocator *ca)
ca               2370 kernel/power/snapshot.c 	pbe = chain_alloc(ca, sizeof(struct highmem_pbe));
ca               2435 kernel/power/snapshot.c 					    struct chain_allocator *ca)
ca               2538 kernel/power/snapshot.c static void *get_buffer(struct memory_bitmap *bm, struct chain_allocator *ca)
ca               2549 kernel/power/snapshot.c 		return get_highmem_page_buffer(page, ca);
ca               2562 kernel/power/snapshot.c 	pbe = chain_alloc(ca, sizeof(struct pbe));
ca               2593 kernel/power/snapshot.c 	static struct chain_allocator ca;
ca               2638 kernel/power/snapshot.c 			chain_init(&ca, GFP_ATOMIC, PG_SAFE);
ca               2641 kernel/power/snapshot.c 			handle->buffer = get_buffer(&orig_bm, &ca);
ca               2651 kernel/power/snapshot.c 		handle->buffer = get_buffer(&orig_bm, &ca);
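
chain_alloc() in the kernel/power/snapshot.c entries above is a bump allocator over a chain of linked pages: when the current page cannot fit the request, a fresh page is pushed onto the head of the chain and used_space resets to zero; objects are never freed individually, the whole chain is released at once via bm->p_list. A self-contained userspace sketch of that allocator (malloc stands in for get_image_page()/__get_safe_page(), and the gfp_mask/safe_needed plumbing is dropped):

    #include <stdlib.h>
    #include <stdio.h>

    #define PAGE_DATA 4096                         /* LINKED_PAGE_DATA_SIZE stand-in */

    struct linked_page {
        struct linked_page *next;
        char data[PAGE_DATA];
    };

    struct chain_allocator {
        struct linked_page *chain;                 /* head of the page chain */
        unsigned int used_space;                   /* bytes used in head page */
    };

    static void chain_init(struct chain_allocator *ca)
    {
        ca->chain = NULL;
        ca->used_space = PAGE_DATA;                /* force a page on first alloc */
    }

    static void *chain_alloc(struct chain_allocator *ca, unsigned int size)
    {
        if (PAGE_DATA - ca->used_space < size) {   /* head page too full */
            struct linked_page *lp = malloc(sizeof(*lp));
            if (!lp)
                return NULL;
            lp->next = ca->chain;                  /* push new page to the head */
            ca->chain = lp;
            ca->used_space = 0;
        }
        void *ret = ca->chain->data + ca->used_space;
        ca->used_space += size;                    /* bump; no per-object free */
        return ret;
    }

    int main(void)
    {
        struct chain_allocator ca;
        chain_init(&ca);
        printf("%p %p\n", chain_alloc(&ca, 64), chain_alloc(&ca, 64));
        return 0;
    }
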
ca                 46 kernel/sched/cpuacct.c static inline struct cpuacct *parent_ca(struct cpuacct *ca)
ca                 48 kernel/sched/cpuacct.c 	return css_ca(ca->css.parent);
ca                 61 kernel/sched/cpuacct.c 	struct cpuacct *ca;
ca                 66 kernel/sched/cpuacct.c 	ca = kzalloc(sizeof(*ca), GFP_KERNEL);
ca                 67 kernel/sched/cpuacct.c 	if (!ca)
ca                 70 kernel/sched/cpuacct.c 	ca->cpuusage = alloc_percpu(struct cpuacct_usage);
ca                 71 kernel/sched/cpuacct.c 	if (!ca->cpuusage)
ca                 74 kernel/sched/cpuacct.c 	ca->cpustat = alloc_percpu(struct kernel_cpustat);
ca                 75 kernel/sched/cpuacct.c 	if (!ca->cpustat)
ca                 78 kernel/sched/cpuacct.c 	return &ca->css;
ca                 81 kernel/sched/cpuacct.c 	free_percpu(ca->cpuusage);
ca                 83 kernel/sched/cpuacct.c 	kfree(ca);
ca                 91 kernel/sched/cpuacct.c 	struct cpuacct *ca = css_ca(css);
ca                 93 kernel/sched/cpuacct.c 	free_percpu(ca->cpustat);
ca                 94 kernel/sched/cpuacct.c 	free_percpu(ca->cpuusage);
ca                 95 kernel/sched/cpuacct.c 	kfree(ca);
ca                 98 kernel/sched/cpuacct.c static u64 cpuacct_cpuusage_read(struct cpuacct *ca, int cpu,
ca                101 kernel/sched/cpuacct.c 	struct cpuacct_usage *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
ca                134 kernel/sched/cpuacct.c static void cpuacct_cpuusage_write(struct cpuacct *ca, int cpu, u64 val)
ca                136 kernel/sched/cpuacct.c 	struct cpuacct_usage *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
ca                158 kernel/sched/cpuacct.c 	struct cpuacct *ca = css_ca(css);
ca                163 kernel/sched/cpuacct.c 		totalcpuusage += cpuacct_cpuusage_read(ca, i, index);
ca                188 kernel/sched/cpuacct.c 	struct cpuacct *ca = css_ca(css);
ca                198 kernel/sched/cpuacct.c 		cpuacct_cpuusage_write(ca, cpu, 0);
ca                206 kernel/sched/cpuacct.c 	struct cpuacct *ca = css_ca(seq_css(m));
ca                211 kernel/sched/cpuacct.c 		percpu = cpuacct_cpuusage_read(ca, i, index);
ca                235 kernel/sched/cpuacct.c 	struct cpuacct *ca = css_ca(seq_css(m));
ca                245 kernel/sched/cpuacct.c 		struct cpuacct_usage *cpuusage = per_cpu_ptr(ca->cpuusage, cpu);
ca                271 kernel/sched/cpuacct.c 	struct cpuacct *ca = css_ca(seq_css(sf));
ca                278 kernel/sched/cpuacct.c 		u64 *cpustat = per_cpu_ptr(ca->cpustat, cpu)->cpustat;
ca                340 kernel/sched/cpuacct.c 	struct cpuacct *ca;
ca                349 kernel/sched/cpuacct.c 	for (ca = task_ca(tsk); ca; ca = parent_ca(ca))
ca                350 kernel/sched/cpuacct.c 		this_cpu_ptr(ca->cpuusage)->usages[index] += cputime;
ca                362 kernel/sched/cpuacct.c 	struct cpuacct *ca;
ca                365 kernel/sched/cpuacct.c 	for (ca = task_ca(tsk); ca != &root_cpuacct; ca = parent_ca(ca))
ca                366 kernel/sched/cpuacct.c 		this_cpu_ptr(ca->cpustat)->cpustat[index] += val;
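
The two loops at the end of the cpuacct.c entries charge a delta to every level of the hierarchy, walking from the task's group up through parent_ca(), so each ancestor's total always covers its children. A minimal model of that upward charge walk (the group layout here is hypothetical, and the kernel bumps per-CPU counters rather than a plain field):

    #include <stdio.h>

    struct group {
        struct group *parent;
        unsigned long long usage;
    };

    /* Mirrors: for (ca = task_ca(tsk); ca; ca = parent_ca(ca))
     *              this_cpu_ptr(ca->cpuusage)->usages[index] += cputime; */
    static void charge(struct group *g, unsigned long long cputime)
    {
        for (; g; g = g->parent)
            g->usage += cputime;
    }

    int main(void)
    {
        struct group root  = { NULL,  0 };
        struct group child = { &root, 0 };
        charge(&child, 1000);
        printf("child=%llu root=%llu\n", child.usage, root.usage);
        return 0;
    }
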
ca                 55 net/bluetooth/bnep/sock.c 	struct bnep_connadd_req  ca;
ca                 69 net/bluetooth/bnep/sock.c 		if (copy_from_user(&ca, argp, sizeof(ca)))
ca                 72 net/bluetooth/bnep/sock.c 		nsock = sockfd_lookup(ca.sock, &err);
ca                 80 net/bluetooth/bnep/sock.c 		ca.device[sizeof(ca.device)-1] = 0;
ca                 82 net/bluetooth/bnep/sock.c 		err = bnep_add_connection(&ca, nsock);
ca                 84 net/bluetooth/bnep/sock.c 			if (copy_to_user(argp, &ca, sizeof(ca)))
ca                 68 net/bluetooth/cmtp/sock.c 	struct cmtp_connadd_req ca;
ca                 82 net/bluetooth/cmtp/sock.c 		if (copy_from_user(&ca, argp, sizeof(ca)))
ca                 85 net/bluetooth/cmtp/sock.c 		nsock = sockfd_lookup(ca.sock, &err);
ca                 94 net/bluetooth/cmtp/sock.c 		err = cmtp_add_connection(&ca, nsock);
ca                 96 net/bluetooth/cmtp/sock.c 			if (copy_to_user(argp, &ca, sizeof(ca)))
ca                 51 net/bluetooth/hidp/sock.c 	struct hidp_connadd_req ca;
ca                 66 net/bluetooth/hidp/sock.c 		if (copy_from_user(&ca, argp, sizeof(ca)))
ca                 69 net/bluetooth/hidp/sock.c 		csock = sockfd_lookup(ca.ctrl_sock, &err);
ca                 73 net/bluetooth/hidp/sock.c 		isock = sockfd_lookup(ca.intr_sock, &err);
ca                 78 net/bluetooth/hidp/sock.c 		ca.name[sizeof(ca.name)-1] = 0;
ca                 80 net/bluetooth/hidp/sock.c 		err = hidp_connection_add(&ca, csock, isock);
ca                 81 net/bluetooth/hidp/sock.c 		if (!err && copy_to_user(argp, &ca, sizeof(ca)))
ca                173 net/bluetooth/hidp/sock.c 		struct hidp_connadd_req ca;
ca                183 net/bluetooth/hidp/sock.c 		ca.ctrl_sock = ca32.ctrl_sock;
ca                184 net/bluetooth/hidp/sock.c 		ca.intr_sock = ca32.intr_sock;
ca                185 net/bluetooth/hidp/sock.c 		ca.parser = ca32.parser;
ca                186 net/bluetooth/hidp/sock.c 		ca.rd_size = ca32.rd_size;
ca                187 net/bluetooth/hidp/sock.c 		ca.rd_data = compat_ptr(ca32.rd_data);
ca                188 net/bluetooth/hidp/sock.c 		ca.country = ca32.country;
ca                189 net/bluetooth/hidp/sock.c 		ca.subclass = ca32.subclass;
ca                190 net/bluetooth/hidp/sock.c 		ca.vendor = ca32.vendor;
ca                191 net/bluetooth/hidp/sock.c 		ca.product = ca32.product;
ca                192 net/bluetooth/hidp/sock.c 		ca.version = ca32.version;
ca                193 net/bluetooth/hidp/sock.c 		ca.flags = ca32.flags;
ca                194 net/bluetooth/hidp/sock.c 		ca.idle_to = ca32.idle_to;
ca                196 net/bluetooth/hidp/sock.c 		memcpy(ca.name, ca32.name, 128);
ca                198 net/bluetooth/hidp/sock.c 		csock = sockfd_lookup(ca.ctrl_sock, &err);
ca                202 net/bluetooth/hidp/sock.c 		isock = sockfd_lookup(ca.intr_sock, &err);
ca                208 net/bluetooth/hidp/sock.c 		err = hidp_connection_add(&ca, csock, isock);
ca                 60 net/ipv4/tcp_bic.c static inline void bictcp_reset(struct bictcp *ca)
ca                 62 net/ipv4/tcp_bic.c 	ca->cnt = 0;
ca                 63 net/ipv4/tcp_bic.c 	ca->last_max_cwnd = 0;
ca                 64 net/ipv4/tcp_bic.c 	ca->last_cwnd = 0;
ca                 65 net/ipv4/tcp_bic.c 	ca->last_time = 0;
ca                 66 net/ipv4/tcp_bic.c 	ca->epoch_start = 0;
ca                 67 net/ipv4/tcp_bic.c 	ca->delayed_ack = 2 << ACK_RATIO_SHIFT;
ca                 72 net/ipv4/tcp_bic.c 	struct bictcp *ca = inet_csk_ca(sk);
ca                 74 net/ipv4/tcp_bic.c 	bictcp_reset(ca);
ca                 83 net/ipv4/tcp_bic.c static inline void bictcp_update(struct bictcp *ca, u32 cwnd)
ca                 85 net/ipv4/tcp_bic.c 	if (ca->last_cwnd == cwnd &&
ca                 86 net/ipv4/tcp_bic.c 	    (s32)(tcp_jiffies32 - ca->last_time) <= HZ / 32)
ca                 89 net/ipv4/tcp_bic.c 	ca->last_cwnd = cwnd;
ca                 90 net/ipv4/tcp_bic.c 	ca->last_time = tcp_jiffies32;
ca                 92 net/ipv4/tcp_bic.c 	if (ca->epoch_start == 0) /* record the beginning of an epoch */
ca                 93 net/ipv4/tcp_bic.c 		ca->epoch_start = tcp_jiffies32;
ca                 97 net/ipv4/tcp_bic.c 		ca->cnt = cwnd;
ca                102 net/ipv4/tcp_bic.c 	if (cwnd < ca->last_max_cwnd) {
ca                103 net/ipv4/tcp_bic.c 		__u32	dist = (ca->last_max_cwnd - cwnd)
ca                108 net/ipv4/tcp_bic.c 			ca->cnt = cwnd / max_increment;
ca                111 net/ipv4/tcp_bic.c 			ca->cnt = (cwnd * smooth_part) / BICTCP_B;
ca                114 net/ipv4/tcp_bic.c 			ca->cnt = cwnd / dist;
ca                117 net/ipv4/tcp_bic.c 		if (cwnd < ca->last_max_cwnd + BICTCP_B)
ca                119 net/ipv4/tcp_bic.c 			ca->cnt = (cwnd * smooth_part) / BICTCP_B;
ca                120 net/ipv4/tcp_bic.c 		else if (cwnd < ca->last_max_cwnd + max_increment*(BICTCP_B-1))
ca                122 net/ipv4/tcp_bic.c 			ca->cnt = (cwnd * (BICTCP_B-1))
ca                123 net/ipv4/tcp_bic.c 				/ (cwnd - ca->last_max_cwnd);
ca                126 net/ipv4/tcp_bic.c 			ca->cnt = cwnd / max_increment;
ca                130 net/ipv4/tcp_bic.c 	if (ca->last_max_cwnd == 0) {
ca                131 net/ipv4/tcp_bic.c 		if (ca->cnt > 20) /* increase cwnd 5% per RTT */
ca                132 net/ipv4/tcp_bic.c 			ca->cnt = 20;
ca                135 net/ipv4/tcp_bic.c 	ca->cnt = (ca->cnt << ACK_RATIO_SHIFT) / ca->delayed_ack;
ca                136 net/ipv4/tcp_bic.c 	if (ca->cnt == 0)			/* cannot be zero */
ca                137 net/ipv4/tcp_bic.c 		ca->cnt = 1;
ca                143 net/ipv4/tcp_bic.c 	struct bictcp *ca = inet_csk_ca(sk);
ca                151 net/ipv4/tcp_bic.c 		bictcp_update(ca, tp->snd_cwnd);
ca                152 net/ipv4/tcp_bic.c 		tcp_cong_avoid_ai(tp, ca->cnt, 1);
ca                163 net/ipv4/tcp_bic.c 	struct bictcp *ca = inet_csk_ca(sk);
ca                165 net/ipv4/tcp_bic.c 	ca->epoch_start = 0;	/* end of epoch */
ca                168 net/ipv4/tcp_bic.c 	if (tp->snd_cwnd < ca->last_max_cwnd && fast_convergence)
ca                169 net/ipv4/tcp_bic.c 		ca->last_max_cwnd = (tp->snd_cwnd * (BICTCP_BETA_SCALE + beta))
ca                172 net/ipv4/tcp_bic.c 		ca->last_max_cwnd = tp->snd_cwnd;
ca                194 net/ipv4/tcp_bic.c 		struct bictcp *ca = inet_csk_ca(sk);
ca                196 net/ipv4/tcp_bic.c 		ca->delayed_ack += sample->pkts_acked -
ca                197 net/ipv4/tcp_bic.c 			(ca->delayed_ack >> ACK_RATIO_SHIFT);
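
In the tcp_bic.c hits above, `bictcp_update()` only computes `ca->cnt`; the window itself grows via `tcp_cong_avoid_ai()`, which adds one segment per `cnt` ACKed segments. A self-contained userspace sketch of that accounting (struct and function names invented), compilable with any C99 compiler:

#include <stdio.h>

/* Sketch of "grow cwnd by 1 per cnt ACKs", the contract between
 * bictcp_update() and tcp_cong_avoid_ai().
 */
struct cc {
	unsigned int cwnd;	/* congestion window, in segments */
	unsigned int cwnd_cnt;	/* ACKs accumulated toward +1 */
};

static void cong_avoid_ai(struct cc *c, unsigned int cnt, unsigned int acked)
{
	c->cwnd_cnt += acked;
	if (c->cwnd_cnt >= cnt) {
		c->cwnd_cnt -= cnt;
		c->cwnd++;	/* one extra segment per cnt ACKs */
	}
}

int main(void)
{
	struct cc c = { .cwnd = 10, .cwnd_cnt = 0 };

	/* cnt == cwnd gives Reno-like +1 per RTT; smaller cnt grows faster */
	for (int i = 0; i < 30; i++)
		cong_avoid_ai(&c, c.cwnd, 1);
	printf("cwnd after 30 ACKs: %u\n", c.cwnd);
	return 0;
}
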
ca                142 net/ipv4/tcp_cdg.c 	struct cdg *ca = inet_csk_ca(sk);
ca                145 net/ipv4/tcp_cdg.c 	ca->delay_min = min_not_zero(ca->delay_min, ca->rtt.min);
ca                146 net/ipv4/tcp_cdg.c 	if (ca->delay_min == 0)
ca                152 net/ipv4/tcp_cdg.c 		if (ca->last_ack == 0 || !tcp_is_cwnd_limited(sk)) {
ca                153 net/ipv4/tcp_cdg.c 			ca->last_ack = now_us;
ca                154 net/ipv4/tcp_cdg.c 			ca->round_start = now_us;
ca                155 net/ipv4/tcp_cdg.c 		} else if (before(now_us, ca->last_ack + 3000)) {
ca                156 net/ipv4/tcp_cdg.c 			u32 base_owd = max(ca->delay_min / 2U, 125U);
ca                158 net/ipv4/tcp_cdg.c 			ca->last_ack = now_us;
ca                159 net/ipv4/tcp_cdg.c 			if (after(now_us, ca->round_start + base_owd)) {
ca                172 net/ipv4/tcp_cdg.c 		if (ca->sample_cnt < 8) {
ca                173 net/ipv4/tcp_cdg.c 			ca->sample_cnt++;
ca                175 net/ipv4/tcp_cdg.c 			s32 thresh = max(ca->delay_min + ca->delay_min / 8U,
ca                178 net/ipv4/tcp_cdg.c 			if (ca->rtt.min > thresh) {
ca                190 net/ipv4/tcp_cdg.c static s32 tcp_cdg_grad(struct cdg *ca)
ca                192 net/ipv4/tcp_cdg.c 	s32 gmin = ca->rtt.min - ca->rtt_prev.min;
ca                193 net/ipv4/tcp_cdg.c 	s32 gmax = ca->rtt.max - ca->rtt_prev.max;
ca                196 net/ipv4/tcp_cdg.c 	if (ca->gradients) {
ca                197 net/ipv4/tcp_cdg.c 		ca->gsum.min += gmin - ca->gradients[ca->tail].min;
ca                198 net/ipv4/tcp_cdg.c 		ca->gsum.max += gmax - ca->gradients[ca->tail].max;
ca                199 net/ipv4/tcp_cdg.c 		ca->gradients[ca->tail].min = gmin;
ca                200 net/ipv4/tcp_cdg.c 		ca->gradients[ca->tail].max = gmax;
ca                201 net/ipv4/tcp_cdg.c 		ca->tail = (ca->tail + 1) & (window - 1);
ca                202 net/ipv4/tcp_cdg.c 		gmin = ca->gsum.min;
ca                203 net/ipv4/tcp_cdg.c 		gmax = ca->gsum.max;
ca                215 net/ipv4/tcp_cdg.c 	if (!ca->gfilled) {
ca                216 net/ipv4/tcp_cdg.c 		if (!ca->gradients && window > 1)
ca                218 net/ipv4/tcp_cdg.c 		else if (ca->tail == 0)
ca                219 net/ipv4/tcp_cdg.c 			ca->gfilled = true;
ca                221 net/ipv4/tcp_cdg.c 			grad = (grad * window) / (int)ca->tail;
ca                226 net/ipv4/tcp_cdg.c 		ca->backoff_cnt = 0;
ca                234 net/ipv4/tcp_cdg.c 			ca->state = CDG_FULL;
ca                236 net/ipv4/tcp_cdg.c 			ca->state = CDG_NONFULL;
ca                243 net/ipv4/tcp_cdg.c 	struct cdg *ca = inet_csk_ca(sk);
ca                250 net/ipv4/tcp_cdg.c 		ca->backoff_cnt++;
ca                251 net/ipv4/tcp_cdg.c 		if (ca->backoff_cnt > use_ineff)
ca                255 net/ipv4/tcp_cdg.c 	ca->shadow_wnd = max(ca->shadow_wnd, tp->snd_cwnd);
ca                256 net/ipv4/tcp_cdg.c 	ca->state = CDG_BACKOFF;
ca                264 net/ipv4/tcp_cdg.c 	struct cdg *ca = inet_csk_ca(sk);
ca                272 net/ipv4/tcp_cdg.c 	if (after(ack, ca->rtt_seq) && ca->rtt.v64) {
ca                275 net/ipv4/tcp_cdg.c 		if (ca->rtt_prev.v64)
ca                276 net/ipv4/tcp_cdg.c 			grad = tcp_cdg_grad(ca);
ca                277 net/ipv4/tcp_cdg.c 		ca->rtt_seq = tp->snd_nxt;
ca                278 net/ipv4/tcp_cdg.c 		ca->rtt_prev = ca->rtt;
ca                279 net/ipv4/tcp_cdg.c 		ca->rtt.v64 = 0;
ca                280 net/ipv4/tcp_cdg.c 		ca->last_ack = 0;
ca                281 net/ipv4/tcp_cdg.c 		ca->sample_cnt = 0;
ca                288 net/ipv4/tcp_cdg.c 		ca->shadow_wnd = min(ca->shadow_wnd, tp->snd_cwnd);
ca                296 net/ipv4/tcp_cdg.c 	ca->shadow_wnd = max(ca->shadow_wnd, ca->shadow_wnd + incr);
ca                301 net/ipv4/tcp_cdg.c 	struct cdg *ca = inet_csk_ca(sk);
ca                312 net/ipv4/tcp_cdg.c 		if (sample->pkts_acked == 1 && ca->delack) {
ca                316 net/ipv4/tcp_cdg.c 			ca->rtt.min = min(ca->rtt.min, sample->rtt_us);
ca                317 net/ipv4/tcp_cdg.c 			ca->delack--;
ca                319 net/ipv4/tcp_cdg.c 		} else if (sample->pkts_acked > 1 && ca->delack < 5) {
ca                320 net/ipv4/tcp_cdg.c 			ca->delack++;
ca                324 net/ipv4/tcp_cdg.c 	ca->rtt.min = min_not_zero(ca->rtt.min, sample->rtt_us);
ca                325 net/ipv4/tcp_cdg.c 	ca->rtt.max = max(ca->rtt.max, sample->rtt_us);
ca                330 net/ipv4/tcp_cdg.c 	struct cdg *ca = inet_csk_ca(sk);
ca                333 net/ipv4/tcp_cdg.c 	if (ca->state == CDG_BACKOFF)
ca                336 net/ipv4/tcp_cdg.c 	if (ca->state == CDG_NONFULL && use_tolerance)
ca                339 net/ipv4/tcp_cdg.c 	ca->shadow_wnd = min(ca->shadow_wnd >> 1, tp->snd_cwnd);
ca                341 net/ipv4/tcp_cdg.c 		return max3(2U, ca->shadow_wnd, tp->snd_cwnd >> 1);
ca                347 net/ipv4/tcp_cdg.c 	struct cdg *ca = inet_csk_ca(sk);
ca                353 net/ipv4/tcp_cdg.c 		gradients = ca->gradients;
ca                356 net/ipv4/tcp_cdg.c 		memset(ca, 0, sizeof(*ca));
ca                358 net/ipv4/tcp_cdg.c 		ca->gradients = gradients;
ca                359 net/ipv4/tcp_cdg.c 		ca->rtt_seq = tp->snd_nxt;
ca                360 net/ipv4/tcp_cdg.c 		ca->shadow_wnd = tp->snd_cwnd;
ca                363 net/ipv4/tcp_cdg.c 		ca->state = CDG_UNKNOWN;
ca                364 net/ipv4/tcp_cdg.c 		ca->rtt_seq = tp->snd_nxt;
ca                365 net/ipv4/tcp_cdg.c 		ca->rtt_prev = ca->rtt;
ca                366 net/ipv4/tcp_cdg.c 		ca->rtt.v64 = 0;
ca                375 net/ipv4/tcp_cdg.c 	struct cdg *ca = inet_csk_ca(sk);
ca                380 net/ipv4/tcp_cdg.c 		ca->gradients = kcalloc(window, sizeof(ca->gradients[0]),
ca                382 net/ipv4/tcp_cdg.c 	ca->rtt_seq = tp->snd_nxt;
ca                383 net/ipv4/tcp_cdg.c 	ca->shadow_wnd = tp->snd_cwnd;
ca                388 net/ipv4/tcp_cdg.c 	struct cdg *ca = inet_csk_ca(sk);
ca                390 net/ipv4/tcp_cdg.c 	kfree(ca->gradients);
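
`tcp_cdg_grad()` above keeps a moving sum of per-round RTT gradients in a power-of-two ring buffer, so the backoff decision sees a smoothed trend instead of one noisy sample. A standalone sketch of that O(1) moving-sum filter, assuming a fixed window of 8 (the names here are invented):

#include <stdio.h>

#define WINDOW 8	/* power of two, like the cdg "window" parameter */

/* gsum always holds the sum of the last WINDOW samples. */
struct grad_filter {
	int samples[WINDOW];
	int gsum;
	unsigned int tail;
};

static int grad_update(struct grad_filter *f, int g)
{
	f->gsum += g - f->samples[f->tail];	/* slide the window */
	f->samples[f->tail] = g;
	f->tail = (f->tail + 1) & (WINDOW - 1);
	return f->gsum;		/* smoothed gradient, scaled by WINDOW */
}

int main(void)
{
	struct grad_filter f = { { 0 }, 0, 0 };
	int rtts[] = { 100, 102, 101, 105, 110, 118, 128, 140, 155 };

	for (unsigned int i = 1; i < sizeof(rtts) / sizeof(rtts[0]); i++)
		printf("round %u: gsum=%d\n", i,
		       grad_update(&f, rtts[i] - rtts[i - 1]));
	return 0;
}
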
ca                 40 net/ipv4/tcp_cong.c 	struct tcp_congestion_ops *ca = tcp_ca_find(name);
ca                 43 net/ipv4/tcp_cong.c 	if (!ca && capable(CAP_NET_ADMIN)) {
ca                 47 net/ipv4/tcp_cong.c 		ca = tcp_ca_find(name);
ca                 50 net/ipv4/tcp_cong.c 	return ca;
ca                 70 net/ipv4/tcp_cong.c int tcp_register_congestion_control(struct tcp_congestion_ops *ca)
ca                 75 net/ipv4/tcp_cong.c 	if (!ca->ssthresh || !ca->undo_cwnd ||
ca                 76 net/ipv4/tcp_cong.c 	    !(ca->cong_avoid || ca->cong_control)) {
ca                 77 net/ipv4/tcp_cong.c 		pr_err("%s does not implement required ops\n", ca->name);
ca                 81 net/ipv4/tcp_cong.c 	ca->key = jhash(ca->name, sizeof(ca->name), strlen(ca->name));
ca                 84 net/ipv4/tcp_cong.c 	if (ca->key == TCP_CA_UNSPEC || tcp_ca_find_key(ca->key)) {
ca                 86 net/ipv4/tcp_cong.c 			  ca->name);
ca                 89 net/ipv4/tcp_cong.c 		list_add_tail_rcu(&ca->list, &tcp_cong_list);
ca                 90 net/ipv4/tcp_cong.c 		pr_debug("%s registered\n", ca->name);
ca                104 net/ipv4/tcp_cong.c void tcp_unregister_congestion_control(struct tcp_congestion_ops *ca)
ca                107 net/ipv4/tcp_cong.c 	list_del_rcu(&ca->list);
ca                123 net/ipv4/tcp_cong.c 	const struct tcp_congestion_ops *ca;
ca                129 net/ipv4/tcp_cong.c 	ca = tcp_ca_find_autoload(net, name);
ca                130 net/ipv4/tcp_cong.c 	if (ca) {
ca                131 net/ipv4/tcp_cong.c 		key = ca->key;
ca                132 net/ipv4/tcp_cong.c 		*ecn_ca = ca->flags & TCP_CONG_NEEDS_ECN;
ca                142 net/ipv4/tcp_cong.c 	const struct tcp_congestion_ops *ca;
ca                146 net/ipv4/tcp_cong.c 	ca = tcp_ca_find_key(key);
ca                147 net/ipv4/tcp_cong.c 	if (ca)
ca                148 net/ipv4/tcp_cong.c 		ret = strncpy(buffer, ca->name,
ca                161 net/ipv4/tcp_cong.c 	const struct tcp_congestion_ops *ca;
ca                164 net/ipv4/tcp_cong.c 	ca = rcu_dereference(net->ipv4.tcp_congestion_control);
ca                165 net/ipv4/tcp_cong.c 	if (unlikely(!try_module_get(ca->owner)))
ca                166 net/ipv4/tcp_cong.c 		ca = &tcp_reno;
ca                167 net/ipv4/tcp_cong.c 	icsk->icsk_ca_ops = ca;
ca                171 net/ipv4/tcp_cong.c 	if (ca->flags & TCP_CONG_NEEDS_ECN)
ca                191 net/ipv4/tcp_cong.c 					  const struct tcp_congestion_ops *ca)
ca                196 net/ipv4/tcp_cong.c 	icsk->icsk_ca_ops = ca;
ca                217 net/ipv4/tcp_cong.c 	struct tcp_congestion_ops *ca;
ca                222 net/ipv4/tcp_cong.c 	ca = tcp_ca_find_autoload(net, name);
ca                223 net/ipv4/tcp_cong.c 	if (!ca) {
ca                225 net/ipv4/tcp_cong.c 	} else if (!try_module_get(ca->owner)) {
ca                228 net/ipv4/tcp_cong.c 		prev = xchg(&net->ipv4.tcp_congestion_control, ca);
ca                232 net/ipv4/tcp_cong.c 		ca->flags |= TCP_CONG_NON_RESTRICTED;
ca                251 net/ipv4/tcp_cong.c 	struct tcp_congestion_ops *ca;
ca                255 net/ipv4/tcp_cong.c 	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
ca                258 net/ipv4/tcp_cong.c 				 offs == 0 ? "" : " ", ca->name);
ca                266 net/ipv4/tcp_cong.c 	const struct tcp_congestion_ops *ca;
ca                269 net/ipv4/tcp_cong.c 	ca = rcu_dereference(net->ipv4.tcp_congestion_control);
ca                270 net/ipv4/tcp_cong.c 	strncpy(name, ca->name, TCP_CA_NAME_MAX);
ca                277 net/ipv4/tcp_cong.c 	struct tcp_congestion_ops *ca;
ca                282 net/ipv4/tcp_cong.c 	list_for_each_entry_rcu(ca, &tcp_cong_list, list) {
ca                283 net/ipv4/tcp_cong.c 		if (!(ca->flags & TCP_CONG_NON_RESTRICTED))
ca                287 net/ipv4/tcp_cong.c 				 offs == 0 ? "" : " ", ca->name);
ca                295 net/ipv4/tcp_cong.c 	struct tcp_congestion_ops *ca;
ca                306 net/ipv4/tcp_cong.c 		ca = tcp_ca_find(name);
ca                307 net/ipv4/tcp_cong.c 		if (!ca) {
ca                314 net/ipv4/tcp_cong.c 	list_for_each_entry_rcu(ca, &tcp_cong_list, list)
ca                315 net/ipv4/tcp_cong.c 		ca->flags &= ~TCP_CONG_NON_RESTRICTED;
ca                319 net/ipv4/tcp_cong.c 		ca = tcp_ca_find(name);
ca                320 net/ipv4/tcp_cong.c 		WARN_ON(!ca);
ca                321 net/ipv4/tcp_cong.c 		if (ca)
ca                322 net/ipv4/tcp_cong.c 			ca->flags |= TCP_CONG_NON_RESTRICTED;
ca                340 net/ipv4/tcp_cong.c 	const struct tcp_congestion_ops *ca;
ca                348 net/ipv4/tcp_cong.c 		ca = tcp_ca_find(name);
ca                350 net/ipv4/tcp_cong.c 		ca = tcp_ca_find_autoload(sock_net(sk), name);
ca                353 net/ipv4/tcp_cong.c 	if (ca == icsk->icsk_ca_ops) {
ca                358 net/ipv4/tcp_cong.c 	if (!ca) {
ca                363 net/ipv4/tcp_cong.c 		if (try_module_get(ca->owner)) {
ca                365 net/ipv4/tcp_cong.c 				tcp_reinit_congestion_control(sk, ca);
ca                367 net/ipv4/tcp_cong.c 				icsk->icsk_ca_ops = ca;
ca                373 net/ipv4/tcp_cong.c 	} else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) || cap_net_admin)) {
ca                375 net/ipv4/tcp_cong.c 	} else if (!try_module_get(ca->owner)) {
ca                378 net/ipv4/tcp_cong.c 		tcp_reinit_congestion_control(sk, ca);
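
The tcp_cong.c hits above show that `tcp_register_congestion_control()` rejects any ops structure lacking `->ssthresh`, `->undo_cwnd`, and one of `->cong_avoid`/`->cong_control`. A minimal module skeleton that satisfies those checks; this is a sketch against the in-tree API with the name "toycc" invented, borrowing Reno's exported helpers for the pieces not being customized:

#include <linux/module.h>
#include <net/tcp.h>

/* Halve cwnd on loss, floor of two segments (Reno-style). */
static u32 toycc_ssthresh(struct sock *sk)
{
	return max(tcp_sk(sk)->snd_cwnd >> 1U, 2U);
}

static struct tcp_congestion_ops toycc __read_mostly = {
	.ssthresh	= toycc_ssthresh,
	.undo_cwnd	= tcp_reno_undo_cwnd,	/* reuse stock helpers */
	.cong_avoid	= tcp_reno_cong_avoid,
	.owner		= THIS_MODULE,
	.name		= "toycc",
};

static int __init toycc_init(void)
{
	/* fails with -EINVAL if any required op were missing */
	return tcp_register_congestion_control(&toycc);
}

static void __exit toycc_exit(void)
{
	tcp_unregister_congestion_control(&toycc);
}

module_init(toycc_init);
module_exit(toycc_exit);
MODULE_LICENSE("GPL");

Once loaded, the module becomes selectable per socket via setsockopt(TCP_CONGESTION, "toycc"), which is the path exercised by the tcp_set_congestion_control() hits above.
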
ca                105 net/ipv4/tcp_cubic.c static inline void bictcp_reset(struct bictcp *ca)
ca                107 net/ipv4/tcp_cubic.c 	ca->cnt = 0;
ca                108 net/ipv4/tcp_cubic.c 	ca->last_max_cwnd = 0;
ca                109 net/ipv4/tcp_cubic.c 	ca->last_cwnd = 0;
ca                110 net/ipv4/tcp_cubic.c 	ca->last_time = 0;
ca                111 net/ipv4/tcp_cubic.c 	ca->bic_origin_point = 0;
ca                112 net/ipv4/tcp_cubic.c 	ca->bic_K = 0;
ca                113 net/ipv4/tcp_cubic.c 	ca->delay_min = 0;
ca                114 net/ipv4/tcp_cubic.c 	ca->epoch_start = 0;
ca                115 net/ipv4/tcp_cubic.c 	ca->ack_cnt = 0;
ca                116 net/ipv4/tcp_cubic.c 	ca->tcp_cwnd = 0;
ca                117 net/ipv4/tcp_cubic.c 	ca->found = 0;
ca                132 net/ipv4/tcp_cubic.c 	struct bictcp *ca = inet_csk_ca(sk);
ca                134 net/ipv4/tcp_cubic.c 	ca->round_start = ca->last_ack = bictcp_clock();
ca                135 net/ipv4/tcp_cubic.c 	ca->end_seq = tp->snd_nxt;
ca                136 net/ipv4/tcp_cubic.c 	ca->curr_rtt = 0;
ca                137 net/ipv4/tcp_cubic.c 	ca->sample_cnt = 0;
ca                142 net/ipv4/tcp_cubic.c 	struct bictcp *ca = inet_csk_ca(sk);
ca                144 net/ipv4/tcp_cubic.c 	bictcp_reset(ca);
ca                156 net/ipv4/tcp_cubic.c 		struct bictcp *ca = inet_csk_ca(sk);
ca                165 net/ipv4/tcp_cubic.c 		if (ca->epoch_start && delta > 0) {
ca                166 net/ipv4/tcp_cubic.c 			ca->epoch_start += delta;
ca                167 net/ipv4/tcp_cubic.c 			if (after(ca->epoch_start, now))
ca                168 net/ipv4/tcp_cubic.c 				ca->epoch_start = now;
ca                225 net/ipv4/tcp_cubic.c static inline void bictcp_update(struct bictcp *ca, u32 cwnd, u32 acked)
ca                230 net/ipv4/tcp_cubic.c 	ca->ack_cnt += acked;	/* count the number of ACKed packets */
ca                232 net/ipv4/tcp_cubic.c 	if (ca->last_cwnd == cwnd &&
ca                233 net/ipv4/tcp_cubic.c 	    (s32)(tcp_jiffies32 - ca->last_time) <= HZ / 32)
ca                240 net/ipv4/tcp_cubic.c 	if (ca->epoch_start && tcp_jiffies32 == ca->last_time)
ca                243 net/ipv4/tcp_cubic.c 	ca->last_cwnd = cwnd;
ca                244 net/ipv4/tcp_cubic.c 	ca->last_time = tcp_jiffies32;
ca                246 net/ipv4/tcp_cubic.c 	if (ca->epoch_start == 0) {
ca                247 net/ipv4/tcp_cubic.c 		ca->epoch_start = tcp_jiffies32;	/* record beginning */
ca                248 net/ipv4/tcp_cubic.c 		ca->ack_cnt = acked;			/* start counting */
ca                249 net/ipv4/tcp_cubic.c 		ca->tcp_cwnd = cwnd;			/* syn with cubic */
ca                251 net/ipv4/tcp_cubic.c 		if (ca->last_max_cwnd <= cwnd) {
ca                252 net/ipv4/tcp_cubic.c 			ca->bic_K = 0;
ca                253 net/ipv4/tcp_cubic.c 			ca->bic_origin_point = cwnd;
ca                258 net/ipv4/tcp_cubic.c 			ca->bic_K = cubic_root(cube_factor
ca                259 net/ipv4/tcp_cubic.c 					       * (ca->last_max_cwnd - cwnd));
ca                260 net/ipv4/tcp_cubic.c 			ca->bic_origin_point = ca->last_max_cwnd;
ca                278 net/ipv4/tcp_cubic.c 	t = (s32)(tcp_jiffies32 - ca->epoch_start);
ca                279 net/ipv4/tcp_cubic.c 	t += msecs_to_jiffies(ca->delay_min >> 3);
ca                284 net/ipv4/tcp_cubic.c 	if (t < ca->bic_K)		/* t - K */
ca                285 net/ipv4/tcp_cubic.c 		offs = ca->bic_K - t;
ca                287 net/ipv4/tcp_cubic.c 		offs = t - ca->bic_K;
ca                291 net/ipv4/tcp_cubic.c 	if (t < ca->bic_K)                            /* below origin*/
ca                292 net/ipv4/tcp_cubic.c 		bic_target = ca->bic_origin_point - delta;
ca                294 net/ipv4/tcp_cubic.c 		bic_target = ca->bic_origin_point + delta;
ca                298 net/ipv4/tcp_cubic.c 		ca->cnt = cwnd / (bic_target - cwnd);
ca                300 net/ipv4/tcp_cubic.c 		ca->cnt = 100 * cwnd;              /* very small increment*/
ca                307 net/ipv4/tcp_cubic.c 	if (ca->last_max_cwnd == 0 && ca->cnt > 20)
ca                308 net/ipv4/tcp_cubic.c 		ca->cnt = 20;	/* increase cwnd 5% per RTT */
ca                316 net/ipv4/tcp_cubic.c 		while (ca->ack_cnt > delta) {		/* update tcp cwnd */
ca                317 net/ipv4/tcp_cubic.c 			ca->ack_cnt -= delta;
ca                318 net/ipv4/tcp_cubic.c 			ca->tcp_cwnd++;
ca                321 net/ipv4/tcp_cubic.c 		if (ca->tcp_cwnd > cwnd) {	/* if bic is slower than tcp */
ca                322 net/ipv4/tcp_cubic.c 			delta = ca->tcp_cwnd - cwnd;
ca                324 net/ipv4/tcp_cubic.c 			if (ca->cnt > max_cnt)
ca                325 net/ipv4/tcp_cubic.c 				ca->cnt = max_cnt;
ca                332 net/ipv4/tcp_cubic.c 	ca->cnt = max(ca->cnt, 2U);
ca                338 net/ipv4/tcp_cubic.c 	struct bictcp *ca = inet_csk_ca(sk);
ca                344 net/ipv4/tcp_cubic.c 		if (hystart && after(ack, ca->end_seq))
ca                350 net/ipv4/tcp_cubic.c 	bictcp_update(ca, tp->snd_cwnd, acked);
ca                351 net/ipv4/tcp_cubic.c 	tcp_cong_avoid_ai(tp, ca->cnt, acked);
ca                357 net/ipv4/tcp_cubic.c 	struct bictcp *ca = inet_csk_ca(sk);
ca                359 net/ipv4/tcp_cubic.c 	ca->epoch_start = 0;	/* end of epoch */
ca                362 net/ipv4/tcp_cubic.c 	if (tp->snd_cwnd < ca->last_max_cwnd && fast_convergence)
ca                363 net/ipv4/tcp_cubic.c 		ca->last_max_cwnd = (tp->snd_cwnd * (BICTCP_BETA_SCALE + beta))
ca                366 net/ipv4/tcp_cubic.c 		ca->last_max_cwnd = tp->snd_cwnd;
ca                382 net/ipv4/tcp_cubic.c 	struct bictcp *ca = inet_csk_ca(sk);
ca                384 net/ipv4/tcp_cubic.c 	if (ca->found & hystart_detect)
ca                391 net/ipv4/tcp_cubic.c 		if ((s32)(now - ca->last_ack) <= hystart_ack_delta) {
ca                392 net/ipv4/tcp_cubic.c 			ca->last_ack = now;
ca                393 net/ipv4/tcp_cubic.c 			if ((s32)(now - ca->round_start) > ca->delay_min >> 4) {
ca                394 net/ipv4/tcp_cubic.c 				ca->found |= HYSTART_ACK_TRAIN;
ca                407 net/ipv4/tcp_cubic.c 		if (ca->sample_cnt < HYSTART_MIN_SAMPLES) {
ca                408 net/ipv4/tcp_cubic.c 			if (ca->curr_rtt == 0 || ca->curr_rtt > delay)
ca                409 net/ipv4/tcp_cubic.c 				ca->curr_rtt = delay;
ca                411 net/ipv4/tcp_cubic.c 			ca->sample_cnt++;
ca                413 net/ipv4/tcp_cubic.c 			if (ca->curr_rtt > ca->delay_min +
ca                414 net/ipv4/tcp_cubic.c 			    HYSTART_DELAY_THRESH(ca->delay_min >> 3)) {
ca                415 net/ipv4/tcp_cubic.c 				ca->found |= HYSTART_DELAY;
ca                433 net/ipv4/tcp_cubic.c 	struct bictcp *ca = inet_csk_ca(sk);
ca                441 net/ipv4/tcp_cubic.c 	if (ca->epoch_start && (s32)(tcp_jiffies32 - ca->epoch_start) < HZ)
ca                449 net/ipv4/tcp_cubic.c 	if (ca->delay_min == 0 || ca->delay_min > delay)
ca                450 net/ipv4/tcp_cubic.c 		ca->delay_min = delay;
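
The tcp_cubic.c hits above evaluate CUBIC's curve W(t) = C*(t - K)^3 + W_max around the origin point, with K precomputed as `bic_K` via `cubic_root()`. A floating-point sketch of the same curve using the published defaults C = 0.4 and beta = 0.7 (the kernel does all of this in fixed point); compile with `cc cubic.c -lm`:

#include <math.h>
#include <stdio.h>

/* K is the time the curve needs to climb back to w_max after a loss:
 * K = cbrt(w_max * (1 - beta) / C).
 */
static double cubic_target(double t, double w_max)
{
	const double C = 0.4, beta = 0.7;
	double K = cbrt(w_max * (1.0 - beta) / C);

	return C * pow(t - K, 3.0) + w_max;	/* concave, then convex */
}

int main(void)
{
	double w_max = 100.0;	/* cwnd at the last loss event */

	/* at t=0 this prints beta*w_max = 70, and it crosses w_max at t=K */
	for (double t = 0.0; t <= 8.0; t += 1.0)
		printf("t=%.0fs target=%.1f\n", t, cubic_target(t, w_max));
	return 0;
}
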
ca                 67 net/ipv4/tcp_dctcp.c static void dctcp_reset(const struct tcp_sock *tp, struct dctcp *ca)
ca                 69 net/ipv4/tcp_dctcp.c 	ca->next_seq = tp->snd_nxt;
ca                 71 net/ipv4/tcp_dctcp.c 	ca->old_delivered = tp->delivered;
ca                 72 net/ipv4/tcp_dctcp.c 	ca->old_delivered_ce = tp->delivered_ce;
ca                 82 net/ipv4/tcp_dctcp.c 		struct dctcp *ca = inet_csk_ca(sk);
ca                 84 net/ipv4/tcp_dctcp.c 		ca->prior_rcv_nxt = tp->rcv_nxt;
ca                 86 net/ipv4/tcp_dctcp.c 		ca->dctcp_alpha = min(dctcp_alpha_on_init, DCTCP_MAX_ALPHA);
ca                 88 net/ipv4/tcp_dctcp.c 		ca->loss_cwnd = 0;
ca                 89 net/ipv4/tcp_dctcp.c 		ca->ce_state = 0;
ca                 91 net/ipv4/tcp_dctcp.c 		dctcp_reset(tp, ca);
ca                104 net/ipv4/tcp_dctcp.c 	struct dctcp *ca = inet_csk_ca(sk);
ca                107 net/ipv4/tcp_dctcp.c 	ca->loss_cwnd = tp->snd_cwnd;
ca                108 net/ipv4/tcp_dctcp.c 	return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->dctcp_alpha) >> 11U), 2U);
ca                114 net/ipv4/tcp_dctcp.c 	struct dctcp *ca = inet_csk_ca(sk);
ca                117 net/ipv4/tcp_dctcp.c 	if (!before(tp->snd_una, ca->next_seq)) {
ca                118 net/ipv4/tcp_dctcp.c 		u32 delivered_ce = tp->delivered_ce - ca->old_delivered_ce;
ca                119 net/ipv4/tcp_dctcp.c 		u32 alpha = ca->dctcp_alpha;
ca                125 net/ipv4/tcp_dctcp.c 			u32 delivered = tp->delivered - ca->old_delivered;
ca                139 net/ipv4/tcp_dctcp.c 		WRITE_ONCE(ca->dctcp_alpha, alpha);
ca                140 net/ipv4/tcp_dctcp.c 		dctcp_reset(tp, ca);
ca                146 net/ipv4/tcp_dctcp.c 	struct dctcp *ca = inet_csk_ca(sk);
ca                149 net/ipv4/tcp_dctcp.c 	ca->loss_cwnd = tp->snd_cwnd;
ca                165 net/ipv4/tcp_dctcp.c 	struct dctcp *ca = inet_csk_ca(sk);
ca                170 net/ipv4/tcp_dctcp.c 		dctcp_ece_ack_update(sk, ev, &ca->prior_rcv_nxt, &ca->ce_state);
ca                184 net/ipv4/tcp_dctcp.c 	const struct dctcp *ca = inet_csk_ca(sk);
ca                195 net/ipv4/tcp_dctcp.c 			info->dctcp.dctcp_ce_state = (u16) ca->ce_state;
ca                196 net/ipv4/tcp_dctcp.c 			info->dctcp.dctcp_alpha = ca->dctcp_alpha;
ca                198 net/ipv4/tcp_dctcp.c 						   (tp->delivered_ce - ca->old_delivered_ce);
ca                200 net/ipv4/tcp_dctcp.c 						   (tp->delivered - ca->old_delivered);
ca                211 net/ipv4/tcp_dctcp.c 	const struct dctcp *ca = inet_csk_ca(sk);
ca                213 net/ipv4/tcp_dctcp.c 	return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);
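
The tcp_dctcp.c hits above update alpha once per window as alpha <- (1 - g)*alpha + g*F, where F is the fraction of CE-marked deliveries, then cut cwnd by alpha/2 (the `>> 11U` combines the 1024 alpha scale with the divide by two). An integer sketch of that EWMA under the same scaling, assuming the default gain shift of 4 (g = 1/16):

#include <stdio.h>

#define DCTCP_MAX_ALPHA	1024U	/* alpha kept scaled by 1024 */
#define SHIFT_G		4U	/* g = 1/16 */

/* alpha = (1 - g)*alpha + g*(delivered_ce / delivered), fixed point */
static unsigned int dctcp_update_alpha(unsigned int alpha,
				       unsigned int delivered,
				       unsigned int delivered_ce)
{
	unsigned int f = delivered_ce * DCTCP_MAX_ALPHA / delivered;

	return alpha - (alpha >> SHIFT_G) + (f >> SHIFT_G);
}

int main(void)
{
	unsigned int alpha = 0, cwnd = 100;

	/* eight windows where 25% of deliveries carried CE marks */
	for (int i = 0; i < 8; i++)
		alpha = dctcp_update_alpha(alpha, 80, 20);

	/* cwnd cut mirrors: cwnd - ((cwnd * alpha) >> 11) */
	printf("alpha=%u/1024, cwnd %u -> %u\n", alpha, cwnd,
	       cwnd - ((cwnd * alpha) >> 11));
	return 0;
}
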
ca                103 net/ipv4/tcp_highspeed.c 	struct hstcp *ca = inet_csk_ca(sk);
ca                105 net/ipv4/tcp_highspeed.c 	ca->ai = 0;
ca                115 net/ipv4/tcp_highspeed.c 	struct hstcp *ca = inet_csk_ca(sk);
ca                130 net/ipv4/tcp_highspeed.c 		if (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd) {
ca                131 net/ipv4/tcp_highspeed.c 			while (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd &&
ca                132 net/ipv4/tcp_highspeed.c 			       ca->ai < HSTCP_AIMD_MAX - 1)
ca                133 net/ipv4/tcp_highspeed.c 				ca->ai++;
ca                134 net/ipv4/tcp_highspeed.c 		} else if (ca->ai && tp->snd_cwnd <= hstcp_aimd_vals[ca->ai-1].cwnd) {
ca                135 net/ipv4/tcp_highspeed.c 			while (ca->ai && tp->snd_cwnd <= hstcp_aimd_vals[ca->ai-1].cwnd)
ca                136 net/ipv4/tcp_highspeed.c 				ca->ai--;
ca                142 net/ipv4/tcp_highspeed.c 			tp->snd_cwnd_cnt += ca->ai + 1;
ca                154 net/ipv4/tcp_highspeed.c 	struct hstcp *ca = inet_csk_ca(sk);
ca                157 net/ipv4/tcp_highspeed.c 	return max(tp->snd_cwnd - ((tp->snd_cwnd * hstcp_aimd_vals[ca->ai].md) >> 8), 2U);
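
HSTCP above keeps `ca->ai` as an index into a precomputed AIMD table and slides it as cwnd crosses per-row thresholds; both the additive increase (ai + 1) and the multiplicative decrease (md, scaled by 256) come from the current row. A reduced sketch of that table walk; the three rows here are abbreviated from the in-tree table, which has 73:

#include <stdio.h>

struct aimd_row { unsigned int cwnd, md; };

static const struct aimd_row tbl[] = {
	{  38, 128 },
	{ 118, 112 },
	{ 221, 104 },
};
#define ROWS (sizeof(tbl) / sizeof(tbl[0]))

/* Find the row whose cwnd bracket contains the current window. */
static unsigned int slide(unsigned int ai, unsigned int cwnd)
{
	while (cwnd > tbl[ai].cwnd && ai < ROWS - 1)
		ai++;
	while (ai && cwnd <= tbl[ai - 1].cwnd)
		ai--;
	return ai;
}

int main(void)
{
	unsigned int ai = 0;

	for (unsigned int cwnd = 30; cwnd <= 230; cwnd += 50) {
		ai = slide(ai, cwnd);
		printf("cwnd=%u -> row %u (md=%u/256)\n", cwnd, ai, tbl[ai].md);
	}
	return 0;
}
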
ca                 49 net/ipv4/tcp_htcp.c static inline u32 htcp_cong_time(const struct htcp *ca)
ca                 51 net/ipv4/tcp_htcp.c 	return jiffies - ca->last_cong;
ca                 54 net/ipv4/tcp_htcp.c static inline u32 htcp_ccount(const struct htcp *ca)
ca                 56 net/ipv4/tcp_htcp.c 	return htcp_cong_time(ca) / ca->minRTT;
ca                 59 net/ipv4/tcp_htcp.c static inline void htcp_reset(struct htcp *ca)
ca                 61 net/ipv4/tcp_htcp.c 	ca->undo_last_cong = ca->last_cong;
ca                 62 net/ipv4/tcp_htcp.c 	ca->undo_maxRTT = ca->maxRTT;
ca                 63 net/ipv4/tcp_htcp.c 	ca->undo_old_maxB = ca->old_maxB;
ca                 65 net/ipv4/tcp_htcp.c 	ca->last_cong = jiffies;
ca                 70 net/ipv4/tcp_htcp.c 	struct htcp *ca = inet_csk_ca(sk);
ca                 72 net/ipv4/tcp_htcp.c 	if (ca->undo_last_cong) {
ca                 73 net/ipv4/tcp_htcp.c 		ca->last_cong = ca->undo_last_cong;
ca                 74 net/ipv4/tcp_htcp.c 		ca->maxRTT = ca->undo_maxRTT;
ca                 75 net/ipv4/tcp_htcp.c 		ca->old_maxB = ca->undo_old_maxB;
ca                 76 net/ipv4/tcp_htcp.c 		ca->undo_last_cong = 0;
ca                 85 net/ipv4/tcp_htcp.c 	struct htcp *ca = inet_csk_ca(sk);
ca                 88 net/ipv4/tcp_htcp.c 	if (ca->minRTT > srtt || !ca->minRTT)
ca                 89 net/ipv4/tcp_htcp.c 		ca->minRTT = srtt;
ca                 93 net/ipv4/tcp_htcp.c 		if (ca->maxRTT < ca->minRTT)
ca                 94 net/ipv4/tcp_htcp.c 			ca->maxRTT = ca->minRTT;
ca                 95 net/ipv4/tcp_htcp.c 		if (ca->maxRTT < srtt &&
ca                 96 net/ipv4/tcp_htcp.c 		    srtt <= ca->maxRTT + msecs_to_jiffies(20))
ca                 97 net/ipv4/tcp_htcp.c 			ca->maxRTT = srtt;
ca                106 net/ipv4/tcp_htcp.c 	struct htcp *ca = inet_csk_ca(sk);
ca                110 net/ipv4/tcp_htcp.c 		ca->pkts_acked = sample->pkts_acked;
ca                120 net/ipv4/tcp_htcp.c 		ca->packetcount = 0;
ca                121 net/ipv4/tcp_htcp.c 		ca->lasttime = now;
ca                125 net/ipv4/tcp_htcp.c 	ca->packetcount += sample->pkts_acked;
ca                127 net/ipv4/tcp_htcp.c 	if (ca->packetcount >= tp->snd_cwnd - (ca->alpha >> 7 ? : 1) &&
ca                128 net/ipv4/tcp_htcp.c 	    now - ca->lasttime >= ca->minRTT &&
ca                129 net/ipv4/tcp_htcp.c 	    ca->minRTT > 0) {
ca                130 net/ipv4/tcp_htcp.c 		__u32 cur_Bi = ca->packetcount * HZ / (now - ca->lasttime);
ca                132 net/ipv4/tcp_htcp.c 		if (htcp_ccount(ca) <= 3) {
ca                134 net/ipv4/tcp_htcp.c 			ca->minB = ca->maxB = ca->Bi = cur_Bi;
ca                136 net/ipv4/tcp_htcp.c 			ca->Bi = (3 * ca->Bi + cur_Bi) / 4;
ca                137 net/ipv4/tcp_htcp.c 			if (ca->Bi > ca->maxB)
ca                138 net/ipv4/tcp_htcp.c 				ca->maxB = ca->Bi;
ca                139 net/ipv4/tcp_htcp.c 			if (ca->minB > ca->maxB)
ca                140 net/ipv4/tcp_htcp.c 				ca->minB = ca->maxB;
ca                142 net/ipv4/tcp_htcp.c 		ca->packetcount = 0;
ca                143 net/ipv4/tcp_htcp.c 		ca->lasttime = now;
ca                147 net/ipv4/tcp_htcp.c static inline void htcp_beta_update(struct htcp *ca, u32 minRTT, u32 maxRTT)
ca                150 net/ipv4/tcp_htcp.c 		u32 maxB = ca->maxB;
ca                151 net/ipv4/tcp_htcp.c 		u32 old_maxB = ca->old_maxB;
ca                153 net/ipv4/tcp_htcp.c 		ca->old_maxB = ca->maxB;
ca                155 net/ipv4/tcp_htcp.c 			ca->beta = BETA_MIN;
ca                156 net/ipv4/tcp_htcp.c 			ca->modeswitch = 0;
ca                161 net/ipv4/tcp_htcp.c 	if (ca->modeswitch && minRTT > msecs_to_jiffies(10) && maxRTT) {
ca                162 net/ipv4/tcp_htcp.c 		ca->beta = (minRTT << 7) / maxRTT;
ca                163 net/ipv4/tcp_htcp.c 		if (ca->beta < BETA_MIN)
ca                164 net/ipv4/tcp_htcp.c 			ca->beta = BETA_MIN;
ca                165 net/ipv4/tcp_htcp.c 		else if (ca->beta > BETA_MAX)
ca                166 net/ipv4/tcp_htcp.c 			ca->beta = BETA_MAX;
ca                168 net/ipv4/tcp_htcp.c 		ca->beta = BETA_MIN;
ca                169 net/ipv4/tcp_htcp.c 		ca->modeswitch = 1;
ca                173 net/ipv4/tcp_htcp.c static inline void htcp_alpha_update(struct htcp *ca)
ca                175 net/ipv4/tcp_htcp.c 	u32 minRTT = ca->minRTT;
ca                177 net/ipv4/tcp_htcp.c 	u32 diff = htcp_cong_time(ca);
ca                194 net/ipv4/tcp_htcp.c 	ca->alpha = 2 * factor * ((1 << 7) - ca->beta);
ca                195 net/ipv4/tcp_htcp.c 	if (!ca->alpha)
ca                196 net/ipv4/tcp_htcp.c 		ca->alpha = ALPHA_BASE;
ca                210 net/ipv4/tcp_htcp.c 	struct htcp *ca = inet_csk_ca(sk);
ca                211 net/ipv4/tcp_htcp.c 	u32 minRTT = ca->minRTT;
ca                212 net/ipv4/tcp_htcp.c 	u32 maxRTT = ca->maxRTT;
ca                214 net/ipv4/tcp_htcp.c 	htcp_beta_update(ca, minRTT, maxRTT);
ca                215 net/ipv4/tcp_htcp.c 	htcp_alpha_update(ca);
ca                219 net/ipv4/tcp_htcp.c 		ca->maxRTT = minRTT + ((maxRTT - minRTT) * 95) / 100;
ca                225 net/ipv4/tcp_htcp.c 	const struct htcp *ca = inet_csk_ca(sk);
ca                228 net/ipv4/tcp_htcp.c 	return max((tp->snd_cwnd * ca->beta) >> 7, 2U);
ca                234 net/ipv4/tcp_htcp.c 	struct htcp *ca = inet_csk_ca(sk);
ca                245 net/ipv4/tcp_htcp.c 		if ((tp->snd_cwnd_cnt * ca->alpha)>>7 >= tp->snd_cwnd) {
ca                249 net/ipv4/tcp_htcp.c 			htcp_alpha_update(ca);
ca                251 net/ipv4/tcp_htcp.c 			tp->snd_cwnd_cnt += ca->pkts_acked;
ca                253 net/ipv4/tcp_htcp.c 		ca->pkts_acked = 1;
ca                259 net/ipv4/tcp_htcp.c 	struct htcp *ca = inet_csk_ca(sk);
ca                261 net/ipv4/tcp_htcp.c 	memset(ca, 0, sizeof(struct htcp));
ca                262 net/ipv4/tcp_htcp.c 	ca->alpha = ALPHA_BASE;
ca                263 net/ipv4/tcp_htcp.c 	ca->beta = BETA_MIN;
ca                264 net/ipv4/tcp_htcp.c 	ca->pkts_acked = 1;
ca                265 net/ipv4/tcp_htcp.c 	ca->last_cong = jiffies;
ca                273 net/ipv4/tcp_htcp.c 			struct htcp *ca = inet_csk_ca(sk);
ca                275 net/ipv4/tcp_htcp.c 			if (ca->undo_last_cong) {
ca                276 net/ipv4/tcp_htcp.c 				ca->last_cong = jiffies;
ca                277 net/ipv4/tcp_htcp.c 				ca->undo_last_cong = 0;
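
H-TCP above couples its AIMD parameters: beta tracks minRTT/maxRTT clamped to roughly [0.5, 0.8] in <<7 fixed point, and alpha scales 2*(1 - beta) by a factor that grows with time since the last congestion event. A small sketch of those two updates in the same <<7 fixed point (the clamp constants are assumptions mirroring BETA_MIN/BETA_MAX):

#include <stdio.h>

#define BETA_MIN 64U	/* 0.5 << 7 */
#define BETA_MAX 102U	/* ~0.8 << 7 */

static unsigned int htcp_beta(unsigned int min_rtt, unsigned int max_rtt)
{
	unsigned int beta = (min_rtt << 7) / max_rtt;

	if (beta < BETA_MIN)
		beta = BETA_MIN;
	else if (beta > BETA_MAX)
		beta = BETA_MAX;
	return beta;
}

static unsigned int htcp_alpha(unsigned int factor, unsigned int beta)
{
	return 2 * factor * ((1 << 7) - beta);	/* alpha, also << 7 */
}

int main(void)
{
	unsigned int beta = htcp_beta(20, 100);	/* short/long RTT, ms */

	printf("beta=%u/128 alpha=%u/128\n", beta, htcp_alpha(1, beta));
	return 0;
}
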
ca                 36 net/ipv4/tcp_hybla.c 	struct hybla *ca = inet_csk_ca(sk);
ca                 38 net/ipv4/tcp_hybla.c 	ca->rho_3ls = max_t(u32,
ca                 41 net/ipv4/tcp_hybla.c 	ca->rho = ca->rho_3ls >> 3;
ca                 42 net/ipv4/tcp_hybla.c 	ca->rho2_7ls = (ca->rho_3ls * ca->rho_3ls) << 1;
ca                 43 net/ipv4/tcp_hybla.c 	ca->rho2 = ca->rho2_7ls >> 7;
ca                 49 net/ipv4/tcp_hybla.c 	struct hybla *ca = inet_csk_ca(sk);
ca                 51 net/ipv4/tcp_hybla.c 	ca->rho = 0;
ca                 52 net/ipv4/tcp_hybla.c 	ca->rho2 = 0;
ca                 53 net/ipv4/tcp_hybla.c 	ca->rho_3ls = 0;
ca                 54 net/ipv4/tcp_hybla.c 	ca->rho2_7ls = 0;
ca                 55 net/ipv4/tcp_hybla.c 	ca->snd_cwnd_cents = 0;
ca                 56 net/ipv4/tcp_hybla.c 	ca->hybla_en = true;
ca                 64 net/ipv4/tcp_hybla.c 	ca->minrtt_us = tp->srtt_us;
ca                 65 net/ipv4/tcp_hybla.c 	tp->snd_cwnd = ca->rho;
ca                 70 net/ipv4/tcp_hybla.c 	struct hybla *ca = inet_csk_ca(sk);
ca                 72 net/ipv4/tcp_hybla.c 	ca->hybla_en = (ca_state == TCP_CA_Open);
ca                 93 net/ipv4/tcp_hybla.c 	struct hybla *ca = inet_csk_ca(sk);
ca                 98 net/ipv4/tcp_hybla.c 	if (tp->srtt_us < ca->minrtt_us) {
ca                100 net/ipv4/tcp_hybla.c 		ca->minrtt_us = tp->srtt_us;
ca                106 net/ipv4/tcp_hybla.c 	if (!ca->hybla_en) {
ca                111 net/ipv4/tcp_hybla.c 	if (ca->rho == 0)
ca                114 net/ipv4/tcp_hybla.c 	rho_fractions = ca->rho_3ls - (ca->rho << 3);
ca                131 net/ipv4/tcp_hybla.c 		increment = ((1 << min(ca->rho, 16U)) *
ca                140 net/ipv4/tcp_hybla.c 		increment = ca->rho2_7ls / tp->snd_cwnd;
ca                147 net/ipv4/tcp_hybla.c 	ca->snd_cwnd_cents += odd;
ca                150 net/ipv4/tcp_hybla.c 	while (ca->snd_cwnd_cents >= 128) {
ca                152 net/ipv4/tcp_hybla.c 		ca->snd_cwnd_cents -= 128;
ca                 60 net/ipv4/tcp_illinois.c 	struct illinois *ca = inet_csk_ca(sk);
ca                 62 net/ipv4/tcp_illinois.c 	ca->end_seq = tp->snd_nxt;
ca                 63 net/ipv4/tcp_illinois.c 	ca->cnt_rtt = 0;
ca                 64 net/ipv4/tcp_illinois.c 	ca->sum_rtt = 0;
ca                 71 net/ipv4/tcp_illinois.c 	struct illinois *ca = inet_csk_ca(sk);
ca                 73 net/ipv4/tcp_illinois.c 	ca->alpha = ALPHA_MAX;
ca                 74 net/ipv4/tcp_illinois.c 	ca->beta = BETA_BASE;
ca                 75 net/ipv4/tcp_illinois.c 	ca->base_rtt = 0x7fffffff;
ca                 76 net/ipv4/tcp_illinois.c 	ca->max_rtt = 0;
ca                 78 net/ipv4/tcp_illinois.c 	ca->acked = 0;
ca                 79 net/ipv4/tcp_illinois.c 	ca->rtt_low = 0;
ca                 80 net/ipv4/tcp_illinois.c 	ca->rtt_above = 0;
ca                 88 net/ipv4/tcp_illinois.c 	struct illinois *ca = inet_csk_ca(sk);
ca                 91 net/ipv4/tcp_illinois.c 	ca->acked = sample->pkts_acked;
ca                102 net/ipv4/tcp_illinois.c 	if (ca->base_rtt > rtt_us)
ca                103 net/ipv4/tcp_illinois.c 		ca->base_rtt = rtt_us;
ca                106 net/ipv4/tcp_illinois.c 	if (ca->max_rtt < rtt_us)
ca                107 net/ipv4/tcp_illinois.c 		ca->max_rtt = rtt_us;
ca                109 net/ipv4/tcp_illinois.c 	++ca->cnt_rtt;
ca                110 net/ipv4/tcp_illinois.c 	ca->sum_rtt += rtt_us;
ca                114 net/ipv4/tcp_illinois.c static inline u32 max_delay(const struct illinois *ca)
ca                116 net/ipv4/tcp_illinois.c 	return ca->max_rtt - ca->base_rtt;
ca                120 net/ipv4/tcp_illinois.c static inline u32 avg_delay(const struct illinois *ca)
ca                122 net/ipv4/tcp_illinois.c 	u64 t = ca->sum_rtt;
ca                124 net/ipv4/tcp_illinois.c 	do_div(t, ca->cnt_rtt);
ca                125 net/ipv4/tcp_illinois.c 	return t - ca->base_rtt;
ca                140 net/ipv4/tcp_illinois.c static u32 alpha(struct illinois *ca, u32 da, u32 dm)
ca                146 net/ipv4/tcp_illinois.c 		if (!ca->rtt_above)
ca                152 net/ipv4/tcp_illinois.c 		if (++ca->rtt_low < theta)
ca                153 net/ipv4/tcp_illinois.c 			return ca->alpha;
ca                155 net/ipv4/tcp_illinois.c 		ca->rtt_low = 0;
ca                156 net/ipv4/tcp_illinois.c 		ca->rtt_above = 0;
ca                160 net/ipv4/tcp_illinois.c 	ca->rtt_above = 1;
ca                225 net/ipv4/tcp_illinois.c 	struct illinois *ca = inet_csk_ca(sk);
ca                228 net/ipv4/tcp_illinois.c 		ca->alpha = ALPHA_BASE;
ca                229 net/ipv4/tcp_illinois.c 		ca->beta = BETA_BASE;
ca                230 net/ipv4/tcp_illinois.c 	} else if (ca->cnt_rtt > 0) {
ca                231 net/ipv4/tcp_illinois.c 		u32 dm = max_delay(ca);
ca                232 net/ipv4/tcp_illinois.c 		u32 da = avg_delay(ca);
ca                234 net/ipv4/tcp_illinois.c 		ca->alpha = alpha(ca, da, dm);
ca                235 net/ipv4/tcp_illinois.c 		ca->beta = beta(da, dm);
ca                246 net/ipv4/tcp_illinois.c 	struct illinois *ca = inet_csk_ca(sk);
ca                249 net/ipv4/tcp_illinois.c 		ca->alpha = ALPHA_BASE;
ca                250 net/ipv4/tcp_illinois.c 		ca->beta = BETA_BASE;
ca                251 net/ipv4/tcp_illinois.c 		ca->rtt_low = 0;
ca                252 net/ipv4/tcp_illinois.c 		ca->rtt_above = 0;
ca                263 net/ipv4/tcp_illinois.c 	struct illinois *ca = inet_csk_ca(sk);
ca                265 net/ipv4/tcp_illinois.c 	if (after(ack, ca->end_seq))
ca                280 net/ipv4/tcp_illinois.c 		tp->snd_cwnd_cnt += ca->acked;
ca                281 net/ipv4/tcp_illinois.c 		ca->acked = 1;
ca                286 net/ipv4/tcp_illinois.c 		delta = (tp->snd_cwnd_cnt * ca->alpha) >> ALPHA_SHIFT;
ca                298 net/ipv4/tcp_illinois.c 	struct illinois *ca = inet_csk_ca(sk);
ca                301 net/ipv4/tcp_illinois.c 	return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->beta) >> BETA_SHIFT), 2U);
ca                308 net/ipv4/tcp_illinois.c 	const struct illinois *ca = inet_csk_ca(sk);
ca                312 net/ipv4/tcp_illinois.c 		info->vegas.tcpv_rttcnt = ca->cnt_rtt;
ca                313 net/ipv4/tcp_illinois.c 		info->vegas.tcpv_minrtt = ca->base_rtt;
ca                317 net/ipv4/tcp_illinois.c 			u64 t = ca->sum_rtt;
ca                413 net/ipv4/tcp_minisocks.c 		const struct tcp_congestion_ops *ca;
ca                416 net/ipv4/tcp_minisocks.c 		ca = tcp_ca_find_key(ca_key);
ca                417 net/ipv4/tcp_minisocks.c 		if (likely(ca && try_module_get(ca->owner))) {
ca                419 net/ipv4/tcp_minisocks.c 			icsk->icsk_ca_ops = ca;
ca                124 net/ipv4/tcp_nv.c static inline void tcpnv_reset(struct tcpnv *ca, struct sock *sk)
ca                128 net/ipv4/tcp_nv.c 	ca->nv_reset = 0;
ca                129 net/ipv4/tcp_nv.c 	ca->nv_no_cong_cnt = 0;
ca                130 net/ipv4/tcp_nv.c 	ca->nv_rtt_cnt = 0;
ca                131 net/ipv4/tcp_nv.c 	ca->nv_last_rtt = 0;
ca                132 net/ipv4/tcp_nv.c 	ca->nv_rtt_max_rate = 0;
ca                133 net/ipv4/tcp_nv.c 	ca->nv_rtt_start_seq = tp->snd_una;
ca                134 net/ipv4/tcp_nv.c 	ca->nv_eval_call_cnt = 0;
ca                135 net/ipv4/tcp_nv.c 	ca->nv_last_snd_una = tp->snd_una;
ca                140 net/ipv4/tcp_nv.c 	struct tcpnv *ca = inet_csk_ca(sk);
ca                143 net/ipv4/tcp_nv.c 	tcpnv_reset(ca, sk);
ca                152 net/ipv4/tcp_nv.c 		ca->nv_base_rtt = base_rtt;
ca                153 net/ipv4/tcp_nv.c 		ca->nv_lower_bound_rtt = (base_rtt * 205) >> 8; /* 80% */
ca                155 net/ipv4/tcp_nv.c 		ca->nv_base_rtt = 0;
ca                156 net/ipv4/tcp_nv.c 		ca->nv_lower_bound_rtt = 0;
ca                159 net/ipv4/tcp_nv.c 	ca->nv_allow_cwnd_growth = 1;
ca                160 net/ipv4/tcp_nv.c 	ca->nv_min_rtt_reset_jiffies = jiffies + 2 * HZ;
ca                161 net/ipv4/tcp_nv.c 	ca->nv_min_rtt = NV_INIT_RTT;
ca                162 net/ipv4/tcp_nv.c 	ca->nv_min_rtt_new = NV_INIT_RTT;
ca                163 net/ipv4/tcp_nv.c 	ca->nv_min_cwnd = NV_MIN_CWND;
ca                164 net/ipv4/tcp_nv.c 	ca->nv_catchup = 0;
ca                165 net/ipv4/tcp_nv.c 	ca->cwnd_growth_factor = 0;
ca                171 net/ipv4/tcp_nv.c inline u32 nv_get_bounded_rtt(struct tcpnv *ca, u32 val)
ca                173 net/ipv4/tcp_nv.c 	if (ca->nv_lower_bound_rtt > 0 && val < ca->nv_lower_bound_rtt)
ca                174 net/ipv4/tcp_nv.c 		return ca->nv_lower_bound_rtt;
ca                175 net/ipv4/tcp_nv.c 	else if (ca->nv_base_rtt > 0 && val > ca->nv_base_rtt)
ca                176 net/ipv4/tcp_nv.c 		return ca->nv_base_rtt;
ca                184 net/ipv4/tcp_nv.c 	struct tcpnv *ca = inet_csk_ca(sk);
ca                191 net/ipv4/tcp_nv.c 	if (!ca->nv_allow_cwnd_growth)
ca                200 net/ipv4/tcp_nv.c 	if (ca->cwnd_growth_factor < 0) {
ca                201 net/ipv4/tcp_nv.c 		cnt = tp->snd_cwnd << -ca->cwnd_growth_factor;
ca                204 net/ipv4/tcp_nv.c 		cnt = max(4U, tp->snd_cwnd >> ca->cwnd_growth_factor);
ca                218 net/ipv4/tcp_nv.c 	struct tcpnv *ca = inet_csk_ca(sk);
ca                220 net/ipv4/tcp_nv.c 	if (new_state == TCP_CA_Open && ca->nv_reset) {
ca                221 net/ipv4/tcp_nv.c 		tcpnv_reset(ca, sk);
ca                224 net/ipv4/tcp_nv.c 		ca->nv_reset = 1;
ca                225 net/ipv4/tcp_nv.c 		ca->nv_allow_cwnd_growth = 0;
ca                228 net/ipv4/tcp_nv.c 			if (ca->cwnd_growth_factor > 0)
ca                229 net/ipv4/tcp_nv.c 				ca->cwnd_growth_factor = 0;
ca                232 net/ipv4/tcp_nv.c 			    ca->cwnd_growth_factor > -8)
ca                233 net/ipv4/tcp_nv.c 				ca->cwnd_growth_factor--;
ca                244 net/ipv4/tcp_nv.c 	struct tcpnv *ca = inet_csk_ca(sk);
ca                261 net/ipv4/tcp_nv.c 	if (ca->nv_catchup && tp->snd_cwnd >= nv_min_cwnd) {
ca                262 net/ipv4/tcp_nv.c 		ca->nv_catchup = 0;
ca                263 net/ipv4/tcp_nv.c 		ca->nv_allow_cwnd_growth = 0;
ca                266 net/ipv4/tcp_nv.c 	bytes_acked = tp->snd_una - ca->nv_last_snd_una;
ca                267 net/ipv4/tcp_nv.c 	ca->nv_last_snd_una = tp->snd_una;
ca                274 net/ipv4/tcp_nv.c 		if (ca->nv_last_rtt > 0) {
ca                276 net/ipv4/tcp_nv.c 				   ((u64)ca->nv_last_rtt)
ca                280 net/ipv4/tcp_nv.c 			ca->nv_min_rtt = avg_rtt << 1;
ca                282 net/ipv4/tcp_nv.c 		ca->nv_last_rtt = avg_rtt;
ca                296 net/ipv4/tcp_nv.c 	if (ca->nv_rtt_max_rate < rate)
ca                297 net/ipv4/tcp_nv.c 		ca->nv_rtt_max_rate = rate;
ca                300 net/ipv4/tcp_nv.c 	if (ca->nv_eval_call_cnt < 255)
ca                301 net/ipv4/tcp_nv.c 		ca->nv_eval_call_cnt++;
ca                304 net/ipv4/tcp_nv.c 	avg_rtt = nv_get_bounded_rtt(ca, avg_rtt);
ca                307 net/ipv4/tcp_nv.c 	if (avg_rtt < ca->nv_min_rtt)
ca                308 net/ipv4/tcp_nv.c 		ca->nv_min_rtt = avg_rtt;
ca                311 net/ipv4/tcp_nv.c 	if (avg_rtt < ca->nv_min_rtt_new)
ca                312 net/ipv4/tcp_nv.c 		ca->nv_min_rtt_new = avg_rtt;
ca                323 net/ipv4/tcp_nv.c 	if (time_after_eq(now, ca->nv_min_rtt_reset_jiffies)) {
ca                326 net/ipv4/tcp_nv.c 		ca->nv_min_rtt = ca->nv_min_rtt_new;
ca                327 net/ipv4/tcp_nv.c 		ca->nv_min_rtt_new = NV_INIT_RTT;
ca                329 net/ipv4/tcp_nv.c 		ca->nv_min_rtt_reset_jiffies =
ca                334 net/ipv4/tcp_nv.c 		ca->nv_min_cwnd = max(ca->nv_min_cwnd / 2, NV_MIN_CWND);
ca                338 net/ipv4/tcp_nv.c 	if (before(ca->nv_rtt_start_seq, tp->snd_una)) {
ca                339 net/ipv4/tcp_nv.c 		ca->nv_rtt_start_seq = tp->snd_nxt;
ca                340 net/ipv4/tcp_nv.c 		if (ca->nv_rtt_cnt < 0xff)
ca                342 net/ipv4/tcp_nv.c 			ca->nv_rtt_cnt++;
ca                349 net/ipv4/tcp_nv.c 		if (ca->nv_eval_call_cnt == 1 &&
ca                350 net/ipv4/tcp_nv.c 		    bytes_acked >= (ca->nv_min_cwnd - 1) * tp->mss_cache &&
ca                351 net/ipv4/tcp_nv.c 		    ca->nv_min_cwnd < (NV_TSO_CWND_BOUND + 1)) {
ca                352 net/ipv4/tcp_nv.c 			ca->nv_min_cwnd = min(ca->nv_min_cwnd
ca                355 net/ipv4/tcp_nv.c 			ca->nv_rtt_start_seq = tp->snd_nxt +
ca                356 net/ipv4/tcp_nv.c 				ca->nv_min_cwnd * tp->mss_cache;
ca                357 net/ipv4/tcp_nv.c 			ca->nv_eval_call_cnt = 0;
ca                358 net/ipv4/tcp_nv.c 			ca->nv_allow_cwnd_growth = 1;
ca                367 net/ipv4/tcp_nv.c 			div64_u64(((u64)ca->nv_rtt_max_rate) * ca->nv_min_rtt,
ca                383 net/ipv4/tcp_nv.c 			if (ca->nv_rtt_cnt < nv_rtt_min_cnt) {
ca                386 net/ipv4/tcp_nv.c 				if (ca->nv_eval_call_cnt <
ca                390 net/ipv4/tcp_nv.c 			} else if (ca->nv_eval_call_cnt <
ca                392 net/ipv4/tcp_nv.c 				if (ca->nv_allow_cwnd_growth &&
ca                393 net/ipv4/tcp_nv.c 				    ca->nv_rtt_cnt > nv_stop_rtt_cnt)
ca                394 net/ipv4/tcp_nv.c 					ca->nv_allow_cwnd_growth = 0;
ca                399 net/ipv4/tcp_nv.c 			ca->nv_allow_cwnd_growth = 0;
ca                412 net/ipv4/tcp_nv.c 			if (ca->cwnd_growth_factor > 0)
ca                413 net/ipv4/tcp_nv.c 				ca->cwnd_growth_factor = 0;
ca                414 net/ipv4/tcp_nv.c 			ca->nv_no_cong_cnt = 0;
ca                417 net/ipv4/tcp_nv.c 			if (ca->nv_eval_call_cnt < nv_inc_eval_min_calls)
ca                420 net/ipv4/tcp_nv.c 			ca->nv_allow_cwnd_growth = 1;
ca                421 net/ipv4/tcp_nv.c 			ca->nv_no_cong_cnt++;
ca                422 net/ipv4/tcp_nv.c 			if (ca->cwnd_growth_factor < 0 &&
ca                424 net/ipv4/tcp_nv.c 			    ca->nv_no_cong_cnt > nv_cwnd_growth_rate_neg) {
ca                425 net/ipv4/tcp_nv.c 				ca->cwnd_growth_factor++;
ca                426 net/ipv4/tcp_nv.c 				ca->nv_no_cong_cnt = 0;
ca                427 net/ipv4/tcp_nv.c 			} else if (ca->cwnd_growth_factor >= 0 &&
ca                429 net/ipv4/tcp_nv.c 				   ca->nv_no_cong_cnt >
ca                431 net/ipv4/tcp_nv.c 				ca->cwnd_growth_factor++;
ca                432 net/ipv4/tcp_nv.c 				ca->nv_no_cong_cnt = 0;
ca                440 net/ipv4/tcp_nv.c 		ca->nv_eval_call_cnt = 0;
ca                441 net/ipv4/tcp_nv.c 		ca->nv_rtt_cnt = 0;
ca                442 net/ipv4/tcp_nv.c 		ca->nv_rtt_max_rate = 0;
ca                457 net/ipv4/tcp_nv.c 	const struct tcpnv *ca = inet_csk_ca(sk);
ca                461 net/ipv4/tcp_nv.c 		info->vegas.tcpv_rttcnt = ca->nv_rtt_cnt;
ca                462 net/ipv4/tcp_nv.c 		info->vegas.tcpv_rtt = ca->nv_last_rtt;
ca                463 net/ipv4/tcp_nv.c 		info->vegas.tcpv_minrtt = ca->nv_min_rtt;
ca               3371 net/ipv4/tcp_output.c 	const struct tcp_congestion_ops *ca;
ca               3378 net/ipv4/tcp_output.c 	ca = tcp_ca_find_key(ca_key);
ca               3379 net/ipv4/tcp_output.c 	if (likely(ca && try_module_get(ca->owner))) {
ca               3382 net/ipv4/tcp_output.c 		icsk->icsk_ca_ops = ca;
ca                293 net/ipv4/tcp_vegas.c 	const struct vegas *ca = inet_csk_ca(sk);
ca                296 net/ipv4/tcp_vegas.c 		info->vegas.tcpv_enabled = ca->doing_vegas_now,
ca                297 net/ipv4/tcp_vegas.c 		info->vegas.tcpv_rttcnt = ca->cntRTT,
ca                298 net/ipv4/tcp_vegas.c 		info->vegas.tcpv_rtt = ca->baseRTT,
ca                299 net/ipv4/tcp_vegas.c 		info->vegas.tcpv_minrtt = ca->minRTT,
ca                264 net/ipv4/tcp_westwood.c 	const struct westwood *ca = inet_csk_ca(sk);
ca                269 net/ipv4/tcp_westwood.c 		info->vegas.tcpv_rtt	= jiffies_to_usecs(ca->rtt);
ca                270 net/ipv4/tcp_westwood.c 		info->vegas.tcpv_minrtt	= jiffies_to_usecs(ca->rtt_min);
ca                299 net/ipv6/sit.c 	unsigned int cmax, c = 0, ca, len;
ca                317 net/ipv6/sit.c 	ca = t->prl_count < cmax ? t->prl_count : cmax;
ca                325 net/ipv6/sit.c 		kp = kcalloc(ca, sizeof(*kp), GFP_ATOMIC);
ca                 37 net/sched/act_connmark.c 	struct tcf_connmark_info *ca = to_connmark(a);
ca                 42 net/sched/act_connmark.c 	spin_lock(&ca->tcf_lock);
ca                 43 net/sched/act_connmark.c 	tcf_lastuse_update(&ca->tcf_tm);
ca                 44 net/sched/act_connmark.c 	bstats_update(&ca->tcf_bstats, skb);
ca                 64 net/sched/act_connmark.c 		ca->tcf_qstats.overlimits++;
ca                 69 net/sched/act_connmark.c 			       proto, ca->net, &tuple))
ca                 72 net/sched/act_connmark.c 	zone.id = ca->zone;
ca                 75 net/sched/act_connmark.c 	thash = nf_conntrack_find_get(ca->net, &zone, &tuple);
ca                 81 net/sched/act_connmark.c 	ca->tcf_qstats.overlimits++;
ca                 86 net/sched/act_connmark.c 	spin_unlock(&ca->tcf_lock);
ca                 87 net/sched/act_connmark.c 	return ca->tcf_action;
ca                 30 net/sched/act_ctinfo.c static void tcf_ctinfo_dscp_set(struct nf_conn *ct, struct tcf_ctinfo *ca,
ca                 47 net/sched/act_ctinfo.c 				ca->stats_dscp_set++;
ca                 49 net/sched/act_ctinfo.c 				ca->stats_dscp_error++;
ca                 60 net/sched/act_ctinfo.c 				ca->stats_dscp_set++;
ca                 62 net/sched/act_ctinfo.c 				ca->stats_dscp_error++;
ca                 71 net/sched/act_ctinfo.c static void tcf_ctinfo_cpmark_set(struct nf_conn *ct, struct tcf_ctinfo *ca,
ca                 75 net/sched/act_ctinfo.c 	ca->stats_cpmark_set++;
ca                 83 net/sched/act_ctinfo.c 	struct tcf_ctinfo *ca = to_ctinfo(a);
ca                 92 net/sched/act_ctinfo.c 	cp = rcu_dereference_bh(ca->params);
ca                 94 net/sched/act_ctinfo.c 	tcf_lastuse_update(&ca->tcf_tm);
ca                 95 net/sched/act_ctinfo.c 	bstats_update(&ca->tcf_bstats, skb);
ca                 96 net/sched/act_ctinfo.c 	action = READ_ONCE(ca->tcf_action);
ca                132 net/sched/act_ctinfo.c 			tcf_ctinfo_dscp_set(ct, ca, cp, skb, wlen, proto);
ca                135 net/sched/act_ctinfo.c 		tcf_ctinfo_cpmark_set(ct, ca, cp, skb);
ca                 55 security/apparmor/audit.c static void audit_pre(struct audit_buffer *ab, void *ca)
ca                 57 security/apparmor/audit.c 	struct common_audit_data *sa = ca;
ca                236 sound/hda/hdmi_chmap.c static int get_channel_allocation_order(int ca)
ca                241 sound/hda/hdmi_chmap.c 		if (channel_allocations[i].ca_index == ca)
ca                272 sound/hda/hdmi_chmap.c 	int ca = 0;
ca                298 sound/hda/hdmi_chmap.c 			ca = channel_allocations[i].ca_index;
ca                303 sound/hda/hdmi_chmap.c 	if (!ca) {
ca                310 sound/hda/hdmi_chmap.c 				ca = channel_allocations[i].ca_index;
ca                318 sound/hda/hdmi_chmap.c 		    ca, channels, buf);
ca                320 sound/hda/hdmi_chmap.c 	return ca;
ca                342 sound/hda/hdmi_chmap.c 				       int ca)
ca                350 sound/hda/hdmi_chmap.c 	order = get_channel_allocation_order(ca);
ca                353 sound/hda/hdmi_chmap.c 	if (hdmi_channel_mapping[ca][1] == 0) {
ca                361 sound/hda/hdmi_chmap.c 			hdmi_channel_mapping[ca][i] = (i << 4) | hdmi_slot++;
ca                366 sound/hda/hdmi_chmap.c 				hdmi_channel_mapping[ca][i++] = (0xf << 4) | hdmi_slot;
ca                377 sound/hda/hdmi_chmap.c 		int slotsetup = non_pcm ? non_pcm_mapping[i] : hdmi_channel_mapping[ca][i];
ca                508 sound/hda/hdmi_chmap.c 					     int ca)
ca                510 sound/hda/hdmi_chmap.c 	int ordered_ca = get_channel_allocation_order(ca);
ca                536 sound/hda/hdmi_chmap.c static void hdmi_setup_fake_chmap(unsigned char *map, int ca)
ca                539 sound/hda/hdmi_chmap.c 	int ordered_ca = get_channel_allocation_order(ca);
ca                544 sound/hda/hdmi_chmap.c 			map[i] = from_cea_slot(ordered_ca, hdmi_channel_mapping[ca][i] & 0x0f);
ca                551 sound/hda/hdmi_chmap.c 				       hda_nid_t pin_nid, bool non_pcm, int ca,
ca                557 sound/hda/hdmi_chmap.c 						  channels, map, ca);
ca                559 sound/hda/hdmi_chmap.c 		hdmi_std_setup_channel_mapping(chmap, pin_nid, non_pcm, ca);
ca                560 sound/hda/hdmi_chmap.c 		hdmi_setup_fake_chmap(map, ca);
ca                567 sound/hda/hdmi_chmap.c int snd_hdac_get_active_channels(int ca)
ca                569 sound/hda/hdmi_chmap.c 	int ordered_ca = get_channel_allocation_order(ca);
ca                581 sound/hda/hdmi_chmap.c struct hdac_cea_channel_speaker_allocation *snd_hdac_get_ch_alloc_from_ca(int ca)
ca                583 sound/hda/hdmi_chmap.c 	return &channel_allocations[get_channel_allocation_order(ca)];
ca                590 sound/hda/hdmi_chmap.c 	int ca;
ca                593 sound/hda/hdmi_chmap.c 		ca = hdmi_manual_channel_allocation(channels, map);
ca                595 sound/hda/hdmi_chmap.c 		ca = hdmi_channel_allocation_spk_alloc_blk(hdac,
ca                598 sound/hda/hdmi_chmap.c 	if (ca < 0)
ca                599 sound/hda/hdmi_chmap.c 		ca = 0;
ca                601 sound/hda/hdmi_chmap.c 	return ca;
ca                765 sound/hda/hdmi_chmap.c 	int i, err, ca, prepared = 0;
ca                794 sound/hda/hdmi_chmap.c 	ca = hdmi_manual_channel_allocation(ARRAY_SIZE(chmap), chmap);
ca                795 sound/hda/hdmi_chmap.c 	if (ca < 0)
ca                798 sound/hda/hdmi_chmap.c 		err = hchmap->ops.chmap_validate(hchmap, ca,
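
The hdmi_chmap.c hits above repeatedly translate a CEA-861 CA (Channel Allocation) byte into a row of `channel_allocations[]` via `get_channel_allocation_order()`. A reduced sketch of that lookup with a three-entry table; the real table covers all CEA-861 CA codes, and the layout strings here are illustrative:

#include <stdio.h>

struct cea_channel_alloc {
	int ca_index;		/* CA byte from the audio infoframe */
	int channels;
	const char *layout;
};

static const struct cea_channel_alloc channel_allocations[] = {
	{ 0x00, 2, "FL FR" },
	{ 0x01, 3, "FL FR LFE" },
	{ 0x0b, 6, "FL FR LFE FC RL RR" },	/* 5.1 */
};

static int get_channel_allocation_order(int ca)
{
	for (unsigned int i = 0;
	     i < sizeof(channel_allocations) / sizeof(channel_allocations[0]);
	     i++)
		if (channel_allocations[i].ca_index == ca)
			return i;
	return 0;	/* fall back to plain stereo */
}

int main(void)
{
	int order = get_channel_allocation_order(0x0b);

	printf("CA 0x0b -> %d channels (%s)\n",
	       channel_allocations[order].channels,
	       channel_allocations[order].layout);
	return 0;
}
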
ca                100 sound/pci/ctxfi/cthardware.h 	int (*src_set_ca)(void *blk, unsigned int ca);
ca                 86 sound/pci/ctxfi/cthw20k1.c 		u16 ca:1;
ca                 97 sound/pci/ctxfi/cthw20k1.c 	unsigned int	ca;
ca                293 sound/pci/ctxfi/cthw20k1.c static int src_set_ca(void *blk, unsigned int ca)
ca                297 sound/pci/ctxfi/cthw20k1.c 	set_field(&ctl->ca, SRCCA_CA, ca);
ca                298 sound/pci/ctxfi/cthw20k1.c 	ctl->dirty.bf.ca = 1;
ca                396 sound/pci/ctxfi/cthw20k1.c 	if (ctl->dirty.bf.ca) {
ca                397 sound/pci/ctxfi/cthw20k1.c 		hw_write_20kx(hw, SRCCA+idx*0x100, ctl->ca);
ca                398 sound/pci/ctxfi/cthw20k1.c 		ctl->dirty.bf.ca = 0;
ca                420 sound/pci/ctxfi/cthw20k1.c 	ctl->ca = hw_read_20kx(hw, SRCCA+idx*0x100);
ca                421 sound/pci/ctxfi/cthw20k1.c 	ctl->dirty.bf.ca = 0;
ca                423 sound/pci/ctxfi/cthw20k1.c 	return get_field(ctl->ca, SRCCA_CA);
ca                 86 sound/pci/ctxfi/cthw20k2.c 		u16 ca:1;
ca                 97 sound/pci/ctxfi/cthw20k2.c 	unsigned int	ca;
ca                293 sound/pci/ctxfi/cthw20k2.c static int src_set_ca(void *blk, unsigned int ca)
ca                297 sound/pci/ctxfi/cthw20k2.c 	set_field(&ctl->ca, SRCCA_CA, ca);
ca                298 sound/pci/ctxfi/cthw20k2.c 	ctl->dirty.bf.ca = 1;
ca                396 sound/pci/ctxfi/cthw20k2.c 	if (ctl->dirty.bf.ca) {
ca                397 sound/pci/ctxfi/cthw20k2.c 		hw_write_20kx(hw, SRC_CA+idx*0x100, ctl->ca);
ca                398 sound/pci/ctxfi/cthw20k2.c 		ctl->dirty.bf.ca = 0;
ca                420 sound/pci/ctxfi/cthw20k2.c 	ctl->ca = hw_read_20kx(hw, SRC_CA+idx*0x100);
ca                421 sound/pci/ctxfi/cthw20k2.c 	ctl->dirty.bf.ca = 0;
ca                423 sound/pci/ctxfi/cthw20k2.c 	return get_field(ctl->ca, SRCCA_CA);
ca                124 sound/pci/ctxfi/ctsrc.c static int src_set_ca(struct src *src, unsigned int ca)
ca                129 sound/pci/ctxfi/ctsrc.c 	hw->src_set_ca(src->rsc.ctrl_blk, ca);
ca                 63 sound/pci/ctxfi/ctsrc.h 	int (*set_ca)(struct src *src, unsigned int ca);
ca                110 sound/pci/hda/patch_hdmi.c 				    int ca, int active_channels, int conn_type);
ca                664 sound/pci/hda/patch_hdmi.c 				     int ca, int active_channels,
ca                677 sound/pci/hda/patch_hdmi.c 		hdmi_ai->CA		= ca;
ca                686 sound/pci/hda/patch_hdmi.c 		dp_ai->CA		= ca;
ca                703 sound/pci/hda/patch_hdmi.c 			    active_channels, ca);
ca                721 sound/pci/hda/patch_hdmi.c 	int ca;
ca                734 sound/pci/hda/patch_hdmi.c 	ca = snd_hdac_channel_allocation(&codec->core,
ca                738 sound/pci/hda/patch_hdmi.c 	active_channels = snd_hdac_get_active_channels(ca);
ca                748 sound/pci/hda/patch_hdmi.c 				pin_nid, non_pcm, ca, channels,
ca                751 sound/pci/hda/patch_hdmi.c 	spec->ops.pin_setup_infoframe(codec, pin_nid, ca, active_channels,
ca               3472 sound/pci/hda/patch_hdmi.c 		int ca, int chs, unsigned char *map)
ca               3474 sound/pci/hda/patch_hdmi.c 	if (ca == 0x00 && (map[0] != SNDRV_CHMAP_FL || map[1] != SNDRV_CHMAP_FR))
ca               3721 sound/pci/hda/patch_hdmi.c static void atihdmi_pin_setup_infoframe(struct hda_codec *codec, hda_nid_t pin_nid, int ca,
ca               3724 sound/pci/hda/patch_hdmi.c 	snd_hda_codec_write(codec, pin_nid, 0, ATI_VERB_SET_CHANNEL_ALLOCATION, ca);
ca               3745 sound/pci/hda/patch_hdmi.c 			int ca, int chs, unsigned char *map)
ca               3752 sound/pci/hda/patch_hdmi.c 	cap = snd_hdac_get_ch_alloc_from_ca(ca);
ca                376 sound/soc/codecs/hdac_hdmi.c 	int channels, ca;
ca                378 sound/soc/codecs/hdac_hdmi.c 	ca = snd_hdac_channel_allocation(hdev, port->eld.info.spk_alloc,
ca                381 sound/soc/codecs/hdac_hdmi.c 	channels = snd_hdac_get_active_channels(ca);
ca                384 sound/soc/codecs/hdac_hdmi.c 	snd_hdac_setup_channel_mapping(&hdmi->chmap, pin->nid, false, ca,
ca                395 sound/soc/codecs/hdac_hdmi.c 		frame.channel_allocation = ca;
ca                409 sound/soc/codecs/hdac_hdmi.c 		dp_ai.CA	= ca;
ca                420 sound/x86/intel_hdmi_audio.c 	int ca = 0;
ca                446 sound/x86/intel_hdmi_audio.c 			ca = channel_allocations[i].ca_index;
ca                451 sound/x86/intel_hdmi_audio.c 	dev_dbg(intelhaddata->dev, "select CA 0x%x for %d\n", ca, channels);
ca                453 sound/x86/intel_hdmi_audio.c 	return ca;
ca                599 sound/x86/intel_hdmi_audio.c 	int ca;
ca                605 sound/x86/intel_hdmi_audio.c 	ca = had_channel_allocation(intelhaddata, channels);
ca                608 sound/x86/intel_hdmi_audio.c 		frame2.regval = (substream->runtime->channels - 1) | (ca << 24);
ca                612 sound/x86/intel_hdmi_audio.c 		frame3.regx.chnl_alloc = ca;
ca                134 tools/lib/traceevent/event-parse.c 	const struct tep_cmdline *ca = a;
ca                137 tools/lib/traceevent/event-parse.c 	if (ca->pid < cb->pid)
ca                139 tools/lib/traceevent/event-parse.c 	if (ca->pid > cb->pid)
ca                148 tools/lib/traceevent/event-parse.c 	const struct tep_cmdline *ca = a;
ca                152 tools/lib/traceevent/event-parse.c 	if (ca->pid < cb->pid)
ca                155 tools/lib/traceevent/event-parse.c 	if (ca->pid > cb->pid) {
ca                156 tools/lib/traceevent/event-parse.c 		if (ca->pid <= cb1->pid)
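
The two traceevent comparators above serve qsort() and bsearch() over a pid-keyed cmdline array; the second additionally peeks at the neighbouring element to locate an insertion slot. A compact standalone sketch of the plain pid comparator in use (struct fields reduced for illustration):

#include <stdio.h>
#include <stdlib.h>

struct cmdline { int pid; const char *comm; };

static int cmdline_cmp(const void *a, const void *b)
{
	const struct cmdline *ca = a;
	const struct cmdline *cb = b;

	if (ca->pid < cb->pid)
		return -1;
	if (ca->pid > cb->pid)
		return 1;
	return 0;
}

int main(void)
{
	struct cmdline list[] = {
		{ 42, "sshd" }, { 7, "init" }, { 19, "bash" },
	};
	struct cmdline key = { .pid = 19 }, *found;

	qsort(list, 3, sizeof(*list), cmdline_cmp);
	found = bsearch(&key, list, 3, sizeof(*list), cmdline_cmp);
	printf("pid 19 -> %s\n", found ? found->comm : "?");
	return 0;
}
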
ca                 45 tools/testing/selftests/nsfs/pidns.c 	struct cr_clone_arg ca;
ca                 50 tools/testing/selftests/nsfs/pidns.c 	pid = clone(child, ca.stack_ptr, CLONE_NEWUSER | CLONE_NEWPID | SIGCHLD, NULL);