storage 53 arch/arm/include/asm/ucontext.h struct crunch_state storage;
storage 65 arch/arm/include/asm/ucontext.h struct iwmmxt_struct storage;
storage 38 arch/arm/kernel/signal.c crunch_task_copy(current_thread_info(), &kframe->storage);
storage 57 arch/arm/kernel/signal.c crunch_task_restore(current_thread_info(), &kframe->storage);
storage 76 arch/arm/kernel/signal.c iwmmxt_task_copy(current_thread_info(), &kframe->storage);
storage 124 arch/arm/kernel/signal.c iwmmxt_task_restore(current_thread_info(), &kframe->storage);
storage 141 drivers/iio/dac/ad5446.c #define _AD5446_CHANNEL(bits, storage, _shift, ext) { \
storage 151 drivers/iio/dac/ad5446.c .storagebits = (storage), \
storage 157 drivers/iio/dac/ad5446.c #define AD5446_CHANNEL(bits, storage, shift) \
storage 158 drivers/iio/dac/ad5446.c _AD5446_CHANNEL(bits, storage, shift, NULL)
storage 160 drivers/iio/dac/ad5446.c #define AD5446_CHANNEL_POWERDOWN(bits, storage, shift) \
storage 161 drivers/iio/dac/ad5446.c _AD5446_CHANNEL(bits, storage, shift, ad5446_ext_info_powerdown)
storage 221 drivers/infiniband/core/cache.c struct roce_gid_ndev_storage *storage =
storage 224 drivers/infiniband/core/cache.c WARN_ON(!storage->ndev);
storage 229 drivers/infiniband/core/cache.c dev_put(storage->ndev);
storage 230 drivers/infiniband/core/cache.c kfree(storage);
storage 217 drivers/md/md-bitmap.c struct bitmap_storage *store = &bitmap->storage;
storage 292 drivers/md/md-bitmap.c if (bitmap->storage.file == NULL) {
storage 431 drivers/md/md-bitmap.c if (bitmap->storage.file)
storage 455 drivers/md/md-bitmap.c if (!bitmap->storage.sb_page) /* no superblock */
storage 457 drivers/md/md-bitmap.c sb = kmap_atomic(bitmap->storage.sb_page);
storage 478 drivers/md/md-bitmap.c write_page(bitmap, bitmap->storage.sb_page, 1);
storage 487 drivers/md/md-bitmap.c if (!bitmap || !bitmap->storage.sb_page)
storage 489 drivers/md/md-bitmap.c sb = kmap_atomic(bitmap->storage.sb_page);
storage 527 drivers/md/md-bitmap.c bitmap->storage.sb_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
storage 528 drivers/md/md-bitmap.c if (bitmap->storage.sb_page == NULL)
storage 530 drivers/md/md-bitmap.c bitmap->storage.sb_page->index = 0;
storage 532 drivers/md/md-bitmap.c sb = kmap_atomic(bitmap->storage.sb_page);
storage 593 drivers/md/md-bitmap.c if (!bitmap->storage.file && !bitmap->mddev->bitmap_info.offset) {
storage 605 drivers/md/md-bitmap.c bitmap->storage.sb_page = sb_page;
storage 623 drivers/md/md-bitmap.c if (bitmap->storage.file) {
storage 624 drivers/md/md-bitmap.c loff_t isize = i_size_read(bitmap->storage.file->f_mapping->host);
storage 627 drivers/md/md-bitmap.c err = read_page(bitmap->storage.file, 0,
storage 873 drivers/md/md-bitmap.c if (bitmap->storage.file) {
storage 876 drivers/md/md-bitmap.c ptr = file_path(bitmap->storage.file,
storage 899 drivers/md/md-bitmap.c set_bit((pnum<<2) + attr, bitmap->storage.filemap_attr);
storage 905 drivers/md/md-bitmap.c clear_bit((pnum<<2) + attr, bitmap->storage.filemap_attr);
storage 911 drivers/md/md-bitmap.c return test_bit((pnum<<2) + attr, bitmap->storage.filemap_attr);
storage 918 drivers/md/md-bitmap.c bitmap->storage.filemap_attr);
storage 933 drivers/md/md-bitmap.c struct bitmap_storage *store = &bitmap->storage;
storage 939 drivers/md/md-bitmap.c page = filemap_get_page(&bitmap->storage, chunk);
storage 942 drivers/md/md-bitmap.c bit = file_page_offset(&bitmap->storage, chunk);
storage 962 drivers/md/md-bitmap.c struct bitmap_storage *store = &bitmap->storage;
storage 968 drivers/md/md-bitmap.c page = filemap_get_page(&bitmap->storage, chunk);
storage 971 drivers/md/md-bitmap.c bit = file_page_offset(&bitmap->storage, chunk);
storage 992 drivers/md/md-bitmap.c page = filemap_get_page(&bitmap->storage, chunk);
storage 995 drivers/md/md-bitmap.c bit = file_page_offset(&bitmap->storage, chunk);
storage 1015 drivers/md/md-bitmap.c if (!bitmap || !bitmap->storage.filemap ||
storage 1021 drivers/md/md-bitmap.c for (i = 0; i < bitmap->storage.file_pages; i++) {
storage 1022 drivers/md/md-bitmap.c if (!bitmap->storage.filemap)
storage 1035 drivers/md/md-bitmap.c write_page(bitmap, bitmap->storage.filemap[i], 0);
storage 1069 drivers/md/md-bitmap.c struct bitmap_storage *store = &bitmap->storage;
storage 1111 drivers/md/md-bitmap.c index = file_page_index(&bitmap->storage, i);
storage 1112 drivers/md/md-bitmap.c bit = file_page_offset(&bitmap->storage, i);
storage 1190 drivers/md/md-bitmap.c if (!bitmap || !bitmap->storage.filemap)
storage 1192 drivers/md/md-bitmap.c if (bitmap->storage.file)
storage 1196 drivers/md/md-bitmap.c for (i = 0; i < bitmap->storage.file_pages; i++)
storage 1266 drivers/md/md-bitmap.c for (j = 0; j < bitmap->storage.file_pages; j++)
storage 1278 drivers/md/md-bitmap.c if (bitmap->storage.filemap) {
storage 1279 drivers/md/md-bitmap.c sb = kmap_atomic(bitmap->storage.sb_page);
storage 1334 drivers/md/md-bitmap.c j < bitmap->storage.file_pages
storage 1343 drivers/md/md-bitmap.c write_page(bitmap, bitmap->storage.filemap[j], 0);
storage 1755 drivers/md/md-bitmap.c md_bitmap_file_unmap(&bitmap->storage);
storage 1851 drivers/md/md-bitmap.c bitmap->storage.file = file;
storage 2012 drivers/md/md-bitmap.c for (i = 0; i < bitmap->storage.file_pages; i++)
storage 2045 drivers/md/md-bitmap.c if (bitmap->storage.file) {
storage 2047 drivers/md/md-bitmap.c seq_file_path(seq, bitmap->storage.file, " \t\n");
storage 2076 drivers/md/md-bitmap.c if (bitmap->storage.file && !init) {
storage 2135 drivers/md/md-bitmap.c store.file = bitmap->storage.file;
storage 2136 drivers/md/md-bitmap.c bitmap->storage.file = NULL;
storage 2138 drivers/md/md-bitmap.c if (store.sb_page && bitmap->storage.sb_page)
storage 2140 drivers/md/md-bitmap.c page_address(bitmap->storage.sb_page),
storage 2143 drivers/md/md-bitmap.c md_bitmap_file_unmap(&bitmap->storage);
storage 2144 drivers/md/md-bitmap.c bitmap->storage = store;
storage 2242 drivers/md/md-bitmap.c for (i = 0; i < bitmap->storage.file_pages; i++)
storage 2398 drivers/md/md-bitmap.c sectors < (mddev->bitmap->storage.bytes + 511) >> 9)
storage 210 drivers/md/md-bitmap.h } storage;
storage 1191 drivers/md/md-cluster.c sb = kmap_atomic(bitmap->storage.sb_page);
storage 1222 drivers/md/md-cluster.c sb = kmap_atomic(bitmap->storage.sb_page);
storage 2133 drivers/md/md.c bitmap->storage.file_pages * (PAGE_SIZE>>9) > new_offset)
storage 7185 drivers/md/md.c if (mddev->bitmap->storage.file) {
storage 610 drivers/net/ethernet/apm/xgene-v2/main.c struct rtnl_link_stats64 *storage)
storage 615 drivers/net/ethernet/apm/xgene-v2/main.c storage->tx_packets += stats->tx_packets;
storage 616 drivers/net/ethernet/apm/xgene-v2/main.c storage->tx_bytes += stats->tx_bytes;
storage 618 drivers/net/ethernet/apm/xgene-v2/main.c storage->rx_packets += stats->rx_packets;
storage 619 drivers/net/ethernet/apm/xgene-v2/main.c storage->rx_bytes += stats->rx_bytes;
storage 620 drivers/net/ethernet/apm/xgene-v2/main.c storage->rx_errors += stats->rx_errors;
storage 1441 drivers/net/ethernet/calxeda/xgmac.c struct rtnl_link_stats64 *storage)
storage 1450 drivers/net/ethernet/calxeda/xgmac.c storage->rx_bytes = readl(base + XGMAC_MMC_RXOCTET_G_LO);
storage 1451 drivers/net/ethernet/calxeda/xgmac.c storage->rx_bytes |= (u64)(readl(base + XGMAC_MMC_RXOCTET_G_HI)) << 32;
storage 1453 drivers/net/ethernet/calxeda/xgmac.c storage->rx_packets = readl(base + XGMAC_MMC_RXFRAME_GB_LO);
storage 1454 drivers/net/ethernet/calxeda/xgmac.c storage->multicast = readl(base + XGMAC_MMC_RXMCFRAME_G);
storage 1455 drivers/net/ethernet/calxeda/xgmac.c storage->rx_crc_errors = readl(base + XGMAC_MMC_RXCRCERR);
storage 1456 drivers/net/ethernet/calxeda/xgmac.c storage->rx_length_errors = readl(base + XGMAC_MMC_RXLENGTHERR);
storage 1457 drivers/net/ethernet/calxeda/xgmac.c storage->rx_missed_errors = readl(base + XGMAC_MMC_RXOVERFLOW);
storage 1459 drivers/net/ethernet/calxeda/xgmac.c storage->tx_bytes = readl(base + XGMAC_MMC_TXOCTET_G_LO);
storage 1460 drivers/net/ethernet/calxeda/xgmac.c storage->tx_bytes |= (u64)(readl(base + XGMAC_MMC_TXOCTET_G_HI)) << 32;
storage 1463 drivers/net/ethernet/calxeda/xgmac.c storage->tx_errors = count - readl(base + XGMAC_MMC_TXFRAME_G_LO);
storage 1464 drivers/net/ethernet/calxeda/xgmac.c storage->tx_packets = count;
storage 1465 drivers/net/ethernet/calxeda/xgmac.c storage->tx_fifo_errors = readl(base + XGMAC_MMC_TXUNDERFLOW);
storage 713 drivers/net/ethernet/mediatek/mtk_eth_soc.c struct rtnl_link_stats64 *storage)
storage 728 drivers/net/ethernet/mediatek/mtk_eth_soc.c storage->rx_packets = hw_stats->rx_packets;
storage 729 drivers/net/ethernet/mediatek/mtk_eth_soc.c storage->tx_packets = hw_stats->tx_packets;
storage 730 drivers/net/ethernet/mediatek/mtk_eth_soc.c storage->rx_bytes = hw_stats->rx_bytes;
storage 731 drivers/net/ethernet/mediatek/mtk_eth_soc.c storage->tx_bytes = hw_stats->tx_bytes;
storage 732 drivers/net/ethernet/mediatek/mtk_eth_soc.c storage->collisions = hw_stats->tx_collisions;
storage 733 drivers/net/ethernet/mediatek/mtk_eth_soc.c storage->rx_length_errors = hw_stats->rx_short_errors +
storage 735 drivers/net/ethernet/mediatek/mtk_eth_soc.c storage->rx_over_errors = hw_stats->rx_overflow;
storage 736 drivers/net/ethernet/mediatek/mtk_eth_soc.c storage->rx_crc_errors = hw_stats->rx_fcs_errors;
storage 737 drivers/net/ethernet/mediatek/mtk_eth_soc.c storage->rx_errors = hw_stats->rx_checksum_errors;
storage 738 drivers/net/ethernet/mediatek/mtk_eth_soc.c storage->tx_aborted_errors = hw_stats->tx_skip;
storage 741 drivers/net/ethernet/mediatek/mtk_eth_soc.c storage->tx_errors = dev->stats.tx_errors;
storage 742 drivers/net/ethernet/mediatek/mtk_eth_soc.c storage->rx_dropped = dev->stats.rx_dropped;
storage 743 drivers/net/ethernet/mediatek/mtk_eth_soc.c storage->tx_dropped = dev->stats.tx_dropped;
storage 359 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c __mlxsw_item_set32(values->storage.key, storage_item, 0, key_value);
storage 360 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c __mlxsw_item_set32(values->storage.mask, storage_item, 0, mask_value);
storage 379 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c __mlxsw_item_memcpy_to(values->storage.key, key_value,
storage 381 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c __mlxsw_item_memcpy_to(values->storage.mask, mask_value,
storage 389 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c char *storage, char *output, int diff)
storage 393 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c value = __mlxsw_item_get32(storage, storage_item, 0);
storage 399 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c char *storage, char *output)
storage 401 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c char *storage_data = __mlxsw_item_data(storage, storage_item, 0);
storage 410 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c char *output, char *storage, int u32_diff)
storage 417 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c storage, output, u32_diff);
storage 420 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c storage, output);
storage 450 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c values->storage.key,
storage 453 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.c values->storage.mask, 0);
storage 237 drivers/net/ethernet/mellanox/mlxsw/core_acl_flex_keys.h } storage;
storage 267 drivers/net/ethernet/netronome/nfp/flower/lag_conf.c struct nfp_fl_lag_group *entry, *storage;
storage 278 drivers/net/ethernet/netronome/nfp/flower/lag_conf.c list_for_each_entry_safe(entry, storage, &lag->group_list, list) {
storage 678 drivers/net/ethernet/netronome/nfp/flower/lag_conf.c struct nfp_fl_lag_group *entry, *storage;
storage 686 drivers/net/ethernet/netronome/nfp/flower/lag_conf.c list_for_each_entry_safe(entry, storage, &lag->group_list, list) {
storage 231 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c struct list_head *ptr, *storage;
storage 234 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c list_for_each_safe(ptr, storage, &priv->tun.neigh_off_list) {
storage 249 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c struct list_head *ptr, *storage;
storage 252 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c list_for_each_safe(ptr, storage, &priv->tun.neigh_off_list) {
storage 275 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c struct list_head *ptr, *storage;
storage 278 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c list_for_each_safe(ptr, storage, &priv->tun.neigh_off_list) {
storage 429 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c struct list_head *ptr, *storage;
storage 435 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
storage 456 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c struct list_head *ptr, *storage;
storage 459 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
storage 486 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c struct list_head *ptr, *storage;
storage 489 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
storage 1040 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c struct list_head *ptr, *storage;
storage 1047 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c list_for_each_safe(ptr, storage, &priv->tun.ipv4_off_list) {
storage 1054 drivers/net/ethernet/netronome/nfp/flower/tunnel_conf.c list_for_each_safe(ptr, storage, &priv->tun.neigh_off_list) {
storage 1734 drivers/net/ethernet/nvidia/forcedeth.c struct rtnl_link_stats64 *storage)
storage 1749 drivers/net/ethernet/nvidia/forcedeth.c storage->rx_packets += rx_packets;
storage 1750 drivers/net/ethernet/nvidia/forcedeth.c storage->rx_bytes += rx_bytes;
storage 1751 drivers/net/ethernet/nvidia/forcedeth.c storage->rx_dropped += rx_dropped;
storage 1752 drivers/net/ethernet/nvidia/forcedeth.c storage->rx_missed_errors += rx_missed_errors;
storage 1761 drivers/net/ethernet/nvidia/forcedeth.c storage->tx_packets += tx_packets;
storage 1762 drivers/net/ethernet/nvidia/forcedeth.c storage->tx_bytes += tx_bytes;
storage 1763 drivers/net/ethernet/nvidia/forcedeth.c storage->tx_dropped += tx_dropped;
storage 1773 drivers/net/ethernet/nvidia/forcedeth.c nv_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage)
storage 1792 drivers/net/ethernet/nvidia/forcedeth.c nv_get_stats(cpu, np, storage);
storage 1801 drivers/net/ethernet/nvidia/forcedeth.c storage->rx_errors = np->estats.rx_errors_total;
storage 1802 drivers/net/ethernet/nvidia/forcedeth.c storage->tx_errors = np->estats.tx_errors_total;
storage 1805 drivers/net/ethernet/nvidia/forcedeth.c storage->multicast = np->estats.rx_multicast;
storage 1808 drivers/net/ethernet/nvidia/forcedeth.c storage->rx_length_errors = np->estats.rx_length_error;
storage 1809 drivers/net/ethernet/nvidia/forcedeth.c storage->rx_over_errors = np->estats.rx_over_errors;
storage 1810 drivers/net/ethernet/nvidia/forcedeth.c storage->rx_crc_errors = np->estats.rx_crc_errors;
storage 1811 drivers/net/ethernet/nvidia/forcedeth.c storage->rx_frame_errors = np->estats.rx_frame_align_error;
storage 1812 drivers/net/ethernet/nvidia/forcedeth.c storage->rx_fifo_errors = np->estats.rx_drop_frame;
storage 1815 drivers/net/ethernet/nvidia/forcedeth.c storage->tx_carrier_errors = np->estats.tx_carrier_errors;
storage 1816 drivers/net/ethernet/nvidia/forcedeth.c storage->tx_fifo_errors = np->estats.tx_fifo_errors;
storage 571 drivers/soc/fsl/dpio/qbman-portal.c struct dpaa2_dq *storage,
storage 576 drivers/soc/fsl/dpio/qbman-portal.c d->rsp_addr_virt = (u64)(uintptr_t)storage;
storage 578 drivers/soc/fsl/dpio/qbman-portal.c if (!storage) {
storage 664 drivers/soc/fsl/dpio/qbman-portal.c s->vdq.storage = (void *)(uintptr_t)d->rsp_addr_virt;
storage 832 drivers/soc/fsl/dpio/qbman-portal.c if (s->vdq.storage == dq) {
storage 833 drivers/soc/fsl/dpio/qbman-portal.c s->vdq.storage = NULL;
storage 125 drivers/soc/fsl/dpio/qbman-portal.h struct dpaa2_dq *storage; /* NULL if DQRR */
storage 151 drivers/soc/fsl/dpio/qbman-portal.h struct dpaa2_dq *storage,
storage 23 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c queue->storage = kcalloc(size, sizeof(struct vchiq_header *),
storage 25 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c if (!queue->storage) {
storage 34 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c kfree(queue->storage);
storage 52 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c queue->storage[queue->write & (queue->size - 1)] = header;
storage 67 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c return queue->storage[queue->read & (queue->size - 1)];
storage 79 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.c header = queue->storage[queue->read & (queue->size - 1)];
storage 36 drivers/staging/vc04_services/interface/vchiq_arm/vchiq_util.h struct vchiq_header **storage;
storage 207 drivers/usb/gadget/function/f_fs.c char storage[];
storage 928 drivers/usb/gadget/function/f_fs.c buf->data = buf->storage;
storage 929 drivers/usb/gadget/function/f_fs.c memcpy(buf->storage, data + ret, data_len);
storage 464 fs/romfs/super.c const char *storage;
storage 519 fs/romfs/super.c storage = sb->s_mtd ? "MTD" : "the block layer";
storage 524 fs/romfs/super.c (unsigned) len, (unsigned) len, rsb->name, storage);
storage 57 include/linux/bpf-cgroup.h struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE];
storage 145 include/linux/bpf-cgroup.h *storage[MAX_BPF_CGROUP_STORAGE_TYPE])
storage 150 include/linux/bpf-cgroup.h this_cpu_write(bpf_cgroup_storage[stype], storage[stype]);
storage 155 include/linux/bpf-cgroup.h void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage);
storage 156 include/linux/bpf-cgroup.h void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
storage 159 include/linux/bpf-cgroup.h void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
storage 362 include/linux/bpf-cgroup.h struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) {}
storage 370 include/linux/bpf-cgroup.h struct bpf_cgroup_storage *storage) {}
storage 1279 include/linux/netdevice.h struct rtnl_link_stats64 *storage);
storage 4237 include/linux/netdevice.h struct rtnl_link_stats64 *storage);
storage 4308 include/net/cfg80211.h unsigned long *storage);
storage 54 kernel/bpf/cgroup.c bpf_cgroup_storage_unlink(pl->storage[stype]);
storage 55 kernel/bpf/cgroup.c bpf_cgroup_storage_free(pl->storage[stype]);
storage 173 kernel/bpf/cgroup.c pl->storage[stype];
storage 306 kernel/bpf/cgroup.c struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = {};
storage 331 kernel/bpf/cgroup.c storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
storage 332 kernel/bpf/cgroup.c if (IS_ERR(storage[stype])) {
storage 333 kernel/bpf/cgroup.c storage[stype] = NULL;
storage 335 kernel/bpf/cgroup.c bpf_cgroup_storage_free(storage[stype]);
storage 345 kernel/bpf/cgroup.c bpf_cgroup_storage_free(storage[stype]);
storage 353 kernel/bpf/cgroup.c bpf_cgroup_storage_free(storage[stype]);
storage 360 kernel/bpf/cgroup.c pl->storage[stype] = storage[stype];
storage 367 kernel/bpf/cgroup.c bpf_cgroup_storage_free(storage[stype]);
storage 376 kernel/bpf/cgroup.c old_storage[stype] = pl->storage[stype];
storage 383 kernel/bpf/cgroup.c pl->storage[stype] = storage[stype];
storage 403 kernel/bpf/cgroup.c bpf_cgroup_storage_link(storage[stype], cgrp, type);
storage 410 kernel/bpf/cgroup.c bpf_cgroup_storage_free(pl->storage[stype]);
storage 411 kernel/bpf/cgroup.c pl->storage[stype] = old_storage[stype];
storage 482 kernel/bpf/cgroup.c bpf_cgroup_storage_unlink(pl->storage[stype]);
storage 483 kernel/bpf/cgroup.c bpf_cgroup_storage_free(pl->storage[stype]);
storage 340 kernel/bpf/helpers.c struct bpf_cgroup_storage *storage;
storage 343 kernel/bpf/helpers.c storage = this_cpu_read(bpf_cgroup_storage[stype]);
storage 346 kernel/bpf/helpers.c ptr = &READ_ONCE(storage->buf)->data[0];
storage 348 kernel/bpf/helpers.c ptr = this_cpu_ptr(storage->percpu_buf);
storage 60 kernel/bpf/local_storage.c struct bpf_cgroup_storage *storage;
storage 62 kernel/bpf/local_storage.c storage = container_of(node, struct bpf_cgroup_storage, node);
storage 64 kernel/bpf/local_storage.c switch (bpf_cgroup_storage_key_cmp(key, &storage->key)) {
storage 74 kernel/bpf/local_storage.c return storage;
storage 85 kernel/bpf/local_storage.c struct bpf_cgroup_storage *storage)
storage 96 kernel/bpf/local_storage.c switch (bpf_cgroup_storage_key_cmp(&storage->key, &this->key)) {
storage 108 kernel/bpf/local_storage.c rb_link_node(&storage->node, parent, new);
storage 109 kernel/bpf/local_storage.c rb_insert_color(&storage->node, root);
storage 118 kernel/bpf/local_storage.c struct bpf_cgroup_storage *storage;
storage 120 kernel/bpf/local_storage.c storage = cgroup_storage_lookup(map, key, false);
storage 121 kernel/bpf/local_storage.c if (!storage)
storage 124 kernel/bpf/local_storage.c return &READ_ONCE(storage->buf)->data[0];
storage 131 kernel/bpf/local_storage.c struct bpf_cgroup_storage *storage;
storage 144 kernel/bpf/local_storage.c storage = cgroup_storage_lookup((struct bpf_cgroup_storage_map *)map,
storage 146 kernel/bpf/local_storage.c if (!storage)
storage 150 kernel/bpf/local_storage.c copy_map_value_locked(map, storage->buf->data, value, false);
storage 164 kernel/bpf/local_storage.c new = xchg(&storage->buf, new);
storage 175 kernel/bpf/local_storage.c struct bpf_cgroup_storage *storage;
storage 180 kernel/bpf/local_storage.c storage = cgroup_storage_lookup(map, key, false);
storage 181 kernel/bpf/local_storage.c if (!storage) {
storage 193 kernel/bpf/local_storage.c per_cpu_ptr(storage->percpu_buf, cpu), size);
storage 205 kernel/bpf/local_storage.c struct bpf_cgroup_storage *storage;
storage 213 kernel/bpf/local_storage.c storage = cgroup_storage_lookup(map, key, false);
storage 214 kernel/bpf/local_storage.c if (!storage) {
storage 227 kernel/bpf/local_storage.c bpf_long_memcpy(per_cpu_ptr(storage->percpu_buf, cpu),
storage 241 kernel/bpf/local_storage.c struct bpf_cgroup_storage *storage;
storage 249 kernel/bpf/local_storage.c storage = cgroup_storage_lookup(map, key, true);
storage 250 kernel/bpf/local_storage.c if (!storage)
storage 253 kernel/bpf/local_storage.c storage = list_next_entry(storage, list);
storage 254 kernel/bpf/local_storage.c if (!storage)
storage 257 kernel/bpf/local_storage.c storage = list_first_entry(&map->list,
storage 262 kernel/bpf/local_storage.c next->attach_type = storage->key.attach_type;
storage 263 kernel/bpf/local_storage.c next->cgroup_inode_id = storage->key.cgroup_inode_id;
storage 381 kernel/bpf/local_storage.c struct bpf_cgroup_storage *storage;
storage 385 kernel/bpf/local_storage.c storage = cgroup_storage_lookup(map_to_storage(map), key, false);
storage 386 kernel/bpf/local_storage.c if (!storage) {
storage 396 kernel/bpf/local_storage.c &READ_ONCE(storage->buf)->data[0], m);
storage 403 kernel/bpf/local_storage.c per_cpu_ptr(storage->percpu_buf, cpu),
storage 480 kernel/bpf/local_storage.c struct bpf_cgroup_storage *storage;
storage 495 kernel/bpf/local_storage.c storage = kmalloc_node(sizeof(struct bpf_cgroup_storage),
storage 497 kernel/bpf/local_storage.c if (!storage)
storage 503 kernel/bpf/local_storage.c storage->buf = kmalloc_node(size, flags, map->numa_node);
storage 504 kernel/bpf/local_storage.c if (!storage->buf)
storage 506 kernel/bpf/local_storage.c check_and_init_map_lock(map, storage->buf->data);
storage 508 kernel/bpf/local_storage.c storage->percpu_buf = __alloc_percpu_gfp(size, 8, flags);
storage 509 kernel/bpf/local_storage.c if (!storage->percpu_buf)
storage 513 kernel/bpf/local_storage.c storage->map = (struct bpf_cgroup_storage_map *)map;
storage 515 kernel/bpf/local_storage.c return storage;
storage 519 kernel/bpf/local_storage.c kfree(storage);
storage 525 kernel/bpf/local_storage.c struct bpf_cgroup_storage *storage =
storage 528 kernel/bpf/local_storage.c kfree(storage->buf);
storage 529 kernel/bpf/local_storage.c kfree(storage);
storage 534 kernel/bpf/local_storage.c struct bpf_cgroup_storage *storage =
storage 537 kernel/bpf/local_storage.c free_percpu(storage->percpu_buf);
storage 538 kernel/bpf/local_storage.c kfree(storage);
storage 541 kernel/bpf/local_storage.c void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage)
storage 547 kernel/bpf/local_storage.c if (!storage)
storage 550 kernel/bpf/local_storage.c map = &storage->map->map;
storage 557 kernel/bpf/local_storage.c call_rcu(&storage->rcu, free_shared_cgroup_storage_rcu);
storage 559 kernel/bpf/local_storage.c call_rcu(&storage->rcu, free_percpu_cgroup_storage_rcu);
storage 562 kernel/bpf/local_storage.c void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
storage 568 kernel/bpf/local_storage.c if (!storage)
storage 571 kernel/bpf/local_storage.c storage->key.attach_type = type;
storage 572 kernel/bpf/local_storage.c storage->key.cgroup_inode_id = cgroup->kn->id.id;
storage 574 kernel/bpf/local_storage.c map = storage->map;
storage 577 kernel/bpf/local_storage.c WARN_ON(cgroup_storage_insert(map, storage));
storage 578 kernel/bpf/local_storage.c list_add(&storage->list, &map->list);
storage 582 kernel/bpf/local_storage.c void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage)
storage 587 kernel/bpf/local_storage.c if (!storage)
storage 590 kernel/bpf/local_storage.c map = storage->map;
storage 594 kernel/bpf/local_storage.c rb_erase(&storage->node, root);
storage 596 kernel/bpf/local_storage.c list_del(&storage->list);
storage 20 net/bpf/test_run.c struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE] = { NULL };
storage 27 net/bpf/test_run.c storage[stype] = bpf_cgroup_storage_alloc(prog, stype);
storage 28 net/bpf/test_run.c if (IS_ERR(storage[stype])) {
storage 29 net/bpf/test_run.c storage[stype] = NULL;
storage 31 net/bpf/test_run.c bpf_cgroup_storage_free(storage[stype]);
storage 43 net/bpf/test_run.c bpf_cgroup_storage_set(storage);
storage 71 net/bpf/test_run.c bpf_cgroup_storage_free(storage[stype]);
storage 9401 net/core/dev.c struct rtnl_link_stats64 *storage)
storage 9406 net/core/dev.c memset(storage, 0, sizeof(*storage));
storage 9407 net/core/dev.c ops->ndo_get_stats64(dev, storage);
storage 9409 net/core/dev.c netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
storage 9411 net/core/dev.c netdev_stats_to_stats64(storage, &dev->stats);
storage 9413 net/core/dev.c storage->rx_dropped += (unsigned long)atomic_long_read(&dev->rx_dropped);
storage 9414 net/core/dev.c storage->tx_dropped += (unsigned long)atomic_long_read(&dev->tx_dropped);
storage 9415 net/core/dev.c storage->rx_nohandler += (unsigned long)atomic_long_read(&dev->rx_nohandler);
storage 9416 net/core/dev.c return storage;
storage 160 tools/perf/util/srcline.c long storage;
storage 168 tools/perf/util/srcline.c storage = bfd_get_symtab_upper_bound(abfd);
storage 169 tools/perf/util/srcline.c if (storage == 0L) {
storage 170 tools/perf/util/srcline.c storage = bfd_get_dynamic_symtab_upper_bound(abfd);
storage 173 tools/perf/util/srcline.c if (storage < 0L)
storage 176 tools/perf/util/srcline.c syms = malloc(storage);
storage 55 tools/testing/selftests/bpf/progs/sockopt_inherit.c struct sockopt_inherit *storage;
storage 64 tools/testing/selftests/bpf/progs/sockopt_inherit.c storage = get_storage(ctx);
storage 65 tools/testing/selftests/bpf/progs/sockopt_inherit.c if (!storage)
storage 70 tools/testing/selftests/bpf/progs/sockopt_inherit.c optval[0] = storage->val;
storage 80 tools/testing/selftests/bpf/progs/sockopt_inherit.c struct sockopt_inherit *storage;
storage 89 tools/testing/selftests/bpf/progs/sockopt_inherit.c storage = get_storage(ctx);
storage 90 tools/testing/selftests/bpf/progs/sockopt_inherit.c if (!storage)
storage 93 tools/testing/selftests/bpf/progs/sockopt_inherit.c storage->val = optval[0];
storage 30 tools/testing/selftests/bpf/progs/sockopt_sk.c struct sockopt_sk *storage;
storage 61 tools/testing/selftests/bpf/progs/sockopt_sk.c storage = bpf_sk_storage_get(&socket_storage_map, ctx->sk, 0,
storage 63 tools/testing/selftests/bpf/progs/sockopt_sk.c if (!storage)
storage 72 tools/testing/selftests/bpf/progs/sockopt_sk.c optval[0] = storage->val;
storage 83 tools/testing/selftests/bpf/progs/sockopt_sk.c struct sockopt_sk *storage;
storage 122 tools/testing/selftests/bpf/progs/sockopt_sk.c storage = bpf_sk_storage_get(&socket_storage_map, ctx->sk, 0,
storage 124 tools/testing/selftests/bpf/progs/sockopt_sk.c if (!storage)
storage 127 tools/testing/selftests/bpf/progs/sockopt_sk.c storage->val = optval[0];
storage 27 tools/testing/selftests/bpf/progs/tcp_rtt.c struct tcp_rtt_storage *storage;
storage 36 tools/testing/selftests/bpf/progs/tcp_rtt.c storage = bpf_sk_storage_get(&socket_storage_map, sk, 0,
storage 38 tools/testing/selftests/bpf/progs/tcp_rtt.c if (!storage)
storage 53 tools/testing/selftests/bpf/progs/tcp_rtt.c storage->invoked++;
storage 55 tools/testing/selftests/bpf/progs/tcp_rtt.c storage->dsack_dups = tcp_sk->dsack_dups;
storage 56 tools/testing/selftests/bpf/progs/tcp_rtt.c storage->delivered = tcp_sk->delivered;
storage 57 tools/testing/selftests/bpf/progs/tcp_rtt.c storage->delivered_ce = tcp_sk->delivered_ce;
storage 58 tools/testing/selftests/bpf/progs/tcp_rtt.c storage->icsk_retransmits = tcp_sk->icsk_retransmits;