cls 43 arch/arm64/mm/dma-mapping.c int cls = cache_line_size_of_cpu();
cls 45 arch/arm64/mm/dma-mapping.c WARN_TAINT(!coherent && cls > ARCH_DMA_MINALIGN,
cls 49 arch/arm64/mm/dma-mapping.c ARCH_DMA_MINALIGN, cls);
cls 3692 arch/mips/include/asm/octeon/cvmx-mio-defs.h uint64_t cls:2;
cls 3694 arch/mips/include/asm/octeon/cvmx-mio-defs.h uint64_t cls:2;
cls 4107 arch/mips/include/asm/octeon/cvmx-mio-defs.h uint64_t cls:2;
cls 4109 arch/mips/include/asm/octeon/cvmx-mio-defs.h uint64_t cls:2;
cls 260 arch/mips/include/asm/octeon/cvmx-pci-defs.h uint32_t cls:8;
cls 262 arch/mips/include/asm/octeon/cvmx-pci-defs.h uint32_t cls:8;
cls 79 arch/mips/include/asm/txx9/tx3927.h endian_def_b4(unused0, ht, mlt, cls);
cls 234 drivers/acpi/acpica/nsxfname.c struct acpi_pnp_device_id *cls = NULL;
cls 317 drivers/acpi/acpica/nsxfname.c status = acpi_ut_execute_CLS(node, &cls);
cls 319 drivers/acpi/acpica/nsxfname.c info_size += cls->length;
cls 427 drivers/acpi/acpica/nsxfname.c if (cls) {
cls 429 drivers/acpi/acpica/nsxfname.c cls, next_id_string);
cls 453 drivers/acpi/acpica/nsxfname.c if (cls) {
cls 454 drivers/acpi/acpica/nsxfname.c ACPI_FREE(cls);
cls 344 drivers/acpi/acpica/utids.c struct acpi_pnp_device_id *cls;
cls 380 drivers/acpi/acpica/utids.c cls =
cls 383 drivers/acpi/acpica/utids.c if (!cls) {
cls 390 drivers/acpi/acpica/utids.c cls->string =
cls 391 drivers/acpi/acpica/utids.c ACPI_ADD_PTR(char, cls, sizeof(struct acpi_pnp_device_id));
cls 395 drivers/acpi/acpica/utids.c acpi_ex_pci_cls_to_string(cls->string, class_code);
cls 396 drivers/acpi/acpica/utids.c cls->length = length;
cls 397 drivers/acpi/acpica/utids.c *return_id = cls;
cls 709 drivers/acpi/bus.c if (!id->cls)
cls 719 drivers/acpi/bus.c sprintf(buf, "%02x", (id->cls >> byte_shift) & msk);
cls 745 drivers/acpi/bus.c for (id = acpi_ids; id->id[0] || id->cls; id++) {
cls 748 drivers/acpi/bus.c if (id->cls && __acpi_match_device_cls(id, hwid))
cls 653 drivers/ata/sata_sil.c u8 cls;
cls 658 drivers/ata/sata_sil.c cls = sil_get_device_cache_line(pdev);
cls 659 drivers/ata/sata_sil.c if (cls) {
cls 660 drivers/ata/sata_sil.c cls >>= 3;
cls 661 drivers/ata/sata_sil.c cls++; /* cls = (line_size/8)+1 */
cls 663 drivers/ata/sata_sil.c writew(cls << 8 | cls,
cls 334 drivers/ata/sata_vsc.c u8 cls;
cls 382 drivers/ata/sata_vsc.c pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cls);
cls 383 drivers/ata/sata_vsc.c if (cls == 0x00)
cls 87 drivers/base/class.c int class_create_file_ns(struct class *cls, const struct class_attribute *attr,
cls 92 drivers/base/class.c if (cls)
cls 93 drivers/base/class.c error = sysfs_create_file_ns(&cls->p->subsys.kobj,
cls 100 drivers/base/class.c void class_remove_file_ns(struct class *cls, const struct class_attribute *attr,
cls 103 drivers/base/class.c if (cls)
cls 104 drivers/base/class.c sysfs_remove_file_ns(&cls->p->subsys.kobj, &attr->attr, ns);
cls 107 drivers/base/class.c static struct class *class_get(struct class *cls)
cls 109 drivers/base/class.c if (cls)
cls 110 drivers/base/class.c kset_get(&cls->p->subsys);
cls 111 drivers/base/class.c return cls;
cls 114 drivers/base/class.c static void class_put(struct class *cls)
cls 116 drivers/base/class.c if (cls)
cls 117 drivers/base/class.c kset_put(&cls->p->subsys);
cls 140 drivers/base/class.c static int class_add_groups(struct class *cls,
cls 143 drivers/base/class.c return sysfs_create_groups(&cls->p->subsys.kobj, groups);
cls 146 drivers/base/class.c static void class_remove_groups(struct class *cls,
cls 149 drivers/base/class.c return sysfs_remove_groups(&cls->p->subsys.kobj, groups);
cls 152 drivers/base/class.c int __class_register(struct class *cls, struct lock_class_key *key)
cls 157 drivers/base/class.c pr_debug("device class '%s': registering\n", cls->name);
cls 166 drivers/base/class.c error = kobject_set_name(&cp->subsys.kobj, "%s", cls->name);
cls 173 drivers/base/class.c if (!cls->dev_kobj)
cls 174 drivers/base/class.c cls->dev_kobj = sysfs_dev_char_kobj;
cls 178 drivers/base/class.c if (!sysfs_deprecated || cls != &block_class)
cls 184 drivers/base/class.c cp->class = cls;
cls 185 drivers/base/class.c cls->p = cp;
cls 192 drivers/base/class.c error = class_add_groups(class_get(cls), cls->class_groups);
cls 193 drivers/base/class.c class_put(cls);
cls 198 drivers/base/class.c void class_unregister(struct class *cls)
cls 200 drivers/base/class.c pr_debug("device class '%s': unregistering\n", cls->name);
cls 201 drivers/base/class.c class_remove_groups(cls, cls->class_groups);
cls 202 drivers/base/class.c kset_unregister(&cls->p->subsys);
cls 205 drivers/base/class.c static void class_create_release(struct class *cls)
cls 207 drivers/base/class.c pr_debug("%s called for %s\n", __func__, cls->name);
cls 208 drivers/base/class.c kfree(cls);
cls 228 drivers/base/class.c struct class *cls;
cls 231 drivers/base/class.c cls = kzalloc(sizeof(*cls), GFP_KERNEL);
cls 232 drivers/base/class.c if (!cls) {
cls 237 drivers/base/class.c cls->name = name;
cls 238 drivers/base/class.c cls->owner = owner;
cls 239 drivers/base/class.c cls->class_release = class_create_release;
cls 241 drivers/base/class.c retval = __class_register(cls, key);
cls 245 drivers/base/class.c return cls;
cls 248 drivers/base/class.c kfree(cls);
cls 260 drivers/base/class.c void class_destroy(struct class *cls)
cls 262 drivers/base/class.c if ((cls == NULL) || (IS_ERR(cls)))
cls 265 drivers/base/class.c class_unregister(cls);
cls 498 drivers/base/class.c struct class_compat *cls;
cls 500 drivers/base/class.c cls = kmalloc(sizeof(struct class_compat), GFP_KERNEL);
cls 501 drivers/base/class.c if (!cls)
cls 503 drivers/base/class.c cls->kobj = kobject_create_and_add(name, &class_kset->kobj);
cls 504 drivers/base/class.c if (!cls->kobj) {
cls 505 drivers/base/class.c kfree(cls);
cls 508 drivers/base/class.c return cls;
cls 516 drivers/base/class.c void class_compat_unregister(struct class_compat *cls)
cls 518 drivers/base/class.c kobject_put(cls->kobj);
cls 519 drivers/base/class.c kfree(cls);
cls 530 drivers/base/class.c int class_compat_create_link(struct class_compat *cls, struct device *dev,
cls 535 drivers/base/class.c error = sysfs_create_link(cls->kobj, &dev->kobj, dev_name(dev));
cls 548 drivers/base/class.c sysfs_remove_link(cls->kobj, dev_name(dev));
cls 563 drivers/base/class.c void class_compat_remove_link(struct class_compat *cls, struct device *dev,
cls 568 drivers/base/class.c sysfs_remove_link(cls->kobj, dev_name(dev));
cls 348 drivers/block/pktcdvd.c static void class_pktcdvd_release(struct class *cls)
cls 350 drivers/block/pktcdvd.c kfree(cls);
cls 2107 drivers/block/rbd.c osd_data = osd_req_op_data(osd_req, 1, cls, request_data);
cls 694 drivers/hid/hid-multitouch.c struct mt_class *cls = &td->mtclass;
cls 727 drivers/hid/hid-multitouch.c set_abs(hi->input, code, field, cls->sn_move);
cls 752 drivers/hid/hid-multitouch.c set_abs(hi->input, code, field, cls->sn_move);
cls 768 drivers/hid/hid-multitouch.c if ((cls->name == MT_CLS_WIN_8 ||
cls 769 drivers/hid/hid-multitouch.c cls->name == MT_CLS_WIN_8_DUAL) &&
cls 795 drivers/hid/hid-multitouch.c cls->sn_width);
cls 801 drivers/hid/hid-multitouch.c cls->sn_height);
cls 816 drivers/hid/hid-multitouch.c cls->sn_pressure);
cls 837 drivers/hid/hid-multitouch.c cls->sn_move ?
cls 838 drivers/hid/hid-multitouch.c field->logical_maximum / cls->sn_move : 0, 0);
cls 1248 drivers/hid/hid-multitouch.c struct mt_class *cls = &td->mtclass;
cls 1259 drivers/hid/hid-multitouch.c if (cls->is_indirect)
cls 1419 drivers/hid/hid-multitouch.c struct mt_class *cls = &td->mtclass;
cls 1436 drivers/hid/hid-multitouch.c if (cls->quirks & MT_QUIRK_FORCE_GET_FEATURE) {
cls 1455 drivers/hid/hid-multitouch.c if (cls->maxcontacts) {
cls 1457 drivers/hid/hid-multitouch.c cls->maxcontacts);
cls 724 drivers/hwmon/asus_atk0110.c u8 cls;
cls 732 drivers/hwmon/asus_atk0110.c cls = (data->debugfs.id & 0xff000000) >> 24;
cls 733 drivers/hwmon/asus_atk0110.c ret = atk_ggrp(data, cls);
cls 155 drivers/isdn/mISDN/core.c static void mISDN_class_release(struct class *cls)
cls 473 drivers/media/platform/ti-vpe/vpdma_priv.h static inline u32 cfd_pkt_payload_len(bool direct, int cls, int dest,
cls 478 drivers/media/platform/ti-vpe/vpdma_priv.h (cls << CFD_CLASS_SHFT) |
cls 34 drivers/net/bonding/bond_sysfs.c static ssize_t bonding_show_bonds(struct class *cls,
cls 78 drivers/net/bonding/bond_sysfs.c static ssize_t bonding_store_bonds(struct class *cls,
cls 84 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c struct flow_cls_offload *cls,
cls 87 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
cls 227 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c struct flow_cls_offload *cls)
cls 229 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
cls 382 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c struct flow_cls_offload *cls,
cls 385 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
cls 548 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c struct flow_cls_offload *cls)
cls 550 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
cls 637 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c struct flow_cls_offload *cls)
cls 646 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c if (cxgb4_validate_flow_actions(dev, cls))
cls 649 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c if (cxgb4_validate_flow_match(dev, cls))
cls 660 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c cxgb4_process_flow_match(dev, cls, fs);
cls 661 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c cxgb4_process_flow_actions(dev, cls, fs);
cls 695 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c ch_flower->tc_flower_cookie = cls->cookie;
cls 713 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c struct flow_cls_offload *cls)
cls 719 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c ch_flower = ch_flower_lookup(adap, cls->cookie);
cls 787 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c struct flow_cls_offload *cls)
cls 796 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c ch_flower = ch_flower_lookup(adap, cls->cookie);
cls 813 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.c flow_stats_update(&cls->stats, bytes - ofld_stats->byte_count,
cls 112 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h struct flow_cls_offload *cls);
cls 114 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h struct flow_cls_offload *cls);
cls 116 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_flower.h struct flow_cls_offload *cls);
cls 45 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c struct tc_cls_u32_offload *cls,
cls 54 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c for (i = 0; i < cls->knode.sel->nkeys; i++) {
cls 55 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c off = cls->knode.sel->keys[i].off;
cls 56 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c val = cls->knode.sel->keys[i].val;
cls 57 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c mask = cls->knode.sel->keys[i].mask;
cls 61 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c if (!cls->knode.sel->keys[i].offmask)
cls 65 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c if (cls->knode.sel->keys[i].offmask)
cls 91 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c struct tc_cls_u32_offload *cls)
cls 98 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c exts = cls->knode.exts;
cls 148 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
cls 152 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c __be16 protocol = cls->common.protocol;
cls 168 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c filter_id = cls->knode.handle & 0xFFFFF;
cls 178 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c uhtid = TC_U32_USERHTID(cls->knode.handle);
cls 179 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c link_uhtid = TC_U32_USERHTID(cls->knode.link_handle);
cls 233 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c if (next[i].offoff != cls->knode.sel->offoff ||
cls 234 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c next[i].shift != cls->knode.sel->offshift ||
cls 235 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c next[i].mask != cls->knode.sel->offmask ||
cls 236 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c next[i].offset != cls->knode.sel->off)
cls 243 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c for (j = 0; j < cls->knode.sel->nkeys; j++) {
cls 244 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c off = cls->knode.sel->keys[j].off;
cls 245 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c val = cls->knode.sel->keys[j].val;
cls 246 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c mask = cls->knode.sel->keys[j].mask;
cls 264 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c ret = fill_match_fields(adapter, &fs, cls,
cls 271 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c link->link_handle = cls->knode.handle;
cls 290 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c ret = fill_match_fields(adapter, &fs, cls,
cls 296 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c ret = fill_match_fields(adapter, &fs, cls, start, false);
cls 303 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c ret = fill_action_fields(adapter, &fs, cls);
cls 340 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c int cxgb4_delete_knode(struct net_device *dev, struct tc_cls_u32_offload *cls)
cls 353 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c filter_id = cls->knode.handle & 0xFFFFF;
cls 363 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c handle = cls->knode.handle;
cls 364 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.c uhtid = TC_U32_USERHTID(cls->knode.handle);
cls 47 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.h int cxgb4_config_knode(struct net_device *dev, struct tc_cls_u32_offload *cls);
cls 48 drivers/net/ethernet/chelsio/cxgb4/cxgb4_tc_u32.h int cxgb4_delete_knode(struct net_device *dev, struct tc_cls_u32_offload *cls);
cls 80 drivers/net/ethernet/freescale/enetc/enetc_cbdr.c cbd.cls = 1;
cls 97 drivers/net/ethernet/freescale/enetc/enetc_cbdr.c cbd.cls = 1;
cls 124 drivers/net/ethernet/freescale/enetc/enetc_cbdr.c cbd.cls = 4;
cls 181 drivers/net/ethernet/freescale/enetc/enetc_cbdr.c cbd.cls = 3;
cls 454 drivers/net/ethernet/freescale/enetc/enetc_hw.h u8 cls;
cls 9165 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c struct tc_cls_u32_offload *cls)
cls 9167 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c u32 hdl = cls->knode.handle;
cls 9168 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c u32 uhtid = TC_U32_USERHTID(cls->knode.handle);
cls 9169 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c u32 loc = cls->knode.handle & 0xfffff;
cls 9222 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c struct tc_cls_u32_offload *cls)
cls 9224 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c u32 uhtid = TC_U32_USERHTID(cls->hnode.handle);
cls 9232 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c if (cls->hnode.divisor > 0)
cls 9240 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c struct tc_cls_u32_offload *cls)
cls 9242 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c u32 uhtid = TC_U32_USERHTID(cls->hnode.handle);
cls 9354 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c struct tc_cls_u32_offload *cls,
cls 9362 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c for (i = 0; i < cls->knode.sel->nkeys; i++) {
cls 9363 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c off = cls->knode.sel->keys[i].off;
cls 9364 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c val = cls->knode.sel->keys[i].val;
cls 9365 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c m = cls->knode.sel->keys[i].mask;
cls 9378 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c if (nexthdr->off == cls->knode.sel->keys[i].off &&
cls 9380 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c (__force u32)cls->knode.sel->keys[i].val &&
cls 9382 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c (__force u32)cls->knode.sel->keys[i].mask)
cls 9405 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c struct tc_cls_u32_offload *cls)
cls 9407 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c __be16 protocol = cls->common.protocol;
cls 9408 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c u32 loc = cls->knode.handle & 0xfffff;
cls 9418 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c uhtid = TC_U32_USERHTID(cls->knode.handle);
cls 9419 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c link_uhtid = TC_U32_USERHTID(cls->knode.link_handle);
cls 9484 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c if (nexthdr[i].o != cls->knode.sel->offoff ||
cls 9485 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c nexthdr[i].s != cls->knode.sel->offshift ||
cls 9487 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c (__force u32)cls->knode.sel->offmask)
cls 9505 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c jump->link_hdl = cls->knode.handle;
cls 9507 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c err = ixgbe_clsu32_build_input(input, mask, cls,
cls 9553 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c err = ixgbe_clsu32_build_input(input, mask, cls, field_ptr, NULL);
cls 9557 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c err = parse_tc_actions(adapter, cls->knode.exts, &input->action,
cls 1375 drivers/net/ethernet/mellanox/mlxsw/spectrum.c struct tc_cls_matchall_offload *cls,
cls 521 drivers/net/ethernet/stmicro/stmmac/hwif.h struct tc_cls_u32_offload *cls);
cls 525 drivers/net/ethernet/stmicro/stmmac/hwif.h struct flow_cls_offload *cls);
cls 1299 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c struct flow_cls_offload *cls;
cls 1323 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c cls = kzalloc(sizeof(*cls), GFP_KERNEL);
cls 1324 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c if (!cls) {
cls 1329 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c cls->common.chain_index = 0;
cls 1330 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c cls->command = FLOW_CLS_REPLACE;
cls 1331 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c cls->cookie = dummy_cookie;
cls 1348 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c cls->rule = rule;
cls 1362 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c ret = stmmac_tc_setup_cls(priv, priv, cls);
cls 1370 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c cls->command = FLOW_CLS_DESTROY;
cls 1371 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c stmmac_tc_setup_cls(priv, priv, cls);
cls 1375 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c kfree(cls);
cls 1424 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c struct flow_cls_offload *cls;
cls 1450 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c cls = kzalloc(sizeof(*cls), GFP_KERNEL);
cls 1451 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c if (!cls) {
cls 1456 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c cls->common.chain_index = 0;
cls 1457 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c cls->command = FLOW_CLS_REPLACE;
cls 1458 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c cls->cookie = dummy_cookie;
cls 1476 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c cls->rule = rule;
cls 1492 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c ret = stmmac_tc_setup_cls(priv, priv, cls);
cls 1500 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c cls->command = FLOW_CLS_DESTROY;
cls 1501 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c stmmac_tc_setup_cls(priv, priv, cls);
cls 1505 drivers/net/ethernet/stmicro/stmmac/stmmac_selftests.c kfree(cls);
cls 29 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c struct tc_cls_u32_offload *cls,
cls 33 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c u32 loc = cls->knode.handle;
cls 59 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c struct tc_cls_u32_offload *cls)
cls 66 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c exts = cls->knode.exts;
cls 92 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c struct tc_cls_u32_offload *cls)
cls 95 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c struct tc_u32_sel *sel = cls->knode.sel;
cls 97 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c u32 prio = cls->common.prio << 16;
cls 108 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c switch (ntohs(cls->common.protocol)) {
cls 124 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c entry = tc_find_entry(priv, cls, true);
cls 129 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c frag = tc_find_entry(priv, cls, true);
cls 158 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c ret = tc_fill_actions(entry, frag, cls);
cls 172 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c struct tc_cls_u32_offload *cls)
cls 176 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c entry = tc_find_entry(priv, cls, false);
cls 189 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c struct tc_cls_u32_offload *cls)
cls 193 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c ret = tc_fill_entry(priv, cls);
cls 205 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c tc_unfill_entry(priv, cls);
cls 210 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c struct tc_cls_u32_offload *cls)
cls 215 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c tc_unfill_entry(priv, cls);
cls 226 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c struct tc_cls_u32_offload *cls)
cls 228 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c switch (cls->command) {
cls 230 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c tc_unfill_entry(priv, cls);
cls 233 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c return tc_config_knode(priv, cls);
cls 235 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c return tc_delete_knode(priv, cls);
cls 395 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c struct flow_cls_offload *cls,
cls 398 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
cls 412 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c struct flow_cls_offload *cls,
cls 415 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
cls 447 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c struct flow_cls_offload *cls,
cls 450 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
cls 496 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c struct flow_cls_offload *cls,
cls 504 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c if (entry->cookie == cls->cookie)
cls 514 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c int (*fn)(struct stmmac_priv *priv, struct flow_cls_offload *cls,
cls 523 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c struct flow_cls_offload *cls)
cls 525 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c struct stmmac_flow_entry *entry = tc_find_flow(priv, cls, false);
cls 526 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
cls 530 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c entry = tc_find_flow(priv, cls, true);
cls 540 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c ret = tc_flow_parsers[i].fn(priv, cls, entry);
cls 550 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c entry->cookie = cls->cookie;
cls 555 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c struct flow_cls_offload *cls)
cls 557 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c struct stmmac_flow_entry *entry = tc_find_flow(priv, cls, false);
cls 578 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c struct flow_cls_offload *cls)
cls 586 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c switch (cls->command) {
cls 588 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c ret = tc_add_flow(priv, cls);
cls 591 drivers/net/ethernet/stmicro/stmmac/stmmac_tc.c ret = tc_del_flow(priv, cls);
cls 157 drivers/pci/quirks.c u8 cls = 0;
cls 173 drivers/pci/quirks.c if (!cls)
cls 174 drivers/pci/quirks.c cls = tmp;
cls 175 drivers/pci/quirks.c if (!tmp || cls == tmp)
cls 179 drivers/pci/quirks.c cls << 2, tmp << 2,
cls 186 drivers/pci/quirks.c pr_info("PCI: CLS %u bytes, default %u\n", cls << 2,
cls 188 drivers/pci/quirks.c pci_cache_line_size = cls ? cls : pci_dfl_cache_line_size;
cls 1547 drivers/perf/xgene_pmu.c for (id = ids; id->id[0] || id->cls; id++) {
cls 73 drivers/staging/media/ipu3/ipu3-css-fw.c enum imgu_abi_param_class cls,
cls 82 drivers/staging/media/ipu3/ipu3-css-fw.c bi->info.isp.sp.mem_initializers.params[cls][mem].size)
cls 183 drivers/staging/media/ipu3/ipu3-css-fw.h enum imgu_abi_param_class cls,
cls 157 fs/cifs/asn1.c unsigned int *cls, unsigned int *con, unsigned int *tag)
cls 164 fs/cifs/asn1.c *cls = (ch & 0xC0) >> 6;
cls 214 fs/cifs/asn1.c unsigned int *cls, unsigned int *con, unsigned int *tag)
cls 219 fs/cifs/asn1.c if (!asn1_id_decode(ctx, cls, con, tag))
cls 490 fs/cifs/asn1.c unsigned int cls, con, tag, oidlen, rc;
cls 497 fs/cifs/asn1.c if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
cls 500 fs/cifs/asn1.c } else if ((cls != ASN1_APL) || (con != ASN1_CON)
cls 502 fs/cifs/asn1.c cifs_dbg(FYI, "cls = %d con = %d tag = %d\n", cls, con, tag);
cls 507 fs/cifs/asn1.c rc = asn1_header_decode(&ctx, &end, &cls, &con, &tag);
cls 510 fs/cifs/asn1.c (cls == ASN1_UNI)) {
cls 528 fs/cifs/asn1.c if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
cls 531 fs/cifs/asn1.c } else if ((cls != ASN1_CTX) || (con != ASN1_CON)
cls 534 fs/cifs/asn1.c cls, con, tag, end, *end);
cls 539 fs/cifs/asn1.c if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
cls 542 fs/cifs/asn1.c } else if ((cls != ASN1_UNI) || (con != ASN1_CON)
cls 545 fs/cifs/asn1.c cls, con, tag, end, *end);
cls 550 fs/cifs/asn1.c if (asn1_header_decode(&ctx, &end, &cls, &con, &tag) == 0) {
cls 553 fs/cifs/asn1.c } else if ((cls != ASN1_CTX) || (con != ASN1_CON)
cls 556 fs/cifs/asn1.c cls, con, tag, end, *end);
cls 562 fs/cifs/asn1.c (&ctx, &sequence_end, &cls, &con, &tag) == 0) {
cls 565 fs/cifs/asn1.c } else if ((cls != ASN1_UNI) || (con != ASN1_CON)
cls 568 fs/cifs/asn1.c cls, con, tag, end, *end);
cls 574 fs/cifs/asn1.c rc = asn1_header_decode(&ctx, &end, &cls, &con, &tag);
cls 82 include/linux/acpi.h #define ACPI_DEVICE_CLASS(_cls, _msk) .cls = (_cls), .cls_msk = (_msk),
cls 672 include/linux/acpi.h #define ACPI_DEVICE_CLASS(_cls, _msk) .cls = (0), .cls_msk = (0),
cls 118 include/linux/ceph/osd_client.h } cls;
cls 495 include/linux/ceph/rados.h } __attribute__ ((packed)) cls;
cls 620 include/linux/device.h void class_compat_unregister(struct class_compat *cls);
cls 621 include/linux/device.h int class_compat_create_link(struct class_compat *cls, struct device *dev,
cls 623 include/linux/device.h void class_compat_remove_link(struct class_compat *cls, struct device *dev,
cls 774 include/linux/device.h extern void class_destroy(struct class *cls);
cls 1572 include/linux/device.h struct device *device_create_vargs(struct class *cls, struct device *parent,
cls 1576 include/linux/device.h struct device *device_create(struct class *cls, struct device *parent,
cls 1580 include/linux/device.h struct device *device_create_with_groups(struct class *cls,
cls 1584 include/linux/device.h extern void device_destroy(struct class *cls, dev_t devt);
cls 213 include/linux/mod_devicetable.h __u32 cls;
cls 27 include/linux/transport_class.h #define DECLARE_TRANSPORT_CLASS(cls, nm, su, rm, cfg) \
cls 28 include/linux/transport_class.h struct transport_class cls = { \
cls 43 include/linux/transport_class.h #define DECLARE_ANON_TRANSPORT_CLASS(cls, mtch, cfg) \
cls 44 include/linux/transport_class.h struct anon_transport_class cls = { \
cls 273 net/ceph/osd_client.c osd_data = osd_req_op_data(osd_req, which, cls, request_info);
cls 283 net/ceph/osd_client.c osd_data = osd_req_op_data(osd_req, which, cls, request_data);
cls 285 net/ceph/osd_client.c osd_req->r_ops[which].cls.indata_len += pagelist->length;
cls 296 net/ceph/osd_client.c osd_data = osd_req_op_data(osd_req, which, cls, request_data);
cls 299 net/ceph/osd_client.c osd_req->r_ops[which].cls.indata_len += length;
cls 315 net/ceph/osd_client.c osd_data = osd_req_op_data(osd_req, which, cls, request_data);
cls 317 net/ceph/osd_client.c osd_req->r_ops[which].cls.indata_len += bytes;
cls 328 net/ceph/osd_client.c osd_data = osd_req_op_data(osd_req, which, cls, response_data);
cls 384 net/ceph/osd_client.c ceph_osd_data_release(&op->cls.request_info);
cls 385 net/ceph/osd_client.c ceph_osd_data_release(&op->cls.request_data);
cls 386 net/ceph/osd_client.c ceph_osd_data_release(&op->cls.response_data);
cls 852 net/ceph/osd_client.c op->cls.class_name = class;
cls 855 net/ceph/osd_client.c op->cls.class_len = size;
cls 861 net/ceph/osd_client.c op->cls.method_name = method;
cls 864 net/ceph/osd_client.c op->cls.method_len = size;
cls 999 net/ceph/osd_client.c dst->cls.class_len = src->cls.class_len;
cls 1000 net/ceph/osd_client.c dst->cls.method_len = src->cls.method_len;
cls 1001 net/ceph/osd_client.c dst->cls.indata_len = cpu_to_le32(src->cls.indata_len);
cls 1990 net/ceph/osd_client.c WARN_ON(op->indata_len != op->cls.class_len +
cls 1991 net/ceph/osd_client.c op->cls.method_len +
cls 1992 net/ceph/osd_client.c op->cls.indata_len);
cls 1994 net/ceph/osd_client.c &op->cls.request_info);
cls 1997 net/ceph/osd_client.c &op->cls.request_data);
cls 2000 net/ceph/osd_client.c &op->cls.response_data);
cls 862 net/dsa/slave.c struct tc_cls_matchall_offload *cls,
cls 868 net/dsa/slave.c __be16 protocol = cls->common.protocol;
cls 877 net/dsa/slave.c if (!flow_offload_has_one_action(&cls->rule->action))
cls 880 net/dsa/slave.c act = &cls->rule->action.entries[0];
cls 895 net/dsa/slave.c mall_tc_entry->cookie = cls->cookie;
cls 917 net/dsa/slave.c struct tc_cls_matchall_offload *cls)
cls 926 net/dsa/slave.c mall_tc_entry = dsa_slave_mall_tc_entry_find(dev, cls->cookie);
cls 944 net/dsa/slave.c struct tc_cls_matchall_offload *cls,
cls 947 net/dsa/slave.c if (cls->common.chain_index)
cls 950 net/dsa/slave.c switch (cls->command) {
cls 952 net/dsa/slave.c return dsa_slave_add_cls_matchall(dev, cls, ingress);
cls 954 net/dsa/slave.c dsa_slave_del_cls_matchall(dev, cls);
cls 67 scripts/mod/devicetable-offsets.c DEVID_FIELD(acpi_device_id, cls);
cls 545 scripts/mod/file2alias.c DEF_FIELD_ADDR(symval, acpi_device_id, cls);
cls 550 scripts/mod/file2alias.c else if (cls) {
cls 561 scripts/mod/file2alias.c (*cls >> byte_shift) & 0xFF);
cls 55 tools/testing/selftests/bpf/progs/test_spin_lock.c struct cls_elem *cls;
cls 93 tools/testing/selftests/bpf/progs/test_spin_lock.c cls = bpf_get_local_storage(&cls_map, 0);
cls 94 tools/testing/selftests/bpf/progs/test_spin_lock.c bpf_spin_lock(&cls->lock);
cls 95 tools/testing/selftests/bpf/progs/test_spin_lock.c cls->cnt++;
cls 96 tools/testing/selftests/bpf/progs/test_spin_lock.c bpf_spin_unlock(&cls->lock);
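
As a quick orientation to the largest cluster above (the driver-core implementation in drivers/base/class.c and the prototypes in include/linux/device.h), here is a minimal sketch of how a character-device driver of this kernel generation typically uses that struct class API. This example is not taken from the listing: the module, class, and device names ("example", "example_cls", "example0") are invented for illustration, and error handling is kept to the essentials.

/*
 * Minimal sketch: create a class, hang one device node off it, and tear
 * both down on module exit. class_create() ends up in __class_register()
 * (drivers/base/class.c above); class_destroy() goes through
 * class_unregister() and the class_create_release() callback.
 */
#include <linux/module.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>

static struct class *example_class;
static dev_t example_devt;

static int __init example_init(void)
{
	struct device *dev;
	int ret;

	/* Reserve one char device number; "example" is a made-up name. */
	ret = alloc_chrdev_region(&example_devt, 0, 1, "example");
	if (ret)
		return ret;

	/* Creates /sys/class/example_cls/ */
	example_class = class_create(THIS_MODULE, "example_cls");
	if (IS_ERR(example_class)) {
		ret = PTR_ERR(example_class);
		goto err_region;
	}

	/* Creates /sys/class/example_cls/example0 and the /dev node via udev. */
	dev = device_create(example_class, NULL, example_devt, NULL, "example0");
	if (IS_ERR(dev)) {
		ret = PTR_ERR(dev);
		goto err_class;
	}
	return 0;

err_class:
	class_destroy(example_class);
err_region:
	unregister_chrdev_region(example_devt, 1);
	return ret;
}

static void __exit example_exit(void)
{
	device_destroy(example_class, example_devt);
	class_destroy(example_class);
	unregister_chrdev_region(example_devt, 1);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

The same pairing (class_create()/class_destroy(), device_create()/device_destroy()) is what the pktcdvd, bonding, and mISDN entries in the listing are exercising through their own release callbacks and class attributes.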