to_dev 68 drivers/dma/ioat/dma.c dev_err(to_dev(ioat_chan), "Err(%d): %s\n",
to_dev 165 drivers/dma/ioat/dma.c dev_dbg(to_dev(ioat_chan),
to_dev 201 drivers/dma/ioat/dma.c dev_err(to_dev(ioat_chan),
to_dev 206 drivers/dma/ioat/dma.c dev_dbg(to_dev(ioat_chan),
to_dev 244 drivers/dma/ioat/dma.c dev_dbg(to_dev(ioat_chan),
to_dev 305 drivers/dma/ioat/dma.c dev_dbg(to_dev(ioat_chan), "%s: cookie: %d\n", __func__, cookie);
to_dev 378 drivers/dma/ioat/dma.c descs->virt = dma_alloc_coherent(to_dev(ioat_chan),
to_dev 385 drivers/dma/ioat/dma.c dma_free_coherent(to_dev(ioat_chan), SZ_2M,
to_dev 406 drivers/dma/ioat/dma.c dma_free_coherent(to_dev(ioat_chan),
to_dev 458 drivers/dma/ioat/dma.c dev_dbg(to_dev(ioat_chan), "%s: num_descs: %d (%x:%x:%x)\n",
to_dev 466 drivers/dma/ioat/dma.c dev_dbg_ratelimited(to_dev(ioat_chan),
to_dev 523 drivers/dma/ioat/dma.c dev_dbg(to_dev(ioat_chan), "%s: phys_complete: %#llx\n", __func__,
to_dev 584 drivers/dma/ioat/dma.c dev_dbg(to_dev(ioat_chan), "%s: head: %#x tail: %#x issued: %#x\n",
to_dev 642 drivers/dma/ioat/dma.c dev_dbg(to_dev(ioat_chan), "%s: cancel completion timeout\n",
to_dev 779 drivers/dma/ioat/dma.c dev_dbg(to_dev(ioat_chan), "%s: error = %x:%x\n",
to_dev 821 drivers/dma/ioat/dma.c dev_err(to_dev(ioat_chan), "%s: fatal error (%x:%x)\n",
to_dev 823 drivers/dma/ioat/dma.c dev_err(to_dev(ioat_chan), "Errors handled:\n");
to_dev 825 drivers/dma/ioat/dma.c dev_err(to_dev(ioat_chan), "Errors not handled:\n");
to_dev 885 drivers/dma/ioat/dma.c dev_err(to_dev(ioat_chan), "%s: Channel halted (%x)\n",
to_dev 887 drivers/dma/ioat/dma.c dev_err(to_dev(ioat_chan), "Errors:\n");
to_dev 897 drivers/dma/ioat/dma.c dev_warn(to_dev(ioat_chan), "Reset channel...\n");
to_dev 899 drivers/dma/ioat/dma.c dev_warn(to_dev(ioat_chan), "Restart channel...\n");
to_dev 932 drivers/dma/ioat/dma.c dev_err(to_dev(ioat_chan), "CHANSTS: %#Lx CHANERR: %#x\n",
to_dev 934 drivers/dma/ioat/dma.c dev_err(to_dev(ioat_chan), "Errors:\n");
to_dev 937 drivers/dma/ioat/dma.c dev_dbg(to_dev(ioat_chan), "Active descriptors: %d\n",
to_dev 945 drivers/dma/ioat/dma.c dev_warn(to_dev(ioat_chan), "Resetting channel...\n");
to_dev 947 drivers/dma/ioat/dma.c dev_warn(to_dev(ioat_chan), "Restarting channel...\n");
to_dev 220 drivers/dma/ioat/dma.h struct device *dev = to_dev(ioat_chan);
to_dev 637 drivers/dma/ioat/init.c dev_dbg(to_dev(ioat_chan), "freeing %d idle descriptors\n", descs);
to_dev 644 drivers/dma/ioat/init.c dev_err(to_dev(ioat_chan), "Freeing %d in use descriptors!\n",
to_dev 654 drivers/dma/ioat/init.c dma_free_coherent(to_dev(ioat_chan), SZ_2M,
to_dev 758 drivers/dma/ioat/init.c dev_WARN(to_dev(ioat_chan),
to_dev 279 drivers/dma/ioat/prep.c struct device *dev = to_dev(ioat_chan);
to_dev 306 drivers/dma/ioat/prep.c struct device *dev = to_dev(ioat_chan);
to_dev 358 drivers/dma/ioat/prep.c dev_dbg(to_dev(ioat_chan), "%s\n", __func__);
to_dev 480 drivers/dma/ioat/prep.c dev_dbg(to_dev(ioat_chan), "%s\n", __func__);
to_dev 507 drivers/dma/ioat/prep.c dev_err(to_dev(ioat_chan),
to_dev 88 drivers/dma/ioat/sysfs.c dev_warn(to_dev(ioat_chan),
to_dev 354 drivers/gpu/drm/amd/amdkfd/kfd_crat.c struct kfd_topology_device *dev, *to_dev;
to_dev 405 drivers/gpu/drm/amd/amdkfd/kfd_crat.c to_dev = kfd_topology_device_by_proximity_domain(id_to);
to_dev 406 drivers/gpu/drm/amd/amdkfd/kfd_crat.c if (!to_dev)
to_dev 413 drivers/gpu/drm/amd/amdkfd/kfd_crat.c to_dev->io_link_count++;
to_dev 414 drivers/gpu/drm/amd/amdkfd/kfd_crat.c to_dev->node_props.io_links_count++;
to_dev 415 drivers/gpu/drm/amd/amdkfd/kfd_crat.c list_add_tail(&props2->list, &to_dev->io_link_props);
to_dev 175 drivers/media/i2c/mt9m032.c dev_dbg(to_dev(sensor), "MT9M032 line time: %u ns\n", ns);
to_dev 55 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c mlxsw_sp_span_entry_phys_parms(const struct net_device *to_dev,
to_dev 58 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c sparmsp->dest_port = netdev_priv(to_dev);
to_dev 294 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c mlxsw_sp_span_gretap4_route(const struct net_device *to_dev,
to_dev 297 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c struct ip_tunnel *tun = netdev_priv(to_dev);
to_dev 306 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c parms = mlxsw_sp_ipip_netdev_parms4(to_dev);
to_dev 331 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c mlxsw_sp_span_entry_gretap4_parms(const struct net_device *to_dev,
to_dev 334 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c struct ip_tunnel_parm tparm = mlxsw_sp_ipip_netdev_parms4(to_dev);
to_dev 342 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c if (!(to_dev->flags & IFF_UP) ||
to_dev 351 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c l3edev = mlxsw_sp_span_gretap4_route(to_dev, &saddr.addr4, &gw.addr4);
to_dev 399 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c mlxsw_sp_span_gretap6_route(const struct net_device *to_dev,
to_dev 403 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c struct ip6_tnl *t = netdev_priv(to_dev);
to_dev 432 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c mlxsw_sp_span_entry_gretap6_parms(const struct net_device *to_dev,
to_dev 435 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c struct __ip6_tnl_parm tparm = mlxsw_sp_ipip_netdev_parms6(to_dev);
to_dev 443 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c if (!(to_dev->flags & IFF_UP) ||
to_dev 452 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c l3edev = mlxsw_sp_span_gretap6_route(to_dev, &saddr.addr6, &gw.addr6);
to_dev 506 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c mlxsw_sp_span_entry_vlan_parms(const struct net_device *to_dev,
to_dev 512 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c if (!(to_dev->flags & IFF_UP))
to_dev 515 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c real_dev = mlxsw_sp_span_entry_vlan(to_dev, &vid);
to_dev 566 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c mlxsw_sp_span_entry_nop_parms(const struct net_device *to_dev,
to_dev 597 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c netdev_err(span_entry->to_dev, "Cannot mirror to %s, which belongs to a different mlxsw instance",
to_dev 601 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c netdev_err(span_entry->to_dev, "Failed to offload mirror to %s",
to_dev 619 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c const struct net_device *to_dev,
to_dev 638 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c span_entry->to_dev = to_dev;
to_dev 651 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c const struct net_device *to_dev)
to_dev 658 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c if (curr->ref_count && curr->to_dev == to_dev)
to_dev 687 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c const struct net_device *to_dev,
to_dev 693 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c span_entry = mlxsw_sp_span_entry_find_by_port(mlxsw_sp, to_dev);
to_dev 700 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c return mlxsw_sp_span_entry_create(mlxsw_sp, to_dev, ops, sparms);
to_dev 892 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c const struct net_device *to_dev)
to_dev 897 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c if (mlxsw_sp_span_entry_types[i]->can_handle(to_dev))
to_dev 904 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c const struct net_device *to_dev,
to_dev 914 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c ops = mlxsw_sp_span_entry_ops(mlxsw_sp, to_dev);
to_dev 916 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c netdev_err(to_dev, "Cannot mirror to %s", to_dev->name);
to_dev 920 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c err = ops->parms(to_dev, &sparms);
to_dev 924 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c span_entry = mlxsw_sp_span_entry_get(mlxsw_sp, to_dev, ops, sparms);
to_dev 972 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c err = curr->ops->parms(curr->to_dev, &sparms);
to_dev 42 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h const struct net_device *to_dev;
to_dev 51 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h bool (*can_handle)(const struct net_device *to_dev);
to_dev 52 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h int (*parms)(const struct net_device *to_dev,
to_dev 64 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h const struct net_device *to_dev,
to_dev 71 drivers/net/ethernet/mellanox/mlxsw/spectrum_span.h const struct net_device *to_dev);
to_dev 68 drivers/nvdimm/btt.c dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->infooff, 512),
to_dev 70 drivers/nvdimm/btt.c dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->info2off, 512),
to_dev 100 drivers/nvdimm/btt.c dev_err_ratelimited(to_dev(arena),
to_dev 141 drivers/nvdimm/btt.c dev_err_ratelimited(to_dev(arena),
to_dev 159 drivers/nvdimm/btt.c dev_err_ratelimited(to_dev(arena),
to_dev 340 drivers/nvdimm/btt.c dev_err(to_dev(arena),
to_dev 423 drivers/nvdimm/btt.c dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->mapoff, 512),
to_dev 429 drivers/nvdimm/btt.c dev_WARN_ONCE(to_dev(arena), size < 512,
to_dev 467 drivers/nvdimm/btt.c dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->logoff, 512),
to_dev 473 drivers/nvdimm/btt.c dev_WARN_ONCE(to_dev(arena), size < 512,
to_dev 568 drivers/nvdimm/btt.c dev_err_ratelimited(to_dev(arena),
to_dev 710 drivers/nvdimm/btt.c dev_err(to_dev(arena), "Found an unknown padding scheme\n");
to_dev 716 drivers/nvdimm/btt.c dev_dbg(to_dev(arena), "log_index_0 = %d\n", log_index[0]);
to_dev 717 drivers/nvdimm/btt.c dev_dbg(to_dev(arena), "log_index_1 = %d\n", log_index[1]);
to_dev 876 drivers/nvdimm/btt.c dev_info(to_dev(arena), "No existing arenas\n");
to_dev 879 drivers/nvdimm/btt.c dev_err(to_dev(arena),
to_dev 891 drivers/nvdimm/btt.c dev_err(to_dev(arena),
to_dev 1266 drivers/nvdimm/btt.c dev_warn_ratelimited(to_dev(arena),
to_dev 44 drivers/nvdimm/pmem.c return to_nd_region(to_dev(pmem)->parent);
to_dev 74 drivers/nvdimm/pmem.c struct device *dev = to_dev(pmem);
to_dev 265 include/trace/events/xdp.h const struct net_device *to_dev, int err),
to_dev 267 include/trace/events/xdp.h TP_ARGS(map, map_index, sent, drops, from_dev, to_dev, err),
to_dev 287 include/trace/events/xdp.h __entry->to_ifindex = to_dev->ifindex;
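
Most of the hits above come from drivers that define their own to_dev() accessor: a small helper or macro that maps a driver-private object (an ioatdma channel, a BTT arena, a pmem device, an mt9m032 sensor) to the struct device it hangs off, so the generic dev_err()/dev_dbg()/dev_warn() loggers attribute messages to the right device. The sketch below illustrates only that pattern; the foo_* names are hypothetical and the accessor body is an assumption about the usual shape of such helpers, not the actual definitions in drivers/dma/ioat/dma.h or drivers/nvdimm/btt.c.

#include <linux/device.h>
#include <linux/pci.h>

/* Hypothetical driver-private objects, standing in for e.g. an ioatdma
 * channel or a BTT arena. Only the backpointer to a real struct device
 * matters for the pattern.
 */
struct foo_device {
	struct pci_dev *pdev;
};

struct foo_chan {
	struct foo_device *foo_dma;	/* backpointer to the parent device */
	int id;
};

/* Accessor in the style of the to_dev() helpers indexed above: resolve the
 * private object to the struct device used for logging and DMA mapping.
 */
static inline struct device *to_dev(struct foo_chan *chan)
{
	return &chan->foo_dma->pdev->dev;
}

static void foo_report_halt(struct foo_chan *chan, u32 chanerr)
{
	/* Messages are attributed to the underlying PCI device. */
	dev_err(to_dev(chan), "channel %d halted (CHANERR: %#x)\n",
		chan->id, chanerr);
	dev_dbg(to_dev(chan), "%s: scheduling reset\n", __func__);
}

The remaining hits use to_dev as a plain identifier rather than an accessor: in drivers/net/ethernet/mellanox/mlxsw/spectrum_span.c and spectrum_span.h it is a const struct net_device * parameter and struct field naming the netdevice that traffic is mirrored to; in drivers/gpu/drm/amd/amdkfd/kfd_crat.c it is a local struct kfd_topology_device * for the destination node of an IO link; and the tracepoint in include/trace/events/xdp.h records the destination net_device by storing to_dev->ifindex.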