route 37 arch/powerpc/platforms/512x/mpc5121_ads_cpld.c u8 route; route 184 arch/powerpc/platforms/512x/mpc5121_ads_cpld.c out_8(&cpld_regs->route, 0xfd); route 66 arch/powerpc/platforms/cell/spu_priv1_mmio.c u64 route; route 77 arch/powerpc/platforms/cell/spu_priv1_mmio.c route = target << 48 | target << 32 | target << 16; route 78 arch/powerpc/platforms/cell/spu_priv1_mmio.c out_be64(&spu->priv1->int_route_RW, route); route 40 drivers/gpu/drm/i2c/tda998x_drv.c const struct tda998x_audio_route *route; route 900 drivers/gpu/drm/i2c/tda998x_drv.c unsigned int route) route 902 drivers/gpu/drm/i2c/tda998x_drv.c s->route = &tda998x_audio_route[route]; route 903 drivers/gpu/drm/i2c/tda998x_drv.c s->ena_ap = priv->audio_port_enable[route]; route 1015 drivers/gpu/drm/i2c/tda998x_drv.c reg_write(priv, REG_ENA_ACLK, settings->route->ena_aclk); route 1016 drivers/gpu/drm/i2c/tda998x_drv.c reg_write(priv, REG_MUX_AP, settings->route->mux_ap); route 1018 drivers/gpu/drm/i2c/tda998x_drv.c reg_write(priv, REG_AIP_CLKSEL, settings->route->aip_clksel); route 1700 drivers/gpu/drm/i2c/tda998x_drv.c unsigned int route; route 1706 drivers/gpu/drm/i2c/tda998x_drv.c route = AUDIO_ROUTE_I2S; route 1709 drivers/gpu/drm/i2c/tda998x_drv.c route = AUDIO_ROUTE_SPDIF; route 1722 drivers/gpu/drm/i2c/tda998x_drv.c if (priv->audio_port_enable[route]) { route 1725 drivers/gpu/drm/i2c/tda998x_drv.c route == AUDIO_ROUTE_SPDIF ? "SPDIF" : "I2S"); route 1729 drivers/gpu/drm/i2c/tda998x_drv.c priv->audio_port_enable[route] = ena_ap; route 1751 drivers/gpu/drm/i2c/tda998x_drv.c unsigned int ratio, route; route 1754 drivers/gpu/drm/i2c/tda998x_drv.c route = AUDIO_ROUTE_I2S + spdif; route 1756 drivers/gpu/drm/i2c/tda998x_drv.c priv->audio.route = &tda998x_audio_route[route]; route 11 drivers/gpu/drm/nouveau/include/nvif/client.h u8 route; route 10 drivers/gpu/drm/nouveau/include/nvif/event.h __u8 route; route 18 drivers/gpu/drm/nouveau/include/nvif/event.h __u8 route; route 29 drivers/gpu/drm/nouveau/include/nvif/ioctl.h __u8 route; route 55 drivers/gpu/drm/nouveau/include/nvif/ioctl.h __u8 route; route 17 drivers/gpu/drm/nouveau/include/nvkm/core/object.h u8 route; route 24 drivers/gpu/drm/nouveau/include/nvkm/core/oclass.h u8 route; route 504 drivers/gpu/drm/nouveau/nouveau_abi16.c client->route = NVDRM_OBJECT_ABI16; route 507 drivers/gpu/drm/nouveau/nouveau_abi16.c client->route = NVDRM_OBJECT_NVIF; route 570 drivers/gpu/drm/nouveau/nouveau_abi16.c client->route = NVDRM_OBJECT_ABI16; route 576 drivers/gpu/drm/nouveau/nouveau_abi16.c client->route = NVDRM_OBJECT_NVIF; route 80 drivers/gpu/drm/nouveau/nouveau_nvif.c u8 route; route 83 drivers/gpu/drm/nouveau/nouveau_nvif.c route = args->v0.route; route 89 drivers/gpu/drm/nouveau/nouveau_nvif.c switch (route) { route 48 drivers/gpu/drm/nouveau/nouveau_usif.c u8 route; route 86 drivers/gpu/drm/nouveau/nouveau_usif.c BUG_ON(rep->v0.route != NVDRM_NOTIFY_USIF); route 101 drivers/gpu/drm/nouveau/nouveau_usif.c rep->route = ntfy->route; route 149 drivers/gpu/drm/nouveau/nouveau_usif.c ntfy->route = req->v0.route; route 151 drivers/gpu/drm/nouveau/nouveau_usif.c req->v0.route = NVDRM_NOTIFY_USIF; route 155 drivers/gpu/drm/nouveau/nouveau_usif.c req->v0.route = ntfy->route; route 252 drivers/gpu/drm/nouveau/nouveau_usif.c u8 route; route 279 drivers/gpu/drm/nouveau/nouveau_usif.c object->route = args->v0.route; route 281 drivers/gpu/drm/nouveau/nouveau_usif.c args->v0.route = NVDRM_OBJECT_USIF; route 285 drivers/gpu/drm/nouveau/nouveau_usif.c args->v0.route = object->route; route 327 
drivers/gpu/drm/nouveau/nouveau_usif.c if (argv->v0.route) { route 328 drivers/gpu/drm/nouveau/nouveau_usif.c if (ret = -EINVAL, argv->v0.route == 0xff) route 356 drivers/gpu/drm/nouveau/nouveau_usif.c if (argv->v0.route == NVDRM_OBJECT_USIF) { route 358 drivers/gpu/drm/nouveau/nouveau_usif.c argv->v0.route = object->route; route 365 drivers/gpu/drm/nouveau/nouveau_usif.c argv->v0.route = NVIF_IOCTL_V0_ROUTE_HIDDEN; route 81 drivers/gpu/drm/nouveau/nvif/client.c client->route = NVIF_IOCTL_V0_ROUTE_NVIF; route 121 drivers/gpu/drm/nouveau/nvif/notify.c if (WARN_ON(args->v0.route)) route 198 drivers/gpu/drm/nouveau/nvif/notify.c args->req.route = 0; route 287 drivers/gpu/drm/nouveau/nvif/object.c args->new.route = parent->client->route; route 55 drivers/gpu/drm/nouveau/nvkm/core/client.c client->object.route = oclass->route; route 155 drivers/gpu/drm/nouveau/nvkm/core/client.c req->v0.reply, req->v0.route, req->v0.token); route 159 drivers/gpu/drm/nouveau/nvkm/core/client.c notify->rep.v0.route = req->v0.route; route 98 drivers/gpu/drm/nouveau/nvkm/core/ioctl.c args->v0.route, args->v0.token, args->v0.object); route 110 drivers/gpu/drm/nouveau/nvkm/core/ioctl.c oclass.route = args->v0.route; route 402 drivers/gpu/drm/nouveau/nvkm/core/ioctl.c void *data, u32 size, u8 owner, u8 *route, u64 *token) route 413 drivers/gpu/drm/nouveau/nvkm/core/ioctl.c if (owner != NVIF_IOCTL_V0_OWNER_ANY && owner != object->route) { route 417 drivers/gpu/drm/nouveau/nvkm/core/ioctl.c *route = object->route; route 448 drivers/gpu/drm/nouveau/nvkm/core/ioctl.c &args->v0.route, &args->v0.token); route 302 drivers/gpu/drm/nouveau/nvkm/core/object.c object->route = oclass->route; route 55 drivers/gpu/drm/nouveau/nvkm/engine/disp/ior.h } route; route 40 drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c if (ior->func->route.set) route 41 drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c ior->func->route.set(outp, NULL); route 50 drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c if (ior->func->route.set) route 51 drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c ior->func->route.set(outp, ior); route 152 drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c (ior->func->route.set || ior->id == __ffs(outp->info.or))) route 161 drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c (ior->func->route.set || ior->id == __ffs(outp->info.or))) route 196 drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c if (ior->func->route.get) { route 197 drivers/gpu/drm/nouveau/nvkm/engine/disp/outp.c id = ior->func->route.get(outp, &link); route 93 drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgm200.c .route = { route 82 drivers/gpu/drm/nouveau/nvkm/engine/disp/sorgv100.c .route = { route 66 drivers/gpu/drm/nouveau/nvkm/engine/disp/sortu102.c .route = { route 741 drivers/gpu/drm/rcar-du/rcar_du_kms.c const struct rcar_du_output_routing *route = route 744 drivers/gpu/drm/rcar-du/rcar_du_kms.c encoder->possible_crtcs = route->possible_crtcs; route 495 drivers/infiniband/core/cma.c id_priv->id.route.addr.dev_addr.transport = route 538 drivers/infiniband/core/cma.c return (struct sockaddr *) &id_priv->id.route.addr.src_addr; route 543 drivers/infiniband/core/cma.c return (struct sockaddr *) &id_priv->id.route.addr.dst_addr; route 548 drivers/infiniband/core/cma.c return id_priv->id.route.addr.src_addr.ss_family; route 573 drivers/infiniband/core/cma.c ib_addr_get_mgid(&id_priv->id.route.addr.dev_addr, &rec.mgid); route 613 drivers/infiniband/core/cma.c struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; route 619 drivers/infiniband/core/cma.c if 
(!rdma_dev_access_netns(device, id_priv->id.route.addr.dev_addr.net)) route 645 drivers/infiniband/core/cma.c WARN_ON(id_priv->id.route.addr.dev_addr.sgid_attr); route 646 drivers/infiniband/core/cma.c id_priv->id.route.addr.dev_addr.sgid_attr = sgid_attr; route 660 drivers/infiniband/core/cma.c struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; route 672 drivers/infiniband/core/cma.c rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr, route 715 drivers/infiniband/core/cma.c struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; route 725 drivers/infiniband/core/cma.c rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr, route 752 drivers/infiniband/core/cma.c struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; route 865 drivers/infiniband/core/cma.c cma_translate_ib(addr, &id_priv->id.route.addr.dev_addr); route 904 drivers/infiniband/core/cma.c id_priv->id.route.addr.dev_addr.net = get_net(net); route 1078 drivers/infiniband/core/cma.c struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; route 1241 drivers/infiniband/core/cma.c listen_ib = (struct sockaddr_ib *) &listen_id->route.addr.src_addr; route 1600 drivers/infiniband/core/cma.c const struct rdma_addr *addr = &id->route.addr; route 1771 drivers/infiniband/core/cma.c rdma_addr_cancel(&id_priv->id.route.addr.dev_addr); route 1788 drivers/infiniband/core/cma.c struct net *net = id_priv->id.route.addr.dev_addr.net; route 1805 drivers/infiniband/core/cma.c struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; route 1871 drivers/infiniband/core/cma.c kfree(id_priv->id.route.path_rec); route 1873 drivers/infiniband/core/cma.c if (id_priv->id.route.addr.dev_addr.sgid_attr) route 1874 drivers/infiniband/core/cma.c rdma_put_gid_attr(id_priv->id.route.addr.dev_addr.sgid_attr); route 1876 drivers/infiniband/core/cma.c put_net(id_priv->id.route.addr.dev_addr.net); route 2011 drivers/infiniband/core/cma.c const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family; route 2018 drivers/infiniband/core/cma.c id = __rdma_create_id(listen_id->route.addr.dev_addr.net, route 2026 drivers/infiniband/core/cma.c if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr, route 2027 drivers/infiniband/core/cma.c (struct sockaddr *)&id->route.addr.dst_addr, route 2031 drivers/infiniband/core/cma.c rt = &id->route; route 2074 drivers/infiniband/core/cma.c const sa_family_t ss_family = listen_id->route.addr.src_addr.ss_family; route 2075 drivers/infiniband/core/cma.c struct net *net = listen_id->route.addr.dev_addr.net; route 2086 drivers/infiniband/core/cma.c if (cma_save_net_info((struct sockaddr *)&id->route.addr.src_addr, route 2087 drivers/infiniband/core/cma.c (struct sockaddr *)&id->route.addr.dst_addr, route 2093 drivers/infiniband/core/cma.c rdma_copy_src_l2_addr(&id->route.addr.dev_addr, net_dev); route 2097 drivers/infiniband/core/cma.c &id->route.addr.dev_addr); route 2241 drivers/infiniband/core/cma.c struct rdma_addr *addr = &cm_id->route.addr; route 2354 drivers/infiniband/core/cma.c new_cm_id = __rdma_create_id(listen_id->id.route.addr.dev_addr.net, route 2367 drivers/infiniband/core/cma.c ret = rdma_translate_ip(laddr, &conn_id->id.route.addr.dev_addr); route 2473 drivers/infiniband/core/cma.c struct net *net = id_priv->id.route.addr.dev_addr.net; route 2557 drivers/infiniband/core/cma.c struct rdma_route *route; route 2559 drivers/infiniband/core/cma.c route = &work->id->id.route; route 2562 drivers/infiniband/core/cma.c 
route->num_paths = 1; route 2563 drivers/infiniband/core/cma.c *route->path_rec = *path_rec; route 2579 drivers/infiniband/core/cma.c struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; route 2698 drivers/infiniband/core/cma.c struct rdma_route *route = &id_priv->id.route; route 2708 drivers/infiniband/core/cma.c route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL); route 2709 drivers/infiniband/core/cma.c if (!route->path_rec) { route 2720 drivers/infiniband/core/cma.c kfree(route->path_rec); route 2721 drivers/infiniband/core/cma.c route->path_rec = NULL; route 2749 drivers/infiniband/core/cma.c struct rdma_route *route = &id_priv->id.route; route 2751 drivers/infiniband/core/cma.c struct rdma_addr *addr = &route->addr; route 2771 drivers/infiniband/core/cma.c route->path_rec->rec_type = sa_conv_gid_to_pathrec_type(gid_type); route 2773 drivers/infiniband/core/cma.c route->path_rec->roce.route_resolved = true; route 2774 drivers/infiniband/core/cma.c sa_path_set_dmac(route->path_rec, addr->dev_addr.dst_dev_addr); route 2790 drivers/infiniband/core/cma.c id->route.path_rec = kmemdup(path_rec, sizeof(*path_rec), route 2792 drivers/infiniband/core/cma.c if (!id->route.path_rec) { route 2806 drivers/infiniband/core/cma.c id->route.num_paths = 1; route 2810 drivers/infiniband/core/cma.c kfree(id->route.path_rec); route 2811 drivers/infiniband/core/cma.c id->route.path_rec = NULL; route 2851 drivers/infiniband/core/cma.c struct rdma_route *route = &id_priv->id.route; route 2852 drivers/infiniband/core/cma.c struct rdma_addr *addr = &route->addr; route 2866 drivers/infiniband/core/cma.c route->path_rec = kzalloc(sizeof *route->path_rec, GFP_KERNEL); route 2867 drivers/infiniband/core/cma.c if (!route->path_rec) { route 2872 drivers/infiniband/core/cma.c route->num_paths = 1; route 2880 drivers/infiniband/core/cma.c rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr, route 2881 drivers/infiniband/core/cma.c &route->path_rec->sgid); route 2882 drivers/infiniband/core/cma.c rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.dst_addr, route 2883 drivers/infiniband/core/cma.c &route->path_rec->dgid); route 2885 drivers/infiniband/core/cma.c if (((struct sockaddr *)&id_priv->id.route.addr.dst_addr)->sa_family != AF_IB) route 2887 drivers/infiniband/core/cma.c route->path_rec->hop_limit = addr->dev_addr.hoplimit; route 2889 drivers/infiniband/core/cma.c route->path_rec->hop_limit = 1; route 2890 drivers/infiniband/core/cma.c route->path_rec->reversible = 1; route 2891 drivers/infiniband/core/cma.c route->path_rec->pkey = cpu_to_be16(0xffff); route 2892 drivers/infiniband/core/cma.c route->path_rec->mtu_selector = IB_SA_EQ; route 2893 drivers/infiniband/core/cma.c route->path_rec->sl = iboe_tos_to_sl(ndev, tos); route 2894 drivers/infiniband/core/cma.c route->path_rec->traffic_class = tos; route 2895 drivers/infiniband/core/cma.c route->path_rec->mtu = iboe_get_mtu(ndev->mtu); route 2896 drivers/infiniband/core/cma.c route->path_rec->rate_selector = IB_SA_EQ; route 2897 drivers/infiniband/core/cma.c route->path_rec->rate = iboe_get_rate(ndev); route 2899 drivers/infiniband/core/cma.c route->path_rec->packet_life_time_selector = IB_SA_EQ; route 2900 drivers/infiniband/core/cma.c route->path_rec->packet_life_time = CMA_IBOE_PACKET_LIFETIME; route 2901 drivers/infiniband/core/cma.c if (!route->path_rec->mtu) { route 2912 drivers/infiniband/core/cma.c kfree(route->path_rec); route 2913 drivers/infiniband/core/cma.c route->path_rec = NULL; route 2914 
drivers/infiniband/core/cma.c route->num_paths = 0; route 3011 drivers/infiniband/core/cma.c id_priv->id.route.addr.dev_addr.dev_type = route 3015 drivers/infiniband/core/cma.c rdma_addr_set_sgid(&id_priv->id.route.addr.dev_addr, &gid); route 3016 drivers/infiniband/core/cma.c ib_addr_set_pkey(&id_priv->id.route.addr.dev_addr, pkey); route 3092 drivers/infiniband/core/cma.c rdma_addr_get_sgid(&id_priv->id.route.addr.dev_addr, &gid); route 3093 drivers/infiniband/core/cma.c rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, &gid); route 3119 drivers/infiniband/core/cma.c rdma_addr_set_dgid(&id_priv->id.route.addr.dev_addr, (union ib_gid *) route 3120 drivers/infiniband/core/cma.c &(((struct sockaddr_ib *) &id_priv->id.route.addr.dst_addr)->sib_addr)); route 3135 drivers/infiniband/core/cma.c src_addr = (struct sockaddr *) &id->route.addr.src_addr; route 3143 drivers/infiniband/core/cma.c id->route.addr.dev_addr.bound_dev_if = dst_addr6->sin6_scope_id; route 3186 drivers/infiniband/core/cma.c &id->route.addr.dev_addr, route 3280 drivers/infiniband/core/cma.c ret = cma_ps_alloc(id_priv->id.route.addr.dev_addr.net, ps, bind_list, route 3339 drivers/infiniband/core/cma.c struct net *net = id_priv->id.route.addr.dev_addr.net; route 3422 drivers/infiniband/core/cma.c bind_list = cma_ps_find(id_priv->id.route.addr.dev_addr.net, ps, snum); route 3543 drivers/infiniband/core/cma.c id->route.addr.src_addr.ss_family = AF_INET; route 3597 drivers/infiniband/core/cma.c ret = cma_check_linklocal(&id->route.addr.dev_addr, addr); route 3603 drivers/infiniband/core/cma.c ret = cma_translate_addr(addr, &id->route.addr.dev_addr); route 3617 drivers/infiniband/core/cma.c struct net *net = id_priv->id.route.addr.dev_addr.net; route 3708 drivers/infiniband/core/cma.c id_priv->id.route.path_rec, route 3780 drivers/infiniband/core/cma.c req.path = id_priv->id.route.path_rec; route 3781 drivers/infiniband/core/cma.c req.sgid_attr = id_priv->id.route.addr.dev_addr.sgid_attr; route 3800 drivers/infiniband/core/cma.c struct rdma_route *route; route 3831 drivers/infiniband/core/cma.c route = &id_priv->id.route; route 3839 drivers/infiniband/core/cma.c req.primary_path = &route->path_rec[0]; route 3840 drivers/infiniband/core/cma.c if (route->num_paths == 2) route 3841 drivers/infiniband/core/cma.c req.alternate_path = &route->path_rec[1]; route 3843 drivers/infiniband/core/cma.c req.ppath_sgid_attr = id_priv->id.route.addr.dev_addr.sgid_attr; route 4181 drivers/infiniband/core/cma.c &id_priv->id.route.addr.dev_addr; route 4223 drivers/infiniband/core/cma.c struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; route 4253 drivers/infiniband/core/cma.c struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; route 4349 drivers/infiniband/core/cma.c struct rdma_dev_addr *dev_addr = &id_priv->id.route.addr.dev_addr; route 4407 drivers/infiniband/core/cma.c rdma_ip2gid((struct sockaddr *)&id_priv->id.route.addr.src_addr, route 4510 drivers/infiniband/core/cma.c dev_addr = &id_priv->id.route.addr.dev_addr; route 847 drivers/infiniband/core/mad.c ? 
opa_smp->route.dr.dr_dlid : opa_smp->route.dr.dr_slid) == route 856 drivers/infiniband/core/mad.c opa_drslid = be32_to_cpu(opa_smp->route.dr.dr_slid); route 515 drivers/infiniband/core/nldev.c if (cm_id->route.addr.src_addr.ss_family && route 517 drivers/infiniband/core/nldev.c sizeof(cm_id->route.addr.src_addr), route 518 drivers/infiniband/core/nldev.c &cm_id->route.addr.src_addr)) route 520 drivers/infiniband/core/nldev.c if (cm_id->route.addr.dst_addr.ss_family && route 522 drivers/infiniband/core/nldev.c sizeof(cm_id->route.addr.dst_addr), route 523 drivers/infiniband/core/nldev.c &cm_id->route.addr.dst_addr)) route 146 drivers/infiniband/core/smi.c smp->route.dr.initial_path, route 147 drivers/infiniband/core/smi.c smp->route.dr.return_path, route 149 drivers/infiniband/core/smi.c smp->route.dr.dr_dlid == route 151 drivers/infiniband/core/smi.c smp->route.dr.dr_slid == route 261 drivers/infiniband/core/smi.c smp->route.dr.initial_path, route 262 drivers/infiniband/core/smi.c smp->route.dr.return_path, route 264 drivers/infiniband/core/smi.c smp->route.dr.dr_dlid == route 266 drivers/infiniband/core/smi.c smp->route.dr.dr_slid == route 314 drivers/infiniband/core/smi.c smp->route.dr.dr_dlid == route 316 drivers/infiniband/core/smi.c smp->route.dr.dr_slid == route 336 drivers/infiniband/core/smi.c return !opa_get_smp_direction(smp) ? smp->route.dr.initial_path[smp->hop_ptr+1] : route 337 drivers/infiniband/core/smi.c smp->route.dr.return_path[smp->hop_ptr-1]; route 775 drivers/infiniband/core/ucma.c struct rdma_route *route) route 779 drivers/infiniband/core/ucma.c resp->num_paths = route->num_paths; route 780 drivers/infiniband/core/ucma.c switch (route->num_paths) { route 782 drivers/infiniband/core/ucma.c dev_addr = &route->addr.dev_addr; route 791 drivers/infiniband/core/ucma.c &route->path_rec[1]); route 795 drivers/infiniband/core/ucma.c &route->path_rec[0]); route 803 drivers/infiniband/core/ucma.c struct rdma_route *route) route 806 drivers/infiniband/core/ucma.c resp->num_paths = route->num_paths; route 807 drivers/infiniband/core/ucma.c switch (route->num_paths) { route 809 drivers/infiniband/core/ucma.c rdma_ip2gid((struct sockaddr *)&route->addr.dst_addr, route 811 drivers/infiniband/core/ucma.c rdma_ip2gid((struct sockaddr *)&route->addr.src_addr, route 817 drivers/infiniband/core/ucma.c &route->path_rec[1]); route 821 drivers/infiniband/core/ucma.c &route->path_rec[0]); route 829 drivers/infiniband/core/ucma.c struct rdma_route *route) route 833 drivers/infiniband/core/ucma.c dev_addr = &route->addr.dev_addr; route 860 drivers/infiniband/core/ucma.c addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr; route 864 drivers/infiniband/core/ucma.c addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr; route 875 drivers/infiniband/core/ucma.c ucma_copy_ib_route(&resp, &ctx->cm_id->route); route 877 drivers/infiniband/core/ucma.c ucma_copy_iboe_route(&resp, &ctx->cm_id->route); route 879 drivers/infiniband/core/ucma.c ucma_copy_iw_route(&resp, &ctx->cm_id->route); route 900 drivers/infiniband/core/ucma.c ib_addr_get_pkey(&cm_id->route.addr.dev_addr)); route 915 drivers/infiniband/core/ucma.c addr = (struct sockaddr *) &ctx->cm_id->route.addr.src_addr; route 919 drivers/infiniband/core/ucma.c addr = (struct sockaddr *) &ctx->cm_id->route.addr.dst_addr; route 944 drivers/infiniband/core/ucma.c resp->num_paths = ctx->cm_id->route.num_paths; route 948 drivers/infiniband/core/ucma.c struct sa_path_rec *rec = &ctx->cm_id->route.path_rec[i]; route 986 
drivers/infiniband/core/ucma.c if (ctx->cm_id->route.addr.src_addr.ss_family == AF_IB) { route 987 drivers/infiniband/core/ucma.c memcpy(addr, &ctx->cm_id->route.addr.src_addr, resp.src_size); route 994 drivers/infiniband/core/ucma.c &ctx->cm_id->route.addr.src_addr); route 999 drivers/infiniband/core/ucma.c if (ctx->cm_id->route.addr.dst_addr.ss_family == AF_IB) { route 1000 drivers/infiniband/core/ucma.c memcpy(addr, &ctx->cm_id->route.addr.dst_addr, resp.dst_size); route 1007 drivers/infiniband/core/ucma.c &ctx->cm_id->route.addr.dst_addr); route 1067 drivers/infiniband/core/ucma.c dst->qkey = (id->route.addr.src_addr.ss_family == AF_IB) ? src->qkey : 0; route 371 drivers/infiniband/hw/hfi1/mad.c memcpy(smp->route.lid.data, &trap->data, trap->len); route 4404 drivers/infiniband/hw/hfi1/mad.c smp->route.dr.dr_slid == OPA_LID_PERMISSIVE && route 4405 drivers/infiniband/hw/hfi1/mad.c smp->route.dr.dr_dlid == OPA_LID_PERMISSIVE); route 4536 drivers/infiniband/hw/hfi1/mad.c smp->route.dr.dr_slid, smp->route.dr.return_path, route 4555 drivers/infiniband/hw/hfi1/mad.c smp->mkey, smp->route.dr.dr_slid, route 4556 drivers/infiniband/hw/hfi1/mad.c smp->route.dr.return_path, route 777 drivers/infiniband/ulp/iser/iscsi_iser.c &iser_conn->ib_conn.cma_id->route.addr.dst_addr, route 2450 drivers/infiniband/ulp/isert/ib_isert.c struct rdma_route *cm_route = &cm_id->route; route 2554 drivers/infiniband/ulp/srpt/ib_srpt.c &cm_id->route.addr.src_addr); route 2557 drivers/infiniband/ulp/srpt/ib_srpt.c cm_id->route.path_rec->pkey, &req, src_addr); route 1738 drivers/media/platform/am437x/am437x-vpfe.c struct vpfe_route *route; route 1760 drivers/media/platform/am437x/am437x-vpfe.c route = &sdinfo->routes[inp_index]; route 1761 drivers/media/platform/am437x/am437x-vpfe.c if (route && sdinfo->can_route) { route 1762 drivers/media/platform/am437x/am437x-vpfe.c input = route->input; route 1763 drivers/media/platform/am437x/am437x-vpfe.c output = route->output; route 1023 drivers/media/platform/davinci/vpfe_capture.c struct vpfe_route *route; route 1053 drivers/media/platform/davinci/vpfe_capture.c route = &sdinfo->routes[inp_index]; route 1054 drivers/media/platform/davinci/vpfe_capture.c if (route && sdinfo->can_route) { route 1055 drivers/media/platform/davinci/vpfe_capture.c input = route->input; route 1056 drivers/media/platform/davinci/vpfe_capture.c output = route->output; route 71 drivers/media/platform/rcar-vin/rcar-core.c const struct rvin_group_route *route; route 74 drivers/media/platform/rcar-vin/rcar-core.c for (route = vin->info->routes; route->mask; route++) { route 75 drivers/media/platform/rcar-vin/rcar-core.c if (route->vin == vin->id && route 76 drivers/media/platform/rcar-vin/rcar-core.c route->csi == csi_id && route 77 drivers/media/platform/rcar-vin/rcar-core.c route->channel == channel) { route 80 drivers/media/platform/rcar-vin/rcar-core.c route->vin, route->csi, route->channel); route 81 drivers/media/platform/rcar-vin/rcar-core.c mask |= route->mask; route 681 drivers/media/platform/rcar-vin/rcar-core.c const struct rvin_group_route *route; route 703 drivers/media/platform/rcar-vin/rcar-core.c for (route = vin->info->routes; route->mask; route++) { route 709 drivers/media/platform/rcar-vin/rcar-core.c if (!vin->group->vin[route->vin]) route 713 drivers/media/platform/rcar-vin/rcar-core.c if (!vin->group->vin[rvin_group_id_to_master(route->vin)]) route 717 drivers/media/platform/rcar-vin/rcar-core.c if (!vin->group->csi[route->csi].subdev) route 720 
drivers/media/platform/rcar-vin/rcar-core.c source = &vin->group->csi[route->csi].subdev->entity; route 721 drivers/media/platform/rcar-vin/rcar-core.c source_idx = rvin_group_csi_channel_to_pad(route->channel); route 724 drivers/media/platform/rcar-vin/rcar-core.c sink = &vin->group->vin[route->vin]->vdev.entity; route 555 drivers/media/platform/vsp1/vsp1_drm.c vsp1_dl_body_write(dlb, entity->route->reg, route 28 drivers/media/platform/vsp1/vsp1_entity.c u32 route; route 39 drivers/media/platform/vsp1/vsp1_entity.c | (source->route->output << VI6_DPR_SMPPT_PT_SHIFT); route 52 drivers/media/platform/vsp1/vsp1_entity.c | (source->route->output << VI6_DPR_SMPPT_PT_SHIFT); route 59 drivers/media/platform/vsp1/vsp1_entity.c if (source->route->reg == 0) route 62 drivers/media/platform/vsp1/vsp1_entity.c route = source->sink->route->inputs[source->sink_pad]; route 68 drivers/media/platform/vsp1/vsp1_entity.c route |= VI6_DPR_ROUTE_BRSSEL; route 69 drivers/media/platform/vsp1/vsp1_entity.c vsp1_dl_body_write(dlb, source->route->reg, route); route 446 drivers/media/platform/vsp1/vsp1_entity.c if (!source->route) route 620 drivers/media/platform/vsp1/vsp1_entity.c entity->route = &vsp1_routes[i]; route 103 drivers/media/platform/vsp1/vsp1_entity.h const struct vsp1_route *route; route 343 drivers/media/platform/vsp1/vsp1_pipe.c if (entity->route && entity->route->reg) route 344 drivers/media/platform/vsp1/vsp1_pipe.c vsp1_write(vsp1, entity->route->reg, route 100 drivers/media/usb/stk1160/stk1160-core.c int route; route 106 drivers/media/usb/stk1160/stk1160-core.c route = SAA7115_SVIDEO3; route 108 drivers/media/usb/stk1160/stk1160-core.c route = SAA7115_COMPOSITE0; route 112 drivers/media/usb/stk1160/stk1160-core.c route, 0, 0); route 318 drivers/misc/vmw_vmci/vmci_datagram.c enum vmci_route route; route 329 drivers/misc/vmw_vmci/vmci_datagram.c retval = vmci_route(&dg->src, &dg->dst, from_guest, &route); route 336 drivers/misc/vmw_vmci/vmci_datagram.c if (VMCI_ROUTE_AS_HOST == route) { route 342 drivers/misc/vmw_vmci/vmci_datagram.c if (VMCI_ROUTE_AS_GUEST == route) route 345 drivers/misc/vmw_vmci/vmci_datagram.c pr_warn("Unknown route (%d) for datagram\n", route); route 582 drivers/misc/vmw_vmci/vmci_doorbell.c enum vmci_route route; route 590 drivers/misc/vmw_vmci/vmci_doorbell.c retval = vmci_route(&src, &dst, false, &route); route 594 drivers/misc/vmw_vmci/vmci_doorbell.c if (VMCI_ROUTE_AS_HOST == route) route 598 drivers/misc/vmw_vmci/vmci_doorbell.c if (VMCI_ROUTE_AS_GUEST == route) route 601 drivers/misc/vmw_vmci/vmci_doorbell.c pr_warn("Unknown route (%d) for doorbell\n", route); route 2665 drivers/misc/vmw_vmci/vmci_queue_pair.c enum vmci_route route; route 2690 drivers/misc/vmw_vmci/vmci_queue_pair.c retval = vmci_route(&src, &dst, false, &route); route 2692 drivers/misc/vmw_vmci/vmci_queue_pair.c route = vmci_guest_code_active() ? 
route 2713 drivers/misc/vmw_vmci/vmci_queue_pair.c if (VMCI_ROUTE_AS_HOST == route) { route 23 drivers/misc/vmw_vmci/vmci_route.c enum vmci_route *route) route 28 drivers/misc/vmw_vmci/vmci_route.c *route = VMCI_ROUTE_NONE; route 80 drivers/misc/vmw_vmci/vmci_route.c *route = VMCI_ROUTE_AS_GUEST; route 104 drivers/misc/vmw_vmci/vmci_route.c *route = VMCI_ROUTE_AS_HOST; route 117 drivers/misc/vmw_vmci/vmci_route.c *route = VMCI_ROUTE_AS_GUEST; route 143 drivers/misc/vmw_vmci/vmci_route.c *route = VMCI_ROUTE_AS_HOST; route 180 drivers/misc/vmw_vmci/vmci_route.c *route = VMCI_ROUTE_AS_HOST; route 216 drivers/misc/vmw_vmci/vmci_route.c *route = VMCI_ROUTE_AS_GUEST; route 20 drivers/misc/vmw_vmci/vmci_route.h bool from_guest, enum vmci_route *route); route 91 drivers/net/ethernet/mellanox/mlxsw/spectrum1_mr_tcam.c struct mlxsw_sp1_mr_tcam_route *route, route 101 drivers/net/ethernet/mellanox/mlxsw/spectrum1_mr_tcam.c &route->parman_item); route 105 drivers/net/ethernet/mellanox/mlxsw/spectrum1_mr_tcam.c route->parman_prio = &tcam_region->parman_prios[prio]; route 111 drivers/net/ethernet/mellanox/mlxsw/spectrum1_mr_tcam.c struct mlxsw_sp1_mr_tcam_route *route, route 118 drivers/net/ethernet/mellanox/mlxsw/spectrum1_mr_tcam.c route->parman_prio, &route->parman_item); route 128 drivers/net/ethernet/mellanox/mlxsw/spectrum1_mr_tcam.c struct mlxsw_sp1_mr_tcam_route *route = route_priv; route 132 drivers/net/ethernet/mellanox/mlxsw/spectrum1_mr_tcam.c err = mlxsw_sp1_mr_tcam_route_parman_item_add(mr_tcam, route, route 137 drivers/net/ethernet/mellanox/mlxsw/spectrum1_mr_tcam.c err = mlxsw_sp1_mr_tcam_route_replace(mlxsw_sp, &route->parman_item, route 144 drivers/net/ethernet/mellanox/mlxsw/spectrum1_mr_tcam.c mlxsw_sp1_mr_tcam_route_parman_item_remove(mr_tcam, route, key); route 153 drivers/net/ethernet/mellanox/mlxsw/spectrum1_mr_tcam.c struct mlxsw_sp1_mr_tcam_route *route = route_priv; route 156 drivers/net/ethernet/mellanox/mlxsw/spectrum1_mr_tcam.c mlxsw_sp1_mr_tcam_route_remove(mlxsw_sp, &route->parman_item, key); route 157 drivers/net/ethernet/mellanox/mlxsw/spectrum1_mr_tcam.c mlxsw_sp1_mr_tcam_route_parman_item_remove(mr_tcam, route, key); route 166 drivers/net/ethernet/mellanox/mlxsw/spectrum1_mr_tcam.c struct mlxsw_sp1_mr_tcam_route *route = route_priv; route 168 drivers/net/ethernet/mellanox/mlxsw/spectrum1_mr_tcam.c return mlxsw_sp1_mr_tcam_route_replace(mlxsw_sp, &route->parman_item, route 284 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c struct mlxsw_sp_mr_tcam_route *route = route_priv; route 288 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c route->key = route_params->key; route 289 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c route->irif_index = route_params->value.irif_index; route 290 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c route->min_mtu = route_params->value.min_mtu; route 291 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c route->action = route_params->value.route_action; route 294 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c mlxsw_sp_mr_erif_list_init(&route->erif_list); route 295 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c err = mlxsw_sp_mr_tcam_erif_populate(mlxsw_sp, &route->erif_list, route 301 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c err = mlxsw_sp_flow_counter_alloc(mlxsw_sp, &route->counter_index); route 306 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c route->afa_block = mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp, route 307 
drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c route->action, route 308 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c route->irif_index, route 309 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c route->counter_index, route 310 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c route->min_mtu, route 311 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c &route->erif_list); route 312 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c if (IS_ERR(route->afa_block)) { route 313 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c err = PTR_ERR(route->afa_block); route 317 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c route->priv = kzalloc(ops->route_priv_size, GFP_KERNEL); route 318 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c if (!route->priv) { route 324 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c err = ops->route_create(mlxsw_sp, mr_tcam->priv, route->priv, route 325 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c &route->key, route->afa_block, route 332 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c kfree(route->priv); route 334 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block); route 336 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c mlxsw_sp_flow_counter_free(mlxsw_sp, route->counter_index); route 339 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &route->erif_list); route 347 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c struct mlxsw_sp_mr_tcam_route *route = route_priv; route 350 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c ops->route_destroy(mlxsw_sp, mr_tcam->priv, route->priv, &route->key); route 351 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c kfree(route->priv); route 352 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block); route 353 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c mlxsw_sp_flow_counter_free(mlxsw_sp, route->counter_index); route 354 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &route->erif_list); route 361 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c struct mlxsw_sp_mr_tcam_route *route = route_priv; route 363 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c return mlxsw_sp_flow_counter_get(mlxsw_sp, route->counter_index, route 373 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c struct mlxsw_sp_mr_tcam_route *route = route_priv; route 379 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c route->irif_index, route 380 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c route->counter_index, route 381 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c route->min_mtu, route 382 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c &route->erif_list); route 387 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c err = ops->route_update(mlxsw_sp, route->priv, &route->key, afa_block); route 392 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block); route 393 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c route->afa_block = afa_block; route 394 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c route->action = route_action; route 405 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c struct mlxsw_sp_mr_tcam_route *route = route_priv; route 411 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c route->action, route 412 
drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c route->irif_index, route 413 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c route->counter_index, route 415 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c &route->erif_list); route 420 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c err = ops->route_update(mlxsw_sp, route->priv, &route->key, afa_block); route 425 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block); route 426 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c route->afa_block = afa_block; route 427 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c route->min_mtu = min_mtu; route 437 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c struct mlxsw_sp_mr_tcam_route *route = route_priv; route 439 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c if (route->action != MLXSW_SP_MR_ROUTE_ACTION_TRAP) route 441 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c route->irif_index = irif_index; route 448 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c struct mlxsw_sp_mr_tcam_route *route = route_priv; route 451 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c err = mlxsw_sp_mr_erif_list_add(mlxsw_sp, &route->erif_list, route 457 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c if (route->action != MLXSW_SP_MR_ROUTE_ACTION_TRAP) route 459 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c &route->erif_list); route 467 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c struct mlxsw_sp_mr_tcam_route *route = route_priv; route 476 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c list_for_each_entry(erif_sublist, &route->erif_list.erif_sublists, list) { route 490 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c afa_block = mlxsw_sp_mr_tcam_afa_block_create(mlxsw_sp, route->action, route 491 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c route->irif_index, route 492 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c route->counter_index, route 493 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c route->min_mtu, route 501 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c err = ops->route_update(mlxsw_sp, route->priv, &route->key, afa_block); route 505 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block); route 506 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &route->erif_list); route 507 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c route->afa_block = afa_block; route 508 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c mlxsw_sp_mr_erif_list_move(&route->erif_list, &erif_list); route 524 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c struct mlxsw_sp_mr_tcam_route *route = route_priv; route 539 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c route->counter_index, route 548 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c err = ops->route_update(mlxsw_sp, route->priv, &route->key, afa_block); route 552 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c mlxsw_sp_mr_tcam_afa_block_destroy(route->afa_block); route 553 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c mlxsw_sp_mr_erif_list_flush(mlxsw_sp, &route->erif_list); route 554 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c route->afa_block = afa_block; route 555 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c mlxsw_sp_mr_erif_list_move(&route->erif_list, &erif_list); route 556 
drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c route->action = route_info->route_action; route 557 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c route->irif_index = route_info->irif_index; route 558 drivers/net/ethernet/mellanox/mlxsw/spectrum_mr_tcam.c route->min_mtu = route_info->min_mtu; route 45 drivers/net/thunderbolt.c #define TBNET_L0_PORT_NUM(route) ((route) & GENMASK(5, 0)) route 211 drivers/net/thunderbolt.c static void tbnet_fill_header(struct thunderbolt_ip_header *hdr, u64 route, route 221 drivers/net/thunderbolt.c hdr->route_hi = upper_32_bits(route); route 222 drivers/net/thunderbolt.c hdr->route_lo = lower_32_bits(route); route 231 drivers/net/thunderbolt.c static int tbnet_login_response(struct tbnet *net, u64 route, u8 sequence, route 238 drivers/net/thunderbolt.c tbnet_fill_header(&reply.hdr, route, sequence, xd->local_uuid, route 255 drivers/net/thunderbolt.c tbnet_fill_header(&request.hdr, xd->route, sequence, xd->local_uuid, route 268 drivers/net/thunderbolt.c static int tbnet_logout_response(struct tbnet *net, u64 route, u8 sequence, route 275 drivers/net/thunderbolt.c tbnet_fill_header(&reply.hdr, route, sequence, xd->local_uuid, route 289 drivers/net/thunderbolt.c tbnet_fill_header(&request.hdr, xd->route, 0, xd->local_uuid, route 398 drivers/net/thunderbolt.c u64 route; route 408 drivers/net/thunderbolt.c route = ((u64)pkg->hdr.route_hi << 32) | pkg->hdr.route_lo; route 409 drivers/net/thunderbolt.c route &= ~BIT_ULL(63); route 410 drivers/net/thunderbolt.c if (route != net->xd->route) route 422 drivers/net/thunderbolt.c ret = tbnet_login_response(net, route, sequence, route 446 drivers/net/thunderbolt.c ret = tbnet_logout_response(net, route, sequence, command_id); route 1180 drivers/net/thunderbolt.c phy_port = tb_phy_port_from_link(TBNET_L0_PORT_NUM(xd->route)); route 1593 drivers/nvme/target/rdma.c if (inet_addr_is_any((struct sockaddr *)&cm_id->route.addr.src_addr)) { route 1597 drivers/nvme/target/rdma.c struct sockaddr *addr = (void *)&req_cm_id->route.addr.src_addr; route 186 drivers/pci/controller/pcie-cadence.h #define CDNS_PCIE_NORMAL_MSG_ROUTING(route) \ route 187 drivers/pci/controller/pcie-cadence.h (((route) << 5) & CDNS_PCIE_NORMAL_MSG_ROUTING_MASK) route 216 drivers/pci/controller/pcie-rockchip.h #define ROCKCHIP_PCIE_MSG_ROUTING(route) \ route 217 drivers/pci/controller/pcie-rockchip.h (((route) << 5) & ROCKCHIP_PCIE_MSG_ROUTING_MASK) route 72 drivers/s390/char/sclp_diag.h u16 route; route 57 drivers/s390/char/sclp_ftp.c diag->route != SCLP_DIAG_FTP_ROUTE || route 105 drivers/s390/char/sclp_ftp.c sccb->evbuf.route = SCLP_DIAG_FTP_ROUTE; route 31 drivers/s390/net/qeth_l3_sys.c struct qeth_routing_info *route, char *buf) route 33 drivers/s390/net/qeth_l3_sys.c switch (route->type) { route 70 drivers/s390/net/qeth_l3_sys.c struct qeth_routing_info *route, enum qeth_prot_versions prot, route 73 drivers/s390/net/qeth_l3_sys.c enum qeth_routing_types old_route_type = route->type; route 78 drivers/s390/net/qeth_l3_sys.c route->type = NO_ROUTER; route 80 drivers/s390/net/qeth_l3_sys.c route->type = PRIMARY_CONNECTOR; route 82 drivers/s390/net/qeth_l3_sys.c route->type = SECONDARY_CONNECTOR; route 84 drivers/s390/net/qeth_l3_sys.c route->type = PRIMARY_ROUTER; route 86 drivers/s390/net/qeth_l3_sys.c route->type = SECONDARY_ROUTER; route 88 drivers/s390/net/qeth_l3_sys.c route->type = MULTICAST_ROUTER; route 94 drivers/s390/net/qeth_l3_sys.c (old_route_type != route->type)) { route 102 drivers/s390/net/qeth_l3_sys.c route->type = 
old_route_type; route 342 drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c int idx, enum snd_bcm2835_route route, route 356 drivers/staging/vc04_services/bcm2835-audio/bcm2835-pcm.c chip->dest = route; route 57 drivers/staging/vc04_services/bcm2835-audio/bcm2835.c enum snd_bcm2835_route route, route 69 drivers/staging/vc04_services/bcm2835-audio/bcm2835.c enum snd_bcm2835_route route; route 74 drivers/staging/vc04_services/bcm2835-audio/bcm2835.c enum snd_bcm2835_route route, route 93 drivers/staging/vc04_services/bcm2835-audio/bcm2835.c enum snd_bcm2835_route route, route 96 drivers/staging/vc04_services/bcm2835-audio/bcm2835.c return snd_bcm2835_new_pcm(chip, name, 0, route, numchannels, false); route 121 drivers/staging/vc04_services/bcm2835-audio/bcm2835.c .route = AUDIO_DEST_HDMI route 134 drivers/staging/vc04_services/bcm2835-audio/bcm2835.c .route = AUDIO_DEST_HEADPHONES route 193 drivers/staging/vc04_services/bcm2835-audio/bcm2835.c audio_driver->route, route 88 drivers/staging/vc04_services/bcm2835-audio/bcm2835.h int idx, enum snd_bcm2835_route route, route 170 drivers/thunderbolt/ctl.c enum tb_cfg_pkg_type type, u64 route) route 190 drivers/thunderbolt/ctl.c if (WARN(route != tb_cfg_get_route(header), route 192 drivers/thunderbolt/ctl.c route, tb_cfg_get_route(header))) route 241 drivers/thunderbolt/ctl.c enum tb_cfg_pkg_type type, u64 route) route 251 drivers/thunderbolt/ctl.c res.err = check_header(pkg, len, type, route); route 715 drivers/thunderbolt/ctl.c int tb_cfg_error(struct tb_ctl *ctl, u64 route, u32 port, route 719 drivers/thunderbolt/ctl.c .header = tb_cfg_make_header(route), route 723 drivers/thunderbolt/ctl.c tb_ctl_dbg(ctl, "resetting error on %llx:%x.\n", route, port); route 730 drivers/thunderbolt/ctl.c u64 route = tb_cfg_get_route(pkg->buffer) & ~BIT_ULL(63); route 737 drivers/thunderbolt/ctl.c if (route != tb_cfg_get_route(req->request)) route 777 drivers/thunderbolt/ctl.c struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route, route 780 drivers/thunderbolt/ctl.c struct cfg_reset_pkg request = { .header = tb_cfg_make_header(route) }; route 813 drivers/thunderbolt/ctl.c u64 route, u32 port, enum tb_cfg_space space, route 818 drivers/thunderbolt/ctl.c .header = tb_cfg_make_header(route), route 876 drivers/thunderbolt/ctl.c u64 route, u32 port, enum tb_cfg_space space, route 881 drivers/thunderbolt/ctl.c .header = tb_cfg_make_header(route), route 950 drivers/thunderbolt/ctl.c int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port, route 953 drivers/thunderbolt/ctl.c struct tb_cfg_result res = tb_cfg_read_raw(ctl, buffer, route, port, route 976 drivers/thunderbolt/ctl.c int tb_cfg_write(struct tb_ctl *ctl, const void *buffer, u64 route, u32 port, route 979 drivers/thunderbolt/ctl.c struct tb_cfg_result res = tb_cfg_write_raw(ctl, buffer, route, port, route 1011 drivers/thunderbolt/ctl.c int tb_cfg_get_upstream_port(struct tb_ctl *ctl, u64 route) route 1014 drivers/thunderbolt/ctl.c struct tb_cfg_result res = tb_cfg_read_raw(ctl, &dummy, route, 0, route 115 drivers/thunderbolt/ctl.h static inline struct tb_cfg_header tb_cfg_make_header(u64 route) route 118 drivers/thunderbolt/ctl.h .route_hi = route >> 32, route 119 drivers/thunderbolt/ctl.h .route_lo = route, route 122 drivers/thunderbolt/ctl.h WARN_ON(tb_cfg_get_route(&header) != route); route 126 drivers/thunderbolt/ctl.h int tb_cfg_error(struct tb_ctl *ctl, u64 route, u32 port, route 128 drivers/thunderbolt/ctl.h struct tb_cfg_result tb_cfg_reset(struct tb_ctl *ctl, u64 route, 
route 131 drivers/thunderbolt/ctl.h u64 route, u32 port, route 135 drivers/thunderbolt/ctl.h u64 route, u32 port, route 138 drivers/thunderbolt/ctl.h int tb_cfg_read(struct tb_ctl *ctl, void *buffer, u64 route, u32 port, route 140 drivers/thunderbolt/ctl.h int tb_cfg_write(struct tb_ctl *ctl, const void *buffer, u64 route, u32 port, route 142 drivers/thunderbolt/ctl.h int tb_cfg_get_upstream_port(struct tb_ctl *ctl, u64 route); route 68 drivers/thunderbolt/dma_port.c u64 route = tb_cfg_get_route(pkg->buffer) & ~BIT_ULL(63); route 74 drivers/thunderbolt/dma_port.c if (route != tb_cfg_get_route(req->request)) route 88 drivers/thunderbolt/dma_port.c static int dma_port_read(struct tb_ctl *ctl, void *buffer, u64 route, route 92 drivers/thunderbolt/dma_port.c .header = tb_cfg_make_header(route), route 129 drivers/thunderbolt/dma_port.c static int dma_port_write(struct tb_ctl *ctl, const void *buffer, u64 route, route 133 drivers/thunderbolt/dma_port.c .header = tb_cfg_make_header(route), route 86 drivers/thunderbolt/icm.c int (*get_route)(struct tb *tb, u8 link, u8 depth, u64 *route); route 155 drivers/thunderbolt/icm.c static inline u8 phy_port_from_route(u64 route, u8 depth) route 159 drivers/thunderbolt/icm.c link = depth ? route >> ((depth - 1) * 8) : route; route 173 drivers/thunderbolt/icm.c static inline u64 get_parent_route(u64 route) route 175 drivers/thunderbolt/icm.c int depth = tb_route_length(route); route 176 drivers/thunderbolt/icm.c return depth ? route & ~(0xffULL << (depth - 1) * TB_ROUTE_SHIFT) : 0; route 358 drivers/thunderbolt/icm.c static int icm_fr_get_route(struct tb *tb, u8 link, u8 depth, u64 *route) route 402 drivers/thunderbolt/icm.c *route = get_route(sw->route_hi, sw->route_lo); route 565 drivers/thunderbolt/icm.c static struct tb_switch *add_switch(struct tb_switch *parent_sw, u64 route, route 578 drivers/thunderbolt/icm.c sw = tb_switch_alloc(parent_sw->tb, &parent_sw->dev, route); route 602 drivers/thunderbolt/icm.c tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw); route 603 drivers/thunderbolt/icm.c tb_upstream_port(sw)->remote = tb_port_at(route, parent_sw); route 620 drivers/thunderbolt/icm.c u64 route, u8 connection_id, u8 connection_key, route 626 drivers/thunderbolt/icm.c tb_port_at(route, parent_sw)->remote = tb_upstream_port(sw); route 629 drivers/thunderbolt/icm.c sw->config.route_hi = upper_32_bits(route); route 630 drivers/thunderbolt/icm.c sw->config.route_lo = lower_32_bits(route); route 653 drivers/thunderbolt/icm.c static void add_xdomain(struct tb_switch *sw, u64 route, route 661 drivers/thunderbolt/icm.c xd = tb_xdomain_alloc(sw->tb, &sw->dev, route, local_uuid, remote_uuid); route 668 drivers/thunderbolt/icm.c tb_port_at(route, sw)->xdomain = xd; route 677 drivers/thunderbolt/icm.c static void update_xdomain(struct tb_xdomain *xd, u64 route, u8 link) route 680 drivers/thunderbolt/icm.c xd->route = route; route 689 drivers/thunderbolt/icm.c tb_port_at(xd->route, sw)->xdomain = NULL; route 705 drivers/thunderbolt/icm.c u64 route; route 747 drivers/thunderbolt/icm.c ret = icm->get_route(tb, link, depth, &route); route 755 drivers/thunderbolt/icm.c route = tb_route(sw); route 758 drivers/thunderbolt/icm.c update_switch(parent_sw, sw, route, pkg->connection_id, route 806 drivers/thunderbolt/icm.c ret = icm->get_route(tb, link, depth, &route); route 814 drivers/thunderbolt/icm.c add_switch(parent_sw, route, &pkg->ep_uuid, (const u8 *)pkg->ep_name, route 858 drivers/thunderbolt/icm.c u64 route; route 869 drivers/thunderbolt/icm.c route = 
get_route(pkg->local_route_hi, pkg->local_route_lo); route 875 drivers/thunderbolt/icm.c xd_phy_port = phy_port_from_route(xd->route, xd->depth); route 876 drivers/thunderbolt/icm.c phy_port = phy_port_from_route(route, depth); route 879 drivers/thunderbolt/icm.c update_xdomain(xd, route, link); route 918 drivers/thunderbolt/icm.c sw = tb_switch_find_by_route(tb, route); route 931 drivers/thunderbolt/icm.c add_xdomain(sw, route, &pkg->local_uuid, &pkg->remote_uuid, link, route 1081 drivers/thunderbolt/icm.c request.route_hi = upper_32_bits(xd->route); route 1082 drivers/thunderbolt/icm.c request.route_lo = lower_32_bits(xd->route); route 1111 drivers/thunderbolt/icm.c request.route_hi = upper_32_bits(xd->route); route 1112 drivers/thunderbolt/icm.c request.route_lo = lower_32_bits(xd->route); route 1149 drivers/thunderbolt/icm.c u64 route; route 1161 drivers/thunderbolt/icm.c route = get_route(pkg->route_hi, pkg->route_lo); route 1169 drivers/thunderbolt/icm.c route); route 1176 drivers/thunderbolt/icm.c if (tb_route(sw) == route && !!sw->authorized == authorized) { route 1178 drivers/thunderbolt/icm.c update_switch(parent_sw, sw, route, pkg->connection_id, route 1189 drivers/thunderbolt/icm.c sw = tb_switch_find_by_route(tb, route); route 1196 drivers/thunderbolt/icm.c xd = tb_xdomain_find_by_route(tb, route); route 1202 drivers/thunderbolt/icm.c parent_sw = tb_switch_find_by_route(tb, get_parent_route(route)); route 1204 drivers/thunderbolt/icm.c tb_err(tb, "failed to find parent switch for %llx\n", route); route 1208 drivers/thunderbolt/icm.c sw = add_switch(parent_sw, route, &pkg->ep_uuid, (const u8 *)pkg->ep_name, route 1229 drivers/thunderbolt/icm.c u64 route; route 1231 drivers/thunderbolt/icm.c route = get_route(pkg->route_hi, pkg->route_lo); route 1233 drivers/thunderbolt/icm.c sw = tb_switch_find_by_route(tb, route); route 1235 drivers/thunderbolt/icm.c tb_warn(tb, "no switch exists at %llx, ignoring\n", route); route 1250 drivers/thunderbolt/icm.c u64 route; route 1255 drivers/thunderbolt/icm.c route = get_route(pkg->local_route_hi, pkg->local_route_lo); route 1259 drivers/thunderbolt/icm.c if (xd->route == route) { route 1260 drivers/thunderbolt/icm.c update_xdomain(xd, route, 0); route 1270 drivers/thunderbolt/icm.c xd = tb_xdomain_find_by_route(tb, route); route 1281 drivers/thunderbolt/icm.c sw = tb_switch_find_by_route(tb, route); route 1287 drivers/thunderbolt/icm.c sw = tb_switch_find_by_route(tb, get_parent_route(route)); route 1289 drivers/thunderbolt/icm.c tb_warn(tb, "no switch exists at %llx, ignoring\n", route); route 1293 drivers/thunderbolt/icm.c add_xdomain(sw, route, &pkg->local_uuid, &pkg->remote_uuid, 0, 0); route 1303 drivers/thunderbolt/icm.c u64 route; route 1305 drivers/thunderbolt/icm.c route = get_route(pkg->route_hi, pkg->route_lo); route 1307 drivers/thunderbolt/icm.c xd = tb_xdomain_find_by_route(tb, route); route 1430 drivers/thunderbolt/icm.c static int icm_ar_get_route(struct tb *tb, u8 link, u8 depth, u64 *route) route 1448 drivers/thunderbolt/icm.c *route = get_route(reply.route_hi, reply.route_lo); route 984 drivers/thunderbolt/switch.c int tb_switch_reset(struct tb *tb, u64 route) route 988 drivers/thunderbolt/switch.c header.route_hi = route >> 32, route 989 drivers/thunderbolt/switch.c header.route_lo = route, route 992 drivers/thunderbolt/switch.c tb_dbg(tb, "resetting switch at %llx\n", route); route 993 drivers/thunderbolt/switch.c res.err = tb_cfg_write(tb->ctl, ((u32 *) &header) + 2, route, route 997 drivers/thunderbolt/switch.c res = 
tb_cfg_reset(tb->ctl, route, TB_CFG_DEFAULT_TIMEOUT); route 1521 drivers/thunderbolt/switch.c u64 route) route 1528 drivers/thunderbolt/switch.c depth = tb_route_length(route); route 1532 drivers/thunderbolt/switch.c upstream_port = tb_cfg_get_upstream_port(tb->ctl, route); route 1541 drivers/thunderbolt/switch.c ret = tb_cfg_read(tb->ctl, &sw->config, route, 0, TB_CFG_SWITCH, 0, 5); route 1551 drivers/thunderbolt/switch.c sw->config.route_hi = upper_32_bits(route); route 1552 drivers/thunderbolt/switch.c sw->config.route_lo = lower_32_bits(route); route 1583 drivers/thunderbolt/switch.c if (!route) route 1617 drivers/thunderbolt/switch.c tb_switch_alloc_safe_mode(struct tb *tb, struct device *parent, u64 route) route 1626 drivers/thunderbolt/switch.c sw->config.depth = tb_route_length(route); route 1627 drivers/thunderbolt/switch.c sw->config.route_hi = upper_32_bits(route); route 1628 drivers/thunderbolt/switch.c sw->config.route_lo = lower_32_bits(route); route 1654 drivers/thunderbolt/switch.c u64 route; route 1657 drivers/thunderbolt/switch.c route = tb_route(sw); route 1659 drivers/thunderbolt/switch.c route, tb_route_length(route), sw->config.upstream_port_number); route 2028 drivers/thunderbolt/switch.c u64 route; route 2044 drivers/thunderbolt/switch.c if (lookup->route) { route 2045 drivers/thunderbolt/switch.c return sw->config.route_lo == lower_32_bits(lookup->route) && route 2046 drivers/thunderbolt/switch.c sw->config.route_hi == upper_32_bits(lookup->route); route 2114 drivers/thunderbolt/switch.c struct tb_switch *tb_switch_find_by_route(struct tb *tb, u64 route) route 2119 drivers/thunderbolt/switch.c if (!route) route 2124 drivers/thunderbolt/switch.c lookup.route = route; route 34 drivers/thunderbolt/tb.c u64 route; route 41 drivers/thunderbolt/tb.c static void tb_queue_hotplug(struct tb *tb, u64 route, u8 port, bool unplug) route 50 drivers/thunderbolt/tb.c ev->route = route; route 109 drivers/thunderbolt/tb.c u64 route; route 111 drivers/thunderbolt/tb.c route = tb_downstream_route(port); route 112 drivers/thunderbolt/tb.c xd = tb_xdomain_find_by_route(tb, route); route 118 drivers/thunderbolt/tb.c xd = tb_xdomain_alloc(tb, &sw->dev, route, tb->root_switch->uuid, route 121 drivers/thunderbolt/tb.c tb_port_at(route, sw)->xdomain = xd; route 443 drivers/thunderbolt/tb.c dst_port = tb_port_at(xd->route, sw); route 474 drivers/thunderbolt/tb.c dst_port = tb_port_at(xd->route, sw); route 512 drivers/thunderbolt/tb.c sw = tb_switch_find_by_route(tb, ev->route); route 516 drivers/thunderbolt/tb.c ev->route, ev->port, ev->unplug); route 522 drivers/thunderbolt/tb.c ev->route, ev->port, ev->unplug); route 528 drivers/thunderbolt/tb.c ev->route, ev->port, ev->unplug); route 591 drivers/thunderbolt/tb.c u64 route; route 598 drivers/thunderbolt/tb.c route = tb_cfg_get_route(&pkg->header); route 600 drivers/thunderbolt/tb.c if (tb_cfg_error(tb->ctl, route, pkg->port, route 602 drivers/thunderbolt/tb.c tb_warn(tb, "could not ack plug event on %llx:%x\n", route, route 606 drivers/thunderbolt/tb.c tb_queue_hotplug(tb, route, pkg->port, pkg->unplug); route 335 drivers/thunderbolt/tb.h static inline struct tb_port *tb_port_at(u64 route, struct tb_switch *sw) route 339 drivers/thunderbolt/tb.h port = route >> (sw->config.depth * 8); route 518 drivers/thunderbolt/tb.h u64 route); route 520 drivers/thunderbolt/tb.h struct device *parent, u64 route); route 526 drivers/thunderbolt/tb.h int tb_switch_reset(struct tb *tb, u64 route); route 531 drivers/thunderbolt/tb.h struct tb_switch 
*tb_switch_find_by_route(struct tb *tb, u64 route);
route 638 drivers/thunderbolt/tb.h static inline int tb_route_length(u64 route)
route 640 drivers/thunderbolt/tb.h return (fls64(route) + TB_ROUTE_SHIFT - 1) / TB_ROUTE_SHIFT;
route 659 drivers/thunderbolt/tb.h u64 route, const uuid_t *local_uuid,
route 187 drivers/thunderbolt/xdomain.c static inline void tb_xdp_fill_header(struct tb_xdp_header *hdr, u64 route,
route 195 drivers/thunderbolt/xdomain.c hdr->xd_hdr.route_hi = upper_32_bits(route);
route 196 drivers/thunderbolt/xdomain.c hdr->xd_hdr.route_lo = lower_32_bits(route);
route 226 drivers/thunderbolt/xdomain.c static int tb_xdp_uuid_request(struct tb_ctl *ctl, u64 route, int retry,
route 234 drivers/thunderbolt/xdomain.c tb_xdp_fill_header(&req.hdr, route, retry % 4, UUID_REQUEST,
route 253 drivers/thunderbolt/xdomain.c static int tb_xdp_uuid_response(struct tb_ctl *ctl, u64 route, u8 sequence,
route 259 drivers/thunderbolt/xdomain.c tb_xdp_fill_header(&res.hdr, route, sequence, UUID_RESPONSE,
route 263 drivers/thunderbolt/xdomain.c res.src_route_hi = upper_32_bits(route);
route 264 drivers/thunderbolt/xdomain.c res.src_route_lo = lower_32_bits(route);
route 270 drivers/thunderbolt/xdomain.c static int tb_xdp_error_response(struct tb_ctl *ctl, u64 route, u8 sequence,
route 276 drivers/thunderbolt/xdomain.c tb_xdp_fill_header(&res.hdr, route, sequence, ERROR_RESPONSE,
route 284 drivers/thunderbolt/xdomain.c static int tb_xdp_properties_request(struct tb_ctl *ctl, u64 route,
route 301 drivers/thunderbolt/xdomain.c tb_xdp_fill_header(&req.hdr, route, retry % 4, PROPERTIES_REQUEST,
route 377 drivers/thunderbolt/xdomain.c u64 route, u8 sequence, const uuid_t *src_uuid,
route 391 drivers/thunderbolt/xdomain.c tb_xdp_error_response(ctl, route, sequence,
route 413 drivers/thunderbolt/xdomain.c tb_xdp_fill_header(&res->hdr, route, sequence, PROPERTIES_RESPONSE,
route 431 drivers/thunderbolt/xdomain.c static int tb_xdp_properties_changed_request(struct tb_ctl *ctl, u64 route,
route 439 drivers/thunderbolt/xdomain.c tb_xdp_fill_header(&req.hdr, route, retry % 4,
route 455 drivers/thunderbolt/xdomain.c tb_xdp_properties_changed_response(struct tb_ctl *ctl, u64 route, u8 sequence)
route 460 drivers/thunderbolt/xdomain.c tb_xdp_fill_header(&res.hdr, route, sequence,
route 514 drivers/thunderbolt/xdomain.c u64 route;
route 516 drivers/thunderbolt/xdomain.c route = ((u64)xhdr->route_hi << 32 | xhdr->route_lo) & ~BIT_ULL(63);
route 528 drivers/thunderbolt/xdomain.c tb_xdp_error_response(ctl, route, sequence, ERROR_NOT_READY);
route 534 drivers/thunderbolt/xdomain.c ret = tb_xdp_properties_response(tb, ctl, route, sequence, uuid,
route 543 drivers/thunderbolt/xdomain.c ret = tb_xdp_properties_changed_response(ctl, route, sequence);
route 562 drivers/thunderbolt/xdomain.c ret = tb_xdp_uuid_response(ctl, route, sequence, uuid);
route 566 drivers/thunderbolt/xdomain.c tb_xdp_error_response(ctl, route, sequence,
route 902 drivers/thunderbolt/xdomain.c ret = tb_xdp_uuid_request(tb->ctl, xd->route, xd->uuid_retries, &uuid);
route 954 drivers/thunderbolt/xdomain.c ret = tb_xdp_properties_request(tb->ctl, xd->route, xd->local_uuid,
route 1043 drivers/thunderbolt/xdomain.c ret = tb_xdp_properties_changed_request(xd->tb->ctl, xd->route,
route 1220 drivers/thunderbolt/xdomain.c u64 route, const uuid_t *local_uuid,
route 1230 drivers/thunderbolt/xdomain.c xd->route = route;
route 1256 drivers/thunderbolt/xdomain.c dev_set_name(&xd->dev, "%u-%llx", tb->index, route);
route 1401 drivers/thunderbolt/xdomain.c u64 route;
route 1424 drivers/thunderbolt/xdomain.c } else if (lookup->route &&
route 1425 drivers/thunderbolt/xdomain.c lookup->route == xd->route) {
route 1511 drivers/thunderbolt/xdomain.c struct tb_xdomain *tb_xdomain_find_by_route(struct tb *tb, u64 route)
route 1517 drivers/thunderbolt/xdomain.c lookup.route = route;
route 560 drivers/tty/serial/efm32-uart.c u32 route, clkdiv, frame;
route 566 drivers/tty/serial/efm32-uart.c route = efm32_uart_read32(efm_port, UARTn_ROUTE);
route 567 drivers/tty/serial/efm32-uart.c if (!(route & UARTn_ROUTE_TXPEN))
route 637 drivers/usb/core/usb.c dev->route = 0;
route 649 drivers/usb/core/usb.c dev->route = 0;
route 655 drivers/usb/core/usb.c dev->route = parent->route +
route 658 drivers/usb/core/usb.c dev->route = parent->route +
route 1112 drivers/usb/host/xhci-mem.c slot_ctx->dev_info |= cpu_to_le32(LAST_CTX(1) | udev->route);
route 869 drivers/vme/vme.c struct vme_resource *vme_dma_request(struct vme_dev *vdev, u32 route)
route 897 drivers/vme/vme.c if (((dma_ctrlr->route_attr & route) == route) &&
route 49 include/linux/netfilter_ipv6.h int (*route)(struct net *net, struct dst_entry **dst, struct flowi *fl,
route 107 include/linux/netfilter_ipv6.h return v6ops->route(net, dst, fl, strict);
route 220 include/linux/thunderbolt.h u64 route;
route 251 include/linux/thunderbolt.h struct tb_xdomain *tb_xdomain_find_by_route(struct tb *tb, u64 route);
route 266 include/linux/thunderbolt.h tb_xdomain_find_by_route_locked(struct tb *tb, u64 route)
route 271 include/linux/thunderbolt.h xd = tb_xdomain_find_by_route(tb, route);
route 634 include/linux/usb.h u32 route;
route 90 include/net/netfilter/nf_flow_table.h struct nf_flow_route *route);
route 932 include/net/xfrm.h struct dst_entry *route;
route 977 include/net/xfrm.h dst_release(xdst->route);
route 74 include/rdma/opa_smi.h } route;
route 130 include/rdma/opa_smi.h return smp->route.dr.data;
route 132 include/rdma/opa_smi.h return smp->route.lid.data;
route 138 include/rdma/opa_smi.h return sizeof(smp->route.dr.data);
route 140 include/rdma/opa_smi.h return sizeof(smp->route.lid.data);
route 146 include/rdma/opa_smi.h return sizeof(*smp) - sizeof(smp->route.dr.data);
route 148 include/rdma/opa_smi.h return sizeof(*smp) - sizeof(smp->route.lid.data);
route 133 include/rdma/rdma_cm.h struct rdma_route route;
route 1538 include/sound/emu10k1.h #define snd_emu10k1_compose_send_routing(route) \
route 1539 include/sound/emu10k1.h ((route[0] | (route[1] << 4) | (route[2] << 8) | (route[3] << 12)) << 16)
route 1541 include/sound/emu10k1.h #define snd_emu10k1_compose_audigy_fxrt1(route) \
route 1542 include/sound/emu10k1.h ((unsigned int)route[0] | ((unsigned int)route[1] << 8) | ((unsigned int)route[2] << 16) | ((unsigned int)route[3] << 24))
route 1544 include/sound/emu10k1.h #define snd_emu10k1_compose_audigy_fxrt2(route) \
route 1545 include/sound/emu10k1.h ((unsigned int)route[4] | ((unsigned int)route[5] << 8) | ((unsigned int)route[6] << 16) | ((unsigned int)route[7] << 24))
route 426 include/sound/soc-dapm.h const struct snd_soc_dapm_route *route, int num);
route 428 include/sound/soc-dapm.h const struct snd_soc_dapm_route *route, int num);
route 430 include/sound/soc-dapm.h const struct snd_soc_dapm_route *route, int num);
route 121 include/sound/soc-topology.h struct snd_soc_dapm_route *route);
route 317 include/trace/events/ib_mad.h __entry->dr_slid = smp->route.dr.dr_slid;
route 318 include/trace/events/ib_mad.h __entry->dr_dlid = smp->route.dr.dr_dlid;
route 319 include/trace/events/ib_mad.h memcpy(__entry->initial_path, smp->route.dr.initial_path,
route 321 include/trace/events/ib_mad.h memcpy(__entry->return_path, smp->route.dr.return_path,
route 103 net/ax25/ax25_ip.c ax25_route *route;
route 115 net/ax25/ax25_ip.c route = ax25_get_route(dst, NULL);
route 116 net/ax25/ax25_ip.c if (route) {
route 117 net/ax25/ax25_ip.c digipeat = route->digipeat;
route 118 net/ax25/ax25_ip.c dev = route->dev;
route 119 net/ax25/ax25_ip.c ip_mode = route->ip_mode;
route 196 net/ax25/ax25_ip.c if ((ourskb = ax25_rt_build_path(skb, src, dst, route->digipeat)) == NULL) {
route 72 net/ax25/ax25_route.c static int __must_check ax25_rt_add(struct ax25_routes_struct *route)
route 78 net/ax25/ax25_route.c if ((ax25_dev = ax25_addr_ax25dev(&route->port_addr)) == NULL)
route 80 net/ax25/ax25_route.c if (route->digi_count > AX25_MAX_DIGIS)
route 87 net/ax25/ax25_route.c if (ax25cmp(&ax25_rt->callsign, &route->dest_addr) == 0 &&
route 91 net/ax25/ax25_route.c if (route->digi_count != 0) {
route 97 net/ax25/ax25_route.c ax25_rt->digipeat->ndigi = route->digi_count;
route 98 net/ax25/ax25_route.c for (i = 0; i < route->digi_count; i++) {
route 100 net/ax25/ax25_route.c ax25_rt->digipeat->calls[i] = route->digi_addr[i];
route 115 net/ax25/ax25_route.c ax25_rt->callsign = route->dest_addr;
route 119 net/ax25/ax25_route.c if (route->digi_count != 0) {
route 126 net/ax25/ax25_route.c ax25_rt->digipeat->ndigi = route->digi_count;
route 127 net/ax25/ax25_route.c for (i = 0; i < route->digi_count; i++) {
route 129 net/ax25/ax25_route.c ax25_rt->digipeat->calls[i] = route->digi_addr[i];
route 145 net/ax25/ax25_route.c static int ax25_rt_del(struct ax25_routes_struct *route)
route 150 net/ax25/ax25_route.c if ((ax25_dev = ax25_addr_ax25dev(&route->port_addr)) == NULL)
route 160 net/ax25/ax25_route.c ax25cmp(&route->dest_addr, &s->callsign) == 0) {
route 224 net/ax25/ax25_route.c struct ax25_routes_struct route;
route 228 net/ax25/ax25_route.c if (copy_from_user(&route, arg, sizeof(route)))
route 230 net/ax25/ax25_route.c return ax25_rt_add(&route);
route 233 net/ax25/ax25_route.c if (copy_from_user(&route, arg, sizeof(route)))
route 235 net/ax25/ax25_route.c return ax25_rt_del(&route);
route 74 net/ipv4/xfrm4_policy.c struct rtable *rt = (struct rtable *)xdst->route;
route 107 net/ipv4/xfrm4_policy.c struct dst_entry *path = xdst->route;
route 116 net/ipv4/xfrm4_policy.c struct dst_entry *path = xdst->route;
route 239 net/ipv6/netfilter.c .route = __nf_ip6_route,
route 74 net/ipv6/xfrm6_policy.c struct rt6_info *rt = (struct rt6_info *)xdst->route;
route 105 net/ipv6/xfrm6_policy.c struct dst_entry *path = xdst->route;
route 114 net/ipv6/xfrm6_policy.c struct dst_entry *path = xdst->route;
route 28 net/netfilter/nf_flow_table_core.c struct nf_flow_route *route,
route 33 net/netfilter/nf_flow_table_core.c struct dst_entry *other_dst = route->tuple[!dir].dst;
route 34 net/netfilter/nf_flow_table_core.c struct dst_entry *dst = route->tuple[dir].dst;
route 61 net/netfilter/nf_flow_table_core.c flow_offload_alloc(struct nf_conn *ct, struct nf_flow_route *route)
route 76 net/netfilter/nf_flow_table_core.c if (!dst_hold_safe(route->tuple[FLOW_OFFLOAD_DIR_ORIGINAL].dst))
route 79 net/netfilter/nf_flow_table_core.c if (!dst_hold_safe(route->tuple[FLOW_OFFLOAD_DIR_REPLY].dst))
route 84 net/netfilter/nf_flow_table_core.c flow_offload_fill_dir(flow, ct, route, FLOW_OFFLOAD_DIR_ORIGINAL);
route 85 net/netfilter/nf_flow_table_core.c flow_offload_fill_dir(flow, ct, route, FLOW_OFFLOAD_DIR_REPLY);
route 95 net/netfilter/nf_flow_table_core.c dst_release(route->tuple[FLOW_OFFLOAD_DIR_ORIGINAL].dst);
route 164 net/netfilter/nf_nat_core.c dst = ((struct xfrm_dst *)dst)->route;
route 24 net/netfilter/nft_flow_offload.c struct nf_flow_route *route,
route 47 net/netfilter/nft_flow_offload.c route->tuple[dir].dst = this_dst;
route 48 net/netfilter/nft_flow_offload.c route->tuple[!dir].dst = other_dst;
route 78 net/netfilter/nft_flow_offload.c struct nf_flow_route route;
route 115 net/netfilter/nft_flow_offload.c if (nft_flow_route(pkt, ct, &route, dir) < 0)
route 118 net/netfilter/nft_flow_offload.c flow = flow_offload_alloc(ct, &route);
route 131 net/netfilter/nft_flow_offload.c dst_release(route.tuple[!dir].dst);
route 137 net/netfilter/nft_flow_offload.c dst_release(route.tuple[!dir].dst);
route 156 net/rds/ib_cm.c ic->i_sl = ic->i_cm_id->route.path_rec->sl;
route 724 net/rds/ib_cm.c __be64 lguid = cm_id->route.path_rec->sgid.global.interface_id;
route 725 net/rds/ib_cm.c __be64 fguid = cm_id->route.path_rec->dgid.global.interface_id;
route 104 net/rds/rdma_transport.c cm_id->route.path_rec[0].sl =
route 212 net/sunrpc/xprtrdma/svc_rdma_transport.c sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr;
route 220 net/sunrpc/xprtrdma/svc_rdma_transport.c sa = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.src_addr;
route 242 net/sunrpc/xprtrdma/svc_rdma_transport.c struct sockaddr *sap = (struct sockaddr *)&cma_id->route.addr.src_addr;
route 267 net/sunrpc/xprtrdma/svc_rdma_transport.c struct sockaddr *sap = (struct sockaddr *)&cma_id->route.addr.dst_addr;
route 364 net/sunrpc/xprtrdma/svc_rdma_transport.c sa = (struct sockaddr *)&cma_xprt->sc_cm_id->route.addr.src_addr;
route 533 net/sunrpc/xprtrdma/svc_rdma_transport.c sap = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.src_addr;
route 535 net/sunrpc/xprtrdma/svc_rdma_transport.c sap = (struct sockaddr *)&newxprt->sc_cm_id->route.addr.dst_addr;
route 2588 net/xfrm/xfrm_policy.c xdst->route = dst;
route 2891 net/xfrm/xfrm_policy.c xdst->route = dst;
route 3022 net/xfrm/xfrm_policy.c struct dst_entry *dst, *route;
route 3029 net/xfrm/xfrm_policy.c route = NULL;
route 3064 net/xfrm/xfrm_policy.c route = xdst->route;
route 3090 net/xfrm/xfrm_policy.c route = xdst->route;
route 3094 net/xfrm/xfrm_policy.c if (route == NULL && num_xfrms > 0) {
route 3771 net/xfrm/xfrm_policy.c route_mtu_cached = dst_mtu(xdst->route);
route 3820 net/xfrm/xfrm_policy.c if (!dst_check(xdst->route, xdst->route_cookie))
route 3822 net/xfrm/xfrm_policy.c mtu = dst_mtu(xdst->route);
route 136 samples/bpf/xdp_router_ipv4_user.c } route;
route 157 samples/bpf/xdp_router_ipv4_user.c memset(&route, 0, sizeof(route));
route 190 samples/bpf/xdp_router_ipv4_user.c route.dst = atoi(dsts);
route 191 samples/bpf/xdp_router_ipv4_user.c route.dst_len = atoi(dsts_len);
route 192 samples/bpf/xdp_router_ipv4_user.c route.gw = atoi(gws);
route 193 samples/bpf/xdp_router_ipv4_user.c route.iface = atoi(ifs);
route 194 samples/bpf/xdp_router_ipv4_user.c route.metric = atoi(metrics);
route 195 samples/bpf/xdp_router_ipv4_user.c route.iface_name = alloca(sizeof(char *) * IFNAMSIZ);
route 196 samples/bpf/xdp_router_ipv4_user.c route.iface_name = if_indextoname(route.iface, route.iface_name);
route 197 samples/bpf/xdp_router_ipv4_user.c route.mac = getmac(route.iface_name);
route 198 samples/bpf/xdp_router_ipv4_user.c if (route.mac == -1)
route 201 samples/bpf/xdp_router_ipv4_user.c &route.iface, &route.iface, 0) == 0);
route 215 samples/bpf/xdp_router_ipv4_user.c prefix_key->prefixlen = route.dst_len;
route 216 samples/bpf/xdp_router_ipv4_user.c direct_entry.mac = route.mac & 0xffffffffffff;
route 217 samples/bpf/xdp_router_ipv4_user.c direct_entry.ifindex = route.iface;
route 220 samples/bpf/xdp_router_ipv4_user.c if (route.dst_len == 32) {
route 223 samples/bpf/xdp_router_ipv4_user.c &route.dst) == 0);
route 226 samples/bpf/xdp_router_ipv4_user.c &route.dst,
route 228 samples/bpf/xdp_router_ipv4_user.c direct_entry.arp.dst = route.dst;
route 230 samples/bpf/xdp_router_ipv4_user.c &route.dst,
route 235 samples/bpf/xdp_router_ipv4_user.c prefix_key->data[i] = (route.dst >> i * 8) & 0xff;
route 242 samples/bpf/xdp_router_ipv4_user.c route.gw, route.dst_len,
route 243 samples/bpf/xdp_router_ipv4_user.c route.metric,
route 244 samples/bpf/xdp_router_ipv4_user.c route.iface_name);
route 249 samples/bpf/xdp_router_ipv4_user.c prefix_value->value = route.mac & 0xffffffffffff;
route 250 samples/bpf/xdp_router_ipv4_user.c prefix_value->ifindex = route.iface;
route 251 samples/bpf/xdp_router_ipv4_user.c prefix_value->gw = route.gw;
route 252 samples/bpf/xdp_router_ipv4_user.c prefix_value->metric = route.metric;
route 284 samples/bpf/xdp_router_ipv4_user.c route.metric >= prefix_value->metric) {
route 291 samples/bpf/xdp_router_ipv4_user.c route.mac & 0xffffffffffff;
route 292 samples/bpf/xdp_router_ipv4_user.c prefix_value->ifindex = route.iface;
route 293 samples/bpf/xdp_router_ipv4_user.c prefix_value->gw = route.gw;
route 294 samples/bpf/xdp_router_ipv4_user.c prefix_value->metric = route.metric;
route 302 samples/bpf/xdp_router_ipv4_user.c memset(&route, 0, sizeof(route));
route 307 samples/bpf/xdp_router_ipv4_user.c memset(&route, 0, sizeof(route));
route 548 sound/core/oss/mixer_oss.c int route)
route 575 sound/core/oss/mixer_oss.c if (uinfo->count > 1 && !uctl->value.integer.value[route ? 3 : 1])
route 653 sound/core/oss/mixer_oss.c int route)
route 676 sound/core/oss/mixer_oss.c uctl->value.integer.value[route ? 3 : 1] = right > 0 ? 1 : 0;
route 677 sound/core/oss/mixer_oss.c if (route) {
route 1630 sound/pci/au88x0/au88x0_core.c ADBRamLink * route, int rnum)
route 1634 sound/pci/au88x0/au88x0_core.c if ((rnum <= 0) || (route == NULL))
route 1639 sound/pci/au88x0/au88x0_core.c VORTEX_ADB_RTBASE + ((route[rnum] & ADB_MASK) << 2),
route 1644 sound/pci/au88x0/au88x0_core.c ((route[rnum - 1] & ADB_MASK) << 2), route[rnum]);
route 1654 sound/pci/au88x0/au88x0_core.c route[0]);
route 1667 sound/pci/au88x0/au88x0_core.c *route);
route 1672 sound/pci/au88x0/au88x0_core.c hwwrite(vortex->mmio, VORTEX_ADB_RTBASE + (prev << 2), route[0]);
route 1719 sound/pci/au88x0/au88x0_core.c ADBRamLink route;
route 1721 sound/pci/au88x0/au88x0_core.c route = ((source & ADB_MASK) << ADB_SHIFT) | (dest & ADB_MASK);
route 1723 sound/pci/au88x0/au88x0_core.c vortex_adb_addroutes(vortex, channel, &route, 1);
route 1733 sound/pci/au88x0/au88x0_core.c vortex_adb_delroutes(vortex, channel, route, route);
route 1750 sound/pci/au88x0/au88x0_core.c ADBRamLink route[2];
route 1752 sound/pci/au88x0/au88x0_core.c route[0] = ((source & ADB_MASK) << ADB_SHIFT) | (dest0 & ADB_MASK);
route 1753 sound/pci/au88x0/au88x0_core.c route[1] = ((source & ADB_MASK) << ADB_SHIFT) | (dest1 & ADB_MASK);
route 1756 sound/pci/au88x0/au88x0_core.c vortex_adb_addroutes(vortex, channel, route, 2);
route 1766 sound/pci/au88x0/au88x0_core.c vortex_adb_delroutes(vortex, channel, route[0], route[1]);
route 1785 sound/pci/au88x0/au88x0_core.c ADBRamLink route[2];
route 1787 sound/pci/au88x0/au88x0_core.c route[0] = ((source0 & ADB_MASK) << ADB_SHIFT) | (dest & ADB_MASK);
route 1788 sound/pci/au88x0/au88x0_core.c route[1] = ((source1 & ADB_MASK) << ADB_SHIFT) | (dest & ADB_MASK);
route 1791 sound/pci/au88x0/au88x0_core.c route[1] = (route[1] & ~ADB_MASK) | (dest + 0x20); /* fifo A */
route 1794 sound/pci/au88x0/au88x0_core.c vortex_adb_addroutes(vortex, ch, route, 2);
route 1809 sound/pci/au88x0/au88x0_core.c vortex_adb_delroutes(vortex, ch, route[0], route[1]);
route 1187 sound/pci/emu10k1/emumixer.c static void update_emu10k1_fxrt(struct snd_emu10k1 *emu, int voice, unsigned char *route)
route 1191 sound/pci/emu10k1/emumixer.c snd_emu10k1_compose_audigy_fxrt1(route));
route 1193 sound/pci/emu10k1/emumixer.c snd_emu10k1_compose_audigy_fxrt2(route));
route 1196 sound/pci/emu10k1/emumixer.c snd_emu10k1_compose_send_routing(route));
route 1094 sound/pci/es1968.c int mode, int route)
route 1132 sound/pci/es1968.c apu_set_register(chip, apu, 11, route);
route 1950 sound/soc/codecs/ab8500-codec.c const struct snd_soc_dapm_route *route;
route 1972 sound/soc/codecs/ab8500-codec.c route = &ab8500_dapm_routes_mic1a_vamicx[amics->mic1a_micbias];
route 1973 sound/soc/codecs/ab8500-codec.c status = snd_soc_dapm_add_routes(dapm, route, 1);
route 1976 sound/soc/codecs/ab8500-codec.c route = &ab8500_dapm_routes_mic1b_vamicx[amics->mic1b_micbias];
route 1977 sound/soc/codecs/ab8500-codec.c status |= snd_soc_dapm_add_routes(dapm, route, 1);
route 1980 sound/soc/codecs/ab8500-codec.c route = &ab8500_dapm_routes_mic2_vamicx[amics->mic2_micbias];
route 1981 sound/soc/codecs/ab8500-codec.c status |= snd_soc_dapm_add_routes(dapm, route, 1);
route 707 sound/soc/codecs/hdac_hdmi.c static void hdac_hdmi_fill_route(struct snd_soc_dapm_route *route,
route 712 sound/soc/codecs/hdac_hdmi.c route->sink = sink;
route 713 sound/soc/codecs/hdac_hdmi.c route->source = src;
route 714 sound/soc/codecs/hdac_hdmi.c route->control = control;
route 715 sound/soc/codecs/hdac_hdmi.c route->connected = handler;
route 1032 sound/soc/codecs/hdac_hdmi.c struct snd_soc_dapm_route *route, int rindex)
route 1044 sound/soc/codecs/hdac_hdmi.c hdac_hdmi_fill_route(&route[rindex],
route 1075 sound/soc/codecs/hdac_hdmi.c struct snd_soc_dapm_route *route;
route 1144 sound/soc/codecs/hdac_hdmi.c route = devm_kzalloc(dapm->dev, (sizeof(*route) * num_routes),
route 1146 sound/soc/codecs/hdac_hdmi.c if (!route)
route 1157 sound/soc/codecs/hdac_hdmi.c hdac_hdmi_fill_route(&route[i],
route 1164 sound/soc/codecs/hdac_hdmi.c hdac_hdmi_add_pinmux_cvt_route(hdev, widgets, route, i);
route 1169 sound/soc/codecs/hdac_hdmi.c snd_soc_dapm_add_routes(dapm, route, num_routes);
route 1722 sound/soc/codecs/hdac_hdmi.c struct snd_soc_dapm_route *route;
route 1732 sound/soc/codecs/hdac_hdmi.c route = devm_kcalloc(dapm->dev, hdmi->num_ports,
route 1733 sound/soc/codecs/hdac_hdmi.c sizeof(*route), GFP_KERNEL);
route 1734 sound/soc/codecs/hdac_hdmi.c if (!route)
route 1753 sound/soc/codecs/hdac_hdmi.c hdac_hdmi_fill_route(&route[i], pin->ports[j].jack_pin,
route 1765 sound/soc/codecs/hdac_hdmi.c ret = snd_soc_dapm_add_routes(dapm, route, hdmi->num_ports);
route 650 sound/soc/codecs/hdmi-codec.c struct snd_soc_dapm_route route = {
route 657 sound/soc/codecs/hdmi-codec.c ret = snd_soc_dapm_add_routes(dapm, &route, 1);
route 464 sound/soc/qcom/qdsp6/q6adm.c struct q6adm_cmd_matrix_map_routings_v5 *route;
route 473 sound/soc/qcom/qdsp6/q6adm.c pkt_size = (APR_HDR_SIZE + sizeof(*route) + sizeof(*node) +
route 481 sound/soc/qcom/qdsp6/q6adm.c route = matrix_map + APR_HDR_SIZE;
route 482 sound/soc/qcom/qdsp6/q6adm.c node = matrix_map + APR_HDR_SIZE + sizeof(*route);
route 483 sound/soc/qcom/qdsp6/q6adm.c copps_list = matrix_map + APR_HDR_SIZE + sizeof(*route) + sizeof(*node);
route 491 sound/soc/qcom/qdsp6/q6adm.c route->num_sessions = 1;
route 495 sound/soc/qcom/qdsp6/q6adm.c route->matrix_id = ADM_MATRIX_ID_AUDIO_RX;
route 498 sound/soc/qcom/qdsp6/q6adm.c route->matrix_id = ADM_MATRIX_ID_AUDIO_TX;
route 201 sound/soc/sh/rcar/src.c u32 cr, route;
route 255 sound/soc/sh/rcar/src.c route = 0x0;
route 257 sound/soc/sh/rcar/src.c route = 0x1;
route 261 sound/soc/sh/rcar/src.c route |= rsnd_io_is_play(io) ?
route 323 sound/soc/sh/rcar/src.c rsnd_mod_write(mod, SRC_ROUTE_MODE0, route);
route 2895 sound/soc/soc-dapm.c const struct snd_soc_dapm_route *route)
route 2911 sound/soc/soc-dapm.c prefix, route->sink);
route 2914 sound/soc/soc-dapm.c prefix, route->source);
route 2917 sound/soc/soc-dapm.c sink = route->sink;
route 2918 sound/soc/soc-dapm.c source = route->source;
route 2968 sound/soc/soc-dapm.c route->source);
route 2973 sound/soc/soc-dapm.c route->sink);
route 2981 sound/soc/soc-dapm.c ret = snd_soc_dapm_add_path(dapm, wsource, wsink, route->control,
route 2982 sound/soc/soc-dapm.c route->connected);
route 2989 sound/soc/soc-dapm.c source, route->control, sink);
route 2994 sound/soc/soc-dapm.c const struct snd_soc_dapm_route *route)
route 3004 sound/soc/soc-dapm.c if (route->control) {
route 3013 sound/soc/soc-dapm.c prefix, route->sink);
route 3016 sound/soc/soc-dapm.c prefix, route->source);
route 3019 sound/soc/soc-dapm.c sink = route->sink;
route 3020 sound/soc/soc-dapm.c source = route->source;
route 3069 sound/soc/soc-dapm.c const struct snd_soc_dapm_route *route, int num)
route 3075 sound/soc/soc-dapm.c r = snd_soc_dapm_add_route(dapm, route);
route 3078 sound/soc/soc-dapm.c route->source,
route 3079 sound/soc/soc-dapm.c route->control ? route->control : "direct",
route 3080 sound/soc/soc-dapm.c route->sink);
route 3083 sound/soc/soc-dapm.c route++;
route 3100 sound/soc/soc-dapm.c const struct snd_soc_dapm_route *route, int num)
route 3106 sound/soc/soc-dapm.c snd_soc_dapm_del_route(dapm, route);
route 3107 sound/soc/soc-dapm.c route++;
route 3116 sound/soc/soc-dapm.c const struct snd_soc_dapm_route *route)
route 3119 sound/soc/soc-dapm.c route->source,
route 3122 sound/soc/soc-dapm.c route->sink,
route 3129 sound/soc/soc-dapm.c route->source);
route 3135 sound/soc/soc-dapm.c route->sink);
route 3139 sound/soc/soc-dapm.c if (route->control || route->connected)
route 3141 sound/soc/soc-dapm.c route->source, route->sink);
route 3152 sound/soc/soc-dapm.c route->source, route->sink);
route 3155 sound/soc/soc-dapm.c count, route->source, route->sink);
route 3177 sound/soc/soc-dapm.c const struct snd_soc_dapm_route *route, int num)
route 3184 sound/soc/soc-dapm.c err = snd_soc_dapm_weak_route(dapm, route);
route 3187 sound/soc/soc-dapm.c route++;
route 435 sound/soc/soc-topology.c struct snd_soc_dapm_route *route =
route 445 sound/soc/soc-topology.c kfree(route);
route 1186 sound/soc/soc-topology.c struct snd_soc_dapm_route *route)
route 1190 sound/soc/soc-topology.c route);
route 136 sound/soc/sof/pm.c sroute->route->sink,
route 137 sound/soc/sof/pm.c sroute->route->control ? sroute->route->control
route 139 sound/soc/sof/pm.c sroute->route->source);
route 342 sound/soc/sof/sof-priv.h struct snd_soc_dapm_route *route;
route 2992 sound/soc/sof/topology.c struct snd_soc_dapm_route *route)
route 2997 sound/soc/sof/topology.c struct snd_soc_dobj *dobj = &route->dobj;
route 3019 sound/soc/sof/topology.c route->sink, route->control ? route->control : "none",
route 3020 sound/soc/sof/topology.c route->source);
route 3023 sound/soc/sof/topology.c source_swidget = snd_sof_find_swidget(sdev, (char *)route->source);
route 3026 sound/soc/sof/topology.c route->source);
route 3044 sound/soc/sof/topology.c sink_swidget = snd_sof_find_swidget(sdev, (char *)route->sink);
route 3047 sound/soc/sof/topology.c route->sink);
route 3071 sound/soc/sof/topology.c route->source, route->sink);
route 3083 sound/soc/sof/topology.c route->sink,
route 3084 sound/soc/sof/topology.c route->control ? route->control : "none",
route 3085 sound/soc/sof/topology.c route->source);
route 3092 sound/soc/sof/topology.c route->sink,
route 3093 sound/soc/sof/topology.c route->control ? route->control : "none",
route 3094 sound/soc/sof/topology.c route->source, reply.error);
route 3099 sound/soc/sof/topology.c sroute->route = route;
route 51 virt/kvm/irqchip.c struct kvm_kernel_irq_routing_entry route;
route 56 virt/kvm/irqchip.c route.msi.address_lo = msi->address_lo;
route 57 virt/kvm/irqchip.c route.msi.address_hi = msi->address_hi;
route 58 virt/kvm/irqchip.c route.msi.data = msi->data;
route 59 virt/kvm/irqchip.c route.msi.flags = msi->flags;
route 60 virt/kvm/irqchip.c route.msi.devid = msi->devid;
route 62 virt/kvm/irqchip.c return kvm_set_msi(&route, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1, false);
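The Thunderbolt entries above (drivers/thunderbolt/tb.h:638-640 and drivers/thunderbolt/xdomain.c:187-196, 514-516) show the 64-bit route string being split into the 32-bit route_hi/route_lo header fields and its hop count being derived with fls64(). The following is a minimal userspace C sketch of that arithmetic, not kernel code: the struct name, the helper names and the 8-bits-per-hop value of TB_ROUTE_SHIFT are assumptions here, only the shift/mask logic is taken from the lines listed above.

/*
 * Illustrative sketch only: packing and unpacking of a Thunderbolt route
 * string as in tb_xdp_fill_header() and the XDomain request handler listed
 * above.  Struct layout and TB_ROUTE_SHIFT value are assumptions.
 */
#include <stdint.h>
#include <stdio.h>

#define TB_ROUTE_SHIFT 8			/* assumed: bits per hop in the route string */

struct xd_hdr {
	uint32_t route_hi;
	uint32_t route_lo;
};

static void fill_route(struct xd_hdr *hdr, uint64_t route)
{
	hdr->route_hi = (uint32_t)(route >> 32);	/* upper_32_bits(route) */
	hdr->route_lo = (uint32_t)route;		/* lower_32_bits(route) */
}

static uint64_t read_route(const struct xd_hdr *hdr)
{
	/* bit 63 is masked off, as in: ... & ~BIT_ULL(63) */
	return (((uint64_t)hdr->route_hi << 32) | hdr->route_lo) & ~(1ULL << 63);
}

/* same arithmetic as tb_route_length(): number of hops encoded in a route */
static int route_length(uint64_t route)
{
	int fls = route ? 64 - __builtin_clzll(route) : 0;	/* fls64(route) */

	return (fls + TB_ROUTE_SHIFT - 1) / TB_ROUTE_SHIFT;
}

int main(void)
{
	struct xd_hdr hdr;
	uint64_t route = 0x0301;	/* example value with two non-zero 8-bit hop fields */

	fill_route(&hdr, route);
	printf("route %llx has %d hop(s)\n",
	       (unsigned long long)read_route(&hdr),
	       route_length(read_route(&hdr)));
	return 0;
}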
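Similarly, the snd_emu10k1_compose_*() macros listed from include/sound/emu10k1.h pack per-voice send-route bytes into 32-bit register values: the first four bytes as 4-bit fields for the original EMU10K1, and all eight bytes as 8-bit fields split across the Audigy FXRT1/FXRT2 words, which is how update_emu10k1_fxrt() in sound/pci/emu10k1/emumixer.c uses them. A small standalone sketch follows; the macro bodies are copied from the listing, while the demo values and the printed labels are made up for illustration.

#include <stdio.h>

#define snd_emu10k1_compose_send_routing(route) \
	((route[0] | (route[1] << 4) | (route[2] << 8) | (route[3] << 12)) << 16)
#define snd_emu10k1_compose_audigy_fxrt1(route) \
	((unsigned int)route[0] | ((unsigned int)route[1] << 8) | \
	 ((unsigned int)route[2] << 16) | ((unsigned int)route[3] << 24))
#define snd_emu10k1_compose_audigy_fxrt2(route) \
	((unsigned int)route[4] | ((unsigned int)route[5] << 8) | \
	 ((unsigned int)route[6] << 16) | ((unsigned int)route[7] << 24))

int main(void)
{
	/* eight FX-bus destinations for one voice; arbitrary demo values */
	unsigned char route[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };

	printf("emu10k1 send routing word: 0x%08x\n",
	       (unsigned int)snd_emu10k1_compose_send_routing(route));
	printf("audigy fxrt1: 0x%08x fxrt2: 0x%08x\n",
	       snd_emu10k1_compose_audigy_fxrt1(route),
	       snd_emu10k1_compose_audigy_fxrt2(route));
	return 0;
}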