tmp_dev            97 drivers/crypto/qat/qat_common/qat_crypto.c 	struct adf_accel_dev *accel_dev = NULL, *tmp_dev;
tmp_dev           101 drivers/crypto/qat/qat_common/qat_crypto.c 	list_for_each_entry(tmp_dev, adf_devmgr_get_head(), list) {
tmp_dev           104 drivers/crypto/qat/qat_common/qat_crypto.c 		if ((node == dev_to_node(&GET_DEV(tmp_dev)) ||
tmp_dev           105 drivers/crypto/qat/qat_common/qat_crypto.c 		     dev_to_node(&GET_DEV(tmp_dev)) < 0) &&
tmp_dev           106 drivers/crypto/qat/qat_common/qat_crypto.c 		    adf_dev_started(tmp_dev) &&
tmp_dev           107 drivers/crypto/qat/qat_common/qat_crypto.c 		    !list_empty(&tmp_dev->crypto_list)) {
tmp_dev           108 drivers/crypto/qat/qat_common/qat_crypto.c 			ctr = atomic_read(&tmp_dev->ref_count);
tmp_dev           110 drivers/crypto/qat/qat_common/qat_crypto.c 				accel_dev = tmp_dev;
tmp_dev           119 drivers/crypto/qat/qat_common/qat_crypto.c 		list_for_each_entry(tmp_dev, adf_devmgr_get_head(), list) {
tmp_dev           120 drivers/crypto/qat/qat_common/qat_crypto.c 			if (adf_dev_started(tmp_dev) &&
tmp_dev           121 drivers/crypto/qat/qat_common/qat_crypto.c 			    !list_empty(&tmp_dev->crypto_list)) {
tmp_dev           122 drivers/crypto/qat/qat_common/qat_crypto.c 				accel_dev = tmp_dev;
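
The qat_crypto.c hits above all come from the crypto-instance selection path: it prefers a started accelerator on the caller's NUMA node (or one with no node affinity) and, among those candidates, keeps the device with the lowest ref_count. A minimal sketch of that pattern, reassembled from the lines shown (pick_dev() is an illustrative wrapper name, not a kernel symbol):

struct adf_accel_dev *pick_dev(int node)
{
	struct adf_accel_dev *accel_dev = NULL, *tmp_dev;
	unsigned long best = ~0;

	list_for_each_entry(tmp_dev, adf_devmgr_get_head(), list) {
		int dev_node = dev_to_node(&GET_DEV(tmp_dev));

		/* node-local (or node-less) started device with crypto instances */
		if ((node == dev_node || dev_node < 0) &&
		    adf_dev_started(tmp_dev) &&
		    !list_empty(&tmp_dev->crypto_list)) {
			unsigned long ctr = atomic_read(&tmp_dev->ref_count);

			/* keep the least-referenced candidate */
			if (ctr < best) {
				accel_dev = tmp_dev;
				best = ctr;
			}
		}
	}
	return accel_dev;
}
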
tmp_dev           187 drivers/crypto/virtio/virtio_crypto_mgr.c 	struct virtio_crypto *vcrypto_dev = NULL, *tmp_dev;
tmp_dev           192 drivers/crypto/virtio/virtio_crypto_mgr.c 	list_for_each_entry(tmp_dev, virtcrypto_devmgr_get_head(), list) {
tmp_dev           194 drivers/crypto/virtio/virtio_crypto_mgr.c 		if ((node == dev_to_node(&tmp_dev->vdev->dev) ||
tmp_dev           195 drivers/crypto/virtio/virtio_crypto_mgr.c 		     dev_to_node(&tmp_dev->vdev->dev) < 0) &&
tmp_dev           196 drivers/crypto/virtio/virtio_crypto_mgr.c 		    virtcrypto_dev_started(tmp_dev) &&
tmp_dev           197 drivers/crypto/virtio/virtio_crypto_mgr.c 		    virtcrypto_algo_is_supported(tmp_dev, service, algo)) {
tmp_dev           198 drivers/crypto/virtio/virtio_crypto_mgr.c 			ctr = atomic_read(&tmp_dev->ref_count);
tmp_dev           200 drivers/crypto/virtio/virtio_crypto_mgr.c 				vcrypto_dev = tmp_dev;
tmp_dev           210 drivers/crypto/virtio/virtio_crypto_mgr.c 		list_for_each_entry(tmp_dev,
tmp_dev           212 drivers/crypto/virtio/virtio_crypto_mgr.c 			if (virtcrypto_dev_started(tmp_dev) &&
tmp_dev           213 drivers/crypto/virtio/virtio_crypto_mgr.c 			    virtcrypto_algo_is_supported(tmp_dev,
tmp_dev           215 drivers/crypto/virtio/virtio_crypto_mgr.c 				vcrypto_dev = tmp_dev;
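
The virtio_crypto_mgr.c hits mirror the same selection logic, with an extra virtcrypto_algo_is_supported() gate, plus a fallback pass that accepts any started device supporting the algorithm when no node-local one qualifies. A hedged sketch of that fallback (any_started_dev() is an illustrative name, not a kernel symbol):

static struct virtio_crypto *any_started_dev(uint32_t service, uint32_t algo)
{
	struct virtio_crypto *tmp_dev;

	/* second pass: ignore NUMA locality, take the first usable device */
	list_for_each_entry(tmp_dev, virtcrypto_devmgr_get_head(), list) {
		if (virtcrypto_dev_started(tmp_dev) &&
		    virtcrypto_algo_is_supported(tmp_dev, service, algo))
			return tmp_dev;
	}
	return NULL;
}
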
tmp_dev           107 drivers/iommu/dmar.c 	struct device *tmp_dev;
tmp_dev           110 drivers/iommu/dmar.c 		for_each_active_dev_scope(*devices, *cnt, i, tmp_dev)
tmp_dev           111 drivers/iommu/dmar.c 			put_device(tmp_dev);
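
The dmar.c hits are reference-count cleanup: for_each_active_dev_scope() walks the struct device pointers recorded in a DMAR device scope and put_device() drops the reference taken when each entry was added.
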
tmp_dev           243 drivers/md/md-linear.c 	struct dev_info *tmp_dev;
tmp_dev           251 drivers/md/md-linear.c 	tmp_dev = which_dev(mddev, bio_sector);
tmp_dev           252 drivers/md/md-linear.c 	start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors;
tmp_dev           253 drivers/md/md-linear.c 	end_sector = tmp_dev->end_sector;
tmp_dev           254 drivers/md/md-linear.c 	data_offset = tmp_dev->rdev->data_offset;
tmp_dev           260 drivers/md/md-linear.c 	if (unlikely(is_mddev_broken(tmp_dev->rdev, "linear"))) {
tmp_dev           274 drivers/md/md-linear.c 	bio_set_dev(bio, tmp_dev->rdev->bdev);
tmp_dev           297 drivers/md/md-linear.c 	       bdevname(tmp_dev->rdev->bdev, b),
tmp_dev           298 drivers/md/md-linear.c 	       (unsigned long long)tmp_dev->rdev->sectors,
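
The md-linear.c hits come from the bio remapping path: which_dev() locates the component device whose range covers the bio's sector, and the bio is then rebased onto that rdev. A simplified sketch of the remap (a fragment, assuming the locals of the surrounding request function):

	tmp_dev = which_dev(mddev, bio_sector);
	start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors;
	data_offset = tmp_dev->rdev->data_offset;

	/* retarget the bio at the component device, rebasing the sector */
	bio_set_dev(bio, tmp_dev->rdev->bdev);
	bio->bi_iter.bi_sector = bio_sector - start_sector + data_offset;
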
tmp_dev           571 drivers/md/raid0.c 	struct md_rdev *tmp_dev;
tmp_dev           611 drivers/md/raid0.c 		tmp_dev = map_sector(mddev, zone, orig_sector, &sector);
tmp_dev           614 drivers/md/raid0.c 		tmp_dev = map_sector(mddev, zone, sector, &sector);
tmp_dev           622 drivers/md/raid0.c 	if (unlikely(is_mddev_broken(tmp_dev, "raid0"))) {
tmp_dev           627 drivers/md/raid0.c 	bio_set_dev(bio, tmp_dev->bdev);
tmp_dev           629 drivers/md/raid0.c 		tmp_dev->data_offset;
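
raid0 performs the equivalent remap after map_sector() has chosen the member rdev and the sector within it; roughly (a sketch, not the verbatim kernel code):

	if (unlikely(is_mddev_broken(tmp_dev, "raid0"))) {
		bio_io_error(bio);
		return true;
	}

	/* retarget at the member device, offset into its data area */
	bio_set_dev(bio, tmp_dev->bdev);
	bio->bi_iter.bi_sector = sector + zone->dev_start +
		tmp_dev->data_offset;
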
tmp_dev           315 drivers/net/ethernet/mellanox/mlx5/core/dev.c 	struct mlx5_core_dev *tmp_dev;
tmp_dev           324 drivers/net/ethernet/mellanox/mlx5/core/dev.c 		tmp_dev = container_of(priv, struct mlx5_core_dev, priv);
tmp_dev           325 drivers/net/ethernet/mellanox/mlx5/core/dev.c 		if (!mlx5_core_is_pf(tmp_dev))
tmp_dev           328 drivers/net/ethernet/mellanox/mlx5/core/dev.c 		if ((dev != tmp_dev) && (mlx5_gen_pci_id(tmp_dev) == pci_id)) {
tmp_dev           329 drivers/net/ethernet/mellanox/mlx5/core/dev.c 			res = tmp_dev;
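
The mlx5 dev.c hits implement a sibling lookup: walk the global mlx5 device list and return the other PF that reports the same generated PCI id, i.e. the other port/function of the same adapter. A sketch under the assumption of the usual mlx5_dev_list/dev_list naming (find_sibling_pf() is illustrative):

static struct mlx5_core_dev *find_sibling_pf(struct mlx5_core_dev *dev)
{
	struct mlx5_core_dev *tmp_dev;
	struct mlx5_priv *priv;
	u32 pci_id = mlx5_gen_pci_id(dev);

	list_for_each_entry(priv, &mlx5_dev_list, dev_list) {
		tmp_dev = container_of(priv, struct mlx5_core_dev, priv);
		if (!mlx5_core_is_pf(tmp_dev))
			continue;

		/* same PCI id but a different mlx5_core_dev: the sibling PF */
		if (dev != tmp_dev && mlx5_gen_pci_id(tmp_dev) == pci_id)
			return tmp_dev;
	}
	return NULL;
}
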
tmp_dev           562 drivers/net/ethernet/mellanox/mlx5/core/lag.c 	struct mlx5_core_dev *tmp_dev;
tmp_dev           570 drivers/net/ethernet/mellanox/mlx5/core/lag.c 	tmp_dev = mlx5_get_next_phys_dev(dev);
tmp_dev           571 drivers/net/ethernet/mellanox/mlx5/core/lag.c 	if (tmp_dev)
tmp_dev           572 drivers/net/ethernet/mellanox/mlx5/core/lag.c 		ldev = tmp_dev->priv.lag;
tmp_dev            80 drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c 		struct mlx5_core_dev *tmp_dev = NULL;
tmp_dev            85 drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c 				tmp_dev = iter->devs[i];
tmp_dev            93 drivers/net/ethernet/mellanox/mlx5/core/lib/devcom.c 		sguid1 = mlx5_query_nic_system_image_guid(tmp_dev);
tmp_dev          2870 drivers/net/usb/hso.c 	struct hso_device *tmp_dev = NULL;
tmp_dev          2910 drivers/net/usb/hso.c 				tmp_dev = hso_dev;
tmp_dev          2931 drivers/net/usb/hso.c 		if (tmp_dev)
tmp_dev          2932 drivers/net/usb/hso.c 			hso_dev = tmp_dev;
tmp_dev           217 drivers/pcmcia/ds.c 	struct device *tmp_dev;
tmp_dev           218 drivers/pcmcia/ds.c 	tmp_dev = get_device(&p_dev->dev);
tmp_dev           219 drivers/pcmcia/ds.c 	if (!tmp_dev)
tmp_dev           221 drivers/pcmcia/ds.c 	return to_pcmcia_dev(tmp_dev);
tmp_dev           487 drivers/pcmcia/ds.c 	struct pcmcia_device *p_dev, *tmp_dev;
tmp_dev           536 drivers/pcmcia/ds.c 	list_for_each_entry(tmp_dev, &s->devices_list, socket_device_list)
tmp_dev           537 drivers/pcmcia/ds.c 		if (p_dev->func == tmp_dev->func) {
tmp_dev           538 drivers/pcmcia/ds.c 			p_dev->function_config = tmp_dev->function_config;
tmp_dev           539 drivers/pcmcia/ds.c 			p_dev->irq = tmp_dev->irq;
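
The two pcmcia/ds.c clusters are reference handling and multifunction setup: pcmcia_get_dev() takes a reference on the embedded struct device before returning the pcmcia_device, and in the device-add path a new function of a multifunction card inherits the shared function_config and irq from an already-registered sibling with the same function number.
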
tmp_dev          2700 drivers/target/target_core_user.c 	struct tcmu_dev *udev, *tmp_dev;
tmp_dev          2706 drivers/target/target_core_user.c 	list_for_each_entry_safe(udev, tmp_dev, &devs, timedout_entry) {
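
The target_core_user.c hit uses the _safe list iterator because each udev may be unlinked from the local devs list inside the loop body; tmp_dev caches the next entry so the walk survives the deletion. A generic sketch of the idiom:

	list_for_each_entry_safe(udev, tmp_dev, &devs, timedout_entry) {
		/* removing udev is safe: tmp_dev already points at the next entry */
		list_del_init(&udev->timedout_entry);
		/* ... handle the timed-out device ... */
	}
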