kv                 83 arch/arc/mm/highmem.c void __kunmap_atomic(void *kv)
kv                 85 arch/arc/mm/highmem.c 	unsigned long kvaddr = (unsigned long)kv;
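
The ARC hit is the arch backend of the atomic kmap API: kv is the kernel virtual address that kmap_atomic() handed out. A minimal sketch of the usual pairing, with page/src/len as placeholders (callers normally go through the generic kunmap_atomic() wrapper, which ends up in the arch's __kunmap_atomic()):

    void *kv = kmap_atomic(page);   /* temporarily map a (possibly highmem) page */
    memcpy(kv, src, len);           /* touch it through the temporary mapping */
    kunmap_atomic(kv);              /* tears the mapping down again */
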
kv               1041 crypto/testmgr.c 	struct kvec kv;
kv               1044 crypto/testmgr.c 	kv.iov_base = (void *)vec->plaintext;
kv               1045 crypto/testmgr.c 	kv.iov_len = vec->psize;
kv               1046 crypto/testmgr.c 	iov_iter_kvec(&input, WRITE, &kv, 1, vec->psize);
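
Here kv is a struct kvec wrapping the test vector's plaintext so an iov_iter can consume it. A hedged sketch of the shape, with buf/len as placeholders; the kvec must stay live for as long as the iterator is used, since iov_iter_kvec() only stores a pointer to it. The net/9p and net/core/skbuff hits further down are the same pattern, once in the READ direction and once feeding kernel_sendmsg_locked():

    struct kvec kv = {
        .iov_base = (void *)buf,    /* kvec carries a non-const pointer */
        .iov_len  = len,
    };
    struct iov_iter it;

    iov_iter_kvec(&it, WRITE, &kv, 1, len);   /* "it" now sources data from buf */
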
kv               1866 drivers/gpu/drm/amd/amdgpu/si_dpm.c 	s64 kt, kv, leakage_w, i_leakage, vddc;
kv               1883 drivers/gpu/drm/amd/amdgpu/si_dpm.c 	kv = drm_fixp_mul(av, drm_fixp_exp(drm_fixp_mul(bv, vddc)));
kv               1885 drivers/gpu/drm/amd/amdgpu/si_dpm.c 	leakage_w = drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc);
kv               1904 drivers/gpu/drm/amd/amdgpu/si_dpm.c 	s64 kt, kv, leakage_w, i_leakage, vddc;
kv               1910 drivers/gpu/drm/amd/amdgpu/si_dpm.c 	kv = drm_fixp_mul(div64_s64(drm_int2fixp(coeff->av), 100000000),
kv               1913 drivers/gpu/drm/amd/amdgpu/si_dpm.c 	leakage_w = drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc);
kv                747 drivers/gpu/drm/radeon/ni_dpm.c 	s64 kt, kv, leakage_w, i_leakage, vddc, temperature;
kv                755 drivers/gpu/drm/radeon/ni_dpm.c 	kv = drm_fixp_mul(div64_s64(drm_int2fixp(coeff->av), 1000),
kv                758 drivers/gpu/drm/radeon/ni_dpm.c 	leakage_w = drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc);
kv               1775 drivers/gpu/drm/radeon/si_dpm.c 	s64 kt, kv, leakage_w, i_leakage, vddc;
kv               1792 drivers/gpu/drm/radeon/si_dpm.c 	kv = drm_fixp_mul(av, drm_fixp_exp(drm_fixp_mul(bv, vddc)));
kv               1794 drivers/gpu/drm/radeon/si_dpm.c 	leakage_w = drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc);
kv               1813 drivers/gpu/drm/radeon/si_dpm.c 	s64 kt, kv, leakage_w, i_leakage, vddc;
kv               1819 drivers/gpu/drm/radeon/si_dpm.c 	kv = drm_fixp_mul(div64_s64(drm_int2fixp(coeff->av), 100000000),
kv               1822 drivers/gpu/drm/radeon/si_dpm.c 	leakage_w = drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc);
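
All six dpm hits evaluate the same leakage model in 32.32 fixed point: leakage_w = i_leakage * kt * kv * vddc, with kv = av * e^(bv * vddc). Only the coefficient scaling differs per chip generation (divide by 1000 in ni_dpm, by 100000000 in the si_dpm variants). Condensed sketch using the drm_fixp helpers, with coeff, i_leakage, kt and vddc as in the listing:

    s64 av = div64_s64(drm_int2fixp(coeff->av), 1000);   /* scale factor is per-ASIC */
    s64 bv = div64_s64(drm_int2fixp(coeff->bv), 1000);
    s64 kv = drm_fixp_mul(av, drm_fixp_exp(drm_fixp_mul(bv, vddc)));
    s64 leakage_w =
        drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc);
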
kv                422 drivers/media/usb/pvrusb2/pvrusb2-ctrl.c 	int mask,val,kv,mode,ret;
kv                446 drivers/media/usb/pvrusb2/pvrusb2-ctrl.c 		if (parse_mtoken(ptr,cnt,&kv,names,valid_bits)) {
kv                455 drivers/media/usb/pvrusb2/pvrusb2-ctrl.c 			val |= kv;
kv                458 drivers/media/usb/pvrusb2/pvrusb2-ctrl.c 			mask |= kv;
kv                459 drivers/media/usb/pvrusb2/pvrusb2-ctrl.c 			val &= ~kv;
kv                462 drivers/media/usb/pvrusb2/pvrusb2-ctrl.c 			mask |= kv;
kv                463 drivers/media/usb/pvrusb2/pvrusb2-ctrl.c 			val |= kv;
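
In pvrusb2, kv is the numeric value of one parsed keyword in a "+flag"/"-flag" style expression: each token widens the write mask and then sets or clears its bit in val. Roughly (the mode dispatch is paraphrased from the listed lines, not verbatim):

    if (mode == '-') {      /* "-name": clear the bit */
        mask |= kv;
        val &= ~kv;
    } else {                /* "+name" or bare name: set it */
        mask |= kv;
        val |= kv;
    }
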
kv               2073 net/9p/client.c 	struct kvec kv = {.iov_base = data, .iov_len = count};
kv               2076 net/9p/client.c 	iov_iter_kvec(&to, READ, &kv, 1, count);
kv               8026 net/core/devlink.c 		const struct nlattr *kv;
kv               8032 net/core/devlink.c 		nla_for_each_nested(kv, nlattr, rem_kv) {
kv               8033 net/core/devlink.c 			if (nla_type(kv) != DEVLINK_ATTR_INFO_VERSION_VALUE)
kv               8036 net/core/devlink.c 			strlcat(buf, nla_data(kv), len);
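
The devlink hit walks one nested netlink attribute and concatenates every version-value string it finds. Sketch of the loop, reconstructed around the listed lines (buf, len and the separator are assumptions):

    const struct nlattr *kv;
    int rem_kv;

    nla_for_each_nested(kv, nlattr, rem_kv) {
        if (nla_type(kv) != DEVLINK_ATTR_INFO_VERSION_VALUE)
            continue;                     /* skip keys and unrelated attrs */
        strlcat(buf, nla_data(kv), len);
        strlcat(buf, " ", len);           /* assumed separator */
    }
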
kv               2462 net/core/skbuff.c 		struct kvec kv;
kv               2466 net/core/skbuff.c 		kv.iov_base = skb->data + offset;
kv               2467 net/core/skbuff.c 		kv.iov_len = slen;
kv               2471 net/core/skbuff.c 		ret = kernel_sendmsg_locked(sk, &msg, &kv, 1, slen);
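
Same kvec idea again, this time pointed at a slice of the skb's linear data and pushed out through the locked sendmsg path. Sketch (the msg_flags value is an assumption):

    struct msghdr msg = { .msg_flags = MSG_DONTWAIT };
    struct kvec kv = {
        .iov_base = skb->data + offset,   /* slice of the linear area */
        .iov_len  = slen,
    };

    ret = kernel_sendmsg_locked(sk, &msg, &kv, 1, slen);
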
kv                138 net/ipv4/fib_trie.c 	struct key_vector kv[1];
kv                139 net/ipv4/fib_trie.c #define tn_bits kv[0].bits
kv                142 net/ipv4/fib_trie.c #define TNODE_SIZE(n)	offsetof(struct tnode, kv[0].tnode[n])
kv                167 net/ipv4/fib_trie.c 	struct key_vector kv[1];
kv                188 net/ipv4/fib_trie.c static inline struct tnode *tn_info(struct key_vector *kv)
kv                190 net/ipv4/fib_trie.c 	return container_of(kv, struct tnode, kv[0]);
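
In fib_trie.c, kv is a one-element struct key_vector array embedded at the tail of both struct tnode and struct trie. Because kv[0] sits at a fixed offset, container_of() recovers the enclosing tnode from any key_vector pointer, which is exactly what tn_info() does. Simplified shape, other fields elided:

    struct tnode {
        /* ... bookkeeping fields ... */
        struct key_vector kv[1];   /* variable-length tail; kv[0] is "the node" */
    };

    static inline struct tnode *tn_info(struct key_vector *kv)
    {
        return container_of(kv, struct tnode, kv[0]);
    }
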
kv                218 net/ipv4/fib_trie.c #define get_cindex(key, kv) (((key) ^ (kv)->key) >> (kv)->pos)
kv                220 net/ipv4/fib_trie.c static inline unsigned long get_index(t_key key, struct key_vector *kv)
kv                222 net/ipv4/fib_trie.c 	unsigned long index = key ^ kv->key;
kv                224 net/ipv4/fib_trie.c 	if ((BITS_PER_LONG <= KEYLENGTH) && (KEYLENGTH == kv->pos))
kv                227 net/ipv4/fib_trie.c 	return index >> kv->pos;
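
get_index() turns a lookup key into a child slot by XORing against the node's key and shifting away the bits this node does not discriminate on. Worked example: with kv->key = 0xC0A80000 and kv->pos = 16, key = 0xC0A90102 gives (0xC0A90102 ^ 0xC0A80000) >> 16 = 0x00010102 >> 16 = 1, i.e. child 1. The BITS_PER_LONG <= KEYLENGTH guard is there because shifting an unsigned long by pos == KEYLENGTH == 32 would be undefined on 32-bit, so the root key_vector (which discriminates on no bits) is special-cased to index 0.
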
kv                358 net/ipv4/fib_trie.c 	struct tnode *kv;
kv                360 net/ipv4/fib_trie.c 	kv = kmem_cache_alloc(trie_leaf_kmem, GFP_KERNEL);
kv                361 net/ipv4/fib_trie.c 	if (!kv)
kv                365 net/ipv4/fib_trie.c 	l = kv->kv;
kv                399 net/ipv4/fib_trie.c 	tn = tnode->kv;
kv                500 net/ipv4/fib_trie.c 		tn = container_of(head, struct tnode, rcu)->kv;
kv                940 net/ipv4/fib_trie.c 	struct key_vector *pn, *n = t->kv;
kv               1325 net/ipv4/fib_trie.c 	pn = t->kv;
kv               1678 net/ipv4/fib_trie.c 	struct key_vector *pn = t->kv;
kv               1736 net/ipv4/fib_trie.c 	struct key_vector *l, *tp = ot->kv;
kv               1795 net/ipv4/fib_trie.c 	struct key_vector *pn = t->kv;
kv               1864 net/ipv4/fib_trie.c 	struct key_vector *pn = t->kv;
kv               1952 net/ipv4/fib_trie.c 	struct key_vector *pn = t->kv;
kv               2044 net/ipv4/fib_trie.c 	struct key_vector *l, *tp = t->kv;
kv               2171 net/ipv4/fib_trie.c 	struct key_vector *l, *tp = t->kv;
kv               2243 net/ipv4/fib_trie.c 	t->kv[0].pos = KEYLENGTH;
kv               2244 net/ipv4/fib_trie.c 	t->kv[0].slen = KEYLENGTH;
kv               2317 net/ipv4/fib_trie.c 	pn = t->kv;
kv               2715 net/ipv4/fib_trie.c 	iter->tnode = t->kv;
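
The many t->kv hits are all traversals starting from the key_vector embedded in struct trie, which doubles as the root; its single child is the top of the trie proper. A loose sketch of a descent, assuming the file's get_child_rcu() accessor and an RCU read-side critical section:

    struct key_vector *n = get_child_rcu(t->kv, 0);   /* root's only child */

    while (n && n->bits) {                            /* bits == 0 means leaf */
        unsigned long idx = get_index(key, n);

        if (idx >= (1ul << n->bits))
            break;                                    /* key is outside this subtree */
        n = get_child_rcu(n, idx);
    }
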
kv                159 virt/kvm/vfio.c 	struct kvm_vfio *kv = dev->private;
kv                163 virt/kvm/vfio.c 	mutex_lock(&kv->lock);
kv                165 virt/kvm/vfio.c 	list_for_each_entry(kvg, &kv->group_list, node) {
kv                172 virt/kvm/vfio.c 	if (noncoherent != kv->noncoherent) {
kv                173 virt/kvm/vfio.c 		kv->noncoherent = noncoherent;
kv                175 virt/kvm/vfio.c 		if (kv->noncoherent)
kv                181 virt/kvm/vfio.c 	mutex_unlock(&kv->lock);
kv                186 virt/kvm/vfio.c 	struct kvm_vfio *kv = dev->private;
kv                209 virt/kvm/vfio.c 		mutex_lock(&kv->lock);
kv                211 virt/kvm/vfio.c 		list_for_each_entry(kvg, &kv->group_list, node) {
kv                213 virt/kvm/vfio.c 				mutex_unlock(&kv->lock);
kv                221 virt/kvm/vfio.c 			mutex_unlock(&kv->lock);
kv                226 virt/kvm/vfio.c 		list_add_tail(&kvg->node, &kv->group_list);
kv                231 virt/kvm/vfio.c 		mutex_unlock(&kv->lock);
kv                249 virt/kvm/vfio.c 		mutex_lock(&kv->lock);
kv                251 virt/kvm/vfio.c 		list_for_each_entry(kvg, &kv->group_list, node) {
kv                269 virt/kvm/vfio.c 		mutex_unlock(&kv->lock);
kv                280 virt/kvm/vfio.c 		struct kvm_vfio *kv = dev->private;
kv                308 virt/kvm/vfio.c 		mutex_lock(&kv->lock);
kv                310 virt/kvm/vfio.c 		list_for_each_entry(kvg, &kv->group_list, node) {
kv                319 virt/kvm/vfio.c 		mutex_unlock(&kv->lock);
kv                365 virt/kvm/vfio.c 	struct kvm_vfio *kv = dev->private;
kv                368 virt/kvm/vfio.c 	list_for_each_entry_safe(kvg, tmp, &kv->group_list, node) {
kv                381 virt/kvm/vfio.c 	kfree(kv);
kv                398 virt/kvm/vfio.c 	struct kvm_vfio *kv;
kv                405 virt/kvm/vfio.c 	kv = kzalloc(sizeof(*kv), GFP_KERNEL_ACCOUNT);
kv                406 virt/kvm/vfio.c 	if (!kv)
kv                409 virt/kvm/vfio.c 	INIT_LIST_HEAD(&kv->group_list);
kv                410 virt/kvm/vfio.c 	mutex_init(&kv->lock);
kv                412 virt/kvm/vfio.c 	dev->private = kv;
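
The virt/kvm/vfio.c hits all revolve around one device-private struct: a mutex-guarded list of attached VFIO groups plus a noncoherent-DMA flag. The listed create lines reduce to the lifecycle below (field set matches the listing; error handling abbreviated), while teardown, the list_for_each_entry_safe hit, walks group_list unlinking each entry before the final kfree(kv):

    struct kvm_vfio {
        struct list_head group_list;   /* attached groups, protected by lock */
        struct mutex lock;
        bool noncoherent;
    };

    static int kvm_vfio_create(struct kvm_device *dev, u32 type)
    {
        struct kvm_vfio *kv;

        kv = kzalloc(sizeof(*kv), GFP_KERNEL_ACCOUNT);
        if (!kv)
            return -ENOMEM;

        INIT_LIST_HEAD(&kv->group_list);
        mutex_init(&kv->lock);
        dev->private = kv;
        return 0;
    }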