k1                121 arch/mips/include/asm/stackframe.h 		lui	k1, %hi(kernelsp)
k1                123 arch/mips/include/asm/stackframe.h 		lui	k1, %highest(kernelsp)
k1                124 arch/mips/include/asm/stackframe.h 		daddiu	k1, %higher(kernelsp)
k1                125 arch/mips/include/asm/stackframe.h 		dsll	k1, 16
k1                126 arch/mips/include/asm/stackframe.h 		daddiu	k1, %hi(kernelsp)
k1                127 arch/mips/include/asm/stackframe.h 		dsll	k1, 16
k1                130 arch/mips/include/asm/stackframe.h 		LONG_ADDU	k1, k0
k1                136 arch/mips/include/asm/stackframe.h 		LONG_L	sp, %lo(kernelsp)(k1)
k1                138 arch/mips/include/asm/stackframe.h 		LONG_L	k1, %lo(kernelsp)(k1)
k1                170 arch/mips/include/asm/stackframe.h 		lui	k1, %hi(kernelsp)
k1                172 arch/mips/include/asm/stackframe.h 		lui	k1, %highest(kernelsp)
k1                173 arch/mips/include/asm/stackframe.h 		daddiu	k1, %higher(kernelsp)
k1                174 arch/mips/include/asm/stackframe.h 		dsll	k1, k1, 16
k1                175 arch/mips/include/asm/stackframe.h 		daddiu	k1, %hi(kernelsp)
k1                176 arch/mips/include/asm/stackframe.h 		dsll	k1, k1, 16
k1                183 arch/mips/include/asm/stackframe.h 		LONG_L	sp, %lo(kernelsp)(k1)
k1                185 arch/mips/include/asm/stackframe.h 		LONG_L	k1, %lo(kernelsp)(k1)
k1                236 arch/mips/include/asm/stackframe.h 		.set	at=k1
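
The stackframe.h hits above come from the code that locates the kernel stack on exception entry: the lui/daddiu/dsll sequence builds the 64-bit address of the kernelsp symbol from 16-bit %highest/%higher/%hi/%lo relocation pieces, a CPU-dependent offset held in k0 is added (the LONG_ADDU line), and the saved stack pointer is loaded through k1. A minimal userspace sketch of that address arithmetic; the helper name is made up for illustration, and the linker is what actually picks the relocation values, including any carry adjustments between pieces:

#include <stdint.h>

/*
 * Mirrors the shifts and adds of the lui/daddiu/dsll sequence above; the
 * %lo piece is really folded into the displacement of the final load.
 */
static uint64_t build_sym_addr(uint16_t highest, int16_t higher,
			       int16_t hi, int16_t lo)
{
	uint64_t a = (uint64_t)highest << 16;	/* lui    %highest       */
	a += (int64_t)higher;			/* daddiu %higher        */
	a <<= 16;				/* dsll   16             */
	a += (int64_t)hi;			/* daddiu %hi            */
	a <<= 16;				/* dsll   16             */
	return a + (int64_t)lo;			/* %lo(kernelsp)(k1)     */
}
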
k1                201 arch/sparc/crypto/des_glue.c 	u64 k1[DES_EXPKEY_WORDS / 2];
k1                210 arch/sparc/crypto/des_glue.c 	des_sparc64_key_expand((const u32 *)key, k1);
k1                216 arch/sparc/crypto/des_glue.c 	memcpy(&dctx->encrypt_expkey[0], &k1[0], sizeof(k1));
k1                225 arch/sparc/crypto/des_glue.c 			   &k1[0]);
k1                 60 crypto/tea.c   	u32 k0, k1, k2, k3;
k1                 69 crypto/tea.c   	k1 = ctx->KEY[1];
k1                 77 crypto/tea.c   		y += ((z << 4) + k0) ^ (z + sum) ^ ((z >> 5) + k1);
k1                 88 crypto/tea.c   	u32 k0, k1, k2, k3;
k1                 97 crypto/tea.c   	k1 = ctx->KEY[1];
k1                107 crypto/tea.c   		y -= ((z << 4) + k0) ^ (z + sum) ^ ((z >> 5) + k1);
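
The crypto/tea.c hits are the heart of the TEA round: four 32-bit subkeys k0..k3 are loaded from the key schedule and mixed into the two block halves over 32 rounds, with the decrypt path above running the same mix with subtraction. A standalone sketch of the encrypt loop, assuming the standard delta constant 0x9e3779b9; the function name is made up, and the kernel version additionally converts the block to and from little-endian:

#include <stdint.h>

#define TEA_DELTA 0x9e3779b9U

/* One TEA encryption: 32 rounds over the (y, z) halves of the block. */
static void tea_encrypt_block(uint32_t v[2], const uint32_t k[4])
{
	uint32_t y = v[0], z = v[1], sum = 0;
	uint32_t k0 = k[0], k1 = k[1], k2 = k[2], k3 = k[3];
	int n;

	for (n = 0; n < 32; n++) {
		sum += TEA_DELTA;
		y += ((z << 4) + k0) ^ (z + sum) ^ ((z >> 5) + k1);
		z += ((y << 4) + k2) ^ (y + sum) ^ ((y >> 5) + k3);
	}
	v[0] = y;
	v[1] = z;
}
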
k1                293 crypto/vmac.c  	p += MUL32(a1, k1);
k1                299 crypto/vmac.c  	p += MUL32(a2, k1);
k1                307 crypto/vmac.c  	q += MUL32(a3, k1);
k1                313 crypto/vmac.c  	p += MUL32(a0, k1);
k1                358 crypto/vmac.c  static u64 l3hash(u64 p1, u64 p2, u64 k1, u64 k2, u64 len)
k1                379 crypto/vmac.c  	p1 += k1;
k1                380 crypto/vmac.c  	p1 += (0 - (p1 < k1)) & 257;
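
In l3hash() in crypto/vmac.c, `p1 += k1; p1 += (0 - (p1 < k1)) & 257;` is addition modulo p64 = 2^64 - 257: when the 64-bit add wraps, the discarded 2^64 is congruent to 257 (mod p64), and the branchless mask (0 - carry) & 257 folds exactly that back in. A small sketch of the same trick under a made-up helper name; note the result is only partially reduced, so a canonical value would still need one conditional subtraction of p64:

#include <stdint.h>

/* Addition modulo p64 = 2^64 - 257, folding the carry back in as +257. */
static uint64_t add_mod_p64(uint64_t a, uint64_t b)
{
	uint64_t s = a + b;

	/* (0 - carry) is all-ones when the add wrapped, 0 otherwise. */
	s += (0 - (uint64_t)(s < b)) & 257;
	return s;
}
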
k1                104 drivers/clk/sprd/pll.c 	u16 k1, k2;
k1                132 drivers/clk/sprd/pll.c 		k1 = pll->k1;
k1                134 drivers/clk/sprd/pll.c 		rate = DIV_ROUND_CLOSEST_ULL(refin * kint * k1,
k1                 56 drivers/clk/sprd/pll.h 	u16 k1;
k1                 72 drivers/clk/sprd/pll.h 		.k1		= _k1,					\
k1                307 drivers/crypto/ccp/ccp-crypto-aes-cmac.c 	gk = (__be64 *)ctx->u.aes.k1;
k1                314 drivers/crypto/ccp/ccp-crypto-aes-cmac.c 	if (ctx->u.aes.k1[0] & 0x80) {
k1                323 drivers/crypto/ccp/ccp-crypto-aes-cmac.c 	ctx->u.aes.kn_len = sizeof(ctx->u.aes.k1);
k1                324 drivers/crypto/ccp/ccp-crypto-aes-cmac.c 	sg_init_one(&ctx->u.aes.k1_sg, ctx->u.aes.k1, sizeof(ctx->u.aes.k1));
k1                107 drivers/crypto/ccp/ccp-crypto.h 	u8 k1[AES_BLOCK_SIZE];
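
The ccp-crypto-aes-cmac.c hits belong to CMAC subkey generation: each subkey is the previous block-sized value doubled in GF(2^128), and the `& 0x80` test on the leading byte decides whether the reduction constant is XORed into the low byte, as in RFC 4493. A standalone sketch of that doubling step with a made-up helper name; the driver itself does the same arithmetic on the __be64 view set up by the `gk` cast above:

#include <stdint.h>

#define AES_BLOCK_SIZE 16

/*
 * GF(2^128) doubling for CMAC subkeys (RFC 4493): shift the big-endian
 * 128-bit value left by one bit; if its top bit was set, XOR 0x87 into
 * the last byte. Safe for in-place use (out == in).
 */
static void cmac_double(uint8_t out[AES_BLOCK_SIZE],
			const uint8_t in[AES_BLOCK_SIZE])
{
	uint8_t msb = in[0] & 0x80;
	uint8_t carry = 0;
	int i;

	for (i = AES_BLOCK_SIZE - 1; i >= 0; i--) {
		uint8_t b = in[i];

		out[i] = (uint8_t)((b << 1) | carry);
		carry = b >> 7;
	}
	if (msb)
		out[AES_BLOCK_SIZE - 1] ^= 0x87;
}
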
k1                257 drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c 	int64_t k1;
k1                338 drivers/gpu/drm/nouveau/dispnv04/tvmodesnv17.c 				int64_t c = (p->k1 + p->ki*i + p->ki2*i*i +
k1                227 drivers/gpu/drm/nouveau/dispnv50/atom.h 		u8 k1;
k1                291 drivers/gpu/drm/nouveau/dispnv50/wndw.c 		asyw->blend.k1 = asyw->state.alpha >> 8;
k1                 91 drivers/gpu/drm/nouveau/dispnv50/wndwc37e.c 		evo_data(push, asyw->blend.k1);
k1               3204 drivers/gpu/drm/radeon/r100.c 	fixed20_12 mc_latency_sclk, mc_latency_mclk, k1;
k1               3412 drivers/gpu/drm/radeon/r100.c 			k1.full = dfixed_const(40);
k1               3415 drivers/gpu/drm/radeon/r100.c 			k1.full = dfixed_const(20);
k1               3419 drivers/gpu/drm/radeon/r100.c 		k1.full = dfixed_const(40);
k1               3430 drivers/gpu/drm/radeon/r100.c 	mc_latency_mclk.full += k1.full;
k1                518 drivers/net/wireless/intel/iwlwifi/fw/api/sta.h 	u8 k1[16];
k1               1552 fs/btrfs/ctree.c 	struct btrfs_key k1;
k1               1554 fs/btrfs/ctree.c 	btrfs_disk_key_to_cpu(&k1, disk);
k1               1556 fs/btrfs/ctree.c 	return btrfs_comp_cpu_keys(&k1, k2);
k1               1562 fs/btrfs/ctree.c int btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2)
k1               1564 fs/btrfs/ctree.c 	if (k1->objectid > k2->objectid)
k1               1566 fs/btrfs/ctree.c 	if (k1->objectid < k2->objectid)
k1               1568 fs/btrfs/ctree.c 	if (k1->type > k2->type)
k1               1570 fs/btrfs/ctree.c 	if (k1->type < k2->type)
k1               1572 fs/btrfs/ctree.c 	if (k1->offset > k2->offset)
k1               1574 fs/btrfs/ctree.c 	if (k1->offset < k2->offset)
k1               2508 fs/btrfs/ctree.h int btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2);
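
btrfs_comp_cpu_keys() is a plain three-way comparison over (objectid, type, offset), with objectid most significant. The index above only shows the lines that mention k1; a sketch of the whole comparator with the return statements filled back in (struct btrfs_key comes from the btrfs headers):

/* Keys sort by objectid, then type, then offset. */
int btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2)
{
	if (k1->objectid > k2->objectid)
		return 1;
	if (k1->objectid < k2->objectid)
		return -1;
	if (k1->type > k2->type)
		return 1;
	if (k1->type < k2->type)
		return -1;
	if (k1->offset > k2->offset)
		return 1;
	if (k1->offset < k2->offset)
		return -1;
	return 0;
}
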
k1                 35 fs/hfsplus/attributes.c int hfsplus_attr_bin_cmp_key(const hfsplus_btree_key *k1,
k1                 40 fs/hfsplus/attributes.c 	k1_cnid = k1->attr.cnid;
k1                 46 fs/hfsplus/attributes.c 			(const struct hfsplus_unistr *)&k1->attr.key_name,
k1                 16 fs/hfsplus/catalog.c int hfsplus_cat_case_cmp_key(const hfsplus_btree_key *k1,
k1                 21 fs/hfsplus/catalog.c 	k1p = k1->cat.parent;
k1                 26 fs/hfsplus/catalog.c 	return hfsplus_strcasecmp(&k1->cat.name, &k2->cat.name);
k1                 29 fs/hfsplus/catalog.c int hfsplus_cat_bin_cmp_key(const hfsplus_btree_key *k1,
k1                 34 fs/hfsplus/catalog.c 	k1p = k1->cat.parent;
k1                 39 fs/hfsplus/catalog.c 	return hfsplus_strcmp(&k1->cat.name, &k2->cat.name);
k1                 20 fs/hfsplus/extents.c int hfsplus_ext_cmp_key(const hfsplus_btree_key *k1,
k1                 26 fs/hfsplus/extents.c 	k1id = k1->ext.cnid;
k1                 31 fs/hfsplus/extents.c 	if (k1->ext.fork_type != k2->ext.fork_type)
k1                 32 fs/hfsplus/extents.c 		return k1->ext.fork_type < k2->ext.fork_type ? -1 : 1;
k1                 34 fs/hfsplus/extents.c 	k1s = k1->ext.start_block;
k1                374 fs/hfsplus/hfsplus_fs.h int hfsplus_attr_bin_cmp_key(const hfsplus_btree_key *k1,
k1                445 fs/hfsplus/hfsplus_fs.h int hfsplus_cat_case_cmp_key(const hfsplus_btree_key *k1,
k1                447 fs/hfsplus/hfsplus_fs.h int hfsplus_cat_bin_cmp_key(const hfsplus_btree_key *k1,
k1                467 fs/hfsplus/hfsplus_fs.h int hfsplus_ext_cmp_key(const hfsplus_btree_key *k1,
k1                132 fs/reiserfs/stree.c inline int comp_le_keys(const struct reiserfs_key *k1,
k1                135 fs/reiserfs/stree.c 	return memcmp(k1, k2, sizeof(struct reiserfs_key));
k1                266 fs/xfs/libxfs/xfs_alloc_btree.c 	union xfs_btree_key	*k1,
k1                269 fs/xfs/libxfs/xfs_alloc_btree.c 	return (int64_t)be32_to_cpu(k1->alloc.ar_startblock) -
k1                276 fs/xfs/libxfs/xfs_alloc_btree.c 	union xfs_btree_key	*k1,
k1                281 fs/xfs/libxfs/xfs_alloc_btree.c 	diff =  be32_to_cpu(k1->alloc.ar_blockcount) -
k1                286 fs/xfs/libxfs/xfs_alloc_btree.c 	return  be32_to_cpu(k1->alloc.ar_startblock) -
k1                386 fs/xfs/libxfs/xfs_alloc_btree.c 	union xfs_btree_key	*k1,
k1                389 fs/xfs/libxfs/xfs_alloc_btree.c 	return be32_to_cpu(k1->alloc.ar_startblock) <
k1                407 fs/xfs/libxfs/xfs_alloc_btree.c 	union xfs_btree_key	*k1,
k1                410 fs/xfs/libxfs/xfs_alloc_btree.c 	return be32_to_cpu(k1->alloc.ar_blockcount) <
k1                412 fs/xfs/libxfs/xfs_alloc_btree.c 		(k1->alloc.ar_blockcount == k2->alloc.ar_blockcount &&
k1                413 fs/xfs/libxfs/xfs_alloc_btree.c 		 be32_to_cpu(k1->alloc.ar_startblock) <
k1                400 fs/xfs/libxfs/xfs_bmap_btree.c 	union xfs_btree_key	*k1,
k1                403 fs/xfs/libxfs/xfs_bmap_btree.c 	uint64_t		a = be64_to_cpu(k1->bmbt.br_startoff);
k1                501 fs/xfs/libxfs/xfs_bmap_btree.c 	union xfs_btree_key	*k1,
k1                504 fs/xfs/libxfs/xfs_bmap_btree.c 	return be64_to_cpu(k1->bmbt.br_startoff) <
k1                155 fs/xfs/libxfs/xfs_btree.h 				union xfs_btree_key *k1,
k1                245 fs/xfs/libxfs/xfs_ialloc_btree.c 	union xfs_btree_key	*k1,
k1                248 fs/xfs/libxfs/xfs_ialloc_btree.c 	return (int64_t)be32_to_cpu(k1->inobt.ir_startino) -
k1                343 fs/xfs/libxfs/xfs_ialloc_btree.c 	union xfs_btree_key	*k1,
k1                346 fs/xfs/libxfs/xfs_ialloc_btree.c 	return be32_to_cpu(k1->inobt.ir_startino) <
k1                193 fs/xfs/libxfs/xfs_refcount_btree.c 	union xfs_btree_key	*k1,
k1                196 fs/xfs/libxfs/xfs_refcount_btree.c 	return (int64_t)be32_to_cpu(k1->refc.rc_startblock) -
k1                274 fs/xfs/libxfs/xfs_refcount_btree.c 	union xfs_btree_key	*k1,
k1                277 fs/xfs/libxfs/xfs_refcount_btree.c 	return be32_to_cpu(k1->refc.rc_startblock) <
k1                258 fs/xfs/libxfs/xfs_rmap_btree.c 	union xfs_btree_key	*k1,
k1                261 fs/xfs/libxfs/xfs_rmap_btree.c 	struct xfs_rmap_key	*kp1 = &k1->rmap;
k1                373 fs/xfs/libxfs/xfs_rmap_btree.c 	union xfs_btree_key	*k1,
k1                381 fs/xfs/libxfs/xfs_rmap_btree.c 	x = be32_to_cpu(k1->rmap.rm_startblock);
k1                387 fs/xfs/libxfs/xfs_rmap_btree.c 	a = be64_to_cpu(k1->rmap.rm_owner);
k1                393 fs/xfs/libxfs/xfs_rmap_btree.c 	a = XFS_RMAP_OFF(be64_to_cpu(k1->rmap.rm_offset));
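
Two idioms repeat across the xfs *_btree.c hits: key_diff helpers that widen a 32-bit field to int64_t before subtracting, so a "less than" result survives as a negative difference instead of wrapping around as a huge unsigned value, and keys_inorder helpers that return a strict less-than with ties broken by the next field (blockcount then startblock for the by-count allocbt; startblock, owner, then offset for the rmapbt). A sketch of the signed-diff idiom, with a made-up helper name:

#include <stdint.h>

/* Widen both u32 operands before subtracting: <0, 0, >0 like a comparator. */
static int64_t key_diff_u32(uint32_t a, uint32_t b)
{
	return (int64_t)a - (int64_t)b;
}
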
k1                 22 include/linux/btree-128.h static inline void *btree_lookup128(struct btree_head128 *head, u64 k1, u64 k2)
k1                 24 include/linux/btree-128.h 	u64 key[2] = {k1, k2};
k1                 29 include/linux/btree-128.h 				      u64 *k1, u64 *k2)
k1                 31 include/linux/btree-128.h 	u64 key[2] = {*k1, *k2};
k1                 36 include/linux/btree-128.h 	*k1 = key[0];
k1                 41 include/linux/btree-128.h static inline int btree_insert128(struct btree_head128 *head, u64 k1, u64 k2,
k1                 44 include/linux/btree-128.h 	u64 key[2] = {k1, k2};
k1                 49 include/linux/btree-128.h static inline int btree_update128(struct btree_head128 *head, u64 k1, u64 k2,
k1                 52 include/linux/btree-128.h 	u64 key[2] = {k1, k2};
k1                 57 include/linux/btree-128.h static inline void *btree_remove128(struct btree_head128 *head, u64 k1, u64 k2)
k1                 59 include/linux/btree-128.h 	u64 key[2] = {k1, k2};
k1                 63 include/linux/btree-128.h static inline void *btree_last128(struct btree_head128 *head, u64 *k1, u64 *k2)
k1                 70 include/linux/btree-128.h 		*k1 = key[0];
k1                106 include/linux/btree-128.h #define btree_for_each_safe128(head, k1, k2, val)	\
k1                107 include/linux/btree-128.h 	for (val = btree_last128(head, &k1, &k2);	\
k1                109 include/linux/btree-128.h 	     val = btree_get_prev128(head, &k1, &k2))
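
Every btree-128.h wrapper above follows one pattern: pack the two u64 halves into `u64 key[2]` and hand that composite key to the generic 128-bit b+tree underneath. A hedged usage sketch follows; the initializer and destroy names, the (val, gfp) tail of btree_insert128(), and the include line are assumptions not confirmed by the hits above, and the demo function itself is made up:

#include <linux/btree.h>	/* assumed to pull in the 128-bit wrappers */

static int btree128_demo(void)
{
	struct btree_head128 head;
	void *val;
	int err;

	err = btree_init128(&head);		/* assumed initializer name */
	if (err)
		return err;

	/* (k1, k2) together form one composite 128-bit key. */
	err = btree_insert128(&head, 1, 2, (void *)0xdeadbeefUL,
			      GFP_KERNEL);	/* val/gfp tail assumed */
	if (!err) {
		val = btree_lookup128(&head, 1, 2);	/* -> 0xdeadbeef */
		btree_remove128(&head, 1, 2);
		(void)val;
	}
	btree_destroy128(&head);		/* assumed teardown helper */
	return err;
}
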
k1                654 include/uapi/linux/pkt_sched.h 	__u32 k1;
k1                366 kernel/locking/lockdep.c 	u32 k0 = key, k1 = key >> 32;
k1                368 kernel/locking/lockdep.c 	__jhash_mix(idx, k0, k1); /* Macro that modifies arguments! */
k1                370 kernel/locking/lockdep.c 	return k0 | (u64)k1 << 32;
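
The lockdep.c lines fold a 32-bit value into a 64-bit chain key by splitting the key into two u32 halves, running a Jenkins-style mix over (idx, k0, k1), and gluing the halves back together. A standalone sketch of that split/recombine shape; mix3() is a placeholder of my own, not the in-place __jhash_mix() macro from <linux/jhash.h> that the real code uses:

#include <stdint.h>

/* Placeholder mixing step, standing in for the jhash mix. */
static void mix3(uint32_t *a, uint32_t *b, uint32_t *c)
{
	*a ^= (*b << 7) ^ (*c >> 3);
	*b ^= (*c << 5) ^ (*a >> 2);
	*c ^= (*a << 3) ^ (*b >> 5);
}

static uint64_t iterate_chain_key_sketch(uint64_t key, uint32_t idx)
{
	uint32_t k0 = (uint32_t)key, k1 = (uint32_t)(key >> 32);

	mix3(&idx, &k0, &k1);			/* modifies all three */

	return (uint64_t)k0 | ((uint64_t)k1 << 32);
}
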
k1                210 lib/xxhash.c   		const uint64_t k1 = xxh64_round(0, get_unaligned_le64(p));
k1                212 lib/xxhash.c   		h64 ^= k1;
k1                470 lib/xxhash.c   		const uint64_t k1 = xxh64_round(0, get_unaligned_le64(p));
k1                472 lib/xxhash.c   		h64 ^= k1;
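
In lib/xxhash.c, each remaining 8-byte chunk of the tail is run through xxh64_round() with a zero accumulator and then merged into h64. A sketch of that tail step using the published xxHash64 constants; the rotation amounts and PRIME64_* values come from the xxHash specification rather than being read out of lib/xxhash.c, and the helper names are made up:

#include <stdint.h>

#define PRIME64_1 11400714785074694791ULL
#define PRIME64_2 14029467366897019727ULL
#define PRIME64_4  9650029242287828579ULL

static uint64_t rotl64(uint64_t x, int r)
{
	return (x << r) | (x >> (64 - r));
}

/* acc += input * P2; rotate left 31; multiply by P1. */
static uint64_t xxh64_round_sketch(uint64_t acc, uint64_t input)
{
	acc += input * PRIME64_2;
	acc = rotl64(acc, 31);
	return acc * PRIME64_1;
}

/* One 8-byte tail step: round a zero accumulator, XOR in, rotate/mul/add. */
static uint64_t merge_tail8(uint64_t h64, uint64_t lane)
{
	uint64_t k1 = xxh64_round_sketch(0, lane);

	h64 ^= k1;
	return rotl64(h64, 27) * PRIME64_1 + PRIME64_4;
}
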
k1                467 net/netfilter/nft_set_hash.c 	u32 hash, k1, k2;
k1                469 net/netfilter/nft_set_hash.c 	k1 = *key;
k1                470 net/netfilter/nft_set_hash.c 	hash = jhash_1word(k1, priv->seed);
k1                474 net/netfilter/nft_set_hash.c 		if (k1 == k2 &&
k1                487 net/netfilter/nft_set_hash.c 	u32 hash, k1;
k1                490 net/netfilter/nft_set_hash.c 		k1 = *(u32 *)key;
k1                491 net/netfilter/nft_set_hash.c 		hash = jhash_1word(k1, priv->seed);
k1                904 net/sched/sch_netem.c 			q->clg.a4 = ge->k1;
k1               1114 net/sched/sch_netem.c 			.k1 = q->clg.a4,
k1               1035 scripts/kconfig/expr.c 	enum string_value_kind k1 = k_string, k2 = k_string;
k1               1075 scripts/kconfig/expr.c 		k1 = expr_parse_string(str1, e->left.sym->type, &lval);
k1               1079 scripts/kconfig/expr.c 	if (k1 == k_string || k2 == k_string)
k1               1081 scripts/kconfig/expr.c 	else if (k1 == k_unsigned || k2 == k_unsigned)
k1                417 security/selinux/ss/policydb.c static int filenametr_cmp(struct hashtab *h, const void *k1, const void *k2)
k1                419 security/selinux/ss/policydb.c 	const struct filename_trans *ft1 = k1;
k1                447 security/selinux/ss/policydb.c static int rangetr_cmp(struct hashtab *h, const void *k1, const void *k2)
k1                449 security/selinux/ss/policydb.c 	const struct range_trans *key1 = k1, *key2 = k2;
k1                653 tools/include/uapi/linux/pkt_sched.h 	__u32 k1;
k1               1380 tools/lib/bpf/btf.c static bool btf_dedup_equal_fn(const void *k1, const void *k2, void *ctx)
k1               1382 tools/lib/bpf/btf.c 	return k1 == k2;
k1               2946 tools/lib/bpf/libbpf.c static bool bpf_core_equal_fn(const void *k1, const void *k2, void *ctx)
k1               2948 tools/lib/bpf/libbpf.c 	return k1 == k2;
k1                259 tools/testing/selftests/bpf/test_hashmap.c 	void *k1 = (void *)0, *k2 = (void *)1;
k1                277 tools/testing/selftests/bpf/test_hashmap.c 	err = hashmap__append(map, k1, (void *)1);
k1                280 tools/testing/selftests/bpf/test_hashmap.c 	err = hashmap__append(map, k1, (void *)2);
k1                283 tools/testing/selftests/bpf/test_hashmap.c 	err = hashmap__append(map, k1, (void *)4);
k1                312 tools/testing/selftests/bpf/test_hashmap.c 	hashmap__for_each_key_entry(map, entry, k1) {