tk                279 arch/arm/kernel/vdso.c static bool tk_is_cntvct(const struct timekeeper *tk)
tk                284 arch/arm/kernel/vdso.c 	if (!tk->tkr_mono.clock->archdata.vdso_direct)
tk                308 arch/arm/kernel/vdso.c void update_vsyscall(struct timekeeper *tk)
tk                310 arch/arm/kernel/vdso.c 	struct timespec64 *wtm = &tk->wall_to_monotonic;
tk                321 arch/arm/kernel/vdso.c 	vdso_data->tk_is_cntvct			= tk_is_cntvct(tk);
tk                322 arch/arm/kernel/vdso.c 	vdso_data->xtime_coarse_sec		= tk->xtime_sec;
tk                323 arch/arm/kernel/vdso.c 	vdso_data->xtime_coarse_nsec		= (u32)(tk->tkr_mono.xtime_nsec >>
tk                324 arch/arm/kernel/vdso.c 							tk->tkr_mono.shift);
tk                329 arch/arm/kernel/vdso.c 		vdso_data->cs_cycle_last	= tk->tkr_mono.cycle_last;
tk                330 arch/arm/kernel/vdso.c 		vdso_data->xtime_clock_sec	= tk->xtime_sec;
tk                331 arch/arm/kernel/vdso.c 		vdso_data->xtime_clock_snsec	= tk->tkr_mono.xtime_nsec;
tk                332 arch/arm/kernel/vdso.c 		vdso_data->cs_mult		= tk->tkr_mono.mult;
tk                333 arch/arm/kernel/vdso.c 		vdso_data->cs_shift		= tk->tkr_mono.shift;
tk                334 arch/arm/kernel/vdso.c 		vdso_data->cs_mask		= tk->tkr_mono.mask;
tk                101 arch/arm/xen/enlighten.c 	struct timekeeper *tk = priv;
tk                103 arch/arm/xen/enlighten.c 	now.tv_sec = tk->xtime_sec;
tk                104 arch/arm/xen/enlighten.c 	now.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
tk                105 arch/arm/xen/enlighten.c 	system_time = timespec64_add(now, tk->wall_to_monotonic);
tk                 25 arch/arm64/include/asm/vdso/vsyscall.h int __arm64_get_clock_mode(struct timekeeper *tk)
tk                 27 arch/arm64/include/asm/vdso/vsyscall.h 	u32 use_syscall = !tk->tkr_mono.clock->archdata.vdso_direct;
tk                 34 arch/arm64/include/asm/vdso/vsyscall.h void __arm64_update_vsyscall(struct vdso_data *vdata, struct timekeeper *tk)
tk                431 arch/ia64/kernel/time.c void update_vsyscall(struct timekeeper *tk)
tk                436 arch/ia64/kernel/time.c 	fsyscall_gtod_data.clk_mask = tk->tkr_mono.mask;
tk                437 arch/ia64/kernel/time.c 	fsyscall_gtod_data.clk_mult = tk->tkr_mono.mult;
tk                438 arch/ia64/kernel/time.c 	fsyscall_gtod_data.clk_shift = tk->tkr_mono.shift;
tk                439 arch/ia64/kernel/time.c 	fsyscall_gtod_data.clk_fsys_mmio = tk->tkr_mono.clock->archdata.fsys_mmio;
tk                440 arch/ia64/kernel/time.c 	fsyscall_gtod_data.clk_cycle_last = tk->tkr_mono.cycle_last;
tk                442 arch/ia64/kernel/time.c 	fsyscall_gtod_data.wall_time.sec = tk->xtime_sec;
tk                443 arch/ia64/kernel/time.c 	fsyscall_gtod_data.wall_time.snsec = tk->tkr_mono.xtime_nsec;
tk                445 arch/ia64/kernel/time.c 	fsyscall_gtod_data.monotonic_time.sec = tk->xtime_sec
tk                446 arch/ia64/kernel/time.c 					      + tk->wall_to_monotonic.tv_sec;
tk                447 arch/ia64/kernel/time.c 	fsyscall_gtod_data.monotonic_time.snsec = tk->tkr_mono.xtime_nsec
tk                448 arch/ia64/kernel/time.c 						+ ((u64)tk->wall_to_monotonic.tv_nsec
tk                449 arch/ia64/kernel/time.c 							<< tk->tkr_mono.shift);
tk                453 arch/ia64/kernel/time.c 					(((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
tk                455 arch/ia64/kernel/time.c 					((u64)NSEC_PER_SEC) << tk->tkr_mono.shift;
tk                 23 arch/mips/include/asm/vdso/vsyscall.h int __mips_get_clock_mode(struct timekeeper *tk)
tk                 25 arch/mips/include/asm/vdso/vsyscall.h 	u32 clock_mode = tk->tkr_mono.clock->archdata.vdso_clock_mode;
tk                209 arch/nds32/kernel/vdso.c void update_vsyscall(struct timekeeper *tk)
tk                212 arch/nds32/kernel/vdso.c 	vdso_data->cs_mask = tk->tkr_mono.mask;
tk                213 arch/nds32/kernel/vdso.c 	vdso_data->cs_mult = tk->tkr_mono.mult;
tk                214 arch/nds32/kernel/vdso.c 	vdso_data->cs_shift = tk->tkr_mono.shift;
tk                215 arch/nds32/kernel/vdso.c 	vdso_data->cs_cycle_last = tk->tkr_mono.cycle_last;
tk                216 arch/nds32/kernel/vdso.c 	vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec;
tk                217 arch/nds32/kernel/vdso.c 	vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec;
tk                218 arch/nds32/kernel/vdso.c 	vdso_data->xtime_clock_sec = tk->xtime_sec;
tk                219 arch/nds32/kernel/vdso.c 	vdso_data->xtime_clock_nsec = tk->tkr_mono.xtime_nsec;
tk                220 arch/nds32/kernel/vdso.c 	vdso_data->xtime_coarse_sec = tk->xtime_sec;
tk                221 arch/nds32/kernel/vdso.c 	vdso_data->xtime_coarse_nsec = tk->tkr_mono.xtime_nsec >>
tk                222 arch/nds32/kernel/vdso.c 	    tk->tkr_mono.shift;
tk                868 arch/powerpc/kernel/time.c void update_vsyscall(struct timekeeper *tk)
tk                871 arch/powerpc/kernel/time.c 	struct clocksource *clock = tk->tkr_mono.clock;
tk                872 arch/powerpc/kernel/time.c 	u32 mult = tk->tkr_mono.mult;
tk                873 arch/powerpc/kernel/time.c 	u32 shift = tk->tkr_mono.shift;
tk                874 arch/powerpc/kernel/time.c 	u64 cycle_last = tk->tkr_mono.cycle_last;
tk                881 arch/powerpc/kernel/time.c 	xt.tv_sec = tk->xtime_sec;
tk                882 arch/powerpc/kernel/time.c 	xt.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
tk                918 arch/powerpc/kernel/time.c 	frac_sec = tk->tkr_mono.xtime_nsec << (32 - shift);
tk                926 arch/powerpc/kernel/time.c 	new_stamp_xsec += tk->xtime_sec * XSEC_PER_SEC;
tk                940 arch/powerpc/kernel/time.c 	vdso_data->wtom_clock_sec = tk->wall_to_monotonic.tv_sec;
tk                941 arch/powerpc/kernel/time.c 	vdso_data->wtom_clock_nsec = tk->wall_to_monotonic.tv_nsec;
tk                276 arch/s390/kernel/time.c void update_vsyscall(struct timekeeper *tk)
tk                280 arch/s390/kernel/time.c 	if (tk->tkr_mono.clock != &clocksource_tod)
tk                286 arch/s390/kernel/time.c 	vdso_data->xtime_tod_stamp = tk->tkr_mono.cycle_last;
tk                287 arch/s390/kernel/time.c 	vdso_data->xtime_clock_sec = tk->xtime_sec;
tk                288 arch/s390/kernel/time.c 	vdso_data->xtime_clock_nsec = tk->tkr_mono.xtime_nsec;
tk                290 arch/s390/kernel/time.c 		tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
tk                291 arch/s390/kernel/time.c 	vdso_data->wtom_clock_nsec = tk->tkr_mono.xtime_nsec +
tk                292 arch/s390/kernel/time.c 		+ ((u64) tk->wall_to_monotonic.tv_nsec << tk->tkr_mono.shift);
tk                293 arch/s390/kernel/time.c 	nsecps = (u64) NSEC_PER_SEC << tk->tkr_mono.shift;
tk                299 arch/s390/kernel/time.c 	vdso_data->xtime_coarse_sec = tk->xtime_sec;
tk                301 arch/s390/kernel/time.c 		(long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
tk                303 arch/s390/kernel/time.c 		vdso_data->xtime_coarse_sec + tk->wall_to_monotonic.tv_sec;
tk                305 arch/s390/kernel/time.c 		vdso_data->xtime_coarse_nsec + tk->wall_to_monotonic.tv_nsec;
tk                311 arch/s390/kernel/time.c 	vdso_data->tk_mult = tk->tkr_mono.mult;
tk                312 arch/s390/kernel/time.c 	vdso_data->tk_shift = tk->tkr_mono.shift;
tk                 25 arch/sparc/kernel/vdso.c void update_vsyscall(struct timekeeper *tk)
tk                 33 arch/sparc/kernel/vdso.c 	vdata->vclock_mode = tk->tkr_mono.clock->archdata.vclock_mode;
tk                 34 arch/sparc/kernel/vdso.c 	vdata->clock.cycle_last = tk->tkr_mono.cycle_last;
tk                 35 arch/sparc/kernel/vdso.c 	vdata->clock.mask = tk->tkr_mono.mask;
tk                 36 arch/sparc/kernel/vdso.c 	vdata->clock.mult = tk->tkr_mono.mult;
tk                 37 arch/sparc/kernel/vdso.c 	vdata->clock.shift = tk->tkr_mono.shift;
tk                 39 arch/sparc/kernel/vdso.c 	vdata->wall_time_sec = tk->xtime_sec;
tk                 40 arch/sparc/kernel/vdso.c 	vdata->wall_time_snsec = tk->tkr_mono.xtime_nsec;
tk                 42 arch/sparc/kernel/vdso.c 	vdata->monotonic_time_sec = tk->xtime_sec +
tk                 43 arch/sparc/kernel/vdso.c 				    tk->wall_to_monotonic.tv_sec;
tk                 44 arch/sparc/kernel/vdso.c 	vdata->monotonic_time_snsec = tk->tkr_mono.xtime_nsec +
tk                 45 arch/sparc/kernel/vdso.c 				      (tk->wall_to_monotonic.tv_nsec <<
tk                 46 arch/sparc/kernel/vdso.c 				       tk->tkr_mono.shift);
tk                 49 arch/sparc/kernel/vdso.c 	       (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
tk                 51 arch/sparc/kernel/vdso.c 				((u64)NSEC_PER_SEC) << tk->tkr_mono.shift;
tk                 55 arch/sparc/kernel/vdso.c 	vdata->wall_time_coarse_sec = tk->xtime_sec;
tk                 57 arch/sparc/kernel/vdso.c 			(long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
tk                 60 arch/sparc/kernel/vdso.c 		vdata->wall_time_coarse_sec + tk->wall_to_monotonic.tv_sec;
tk                 62 arch/sparc/kernel/vdso.c 		vdata->wall_time_coarse_nsec + tk->wall_to_monotonic.tv_nsec;
tk                 27 arch/x86/include/asm/vdso/vsyscall.h int __x86_get_clock_mode(struct timekeeper *tk)
tk                 29 arch/x86/include/asm/vdso/vsyscall.h 	int vclock_mode = tk->tkr_mono.clock->archdata.vclock_mode;
tk               1637 arch/x86/kvm/x86.c static void update_pvclock_gtod(struct timekeeper *tk)
tk               1642 arch/x86/kvm/x86.c 	boot_ns = ktime_to_ns(ktime_add(tk->tkr_mono.base, tk->offs_boot));
tk               1647 arch/x86/kvm/x86.c 	vdata->clock.vclock_mode	= tk->tkr_mono.clock->archdata.vclock_mode;
tk               1648 arch/x86/kvm/x86.c 	vdata->clock.cycle_last		= tk->tkr_mono.cycle_last;
tk               1649 arch/x86/kvm/x86.c 	vdata->clock.mask		= tk->tkr_mono.mask;
tk               1650 arch/x86/kvm/x86.c 	vdata->clock.mult		= tk->tkr_mono.mult;
tk               1651 arch/x86/kvm/x86.c 	vdata->clock.shift		= tk->tkr_mono.shift;
tk               1654 arch/x86/kvm/x86.c 	vdata->nsec_base		= tk->tkr_mono.xtime_nsec;
tk               1656 arch/x86/kvm/x86.c 	vdata->wall_time_sec            = tk->xtime_sec;
tk               7222 arch/x86/kvm/x86.c 	struct timekeeper *tk = priv;
tk               7224 arch/x86/kvm/x86.c 	update_pvclock_gtod(tk);
tk                 96 arch/x86/xen/time.c 	struct timekeeper *tk = priv;
tk                100 arch/x86/xen/time.c 	now.tv_sec = tk->xtime_sec;
tk                101 arch/x86/xen/time.c 	now.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
tk                303 crypto/asymmetric_keys/asym_tpm.c 	struct tpm_key *tk = asymmetric_key->payload.data[asym_crypto];
tk                305 crypto/asymmetric_keys/asym_tpm.c 	if (!tk)
tk                313 crypto/asymmetric_keys/asym_tpm.c 	struct tpm_key *tk = payload0;
tk                315 crypto/asymmetric_keys/asym_tpm.c 	if (!tk)
tk                318 crypto/asymmetric_keys/asym_tpm.c 	kfree(tk->blob);
tk                319 crypto/asymmetric_keys/asym_tpm.c 	tk->blob_len = 0;
tk                321 crypto/asymmetric_keys/asym_tpm.c 	kfree(tk);
tk                411 crypto/asymmetric_keys/asym_tpm.c 	struct tpm_key *tk = params->key->payload.data[asym_crypto];
tk                428 crypto/asymmetric_keys/asym_tpm.c 	der_pub_key_len = derive_pub_key(tk->pub_key, tk->pub_key_len,
tk                437 crypto/asymmetric_keys/asym_tpm.c 	info->key_size = tk->key_len;
tk                438 crypto/asymmetric_keys/asym_tpm.c 	info->max_data_size = tk->key_len / 8;
tk                441 crypto/asymmetric_keys/asym_tpm.c 	info->max_dec_size = tk->key_len / 8;
tk                459 crypto/asymmetric_keys/asym_tpm.c static int tpm_key_encrypt(struct tpm_key *tk,
tk                482 crypto/asymmetric_keys/asym_tpm.c 	der_pub_key_len = derive_pub_key(tk->pub_key, tk->pub_key_len,
tk                519 crypto/asymmetric_keys/asym_tpm.c static int tpm_key_decrypt(struct tpm_key *tk,
tk                545 crypto/asymmetric_keys/asym_tpm.c 				tk->blob, tk->blob_len, &keyhandle);
tk                643 crypto/asymmetric_keys/asym_tpm.c static int tpm_key_sign(struct tpm_key *tk,
tk                680 crypto/asymmetric_keys/asym_tpm.c 	if (in_len > tk->key_len / 8 - 11) {
tk                694 crypto/asymmetric_keys/asym_tpm.c 			 tk->blob, tk->blob_len, &keyhandle);
tk                724 crypto/asymmetric_keys/asym_tpm.c 	struct tpm_key *tk = params->key->payload.data[asym_crypto];
tk                730 crypto/asymmetric_keys/asym_tpm.c 		ret = tpm_key_encrypt(tk, params, in, out);
tk                733 crypto/asymmetric_keys/asym_tpm.c 		ret = tpm_key_decrypt(tk, params, in, out);
tk                736 crypto/asymmetric_keys/asym_tpm.c 		ret = tpm_key_sign(tk, params, in, out);
tk                751 crypto/asymmetric_keys/asym_tpm.c 	const struct tpm_key *tk = key->payload.data[asym_crypto];
tk                763 crypto/asymmetric_keys/asym_tpm.c 	BUG_ON(!tk);
tk                778 crypto/asymmetric_keys/asym_tpm.c 	der_pub_key_len = derive_pub_key(tk->pub_key, tk->pub_key_len,
tk                830 crypto/asymmetric_keys/asym_tpm.c static int extract_key_parameters(struct tpm_key *tk)
tk                832 crypto/asymmetric_keys/asym_tpm.c 	const void *cur = tk->blob;
tk                833 crypto/asymmetric_keys/asym_tpm.c 	uint32_t len = tk->blob_len;
tk                907 crypto/asymmetric_keys/asym_tpm.c 	tk->key_len = key_len;
tk                908 crypto/asymmetric_keys/asym_tpm.c 	tk->pub_key = pub_key;
tk                909 crypto/asymmetric_keys/asym_tpm.c 	tk->pub_key_len = sz;
tk                918 crypto/asymmetric_keys/asym_tpm.c 	struct tpm_key *tk;
tk                931 crypto/asymmetric_keys/asym_tpm.c 	tk = kzalloc(sizeof(struct tpm_key), GFP_KERNEL);
tk                932 crypto/asymmetric_keys/asym_tpm.c 	if (!tk)
tk                935 crypto/asymmetric_keys/asym_tpm.c 	tk->blob = kmemdup(blob, blob_len, GFP_KERNEL);
tk                936 crypto/asymmetric_keys/asym_tpm.c 	if (!tk->blob)
tk                939 crypto/asymmetric_keys/asym_tpm.c 	tk->blob_len = blob_len;
tk                941 crypto/asymmetric_keys/asym_tpm.c 	r = extract_key_parameters(tk);
tk                945 crypto/asymmetric_keys/asym_tpm.c 	return tk;
tk                948 crypto/asymmetric_keys/asym_tpm.c 	kfree(tk->blob);
tk                949 crypto/asymmetric_keys/asym_tpm.c 	tk->blob_len = 0;
tk                951 crypto/asymmetric_keys/asym_tpm.c 	kfree(tk);
tk                 58 crypto/asymmetric_keys/tpm_parser.c 	struct tpm_key *tk;
tk                 67 crypto/asymmetric_keys/tpm_parser.c 	tk = tpm_parse(prep->data, prep->datalen);
tk                 69 crypto/asymmetric_keys/tpm_parser.c 	if (IS_ERR(tk))
tk                 70 crypto/asymmetric_keys/tpm_parser.c 		return PTR_ERR(tk);
tk                 76 crypto/asymmetric_keys/tpm_parser.c 	prep->payload.data[asym_crypto] = tk;
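The asym_tpm.c and tpm_parser.c hits above share one allocate/duplicate/parse/unwind shape: tpm_parse() zero-allocates a tpm_key, copies the caller's blob into it, runs extract_key_parameters(), and on failure frees the blob before the key itself. A rough userspace sketch of that ownership pattern, with hypothetical names (fake_key, key_parse, parse_blob) standing in for the kernel helpers:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for the driver's tpm_key. */
struct fake_key {
	uint8_t *blob;
	size_t blob_len;
};

/* Placeholder for extract_key_parameters(); here it only sanity-checks. */
static int parse_blob(struct fake_key *k)
{
	return (k->blob_len >= 4) ? 0 : -1;
}

/* Allocate a key, duplicate the caller's blob, parse it, and unwind in
 * reverse order on failure - the same shape as tpm_parse() above. */
static struct fake_key *key_parse(const void *blob, size_t blob_len)
{
	struct fake_key *k = calloc(1, sizeof(*k));

	if (!k)
		return NULL;

	k->blob = malloc(blob_len);
	if (!k->blob)
		goto err_key;
	memcpy(k->blob, blob, blob_len);
	k->blob_len = blob_len;

	if (parse_blob(k))
		goto err_blob;
	return k;

err_blob:
	free(k->blob);
err_key:
	free(k);
	return NULL;
}

static void key_free(struct fake_key *k)
{
	if (!k)
		return;
	free(k->blob);
	free(k);
}

int main(void)
{
	const uint8_t blob[8] = { 0 };
	struct fake_key *k = key_parse(blob, sizeof(blob));

	key_free(k);
	return 0;
}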
tk                 29 drivers/input/keyboard/dlink-dir685-touchkeys.c 	struct dir685_touchkeys *tk = data;
tk                 30 drivers/input/keyboard/dlink-dir685-touchkeys.c 	const int num_bits = min_t(int, ARRAY_SIZE(tk->codes), 16);
tk                 38 drivers/input/keyboard/dlink-dir685-touchkeys.c 	err = i2c_master_recv(tk->client, buf, sizeof(buf));
tk                 40 drivers/input/keyboard/dlink-dir685-touchkeys.c 		dev_err(tk->dev, "short read %d\n", err);
tk                 44 drivers/input/keyboard/dlink-dir685-touchkeys.c 	dev_dbg(tk->dev, "IN: %*ph\n", (int)sizeof(buf), buf);
tk                 48 drivers/input/keyboard/dlink-dir685-touchkeys.c 	changed = tk->cur_key ^ key;
tk                 50 drivers/input/keyboard/dlink-dir685-touchkeys.c 		dev_dbg(tk->dev, "key %d is %s\n", i,
tk                 52 drivers/input/keyboard/dlink-dir685-touchkeys.c 		input_report_key(tk->input, tk->codes[i], test_bit(i, &key));
tk                 56 drivers/input/keyboard/dlink-dir685-touchkeys.c 	tk->cur_key = key;
tk                 57 drivers/input/keyboard/dlink-dir685-touchkeys.c 	input_sync(tk->input);
tk                 65 drivers/input/keyboard/dlink-dir685-touchkeys.c 	struct dir685_touchkeys *tk;
tk                 71 drivers/input/keyboard/dlink-dir685-touchkeys.c 	tk = devm_kzalloc(&client->dev, sizeof(*tk), GFP_KERNEL);
tk                 72 drivers/input/keyboard/dlink-dir685-touchkeys.c 	if (!tk)
tk                 75 drivers/input/keyboard/dlink-dir685-touchkeys.c 	tk->input = devm_input_allocate_device(dev);
tk                 76 drivers/input/keyboard/dlink-dir685-touchkeys.c 	if (!tk->input)
tk                 79 drivers/input/keyboard/dlink-dir685-touchkeys.c 	tk->client = client;
tk                 80 drivers/input/keyboard/dlink-dir685-touchkeys.c 	tk->dev = dev;
tk                 82 drivers/input/keyboard/dlink-dir685-touchkeys.c 	tk->input->keycodesize = sizeof(u16);
tk                 83 drivers/input/keyboard/dlink-dir685-touchkeys.c 	tk->input->keycodemax = ARRAY_SIZE(tk->codes);
tk                 84 drivers/input/keyboard/dlink-dir685-touchkeys.c 	tk->input->keycode = tk->codes;
tk                 85 drivers/input/keyboard/dlink-dir685-touchkeys.c 	tk->codes[0] = KEY_UP;
tk                 86 drivers/input/keyboard/dlink-dir685-touchkeys.c 	tk->codes[1] = KEY_DOWN;
tk                 87 drivers/input/keyboard/dlink-dir685-touchkeys.c 	tk->codes[2] = KEY_LEFT;
tk                 88 drivers/input/keyboard/dlink-dir685-touchkeys.c 	tk->codes[3] = KEY_RIGHT;
tk                 89 drivers/input/keyboard/dlink-dir685-touchkeys.c 	tk->codes[4] = KEY_ENTER;
tk                 90 drivers/input/keyboard/dlink-dir685-touchkeys.c 	tk->codes[5] = KEY_WPS_BUTTON;
tk                 95 drivers/input/keyboard/dlink-dir685-touchkeys.c 	tk->codes[6] = KEY_RESERVED;
tk                 97 drivers/input/keyboard/dlink-dir685-touchkeys.c 	__set_bit(EV_KEY, tk->input->evbit);
tk                 98 drivers/input/keyboard/dlink-dir685-touchkeys.c 	for (i = 0; i < ARRAY_SIZE(tk->codes); i++)
tk                 99 drivers/input/keyboard/dlink-dir685-touchkeys.c 		__set_bit(tk->codes[i], tk->input->keybit);
tk                100 drivers/input/keyboard/dlink-dir685-touchkeys.c 	__clear_bit(KEY_RESERVED, tk->input->keybit);
tk                102 drivers/input/keyboard/dlink-dir685-touchkeys.c 	tk->input->name = "D-Link DIR-685 touchkeys";
tk                103 drivers/input/keyboard/dlink-dir685-touchkeys.c 	tk->input->id.bustype = BUS_I2C;
tk                105 drivers/input/keyboard/dlink-dir685-touchkeys.c 	err = input_register_device(tk->input);
tk                112 drivers/input/keyboard/dlink-dir685-touchkeys.c 		dev_warn(tk->dev, "error setting brightness level\n");
tk                121 drivers/input/keyboard/dlink-dir685-touchkeys.c 					"dir685-tk", tk);
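The dlink-dir685-touchkeys interrupt handler above XORs the previously seen key bitmap against the freshly read one and reports only the bits that changed. A small standalone sketch of that changed-bits idea, using made-up key names and plain printf instead of input_report_key():

#include <stdint.h>
#include <stdio.h>

/* Hypothetical labels mirroring the six touchkeys set up in probe() above. */
static const char *names[6] = { "UP", "DOWN", "LEFT", "RIGHT", "ENTER", "WPS" };

/* Report only the keys whose state changed since the last scan, the way the
 * interrupt handler XORs cur_key against the freshly read bitmap. */
static void report_changes(uint16_t prev, uint16_t now)
{
	uint16_t changed = prev ^ now;

	for (int i = 0; i < 6; i++)
		if (changed & (1u << i))
			printf("key %s is %s\n", names[i],
			       (now & (1u << i)) ? "down" : "up");
}

int main(void)
{
	report_changes(0x00, 0x05);	/* UP and LEFT pressed */
	report_changes(0x05, 0x04);	/* UP released, LEFT still held */
	return 0;
}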
tk                385 drivers/staging/rtl8188eu/core/rtw_security.c #define  TK16(N)     Mk16(tk[2*(N)+1], tk[2*(N)])
tk                487 drivers/staging/rtl8188eu/core/rtw_security.c static void phase1(u16 *p1k, const u8 *tk, const u8 *ta, u32 iv32)
tk                532 drivers/staging/rtl8188eu/core/rtw_security.c static void phase2(u8 *rc4key, const u8 *tk, const u16 *p1k, u16 iv16)
tk                372 drivers/staging/rtl8712/rtl871x_security.c #define  TK16(N)  Mk16(tk[2 * (N) + 1], tk[2 * (N)])
tk                474 drivers/staging/rtl8712/rtl871x_security.c static void phase1(u16 *p1k, const u8 *tk, const u8 *ta, u32 iv32)
tk                519 drivers/staging/rtl8712/rtl871x_security.c static void phase2(u8 *rc4key, const u8 *tk, const u16 *p1k, u16 iv16)
tk                451 drivers/staging/rtl8723bs/core/rtw_security.c #define  TK16(N)     Mk16(tk[2*(N)+1], tk[2*(N)])
tk                555 drivers/staging/rtl8723bs/core/rtw_security.c static void phase1(u16 *p1k, const u8 *tk, const u8 *ta, u32 iv32)
tk                603 drivers/staging/rtl8723bs/core/rtw_security.c static void phase2(u8 *rc4key, const u8 *tk, const u16 *p1k, u16 iv16)
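The three rtl* staging drivers each carry the same TKIP phase-1/phase-2 key-mixing code, where tk is the 16-byte temporal key and TK16(N) pairs its bytes into little-endian 16-bit words via Mk16(). A minimal standalone illustration of that byte pairing (hypothetical MK16/TK16 macros, not the drivers' exact definitions):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins mirroring the drivers' Mk16()/TK16() macros:
 * pair consecutive bytes of the 128-bit temporal key, low byte first. */
#define MK16(hi, lo)	((uint16_t)(((uint16_t)(hi) << 8) | (lo)))
#define TK16(tk, n)	MK16((tk)[2 * (n) + 1], (tk)[2 * (n)])

int main(void)
{
	/* Example 16-byte temporal key. */
	const uint8_t tk[16] = {
		0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
		0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f,
	};

	/* Print the eight 16-bit words the phase1()/phase2() mixers consume. */
	for (int n = 0; n < 8; n++)
		printf("TK16(%d) = 0x%04x\n", n, TK16(tk, n));
	return 0;
}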
tk                 22 include/asm-generic/vdso/vsyscall.h static __always_inline int __arch_get_clock_mode(struct timekeeper *tk)
tk                 30 include/asm-generic/vdso/vsyscall.h 						   struct timekeeper *tk)
tk                365 include/linux/kprobes.h void kprobe_flush_task(struct task_struct *tk);
tk                422 include/linux/kprobes.h static inline void kprobe_flush_task(struct task_struct *tk)
tk                143 include/linux/timekeeper_internal.h extern void update_vsyscall(struct timekeeper *tk);
tk                148 include/linux/timekeeper_internal.h static inline void update_vsyscall(struct timekeeper *tk)
tk               1111 include/net/bluetooth/hci_core.h 			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand);
tk               1245 kernel/kprobes.c void kprobe_flush_task(struct task_struct *tk)
tk               1257 kernel/kprobes.c 	hash = hash_ptr(tk, KPROBE_HASH_BITS);
tk               1261 kernel/kprobes.c 		if (ri->task == tk)
tk                 95 kernel/time/timekeeping.c static inline void tk_normalize_xtime(struct timekeeper *tk)
tk                 97 kernel/time/timekeeping.c 	while (tk->tkr_mono.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_mono.shift)) {
tk                 98 kernel/time/timekeeping.c 		tk->tkr_mono.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
tk                 99 kernel/time/timekeeping.c 		tk->xtime_sec++;
tk                101 kernel/time/timekeeping.c 	while (tk->tkr_raw.xtime_nsec >= ((u64)NSEC_PER_SEC << tk->tkr_raw.shift)) {
tk                102 kernel/time/timekeeping.c 		tk->tkr_raw.xtime_nsec -= (u64)NSEC_PER_SEC << tk->tkr_raw.shift;
tk                103 kernel/time/timekeeping.c 		tk->raw_sec++;
tk                107 kernel/time/timekeeping.c static inline struct timespec64 tk_xtime(const struct timekeeper *tk)
tk                111 kernel/time/timekeeping.c 	ts.tv_sec = tk->xtime_sec;
tk                112 kernel/time/timekeeping.c 	ts.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
tk                116 kernel/time/timekeeping.c static void tk_set_xtime(struct timekeeper *tk, const struct timespec64 *ts)
tk                118 kernel/time/timekeeping.c 	tk->xtime_sec = ts->tv_sec;
tk                119 kernel/time/timekeeping.c 	tk->tkr_mono.xtime_nsec = (u64)ts->tv_nsec << tk->tkr_mono.shift;
tk                122 kernel/time/timekeeping.c static void tk_xtime_add(struct timekeeper *tk, const struct timespec64 *ts)
tk                124 kernel/time/timekeeping.c 	tk->xtime_sec += ts->tv_sec;
tk                125 kernel/time/timekeeping.c 	tk->tkr_mono.xtime_nsec += (u64)ts->tv_nsec << tk->tkr_mono.shift;
tk                126 kernel/time/timekeeping.c 	tk_normalize_xtime(tk);
tk                129 kernel/time/timekeeping.c static void tk_set_wall_to_mono(struct timekeeper *tk, struct timespec64 wtm)
tk                137 kernel/time/timekeeping.c 	set_normalized_timespec64(&tmp, -tk->wall_to_monotonic.tv_sec,
tk                138 kernel/time/timekeeping.c 					-tk->wall_to_monotonic.tv_nsec);
tk                139 kernel/time/timekeeping.c 	WARN_ON_ONCE(tk->offs_real != timespec64_to_ktime(tmp));
tk                140 kernel/time/timekeeping.c 	tk->wall_to_monotonic = wtm;
tk                142 kernel/time/timekeeping.c 	tk->offs_real = timespec64_to_ktime(tmp);
tk                143 kernel/time/timekeeping.c 	tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tk->tai_offset, 0));
tk                146 kernel/time/timekeeping.c static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
tk                148 kernel/time/timekeeping.c 	tk->offs_boot = ktime_add(tk->offs_boot, delta);
tk                153 kernel/time/timekeeping.c 	tk->monotonic_to_boot = ktime_to_timespec64(tk->offs_boot);
tk                179 kernel/time/timekeeping.c static void timekeeping_check_update(struct timekeeper *tk, u64 offset)
tk                182 kernel/time/timekeeping.c 	u64 max_cycles = tk->tkr_mono.clock->max_cycles;
tk                183 kernel/time/timekeeping.c 	const char *name = tk->tkr_mono.clock->name;
tk                197 kernel/time/timekeeping.c 	if (tk->underflow_seen) {
tk                198 kernel/time/timekeeping.c 		if (jiffies - tk->last_warning > WARNING_FREQ) {
tk                202 kernel/time/timekeeping.c 			tk->last_warning = jiffies;
tk                204 kernel/time/timekeeping.c 		tk->underflow_seen = 0;
tk                207 kernel/time/timekeeping.c 	if (tk->overflow_seen) {
tk                208 kernel/time/timekeeping.c 		if (jiffies - tk->last_warning > WARNING_FREQ) {
tk                212 kernel/time/timekeeping.c 			tk->last_warning = jiffies;
tk                214 kernel/time/timekeeping.c 		tk->overflow_seen = 0;
tk                220 kernel/time/timekeeping.c 	struct timekeeper *tk = &tk_core.timekeeper;
tk                246 kernel/time/timekeeping.c 		tk->underflow_seen = 1;
tk                252 kernel/time/timekeeping.c 		tk->overflow_seen = 1;
tk                259 kernel/time/timekeeping.c static inline void timekeeping_check_update(struct timekeeper *tk, u64 offset)
tk                287 kernel/time/timekeeping.c static void tk_setup_internals(struct timekeeper *tk, struct clocksource *clock)
tk                293 kernel/time/timekeeping.c 	++tk->cs_was_changed_seq;
tk                294 kernel/time/timekeeping.c 	old_clock = tk->tkr_mono.clock;
tk                295 kernel/time/timekeeping.c 	tk->tkr_mono.clock = clock;
tk                296 kernel/time/timekeeping.c 	tk->tkr_mono.mask = clock->mask;
tk                297 kernel/time/timekeeping.c 	tk->tkr_mono.cycle_last = tk_clock_read(&tk->tkr_mono);
tk                299 kernel/time/timekeeping.c 	tk->tkr_raw.clock = clock;
tk                300 kernel/time/timekeeping.c 	tk->tkr_raw.mask = clock->mask;
tk                301 kernel/time/timekeeping.c 	tk->tkr_raw.cycle_last = tk->tkr_mono.cycle_last;
tk                313 kernel/time/timekeeping.c 	tk->cycle_interval = interval;
tk                316 kernel/time/timekeeping.c 	tk->xtime_interval = interval * clock->mult;
tk                317 kernel/time/timekeeping.c 	tk->xtime_remainder = ntpinterval - tk->xtime_interval;
tk                318 kernel/time/timekeeping.c 	tk->raw_interval = interval * clock->mult;
tk                324 kernel/time/timekeeping.c 			tk->tkr_mono.xtime_nsec >>= -shift_change;
tk                325 kernel/time/timekeeping.c 			tk->tkr_raw.xtime_nsec >>= -shift_change;
tk                327 kernel/time/timekeeping.c 			tk->tkr_mono.xtime_nsec <<= shift_change;
tk                328 kernel/time/timekeeping.c 			tk->tkr_raw.xtime_nsec <<= shift_change;
tk                332 kernel/time/timekeeping.c 	tk->tkr_mono.shift = clock->shift;
tk                333 kernel/time/timekeeping.c 	tk->tkr_raw.shift = clock->shift;
tk                335 kernel/time/timekeeping.c 	tk->ntp_error = 0;
tk                336 kernel/time/timekeeping.c 	tk->ntp_error_shift = NTP_SCALE_SHIFT - clock->shift;
tk                337 kernel/time/timekeeping.c 	tk->ntp_tick = ntpinterval << tk->ntp_error_shift;
tk                344 kernel/time/timekeeping.c 	tk->tkr_mono.mult = clock->mult;
tk                345 kernel/time/timekeeping.c 	tk->tkr_raw.mult = clock->mult;
tk                346 kernel/time/timekeeping.c 	tk->ntp_err_mult = 0;
tk                347 kernel/time/timekeeping.c 	tk->skip_second_overflow = 0;
tk                507 kernel/time/timekeeping.c 	struct timekeeper *tk = &tk_core.timekeeper;
tk                509 kernel/time/timekeeping.c 	return (ktime_get_mono_fast_ns() + ktime_to_ns(tk->offs_boot));
tk                557 kernel/time/timekeeping.c static void halt_fast_timekeeper(const struct timekeeper *tk)
tk                560 kernel/time/timekeeping.c 	const struct tk_read_base *tkr = &tk->tkr_mono;
tk                565 kernel/time/timekeeping.c 	tkr_dummy.base_real = tkr->base + tk->offs_real;
tk                568 kernel/time/timekeeping.c 	tkr = &tk->tkr_raw;
tk                576 kernel/time/timekeeping.c static void update_pvclock_gtod(struct timekeeper *tk, bool was_set)
tk                578 kernel/time/timekeeping.c 	raw_notifier_call_chain(&pvclock_gtod_chain, was_set, tk);
tk                586 kernel/time/timekeeping.c 	struct timekeeper *tk = &tk_core.timekeeper;
tk                592 kernel/time/timekeeping.c 	update_pvclock_gtod(tk, true);
tk                619 kernel/time/timekeeping.c static inline void tk_update_leap_state(struct timekeeper *tk)
tk                621 kernel/time/timekeeping.c 	tk->next_leap_ktime = ntp_get_next_leap();
tk                622 kernel/time/timekeeping.c 	if (tk->next_leap_ktime != KTIME_MAX)
tk                624 kernel/time/timekeeping.c 		tk->next_leap_ktime = ktime_sub(tk->next_leap_ktime, tk->offs_real);
tk                630 kernel/time/timekeeping.c static inline void tk_update_ktime_data(struct timekeeper *tk)
tk                642 kernel/time/timekeeping.c 	seconds = (u64)(tk->xtime_sec + tk->wall_to_monotonic.tv_sec);
tk                643 kernel/time/timekeeping.c 	nsec = (u32) tk->wall_to_monotonic.tv_nsec;
tk                644 kernel/time/timekeeping.c 	tk->tkr_mono.base = ns_to_ktime(seconds * NSEC_PER_SEC + nsec);
tk                651 kernel/time/timekeeping.c 	nsec += (u32)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift);
tk                654 kernel/time/timekeeping.c 	tk->ktime_sec = seconds;
tk                657 kernel/time/timekeeping.c 	tk->tkr_raw.base = ns_to_ktime(tk->raw_sec * NSEC_PER_SEC);
tk                661 kernel/time/timekeeping.c static void timekeeping_update(struct timekeeper *tk, unsigned int action)
tk                664 kernel/time/timekeeping.c 		tk->ntp_error = 0;
tk                668 kernel/time/timekeeping.c 	tk_update_leap_state(tk);
tk                669 kernel/time/timekeeping.c 	tk_update_ktime_data(tk);
tk                671 kernel/time/timekeeping.c 	update_vsyscall(tk);
tk                672 kernel/time/timekeeping.c 	update_pvclock_gtod(tk, action & TK_CLOCK_WAS_SET);
tk                674 kernel/time/timekeeping.c 	tk->tkr_mono.base_real = tk->tkr_mono.base + tk->offs_real;
tk                675 kernel/time/timekeeping.c 	update_fast_timekeeper(&tk->tkr_mono, &tk_fast_mono);
tk                676 kernel/time/timekeeping.c 	update_fast_timekeeper(&tk->tkr_raw,  &tk_fast_raw);
tk                679 kernel/time/timekeeping.c 		tk->clock_was_set_seq++;
tk                697 kernel/time/timekeeping.c static void timekeeping_forward_now(struct timekeeper *tk)
tk                701 kernel/time/timekeeping.c 	cycle_now = tk_clock_read(&tk->tkr_mono);
tk                702 kernel/time/timekeeping.c 	delta = clocksource_delta(cycle_now, tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
tk                703 kernel/time/timekeeping.c 	tk->tkr_mono.cycle_last = cycle_now;
tk                704 kernel/time/timekeeping.c 	tk->tkr_raw.cycle_last  = cycle_now;
tk                706 kernel/time/timekeeping.c 	tk->tkr_mono.xtime_nsec += delta * tk->tkr_mono.mult;
tk                709 kernel/time/timekeeping.c 	tk->tkr_mono.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr_mono.shift;
tk                712 kernel/time/timekeeping.c 	tk->tkr_raw.xtime_nsec += delta * tk->tkr_raw.mult;
tk                715 kernel/time/timekeeping.c 	tk->tkr_raw.xtime_nsec += (u64)arch_gettimeoffset() << tk->tkr_raw.shift;
tk                717 kernel/time/timekeeping.c 	tk_normalize_xtime(tk);
tk                728 kernel/time/timekeeping.c 	struct timekeeper *tk = &tk_core.timekeeper;
tk                737 kernel/time/timekeeping.c 		ts->tv_sec = tk->xtime_sec;
tk                738 kernel/time/timekeeping.c 		nsecs = timekeeping_get_ns(&tk->tkr_mono);
tk                749 kernel/time/timekeeping.c 	struct timekeeper *tk = &tk_core.timekeeper;
tk                758 kernel/time/timekeeping.c 		base = tk->tkr_mono.base;
tk                759 kernel/time/timekeeping.c 		nsecs = timekeeping_get_ns(&tk->tkr_mono);
tk                769 kernel/time/timekeeping.c 	struct timekeeper *tk = &tk_core.timekeeper;
tk                777 kernel/time/timekeeping.c 		nsecs = tk->tkr_mono.mult >> tk->tkr_mono.shift;
tk                792 kernel/time/timekeeping.c 	struct timekeeper *tk = &tk_core.timekeeper;
tk                801 kernel/time/timekeeping.c 		base = ktime_add(tk->tkr_mono.base, *offset);
tk                802 kernel/time/timekeeping.c 		nsecs = timekeeping_get_ns(&tk->tkr_mono);
tk                813 kernel/time/timekeeping.c 	struct timekeeper *tk = &tk_core.timekeeper;
tk                822 kernel/time/timekeeping.c 		base = ktime_add(tk->tkr_mono.base, *offset);
tk                823 kernel/time/timekeeping.c 		nsecs = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;
tk                856 kernel/time/timekeeping.c 	struct timekeeper *tk = &tk_core.timekeeper;
tk                863 kernel/time/timekeeping.c 		base = tk->tkr_raw.base;
tk                864 kernel/time/timekeeping.c 		nsecs = timekeeping_get_ns(&tk->tkr_raw);
tk                882 kernel/time/timekeeping.c 	struct timekeeper *tk = &tk_core.timekeeper;
tk                891 kernel/time/timekeeping.c 		ts->tv_sec = tk->xtime_sec;
tk                892 kernel/time/timekeeping.c 		nsec = timekeeping_get_ns(&tk->tkr_mono);
tk                893 kernel/time/timekeeping.c 		tomono = tk->wall_to_monotonic;
tk                914 kernel/time/timekeeping.c 	struct timekeeper *tk = &tk_core.timekeeper;
tk                917 kernel/time/timekeeping.c 	return tk->ktime_sec;
tk                934 kernel/time/timekeeping.c 	struct timekeeper *tk = &tk_core.timekeeper;
tk                939 kernel/time/timekeeping.c 		return tk->xtime_sec;
tk                943 kernel/time/timekeeping.c 		seconds = tk->xtime_sec;
tk                958 kernel/time/timekeeping.c 	struct timekeeper *tk = &tk_core.timekeeper;
tk                960 kernel/time/timekeeping.c 	return tk->xtime_sec;
tk                969 kernel/time/timekeeping.c 	struct timekeeper *tk = &tk_core.timekeeper;
tk                981 kernel/time/timekeeping.c 		now = tk_clock_read(&tk->tkr_mono);
tk                982 kernel/time/timekeeping.c 		systime_snapshot->cs_was_changed_seq = tk->cs_was_changed_seq;
tk                983 kernel/time/timekeeping.c 		systime_snapshot->clock_was_set_seq = tk->clock_was_set_seq;
tk                984 kernel/time/timekeeping.c 		base_real = ktime_add(tk->tkr_mono.base,
tk                986 kernel/time/timekeeping.c 		base_raw = tk->tkr_raw.base;
tk                987 kernel/time/timekeeping.c 		nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono, now);
tk                988 kernel/time/timekeeping.c 		nsec_raw  = timekeeping_cycles_to_ns(&tk->tkr_raw, now);
tk               1038 kernel/time/timekeeping.c 	struct timekeeper *tk = &tk_core.timekeeper;
tk               1072 kernel/time/timekeeping.c 			(corr_raw, tk->tkr_mono.mult, tk->tkr_raw.mult);
tk               1126 kernel/time/timekeeping.c 	struct timekeeper *tk = &tk_core.timekeeper;
tk               1151 kernel/time/timekeeping.c 		if (tk->tkr_mono.clock != system_counterval.cs)
tk               1159 kernel/time/timekeeping.c 		now = tk_clock_read(&tk->tkr_mono);
tk               1160 kernel/time/timekeeping.c 		interval_start = tk->tkr_mono.cycle_last;
tk               1162 kernel/time/timekeeping.c 			clock_was_set_seq = tk->clock_was_set_seq;
tk               1163 kernel/time/timekeeping.c 			cs_was_changed_seq = tk->cs_was_changed_seq;
tk               1170 kernel/time/timekeeping.c 		base_real = ktime_add(tk->tkr_mono.base,
tk               1172 kernel/time/timekeeping.c 		base_raw = tk->tkr_raw.base;
tk               1174 kernel/time/timekeeping.c 		nsec_real = timekeeping_cycles_to_ns(&tk->tkr_mono,
tk               1176 kernel/time/timekeeping.c 		nsec_raw = timekeeping_cycles_to_ns(&tk->tkr_raw,
tk               1226 kernel/time/timekeeping.c 	struct timekeeper *tk = &tk_core.timekeeper;
tk               1237 kernel/time/timekeeping.c 	timekeeping_forward_now(tk);
tk               1239 kernel/time/timekeeping.c 	xt = tk_xtime(tk);
tk               1243 kernel/time/timekeeping.c 	if (timespec64_compare(&tk->wall_to_monotonic, &ts_delta) > 0) {
tk               1248 kernel/time/timekeeping.c 	tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, ts_delta));
tk               1250 kernel/time/timekeeping.c 	tk_set_xtime(tk, ts);
tk               1252 kernel/time/timekeeping.c 	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
tk               1275 kernel/time/timekeeping.c 	struct timekeeper *tk = &tk_core.timekeeper;
tk               1286 kernel/time/timekeeping.c 	timekeeping_forward_now(tk);
tk               1289 kernel/time/timekeeping.c 	tmp = timespec64_add(tk_xtime(tk), *ts);
tk               1290 kernel/time/timekeeping.c 	if (timespec64_compare(&tk->wall_to_monotonic, ts) > 0 ||
tk               1296 kernel/time/timekeeping.c 	tk_xtime_add(tk, ts);
tk               1297 kernel/time/timekeeping.c 	tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, *ts));
tk               1300 kernel/time/timekeeping.c 	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
tk               1349 kernel/time/timekeeping.c static void __timekeeping_set_tai_offset(struct timekeeper *tk, s32 tai_offset)
tk               1351 kernel/time/timekeeping.c 	tk->tai_offset = tai_offset;
tk               1352 kernel/time/timekeeping.c 	tk->offs_tai = ktime_add(tk->offs_real, ktime_set(tai_offset, 0));
tk               1362 kernel/time/timekeeping.c 	struct timekeeper *tk = &tk_core.timekeeper;
tk               1371 kernel/time/timekeeping.c 	timekeeping_forward_now(tk);
tk               1378 kernel/time/timekeeping.c 			old = tk->tkr_mono.clock;
tk               1379 kernel/time/timekeeping.c 			tk_setup_internals(tk, new);
tk               1387 kernel/time/timekeeping.c 	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
tk               1404 kernel/time/timekeeping.c 	struct timekeeper *tk = &tk_core.timekeeper;
tk               1406 kernel/time/timekeeping.c 	if (tk->tkr_mono.clock == clock)
tk               1410 kernel/time/timekeeping.c 	return tk->tkr_mono.clock == clock ? 0 : -1;
tk               1421 kernel/time/timekeeping.c 	struct timekeeper *tk = &tk_core.timekeeper;
tk               1427 kernel/time/timekeeping.c 		ts->tv_sec = tk->raw_sec;
tk               1428 kernel/time/timekeeping.c 		nsecs = timekeeping_get_ns(&tk->tkr_raw);
tk               1443 kernel/time/timekeeping.c 	struct timekeeper *tk = &tk_core.timekeeper;
tk               1450 kernel/time/timekeeping.c 		ret = tk->tkr_mono.clock->flags & CLOCK_SOURCE_VALID_FOR_HRES;
tk               1462 kernel/time/timekeeping.c 	struct timekeeper *tk = &tk_core.timekeeper;
tk               1469 kernel/time/timekeeping.c 		ret = tk->tkr_mono.clock->max_idle_ns;
tk               1535 kernel/time/timekeeping.c 	struct timekeeper *tk = &tk_core.timekeeper;
tk               1564 kernel/time/timekeeping.c 	tk_setup_internals(tk, clock);
tk               1566 kernel/time/timekeeping.c 	tk_set_xtime(tk, &wall_time);
tk               1567 kernel/time/timekeeping.c 	tk->raw_sec = 0;
tk               1569 kernel/time/timekeeping.c 	tk_set_wall_to_mono(tk, wall_to_mono);
tk               1571 kernel/time/timekeeping.c 	timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
tk               1587 kernel/time/timekeeping.c static void __timekeeping_inject_sleeptime(struct timekeeper *tk,
tk               1596 kernel/time/timekeeping.c 	tk_xtime_add(tk, delta);
tk               1597 kernel/time/timekeeping.c 	tk_set_wall_to_mono(tk, timespec64_sub(tk->wall_to_monotonic, *delta));
tk               1598 kernel/time/timekeeping.c 	tk_update_sleep_time(tk, timespec64_to_ktime(*delta));
tk               1651 kernel/time/timekeeping.c 	struct timekeeper *tk = &tk_core.timekeeper;
tk               1659 kernel/time/timekeeping.c 	timekeeping_forward_now(tk);
tk               1661 kernel/time/timekeeping.c 	__timekeeping_inject_sleeptime(tk, delta);
tk               1663 kernel/time/timekeeping.c 	timekeeping_update(tk, TK_CLEAR_NTP | TK_MIRROR | TK_CLOCK_WAS_SET);
tk               1678 kernel/time/timekeeping.c 	struct timekeeper *tk = &tk_core.timekeeper;
tk               1679 kernel/time/timekeeping.c 	struct clocksource *clock = tk->tkr_mono.clock;
tk               1705 kernel/time/timekeeping.c 	cycle_now = tk_clock_read(&tk->tkr_mono);
tk               1717 kernel/time/timekeeping.c 		__timekeeping_inject_sleeptime(tk, &ts_delta);
tk               1721 kernel/time/timekeeping.c 	tk->tkr_mono.cycle_last = cycle_now;
tk               1722 kernel/time/timekeeping.c 	tk->tkr_raw.cycle_last  = cycle_now;
tk               1724 kernel/time/timekeeping.c 	tk->ntp_error = 0;
tk               1726 kernel/time/timekeeping.c 	timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
tk               1738 kernel/time/timekeeping.c 	struct timekeeper *tk = &tk_core.timekeeper;
tk               1759 kernel/time/timekeeping.c 	timekeeping_forward_now(tk);
tk               1767 kernel/time/timekeeping.c 	curr_clock = tk->tkr_mono.clock;
tk               1768 kernel/time/timekeeping.c 	cycle_now = tk->tkr_mono.cycle_last;
tk               1778 kernel/time/timekeeping.c 		delta = timespec64_sub(tk_xtime(tk), timekeeping_suspend_time);
tk               1793 kernel/time/timekeeping.c 	timekeeping_update(tk, TK_MIRROR);
tk               1794 kernel/time/timekeeping.c 	halt_fast_timekeeper(tk);
tk               1821 kernel/time/timekeeping.c static __always_inline void timekeeping_apply_adjustment(struct timekeeper *tk,
tk               1825 kernel/time/timekeeping.c 	s64 interval = tk->cycle_interval;
tk               1884 kernel/time/timekeeping.c 	if ((mult_adj > 0) && (tk->tkr_mono.mult + mult_adj < mult_adj)) {
tk               1890 kernel/time/timekeeping.c 	tk->tkr_mono.mult += mult_adj;
tk               1891 kernel/time/timekeeping.c 	tk->xtime_interval += interval;
tk               1892 kernel/time/timekeeping.c 	tk->tkr_mono.xtime_nsec -= offset;
tk               1899 kernel/time/timekeeping.c static void timekeeping_adjust(struct timekeeper *tk, s64 offset)
tk               1907 kernel/time/timekeeping.c 	if (likely(tk->ntp_tick == ntp_tick_length())) {
tk               1908 kernel/time/timekeeping.c 		mult = tk->tkr_mono.mult - tk->ntp_err_mult;
tk               1910 kernel/time/timekeeping.c 		tk->ntp_tick = ntp_tick_length();
tk               1911 kernel/time/timekeeping.c 		mult = div64_u64((tk->ntp_tick >> tk->ntp_error_shift) -
tk               1912 kernel/time/timekeeping.c 				 tk->xtime_remainder, tk->cycle_interval);
tk               1921 kernel/time/timekeeping.c 	tk->ntp_err_mult = tk->ntp_error > 0 ? 1 : 0;
tk               1922 kernel/time/timekeeping.c 	mult += tk->ntp_err_mult;
tk               1924 kernel/time/timekeeping.c 	timekeeping_apply_adjustment(tk, offset, mult - tk->tkr_mono.mult);
tk               1926 kernel/time/timekeeping.c 	if (unlikely(tk->tkr_mono.clock->maxadj &&
tk               1927 kernel/time/timekeeping.c 		(abs(tk->tkr_mono.mult - tk->tkr_mono.clock->mult)
tk               1928 kernel/time/timekeeping.c 			> tk->tkr_mono.clock->maxadj))) {
tk               1931 kernel/time/timekeeping.c 			tk->tkr_mono.clock->name, (long)tk->tkr_mono.mult,
tk               1932 kernel/time/timekeeping.c 			(long)tk->tkr_mono.clock->mult + tk->tkr_mono.clock->maxadj);
tk               1945 kernel/time/timekeeping.c 	if (unlikely((s64)tk->tkr_mono.xtime_nsec < 0)) {
tk               1946 kernel/time/timekeeping.c 		tk->tkr_mono.xtime_nsec += (u64)NSEC_PER_SEC <<
tk               1947 kernel/time/timekeeping.c 							tk->tkr_mono.shift;
tk               1948 kernel/time/timekeeping.c 		tk->xtime_sec--;
tk               1949 kernel/time/timekeeping.c 		tk->skip_second_overflow = 1;
tk               1961 kernel/time/timekeeping.c static inline unsigned int accumulate_nsecs_to_secs(struct timekeeper *tk)
tk               1963 kernel/time/timekeeping.c 	u64 nsecps = (u64)NSEC_PER_SEC << tk->tkr_mono.shift;
tk               1966 kernel/time/timekeeping.c 	while (tk->tkr_mono.xtime_nsec >= nsecps) {
tk               1969 kernel/time/timekeeping.c 		tk->tkr_mono.xtime_nsec -= nsecps;
tk               1970 kernel/time/timekeeping.c 		tk->xtime_sec++;
tk               1976 kernel/time/timekeeping.c 		if (unlikely(tk->skip_second_overflow)) {
tk               1977 kernel/time/timekeeping.c 			tk->skip_second_overflow = 0;
tk               1982 kernel/time/timekeeping.c 		leap = second_overflow(tk->xtime_sec);
tk               1986 kernel/time/timekeeping.c 			tk->xtime_sec += leap;
tk               1990 kernel/time/timekeeping.c 			tk_set_wall_to_mono(tk,
tk               1991 kernel/time/timekeeping.c 				timespec64_sub(tk->wall_to_monotonic, ts));
tk               1993 kernel/time/timekeeping.c 			__timekeeping_set_tai_offset(tk, tk->tai_offset - leap);
tk               2010 kernel/time/timekeeping.c static u64 logarithmic_accumulation(struct timekeeper *tk, u64 offset,
tk               2013 kernel/time/timekeeping.c 	u64 interval = tk->cycle_interval << shift;
tk               2022 kernel/time/timekeeping.c 	tk->tkr_mono.cycle_last += interval;
tk               2023 kernel/time/timekeeping.c 	tk->tkr_raw.cycle_last  += interval;
tk               2025 kernel/time/timekeeping.c 	tk->tkr_mono.xtime_nsec += tk->xtime_interval << shift;
tk               2026 kernel/time/timekeeping.c 	*clock_set |= accumulate_nsecs_to_secs(tk);
tk               2029 kernel/time/timekeeping.c 	tk->tkr_raw.xtime_nsec += tk->raw_interval << shift;
tk               2030 kernel/time/timekeeping.c 	snsec_per_sec = (u64)NSEC_PER_SEC << tk->tkr_raw.shift;
tk               2031 kernel/time/timekeeping.c 	while (tk->tkr_raw.xtime_nsec >= snsec_per_sec) {
tk               2032 kernel/time/timekeeping.c 		tk->tkr_raw.xtime_nsec -= snsec_per_sec;
tk               2033 kernel/time/timekeeping.c 		tk->raw_sec++;
tk               2037 kernel/time/timekeeping.c 	tk->ntp_error += tk->ntp_tick << shift;
tk               2038 kernel/time/timekeeping.c 	tk->ntp_error -= (tk->xtime_interval + tk->xtime_remainder) <<
tk               2039 kernel/time/timekeeping.c 						(tk->ntp_error_shift + shift);
tk               2051 kernel/time/timekeeping.c 	struct timekeeper *tk = &shadow_timekeeper;
tk               2069 kernel/time/timekeeping.c 	offset = clocksource_delta(tk_clock_read(&tk->tkr_mono),
tk               2070 kernel/time/timekeeping.c 				   tk->tkr_mono.cycle_last, tk->tkr_mono.mask);
tk               2078 kernel/time/timekeeping.c 	timekeeping_check_update(tk, offset);
tk               2088 kernel/time/timekeeping.c 	shift = ilog2(offset) - ilog2(tk->cycle_interval);
tk               2093 kernel/time/timekeeping.c 	while (offset >= tk->cycle_interval) {
tk               2094 kernel/time/timekeeping.c 		offset = logarithmic_accumulation(tk, offset, shift,
tk               2096 kernel/time/timekeeping.c 		if (offset < tk->cycle_interval<<shift)
tk               2101 kernel/time/timekeeping.c 	timekeeping_adjust(tk, offset);
tk               2107 kernel/time/timekeeping.c 	clock_set |= accumulate_nsecs_to_secs(tk);
tk               2120 kernel/time/timekeeping.c 	timekeeping_update(tk, clock_set);
tk               2121 kernel/time/timekeeping.c 	memcpy(real_tk, tk, sizeof(*tk));
tk               2153 kernel/time/timekeeping.c 	struct timekeeper *tk = &tk_core.timekeeper;
tk               2154 kernel/time/timekeeping.c 	ktime_t t = ktime_sub(tk->offs_real, tk->offs_boot);
tk               2162 kernel/time/timekeeping.c 	struct timekeeper *tk = &tk_core.timekeeper;
tk               2168 kernel/time/timekeeping.c 		*ts = tk_xtime(tk);
tk               2175 kernel/time/timekeeping.c 	struct timekeeper *tk = &tk_core.timekeeper;
tk               2182 kernel/time/timekeeping.c 		now = tk_xtime(tk);
tk               2183 kernel/time/timekeeping.c 		mono = tk->wall_to_monotonic;
tk               2216 kernel/time/timekeeping.c 	struct timekeeper *tk = &tk_core.timekeeper;
tk               2224 kernel/time/timekeeping.c 		base = tk->tkr_mono.base;
tk               2225 kernel/time/timekeeping.c 		nsecs = timekeeping_get_ns(&tk->tkr_mono);
tk               2228 kernel/time/timekeeping.c 		if (*cwsseq != tk->clock_was_set_seq) {
tk               2229 kernel/time/timekeeping.c 			*cwsseq = tk->clock_was_set_seq;
tk               2230 kernel/time/timekeeping.c 			*offs_real = tk->offs_real;
tk               2231 kernel/time/timekeeping.c 			*offs_boot = tk->offs_boot;
tk               2232 kernel/time/timekeeping.c 			*offs_tai = tk->offs_tai;
tk               2236 kernel/time/timekeeping.c 		if (unlikely(base >= tk->next_leap_ktime))
tk               2237 kernel/time/timekeeping.c 			*offs_real = ktime_sub(tk->offs_real, ktime_set(1, 0));
tk               2315 kernel/time/timekeeping.c 	struct timekeeper *tk = &tk_core.timekeeper;
tk               2347 kernel/time/timekeeping.c 	orig_tai = tai = tk->tai_offset;
tk               2351 kernel/time/timekeeping.c 		__timekeeping_set_tai_offset(tk, tai);
tk               2352 kernel/time/timekeeping.c 		timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
tk               2354 kernel/time/timekeeping.c 	tk_update_leap_state(tk);
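Most of the kernel/time/timekeeping.c hits read or refresh the tkr_mono/tkr_raw read bases, whose job is to turn a clocksource delta into nanoseconds using a cached mult/shift pair plus a shifted-nanosecond remainder. A self-contained sketch of that conversion, using a trimmed, hypothetical fake_tkr instead of the kernel's struct tk_read_base:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

/* Hypothetical, trimmed-down read base: only the fields the delta->ns
 * conversion in timekeeping_get_ns()-style code actually needs. */
struct fake_tkr {
	uint64_t cycle_last;	/* clocksource value at the last update */
	uint64_t mask;		/* clocksource counter mask */
	uint32_t mult;		/* cycles -> shifted-ns multiplier */
	uint32_t shift;		/* right shift to get plain nanoseconds */
	uint64_t xtime_nsec;	/* shifted-ns remainder kept by the timekeeper */
};

/* Convert "cycles since the last update" into nanoseconds: mask the delta,
 * scale by mult, add the shifted remainder, then shift down. */
static uint64_t cycles_to_ns(const struct fake_tkr *tkr, uint64_t cycle_now)
{
	uint64_t delta = (cycle_now - tkr->cycle_last) & tkr->mask;

	return (tkr->xtime_nsec + delta * tkr->mult) >> tkr->shift;
}

int main(void)
{
	struct fake_tkr tkr = {
		.cycle_last = 1000, .mask = ~0ULL,
		.mult = 1 << 10, .shift = 10,	/* 1 cycle == 1 ns here */
		.xtime_nsec = 500ULL << 10,	/* 500 ns carried over */
	};

	printf("ns since last update: %llu\n",
	       (unsigned long long)cycles_to_ns(&tkr, 1250));
	return 0;
}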
tk                 17 kernel/time/vsyscall.c 				    struct timekeeper *tk)
tk                 22 kernel/time/vsyscall.c 	vdata[CS_HRES_COARSE].cycle_last	= tk->tkr_mono.cycle_last;
tk                 23 kernel/time/vsyscall.c 	vdata[CS_HRES_COARSE].mask		= tk->tkr_mono.mask;
tk                 24 kernel/time/vsyscall.c 	vdata[CS_HRES_COARSE].mult		= tk->tkr_mono.mult;
tk                 25 kernel/time/vsyscall.c 	vdata[CS_HRES_COARSE].shift		= tk->tkr_mono.shift;
tk                 26 kernel/time/vsyscall.c 	vdata[CS_RAW].cycle_last		= tk->tkr_raw.cycle_last;
tk                 27 kernel/time/vsyscall.c 	vdata[CS_RAW].mask			= tk->tkr_raw.mask;
tk                 28 kernel/time/vsyscall.c 	vdata[CS_RAW].mult			= tk->tkr_raw.mult;
tk                 29 kernel/time/vsyscall.c 	vdata[CS_RAW].shift			= tk->tkr_raw.shift;
tk                 33 kernel/time/vsyscall.c 	vdso_ts->sec	= tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
tk                 35 kernel/time/vsyscall.c 	nsec = tk->tkr_mono.xtime_nsec;
tk                 36 kernel/time/vsyscall.c 	nsec += ((u64)tk->wall_to_monotonic.tv_nsec << tk->tkr_mono.shift);
tk                 37 kernel/time/vsyscall.c 	while (nsec >= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
tk                 38 kernel/time/vsyscall.c 		nsec -= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift);
tk                 46 kernel/time/vsyscall.c 	sec	+= tk->monotonic_to_boot.tv_sec;
tk                 47 kernel/time/vsyscall.c 	nsec	+= (u64)tk->monotonic_to_boot.tv_nsec << tk->tkr_mono.shift;
tk                 53 kernel/time/vsyscall.c 	while (nsec >= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
tk                 54 kernel/time/vsyscall.c 		nsec -= (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift);
tk                 61 kernel/time/vsyscall.c 	vdso_ts->sec	= tk->raw_sec;
tk                 62 kernel/time/vsyscall.c 	vdso_ts->nsec	= tk->tkr_raw.xtime_nsec;
tk                 66 kernel/time/vsyscall.c 	vdso_ts->sec	= tk->xtime_sec + (s64)tk->tai_offset;
tk                 67 kernel/time/vsyscall.c 	vdso_ts->nsec	= tk->tkr_mono.xtime_nsec;
tk                 70 kernel/time/vsyscall.c void update_vsyscall(struct timekeeper *tk)
tk                 79 kernel/time/vsyscall.c 	vdata[CS_HRES_COARSE].clock_mode	= __arch_get_clock_mode(tk);
tk                 80 kernel/time/vsyscall.c 	vdata[CS_RAW].clock_mode		= __arch_get_clock_mode(tk);
tk                 84 kernel/time/vsyscall.c 	vdso_ts->sec	= tk->xtime_sec;
tk                 85 kernel/time/vsyscall.c 	vdso_ts->nsec	= tk->tkr_mono.xtime_nsec;
tk                 89 kernel/time/vsyscall.c 	vdso_ts->sec	= tk->xtime_sec;
tk                 90 kernel/time/vsyscall.c 	vdso_ts->nsec	= tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;
tk                 94 kernel/time/vsyscall.c 	vdso_ts->sec	= tk->xtime_sec + tk->wall_to_monotonic.tv_sec;
tk                 95 kernel/time/vsyscall.c 	nsec		= tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;
tk                 96 kernel/time/vsyscall.c 	nsec		= nsec + tk->wall_to_monotonic.tv_nsec;
tk                110 kernel/time/vsyscall.c 		update_vdso_data(vdata, tk);
tk                112 kernel/time/vsyscall.c 	__arch_update_vsyscall(vdata, tk);
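The generic kernel/time/vsyscall.c hits show update_vsyscall() building the vDSO's monotonic timestamps by adding wall_to_monotonic in the shifted-nanosecond domain and then folding whole seconds out of the remainder. A rough standalone sketch of that normalization, with a hypothetical fake_tk in place of the real timekeeper and vdso_data layout:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

/* Hypothetical, simplified view of the timekeeper fields used here. */
struct fake_tk {
	uint64_t xtime_sec;	/* CLOCK_REALTIME seconds */
	uint64_t xtime_nsec;	/* realtime nanoseconds, left-shifted by 'shift' */
	uint32_t shift;
	int64_t  wtm_sec;	/* wall_to_monotonic.tv_sec (usually negative) */
	int64_t  wtm_nsec;	/* wall_to_monotonic.tv_nsec */
};

/* Derive coarse CLOCK_MONOTONIC seconds/nanoseconds the way the
 * update_vdso_data() pattern above does: add the wall->mono offset in the
 * shifted domain, then fold whole seconds out of the remainder. */
static void coarse_monotonic(const struct fake_tk *tk, uint64_t *sec, uint64_t *nsec)
{
	uint64_t s = tk->xtime_sec + (uint64_t)tk->wtm_sec;
	uint64_t ns = tk->xtime_nsec + ((uint64_t)tk->wtm_nsec << tk->shift);

	while (ns >= (NSEC_PER_SEC << tk->shift)) {
		ns -= NSEC_PER_SEC << tk->shift;
		s++;
	}
	*sec = s;
	*nsec = ns >> tk->shift;
}

int main(void)
{
	struct fake_tk tk = {
		.xtime_sec = 1000, .xtime_nsec = 100ULL << 8, .shift = 8,
		.wtm_sec = -3, .wtm_nsec = 999999950,
	};
	uint64_t sec, nsec;

	coarse_monotonic(&tk, &sec, &nsec);
	printf("monotonic coarse: %llu.%09llu\n",
	       (unsigned long long)sec, (unsigned long long)nsec);
	return 0;
}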
tk                 87 kernel/trace/trace_kprobe.c static nokprobe_inline bool trace_kprobe_is_return(struct trace_kprobe *tk)
tk                 89 kernel/trace/trace_kprobe.c 	return tk->rp.handler != NULL;
tk                 92 kernel/trace/trace_kprobe.c static nokprobe_inline const char *trace_kprobe_symbol(struct trace_kprobe *tk)
tk                 94 kernel/trace/trace_kprobe.c 	return tk->symbol ? tk->symbol : "unknown";
tk                 97 kernel/trace/trace_kprobe.c static nokprobe_inline unsigned long trace_kprobe_offset(struct trace_kprobe *tk)
tk                 99 kernel/trace/trace_kprobe.c 	return tk->rp.kp.offset;
tk                102 kernel/trace/trace_kprobe.c static nokprobe_inline bool trace_kprobe_has_gone(struct trace_kprobe *tk)
tk                104 kernel/trace/trace_kprobe.c 	return !!(kprobe_gone(&tk->rp.kp));
tk                107 kernel/trace/trace_kprobe.c static nokprobe_inline bool trace_kprobe_within_module(struct trace_kprobe *tk,
tk                111 kernel/trace/trace_kprobe.c 	const char *name = trace_kprobe_symbol(tk);
tk                115 kernel/trace/trace_kprobe.c static nokprobe_inline bool trace_kprobe_module_exist(struct trace_kprobe *tk)
tk                120 kernel/trace/trace_kprobe.c 	if (!tk->symbol)
tk                122 kernel/trace/trace_kprobe.c 	p = strchr(tk->symbol, ':');
tk                127 kernel/trace/trace_kprobe.c 	ret = !!find_module(tk->symbol);
tk                136 kernel/trace/trace_kprobe.c 	struct trace_kprobe *tk = to_trace_kprobe(ev);
tk                138 kernel/trace/trace_kprobe.c 	return trace_probe_is_enabled(&tk->tp);
tk                141 kernel/trace/trace_kprobe.c static bool trace_kprobe_match_command_head(struct trace_kprobe *tk,
tk                149 kernel/trace/trace_kprobe.c 	if (!tk->symbol)
tk                150 kernel/trace/trace_kprobe.c 		snprintf(buf, sizeof(buf), "0x%p", tk->rp.kp.addr);
tk                151 kernel/trace/trace_kprobe.c 	else if (tk->rp.kp.offset)
tk                153 kernel/trace/trace_kprobe.c 			 trace_kprobe_symbol(tk), tk->rp.kp.offset);
tk                155 kernel/trace/trace_kprobe.c 		snprintf(buf, sizeof(buf), "%s", trace_kprobe_symbol(tk));
tk                160 kernel/trace/trace_kprobe.c 	return trace_probe_match_command_args(&tk->tp, argc, argv);
tk                166 kernel/trace/trace_kprobe.c 	struct trace_kprobe *tk = to_trace_kprobe(ev);
tk                168 kernel/trace/trace_kprobe.c 	return strcmp(trace_probe_name(&tk->tp), event) == 0 &&
tk                169 kernel/trace/trace_kprobe.c 	    (!system || strcmp(trace_probe_group_name(&tk->tp), system) == 0) &&
tk                170 kernel/trace/trace_kprobe.c 	    trace_kprobe_match_command_head(tk, argc, argv);
tk                173 kernel/trace/trace_kprobe.c static nokprobe_inline unsigned long trace_kprobe_nhit(struct trace_kprobe *tk)
tk                179 kernel/trace/trace_kprobe.c 		nhit += *per_cpu_ptr(tk->nhit, cpu);
tk                184 kernel/trace/trace_kprobe.c static nokprobe_inline bool trace_kprobe_is_registered(struct trace_kprobe *tk)
tk                186 kernel/trace/trace_kprobe.c 	return !(list_empty(&tk->rp.kp.list) &&
tk                187 kernel/trace/trace_kprobe.c 		 hlist_unhashed(&tk->rp.kp.hlist));
tk                192 kernel/trace/trace_kprobe.c unsigned long trace_kprobe_address(struct trace_kprobe *tk)
tk                196 kernel/trace/trace_kprobe.c 	if (tk->symbol) {
tk                198 kernel/trace/trace_kprobe.c 			kallsyms_lookup_name(trace_kprobe_symbol(tk));
tk                200 kernel/trace/trace_kprobe.c 			addr += tk->rp.kp.offset;
tk                202 kernel/trace/trace_kprobe.c 		addr = (unsigned long)tk->rp.kp.addr;
tk                221 kernel/trace/trace_kprobe.c 	struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);
tk                223 kernel/trace/trace_kprobe.c 	return tk ? kprobe_on_func_entry(tk->rp.kp.addr,
tk                224 kernel/trace/trace_kprobe.c 			tk->rp.kp.addr ? NULL : tk->rp.kp.symbol_name,
tk                225 kernel/trace/trace_kprobe.c 			tk->rp.kp.addr ? 0 : tk->rp.kp.offset) : false;
tk                230 kernel/trace/trace_kprobe.c 	struct trace_kprobe *tk = trace_kprobe_primary_from_call(call);
tk                232 kernel/trace/trace_kprobe.c 	return tk ? within_error_injection_list(trace_kprobe_address(tk)) :
tk                236 kernel/trace/trace_kprobe.c static int register_kprobe_event(struct trace_kprobe *tk);
tk                237 kernel/trace/trace_kprobe.c static int unregister_kprobe_event(struct trace_kprobe *tk);
tk                243 kernel/trace/trace_kprobe.c static void free_trace_kprobe(struct trace_kprobe *tk)
tk                245 kernel/trace/trace_kprobe.c 	if (tk) {
tk                246 kernel/trace/trace_kprobe.c 		trace_probe_cleanup(&tk->tp);
tk                247 kernel/trace/trace_kprobe.c 		kfree(tk->symbol);
tk                248 kernel/trace/trace_kprobe.c 		free_percpu(tk->nhit);
tk                249 kernel/trace/trace_kprobe.c 		kfree(tk);
tk                264 kernel/trace/trace_kprobe.c 	struct trace_kprobe *tk;
tk                267 kernel/trace/trace_kprobe.c 	tk = kzalloc(SIZEOF_TRACE_KPROBE(nargs), GFP_KERNEL);
tk                268 kernel/trace/trace_kprobe.c 	if (!tk)
tk                271 kernel/trace/trace_kprobe.c 	tk->nhit = alloc_percpu(unsigned long);
tk                272 kernel/trace/trace_kprobe.c 	if (!tk->nhit)
tk                276 kernel/trace/trace_kprobe.c 		tk->symbol = kstrdup(symbol, GFP_KERNEL);
tk                277 kernel/trace/trace_kprobe.c 		if (!tk->symbol)
tk                279 kernel/trace/trace_kprobe.c 		tk->rp.kp.symbol_name = tk->symbol;
tk                280 kernel/trace/trace_kprobe.c 		tk->rp.kp.offset = offs;
tk                282 kernel/trace/trace_kprobe.c 		tk->rp.kp.addr = addr;
tk                285 kernel/trace/trace_kprobe.c 		tk->rp.handler = kretprobe_dispatcher;
tk                287 kernel/trace/trace_kprobe.c 		tk->rp.kp.pre_handler = kprobe_dispatcher;
tk                289 kernel/trace/trace_kprobe.c 	tk->rp.maxactive = maxactive;
tk                290 kernel/trace/trace_kprobe.c 	INIT_HLIST_NODE(&tk->rp.kp.hlist);
tk                291 kernel/trace/trace_kprobe.c 	INIT_LIST_HEAD(&tk->rp.kp.list);
tk                293 kernel/trace/trace_kprobe.c 	ret = trace_probe_init(&tk->tp, event, group, false);
tk                297 kernel/trace/trace_kprobe.c 	dyn_event_init(&tk->devent, &trace_kprobe_ops);
tk                298 kernel/trace/trace_kprobe.c 	return tk;
tk                300 kernel/trace/trace_kprobe.c 	free_trace_kprobe(tk);
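
The alloc_trace_kprobe() lines above size a single allocation for the probe plus its argument slots and unwind through free_trace_kprobe() on any failure. A hedged userspace sketch of that allocate-or-unwind shape, with invented names (struct probe, probe_alloc, probe_free), calloc standing in for kzalloc, and NULL returned where the kernel returns ERR_PTR():

#include <stdlib.h>
#include <string.h>

struct probe {
	char *symbol;
	unsigned long *nhit;
	size_t nr_args;
	struct { const char *name; } args[];	/* flexible array, like SIZEOF_TRACE_KPROBE(nargs) */
};

static void probe_free(struct probe *p)
{
	if (p) {
		free(p->symbol);
		free(p->nhit);
		free(p);
	}
}

static struct probe *probe_alloc(const char *symbol, size_t nargs)
{
	struct probe *p = calloc(1, sizeof(*p) + nargs * sizeof(p->args[0]));

	if (!p)
		return NULL;
	p->nr_args = nargs;

	p->nhit = calloc(1, sizeof(*p->nhit));
	if (!p->nhit)
		goto error;

	if (symbol) {
		p->symbol = strdup(symbol);
		if (!p->symbol)
			goto error;
	}
	return p;
error:
	probe_free(p);		/* single teardown path, as in free_trace_kprobe() */
	return NULL;
}
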
tk                308 kernel/trace/trace_kprobe.c 	struct trace_kprobe *tk;
tk                310 kernel/trace/trace_kprobe.c 	for_each_trace_kprobe(tk, pos)
tk                311 kernel/trace/trace_kprobe.c 		if (strcmp(trace_probe_name(&tk->tp), event) == 0 &&
tk                312 kernel/trace/trace_kprobe.c 		    strcmp(trace_probe_group_name(&tk->tp), group) == 0)
tk                313 kernel/trace/trace_kprobe.c 			return tk;
tk                317 kernel/trace/trace_kprobe.c static inline int __enable_trace_kprobe(struct trace_kprobe *tk)
tk                321 kernel/trace/trace_kprobe.c 	if (trace_kprobe_is_registered(tk) && !trace_kprobe_has_gone(tk)) {
tk                322 kernel/trace/trace_kprobe.c 		if (trace_kprobe_is_return(tk))
tk                323 kernel/trace/trace_kprobe.c 			ret = enable_kretprobe(&tk->rp);
tk                325 kernel/trace/trace_kprobe.c 			ret = enable_kprobe(&tk->rp.kp);
tk                334 kernel/trace/trace_kprobe.c 	struct trace_kprobe *tk;
tk                337 kernel/trace/trace_kprobe.c 		tk = container_of(pos, struct trace_kprobe, tp);
tk                338 kernel/trace/trace_kprobe.c 		if (!trace_kprobe_is_registered(tk))
tk                340 kernel/trace/trace_kprobe.c 		if (trace_kprobe_is_return(tk))
tk                341 kernel/trace/trace_kprobe.c 			disable_kretprobe(&tk->rp);
tk                343 kernel/trace/trace_kprobe.c 			disable_kprobe(&tk->rp.kp);
tk                355 kernel/trace/trace_kprobe.c 	struct trace_kprobe *tk;
tk                376 kernel/trace/trace_kprobe.c 		tk = container_of(pos, struct trace_kprobe, tp);
tk                377 kernel/trace/trace_kprobe.c 		if (trace_kprobe_has_gone(tk))
tk                379 kernel/trace/trace_kprobe.c 		ret = __enable_trace_kprobe(tk);
tk                455 kernel/trace/trace_kprobe.c static bool within_notrace_func(struct trace_kprobe *tk)
tk                457 kernel/trace/trace_kprobe.c 	unsigned long addr = trace_kprobe_address(tk);
tk                477 kernel/trace/trace_kprobe.c #define within_notrace_func(tk)	(false)
tk                481 kernel/trace/trace_kprobe.c static int __register_trace_kprobe(struct trace_kprobe *tk)
tk                489 kernel/trace/trace_kprobe.c 	if (trace_kprobe_is_registered(tk))
tk                492 kernel/trace/trace_kprobe.c 	if (within_notrace_func(tk)) {
tk                494 kernel/trace/trace_kprobe.c 			trace_kprobe_symbol(tk));
tk                498 kernel/trace/trace_kprobe.c 	for (i = 0; i < tk->tp.nr_args; i++) {
tk                499 kernel/trace/trace_kprobe.c 		ret = traceprobe_update_arg(&tk->tp.args[i]);
tk                505 kernel/trace/trace_kprobe.c 	if (trace_probe_is_enabled(&tk->tp))
tk                506 kernel/trace/trace_kprobe.c 		tk->rp.kp.flags &= ~KPROBE_FLAG_DISABLED;
tk                508 kernel/trace/trace_kprobe.c 		tk->rp.kp.flags |= KPROBE_FLAG_DISABLED;
tk                510 kernel/trace/trace_kprobe.c 	if (trace_kprobe_is_return(tk))
tk                511 kernel/trace/trace_kprobe.c 		ret = register_kretprobe(&tk->rp);
tk                513 kernel/trace/trace_kprobe.c 		ret = register_kprobe(&tk->rp.kp);
tk                519 kernel/trace/trace_kprobe.c static void __unregister_trace_kprobe(struct trace_kprobe *tk)
tk                521 kernel/trace/trace_kprobe.c 	if (trace_kprobe_is_registered(tk)) {
tk                522 kernel/trace/trace_kprobe.c 		if (trace_kprobe_is_return(tk))
tk                523 kernel/trace/trace_kprobe.c 			unregister_kretprobe(&tk->rp);
tk                525 kernel/trace/trace_kprobe.c 			unregister_kprobe(&tk->rp.kp);
tk                527 kernel/trace/trace_kprobe.c 		INIT_HLIST_NODE(&tk->rp.kp.hlist);
tk                528 kernel/trace/trace_kprobe.c 		INIT_LIST_HEAD(&tk->rp.kp.list);
tk                529 kernel/trace/trace_kprobe.c 		if (tk->rp.kp.symbol_name)
tk                530 kernel/trace/trace_kprobe.c 			tk->rp.kp.addr = NULL;
tk                535 kernel/trace/trace_kprobe.c static int unregister_trace_kprobe(struct trace_kprobe *tk)
tk                538 kernel/trace/trace_kprobe.c 	if (trace_probe_has_sibling(&tk->tp))
tk                542 kernel/trace/trace_kprobe.c 	if (trace_probe_is_enabled(&tk->tp))
tk                546 kernel/trace/trace_kprobe.c 	if (unregister_kprobe_event(tk))
tk                550 kernel/trace/trace_kprobe.c 	__unregister_trace_kprobe(tk);
tk                551 kernel/trace/trace_kprobe.c 	dyn_event_remove(&tk->devent);
tk                552 kernel/trace/trace_kprobe.c 	trace_probe_unlink(&tk->tp);
tk                588 kernel/trace/trace_kprobe.c static int append_trace_kprobe(struct trace_kprobe *tk, struct trace_kprobe *to)
tk                592 kernel/trace/trace_kprobe.c 	ret = trace_probe_compare_arg_type(&tk->tp, &to->tp);
tk                599 kernel/trace/trace_kprobe.c 	if (trace_kprobe_has_same_kprobe(to, tk)) {
tk                606 kernel/trace/trace_kprobe.c 	ret = trace_probe_append(&tk->tp, &to->tp);
tk                611 kernel/trace/trace_kprobe.c 	ret = __register_trace_kprobe(tk);
tk                612 kernel/trace/trace_kprobe.c 	if (ret == -ENOENT && !trace_kprobe_module_exist(tk)) {
tk                618 kernel/trace/trace_kprobe.c 		trace_probe_unlink(&tk->tp);
tk                620 kernel/trace/trace_kprobe.c 		dyn_event_add(&tk->devent);
tk                626 kernel/trace/trace_kprobe.c static int register_trace_kprobe(struct trace_kprobe *tk)
tk                633 kernel/trace/trace_kprobe.c 	old_tk = find_trace_kprobe(trace_probe_name(&tk->tp),
tk                634 kernel/trace/trace_kprobe.c 				   trace_probe_group_name(&tk->tp));
tk                636 kernel/trace/trace_kprobe.c 		if (trace_kprobe_is_return(tk) != trace_kprobe_is_return(old_tk)) {
tk                641 kernel/trace/trace_kprobe.c 			ret = append_trace_kprobe(tk, old_tk);
tk                647 kernel/trace/trace_kprobe.c 	ret = register_kprobe_event(tk);
tk                654 kernel/trace/trace_kprobe.c 	ret = __register_trace_kprobe(tk);
tk                655 kernel/trace/trace_kprobe.c 	if (ret == -ENOENT && !trace_kprobe_module_exist(tk)) {
tk                661 kernel/trace/trace_kprobe.c 		unregister_kprobe_event(tk);
tk                663 kernel/trace/trace_kprobe.c 		dyn_event_add(&tk->devent);
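
Both append_trace_kprobe() and register_trace_kprobe() above treat -ENOENT as non-fatal when the probed module is not loaded yet: the event is kept and armed later from the module notifier. A small sketch of that decision, with hypothetical stand-ins (arm_probe, module_loaded) for the kernel helpers:

#include <stdbool.h>
#include <stdio.h>
#include <errno.h>

/* hypothetical stand-ins for __register_trace_kprobe()/trace_kprobe_module_exist() */
static int arm_probe(const char *target)	{ (void)target; return -ENOENT; }
static bool module_loaded(const char *module)	{ (void)module; return false; }

static int register_probe(const char *module, const char *target)
{
	int ret = arm_probe(target);

	if (ret == -ENOENT && !module_loaded(module)) {
		/* keep the event; it will be re-armed when the module loads */
		fprintf(stderr, "probe on %s deferred until %s is loaded\n",
			target, module);
		return 0;
	}
	return ret;
}
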
tk                676 kernel/trace/trace_kprobe.c 	struct trace_kprobe *tk;
tk                684 kernel/trace/trace_kprobe.c 	for_each_trace_kprobe(tk, pos) {
tk                685 kernel/trace/trace_kprobe.c 		if (trace_kprobe_within_module(tk, mod)) {
tk                687 kernel/trace/trace_kprobe.c 			__unregister_trace_kprobe(tk);
tk                688 kernel/trace/trace_kprobe.c 			ret = __register_trace_kprobe(tk);
tk                691 kernel/trace/trace_kprobe.c 					trace_probe_name(&tk->tp),
tk                736 kernel/trace/trace_kprobe.c 	struct trace_kprobe *tk = NULL;
tk                841 kernel/trace/trace_kprobe.c 	tk = alloc_trace_kprobe(group, event, addr, symbol, offset, maxactive,
tk                843 kernel/trace/trace_kprobe.c 	if (IS_ERR(tk)) {
tk                844 kernel/trace/trace_kprobe.c 		ret = PTR_ERR(tk);
tk                860 kernel/trace/trace_kprobe.c 		ret = traceprobe_parse_probe_arg(&tk->tp, i, tmp, flags);
tk                866 kernel/trace/trace_kprobe.c 	ret = traceprobe_set_print_fmt(&tk->tp, is_return);
tk                870 kernel/trace/trace_kprobe.c 	ret = register_trace_kprobe(tk);
tk                890 kernel/trace/trace_kprobe.c 	free_trace_kprobe(tk);
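
The parse-and-register path excerpted above backs the text interface for kprobe events under tracefs. As a usage sketch only: the path below may be /sys/kernel/debug/tracing/kprobe_events on older setups, root and CONFIG_KPROBE_EVENTS are required, and the probed symbol must exist on the running kernel.

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/tracing/kprobe_events", "a");

	if (!f) {
		perror("kprobe_events");
		return 1;
	}
	/* 'p' = entry probe, then group/event name, then the symbol to probe */
	fprintf(f, "p:mygroup/myopen do_sys_open\n");
	return fclose(f) ? 1 : 0;
}
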
tk                907 kernel/trace/trace_kprobe.c 	struct trace_kprobe *tk = to_trace_kprobe(ev);
tk                908 kernel/trace/trace_kprobe.c 	int ret = unregister_trace_kprobe(tk);
tk                911 kernel/trace/trace_kprobe.c 		free_trace_kprobe(tk);
tk                917 kernel/trace/trace_kprobe.c 	struct trace_kprobe *tk = to_trace_kprobe(ev);
tk                920 kernel/trace/trace_kprobe.c 	seq_putc(m, trace_kprobe_is_return(tk) ? 'r' : 'p');
tk                921 kernel/trace/trace_kprobe.c 	if (trace_kprobe_is_return(tk) && tk->rp.maxactive)
tk                922 kernel/trace/trace_kprobe.c 		seq_printf(m, "%d", tk->rp.maxactive);
tk                923 kernel/trace/trace_kprobe.c 	seq_printf(m, ":%s/%s", trace_probe_group_name(&tk->tp),
tk                924 kernel/trace/trace_kprobe.c 				trace_probe_name(&tk->tp));
tk                926 kernel/trace/trace_kprobe.c 	if (!tk->symbol)
tk                927 kernel/trace/trace_kprobe.c 		seq_printf(m, " 0x%p", tk->rp.kp.addr);
tk                928 kernel/trace/trace_kprobe.c 	else if (tk->rp.kp.offset)
tk                929 kernel/trace/trace_kprobe.c 		seq_printf(m, " %s+%u", trace_kprobe_symbol(tk),
tk                930 kernel/trace/trace_kprobe.c 			   tk->rp.kp.offset);
tk                932 kernel/trace/trace_kprobe.c 		seq_printf(m, " %s", trace_kprobe_symbol(tk));
tk                934 kernel/trace/trace_kprobe.c 	for (i = 0; i < tk->tp.nr_args; i++)
tk                935 kernel/trace/trace_kprobe.c 		seq_printf(m, " %s=%s", tk->tp.args[i].name, tk->tp.args[i].comm);
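
The show callback above reconstructs the definition string: 'p' or 'r', an optional maxactive for return probes, group/event, then either a raw address or symbol[+offset], followed by the arguments. A compact userspace sketch of the same formatting (struct probe_view and its fields are invented for illustration):

#include <stdio.h>

struct probe_view {
	int is_return;		/* 'r' vs 'p' */
	int maxactive;		/* only printed for return probes */
	const char *group, *event;
	const char *symbol;	/* NULL means a raw address was given */
	unsigned long offset;
	void *addr;
};

static void show(const struct probe_view *p)
{
	printf("%c", p->is_return ? 'r' : 'p');
	if (p->is_return && p->maxactive)
		printf("%d", p->maxactive);
	printf(":%s/%s", p->group, p->event);

	if (!p->symbol)
		printf(" 0x%p", p->addr);
	else if (p->offset)
		printf(" %s+%lu", p->symbol, p->offset);
	else
		printf(" %s", p->symbol);
	printf("\n");
}
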
tk                995 kernel/trace/trace_kprobe.c 	struct trace_kprobe *tk;
tk               1000 kernel/trace/trace_kprobe.c 	tk = to_trace_kprobe(ev);
tk               1002 kernel/trace/trace_kprobe.c 		   trace_probe_name(&tk->tp),
tk               1003 kernel/trace/trace_kprobe.c 		   trace_kprobe_nhit(tk),
tk               1004 kernel/trace/trace_kprobe.c 		   tk->rp.kp.nmissed);
tk               1176 kernel/trace/trace_kprobe.c __kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs,
tk               1184 kernel/trace/trace_kprobe.c 	struct trace_event_call *call = trace_probe_event_call(&tk->tp);
tk               1194 kernel/trace/trace_kprobe.c 	dsize = __get_data_size(&tk->tp, regs);
tk               1195 kernel/trace/trace_kprobe.c 	size = sizeof(*entry) + tk->tp.size + dsize;
tk               1204 kernel/trace/trace_kprobe.c 	entry->ip = (unsigned long)tk->rp.kp.addr;
tk               1205 kernel/trace/trace_kprobe.c 	store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
tk               1212 kernel/trace/trace_kprobe.c kprobe_trace_func(struct trace_kprobe *tk, struct pt_regs *regs)
tk               1216 kernel/trace/trace_kprobe.c 	trace_probe_for_each_link_rcu(link, &tk->tp)
tk               1217 kernel/trace/trace_kprobe.c 		__kprobe_trace_func(tk, regs, link->file);
tk               1223 kernel/trace/trace_kprobe.c __kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
tk               1232 kernel/trace/trace_kprobe.c 	struct trace_event_call *call = trace_probe_event_call(&tk->tp);
tk               1242 kernel/trace/trace_kprobe.c 	dsize = __get_data_size(&tk->tp, regs);
tk               1243 kernel/trace/trace_kprobe.c 	size = sizeof(*entry) + tk->tp.size + dsize;
tk               1252 kernel/trace/trace_kprobe.c 	entry->func = (unsigned long)tk->rp.kp.addr;
tk               1254 kernel/trace/trace_kprobe.c 	store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
tk               1261 kernel/trace/trace_kprobe.c kretprobe_trace_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
tk               1266 kernel/trace/trace_kprobe.c 	trace_probe_for_each_link_rcu(link, &tk->tp)
tk               1267 kernel/trace/trace_kprobe.c 		__kretprobe_trace_func(tk, ri, regs, link->file);
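
Both trace functions above size the ring-buffer record as sizeof(*entry) + tk->tp.size + dsize and store the decoded arguments immediately after the fixed header (&entry[1]). A sketch of that variable-size layout in plain C, with a simplified header and pack_args standing in for store_trace_args():

#include <stdlib.h>
#include <string.h>

struct entry_hdr {
	unsigned long ip;	/* probe address; argument payload follows immediately */
};

/* hypothetical stand-in for store_trace_args(): copy pre-decoded bytes */
static void pack_args(void *dst, const void *args, size_t len)
{
	memcpy(dst, args, len);
}

static void *record_event(unsigned long ip, const void *args, size_t args_len,
			  size_t *out_size)
{
	size_t size = sizeof(struct entry_hdr) + args_len;	/* header + payload */
	struct entry_hdr *entry = malloc(size);

	if (!entry)
		return NULL;
	entry->ip = ip;
	pack_args(entry + 1, args, args_len);	/* entry + 1 = first byte after header */
	*out_size = size;
	return entry;
}
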
tk               1374 kernel/trace/trace_kprobe.c kprobe_perf_func(struct trace_kprobe *tk, struct pt_regs *regs)
tk               1376 kernel/trace/trace_kprobe.c 	struct trace_event_call *call = trace_probe_event_call(&tk->tp);
tk               1403 kernel/trace/trace_kprobe.c 	dsize = __get_data_size(&tk->tp, regs);
tk               1404 kernel/trace/trace_kprobe.c 	__size = sizeof(*entry) + tk->tp.size + dsize;
tk               1412 kernel/trace/trace_kprobe.c 	entry->ip = (unsigned long)tk->rp.kp.addr;
tk               1414 kernel/trace/trace_kprobe.c 	store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
tk               1423 kernel/trace/trace_kprobe.c kretprobe_perf_func(struct trace_kprobe *tk, struct kretprobe_instance *ri,
tk               1426 kernel/trace/trace_kprobe.c 	struct trace_event_call *call = trace_probe_event_call(&tk->tp);
tk               1439 kernel/trace/trace_kprobe.c 	dsize = __get_data_size(&tk->tp, regs);
tk               1440 kernel/trace/trace_kprobe.c 	__size = sizeof(*entry) + tk->tp.size + dsize;
tk               1448 kernel/trace/trace_kprobe.c 	entry->func = (unsigned long)tk->rp.kp.addr;
tk               1450 kernel/trace/trace_kprobe.c 	store_trace_args(&entry[1], &tk->tp, regs, sizeof(*entry), dsize);
tk               1462 kernel/trace/trace_kprobe.c 	struct trace_kprobe *tk;
tk               1465 kernel/trace/trace_kprobe.c 		tk = find_trace_kprobe(pevent, group);
tk               1467 kernel/trace/trace_kprobe.c 		tk = event->tp_event->data;
tk               1468 kernel/trace/trace_kprobe.c 	if (!tk)
tk               1471 kernel/trace/trace_kprobe.c 	*fd_type = trace_kprobe_is_return(tk) ? BPF_FD_TYPE_KRETPROBE
tk               1473 kernel/trace/trace_kprobe.c 	if (tk->symbol) {
tk               1474 kernel/trace/trace_kprobe.c 		*symbol = tk->symbol;
tk               1475 kernel/trace/trace_kprobe.c 		*probe_offset = tk->rp.kp.offset;
tk               1480 kernel/trace/trace_kprobe.c 		*probe_addr = (unsigned long)tk->rp.kp.addr;
tk               1520 kernel/trace/trace_kprobe.c 	struct trace_kprobe *tk = container_of(kp, struct trace_kprobe, rp.kp);
tk               1523 kernel/trace/trace_kprobe.c 	raw_cpu_inc(*tk->nhit);
tk               1525 kernel/trace/trace_kprobe.c 	if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE))
tk               1526 kernel/trace/trace_kprobe.c 		kprobe_trace_func(tk, regs);
tk               1528 kernel/trace/trace_kprobe.c 	if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE))
tk               1529 kernel/trace/trace_kprobe.c 		ret = kprobe_perf_func(tk, regs);
tk               1538 kernel/trace/trace_kprobe.c 	struct trace_kprobe *tk = container_of(ri->rp, struct trace_kprobe, rp);
tk               1540 kernel/trace/trace_kprobe.c 	raw_cpu_inc(*tk->nhit);
tk               1542 kernel/trace/trace_kprobe.c 	if (trace_probe_test_flag(&tk->tp, TP_FLAG_TRACE))
tk               1543 kernel/trace/trace_kprobe.c 		kretprobe_trace_func(tk, ri, regs);
tk               1545 kernel/trace/trace_kprobe.c 	if (trace_probe_test_flag(&tk->tp, TP_FLAG_PROFILE))
tk               1546 kernel/trace/trace_kprobe.c 		kretprobe_perf_func(tk, ri, regs);
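
The two dispatchers above bump the hit counter and then fan out by flag: TP_FLAG_TRACE feeds the ftrace path, TP_FLAG_PROFILE feeds perf, and both can be active at once. A sketch of that fan-out with illustrative flag bits and sinks:

#include <stdio.h>

#define FLAG_TRACE	(1 << 0)	/* models TP_FLAG_TRACE */
#define FLAG_PROFILE	(1 << 1)	/* models TP_FLAG_PROFILE */

static void trace_sink(void)	{ puts("-> ftrace ring buffer"); }
static void perf_sink(void)	{ puts("-> perf event"); }

static unsigned long hits;

static void dispatch(unsigned int flags)
{
	hits++;				/* models raw_cpu_inc(*tk->nhit) */
	if (flags & FLAG_TRACE)
		trace_sink();
	if (flags & FLAG_PROFILE)	/* both consumers may be enabled together */
		perf_sink();
}

int main(void)
{
	dispatch(FLAG_TRACE | FLAG_PROFILE);
	return 0;
}
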
tk               1560 kernel/trace/trace_kprobe.c static inline void init_trace_event_call(struct trace_kprobe *tk)
tk               1562 kernel/trace/trace_kprobe.c 	struct trace_event_call *call = trace_probe_event_call(&tk->tp);
tk               1564 kernel/trace/trace_kprobe.c 	if (trace_kprobe_is_return(tk)) {
tk               1576 kernel/trace/trace_kprobe.c static int register_kprobe_event(struct trace_kprobe *tk)
tk               1578 kernel/trace/trace_kprobe.c 	init_trace_event_call(tk);
tk               1580 kernel/trace/trace_kprobe.c 	return trace_probe_register_event_call(&tk->tp);
tk               1583 kernel/trace/trace_kprobe.c static int unregister_kprobe_event(struct trace_kprobe *tk)
tk               1585 kernel/trace/trace_kprobe.c 	return trace_probe_unregister_event_call(&tk->tp);
tk               1594 kernel/trace/trace_kprobe.c 	struct trace_kprobe *tk;
tk               1605 kernel/trace/trace_kprobe.c 	tk = alloc_trace_kprobe(KPROBE_EVENT_SYSTEM, event, (void *)addr, func,
tk               1609 kernel/trace/trace_kprobe.c 	if (IS_ERR(tk)) {
tk               1611 kernel/trace/trace_kprobe.c 			(int)PTR_ERR(tk));
tk               1612 kernel/trace/trace_kprobe.c 		return ERR_CAST(tk);
tk               1615 kernel/trace/trace_kprobe.c 	init_trace_event_call(tk);
tk               1617 kernel/trace/trace_kprobe.c 	if (traceprobe_set_print_fmt(&tk->tp, trace_kprobe_is_return(tk)) < 0) {
tk               1622 kernel/trace/trace_kprobe.c 	ret = __register_trace_kprobe(tk);
tk               1626 kernel/trace/trace_kprobe.c 	return trace_probe_event_call(&tk->tp);
tk               1628 kernel/trace/trace_kprobe.c 	free_trace_kprobe(tk);
tk               1634 kernel/trace/trace_kprobe.c 	struct trace_kprobe *tk;
tk               1636 kernel/trace/trace_kprobe.c 	tk = trace_kprobe_primary_from_call(event_call);
tk               1637 kernel/trace/trace_kprobe.c 	if (unlikely(!tk))
tk               1640 kernel/trace/trace_kprobe.c 	if (trace_probe_is_enabled(&tk->tp)) {
tk               1645 kernel/trace/trace_kprobe.c 	__unregister_trace_kprobe(tk);
tk               1647 kernel/trace/trace_kprobe.c 	free_trace_kprobe(tk);
tk               1655 kernel/trace/trace_kprobe.c 	struct trace_kprobe *tk;
tk               1659 kernel/trace/trace_kprobe.c 	for_each_trace_kprobe(tk, pos) {
tk               1661 kernel/trace/trace_kprobe.c 			if (file->event_call == trace_probe_event_call(&tk->tp))
tk               1732 kernel/trace/trace_kprobe.c find_trace_probe_file(struct trace_kprobe *tk, struct trace_array *tr)
tk               1737 kernel/trace/trace_kprobe.c 		if (file->event_call == trace_probe_event_call(&tk->tp))
tk               1751 kernel/trace/trace_kprobe.c 	struct trace_kprobe *tk;
tk               1773 kernel/trace/trace_kprobe.c 		tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
tk               1774 kernel/trace/trace_kprobe.c 		if (WARN_ON_ONCE(tk == NULL)) {
tk               1778 kernel/trace/trace_kprobe.c 			file = find_trace_probe_file(tk, top_trace_array());
tk               1784 kernel/trace/trace_kprobe.c 					trace_probe_event_call(&tk->tp), file);
tk               1795 kernel/trace/trace_kprobe.c 		tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
tk               1796 kernel/trace/trace_kprobe.c 		if (WARN_ON_ONCE(tk == NULL)) {
tk               1800 kernel/trace/trace_kprobe.c 			file = find_trace_probe_file(tk, top_trace_array());
tk               1806 kernel/trace/trace_kprobe.c 					trace_probe_event_call(&tk->tp), file);
tk               1824 kernel/trace/trace_kprobe.c 	tk = find_trace_kprobe("testprobe", KPROBE_EVENT_SYSTEM);
tk               1825 kernel/trace/trace_kprobe.c 	if (WARN_ON_ONCE(tk == NULL)) {
tk               1829 kernel/trace/trace_kprobe.c 		if (trace_kprobe_nhit(tk) != 1) {
tk               1834 kernel/trace/trace_kprobe.c 		file = find_trace_probe_file(tk, top_trace_array());
tk               1840 kernel/trace/trace_kprobe.c 				trace_probe_event_call(&tk->tp), file);
tk               1843 kernel/trace/trace_kprobe.c 	tk = find_trace_kprobe("testprobe2", KPROBE_EVENT_SYSTEM);
tk               1844 kernel/trace/trace_kprobe.c 	if (WARN_ON_ONCE(tk == NULL)) {
tk               1848 kernel/trace/trace_kprobe.c 		if (trace_kprobe_nhit(tk) != 1) {
tk               1853 kernel/trace/trace_kprobe.c 		file = find_trace_probe_file(tk, top_trace_array());
tk               1859 kernel/trace/trace_kprobe.c 				trace_probe_event_call(&tk->tp), file);
tk                853 lib/bch.c      	struct gf_poly *tk = bch->poly_2t[2];
tk                863 lib/bch.c      	compute_trace_bk_mod(bch, k, f, z, tk);
tk                865 lib/bch.c      	if (tk->deg > 0) {
tk                868 lib/bch.c      		gcd = gf_poly_gcd(bch, f2, tk);
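
The lib/bch.c lines above split a polynomial by taking gcd(f, tk) once the trace polynomial tk has positive degree. The gcd step itself is ordinary polynomial gcd over GF(2); below is a self-contained sketch for polynomials packed into 64-bit words (bit i holds the coefficient of x^i), which is unrelated to bch's own gf_poly representation and included only to illustrate the operation:

#include <stdint.h>
#include <stdio.h>

static int degree(uint64_t p)
{
	return p ? 63 - __builtin_clzll(p) : -1;
}

/* remainder of a modulo b in GF(2)[x]: repeatedly cancel the leading term */
static uint64_t poly_mod(uint64_t a, uint64_t b)
{
	int db = degree(b);

	while (a && degree(a) >= db)
		a ^= b << (degree(a) - db);
	return a;
}

static uint64_t poly_gcd(uint64_t a, uint64_t b)
{
	while (b) {
		uint64_t r = poly_mod(a, b);
		a = b;
		b = r;
	}
	return a;
}

int main(void)
{
	/* x^2 + 1 = (x + 1)^2 over GF(2); gcd with x + 1 is x + 1 */
	printf("0x%llx\n", (unsigned long long)poly_gcd(0x5, 0x3));	/* prints 0x3 */
	return 0;
}
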
tk                209 mm/memory-failure.c static int kill_proc(struct to_kill *tk, unsigned long pfn, int flags)
tk                211 mm/memory-failure.c 	struct task_struct *t = tk->tsk;
tk                212 mm/memory-failure.c 	short addr_lsb = tk->size_shift;
tk                219 mm/memory-failure.c 		ret = force_sig_mceerr(BUS_MCEERR_AR, (void __user *)tk->addr,
tk                228 mm/memory-failure.c 		ret = send_sig_mceerr(BUS_MCEERR_AO, (void __user *)tk->addr,
tk                313 mm/memory-failure.c 	struct to_kill *tk;
tk                316 mm/memory-failure.c 		tk = *tkc;
tk                319 mm/memory-failure.c 		tk = kmalloc(sizeof(struct to_kill), GFP_ATOMIC);
tk                320 mm/memory-failure.c 		if (!tk) {
tk                325 mm/memory-failure.c 	tk->addr = page_address_in_vma(p, vma);
tk                327 mm/memory-failure.c 		tk->size_shift = dev_pagemap_mapping_shift(p, vma);
tk                329 mm/memory-failure.c 		tk->size_shift = compound_order(compound_head(p)) + PAGE_SHIFT;
tk                341 mm/memory-failure.c 	if (tk->addr == -EFAULT) {
tk                344 mm/memory-failure.c 	} else if (tk->size_shift == 0) {
tk                345 mm/memory-failure.c 		kfree(tk);
tk                349 mm/memory-failure.c 	tk->tsk = tsk;
tk                350 mm/memory-failure.c 	list_add_tail(&tk->nd, to_kill);
tk                364 mm/memory-failure.c 	struct to_kill *tk, *next;
tk                366 mm/memory-failure.c 	list_for_each_entry_safe (tk, next, to_kill, nd) {
tk                373 mm/memory-failure.c 			if (fail || tk->addr == -EFAULT) {
tk                375 mm/memory-failure.c 				       pfn, tk->tsk->comm, tk->tsk->pid);
tk                377 mm/memory-failure.c 						 tk->tsk, PIDTYPE_PID);
tk                386 mm/memory-failure.c 			else if (kill_proc(tk, pfn, flags) < 0)
tk                388 mm/memory-failure.c 				       pfn, tk->tsk->comm, tk->tsk->pid);
tk                390 mm/memory-failure.c 		put_task_struct(tk->tsk);
tk                391 mm/memory-failure.c 		kfree(tk);
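
add_to_kill() and kill_procs() above work in two phases: first every task mapping the poisoned page is collected into a list, then, once the page has been unmapped, the list is walked and each task is signalled with the faulting address and mapping size. A userspace sketch of that collect-then-act pattern using a singly linked list (pid and the printf stand in for task_struct and the SIGBUS delivery):

#include <stdio.h>
#include <stdlib.h>

struct to_kill {
	struct to_kill *next;
	int pid;
	unsigned long addr;	/* address of the bad page in that task */
	short size_shift;	/* log2 of the mapping size */
};

static void add_to_kill(struct to_kill **list, int pid,
			unsigned long addr, short size_shift)
{
	struct to_kill *tk = malloc(sizeof(*tk));

	if (!tk)
		return;		/* the kernel instead falls back to killing the task */
	tk->pid = pid;
	tk->addr = addr;
	tk->size_shift = size_shift;
	tk->next = *list;
	*list = tk;
}

static void kill_procs(struct to_kill **list)
{
	struct to_kill *tk, *next;

	for (tk = *list; tk; tk = next) {
		next = tk->next;
		printf("would signal pid %d, addr %#lx, size %lu\n",
		       tk->pid, tk->addr, 1UL << tk->size_shift);
		free(tk);
	}
	*list = NULL;
}
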
tk                515 mm/memory-failure.c 	struct to_kill *tk;
tk                520 mm/memory-failure.c 	tk = kmalloc(sizeof(struct to_kill), GFP_NOIO);
tk                521 mm/memory-failure.c 	if (!tk)
tk                524 mm/memory-failure.c 		collect_procs_anon(page, tokill, &tk, force_early);
tk                526 mm/memory-failure.c 		collect_procs_file(page, tokill, &tk, force_early);
tk                527 mm/memory-failure.c 	kfree(tk);
tk               1162 mm/memory-failure.c 	struct to_kill *tk;
tk               1207 mm/memory-failure.c 	list_for_each_entry(tk, &tokill, nd)
tk               1208 mm/memory-failure.c 		if (tk->size_shift)
tk               1209 mm/memory-failure.c 			size = max(size, 1UL << tk->size_shift);
tk               2493 net/bluetooth/hci_core.c 			    u8 tk[16], u8 enc_size, __le16 ediv, __le64 rand)
tk               2510 net/bluetooth/hci_core.c 	memcpy(key->val, tk, sizeof(key->val));
tk                106 net/bluetooth/smp.c 	u8		tk[16]; /* SMP Temporary Key */
tk                866 net/bluetooth/smp.c 	memset(smp->tk, 0, sizeof(smp->tk));
tk                923 net/bluetooth/smp.c 		memset(smp->tk, 0, sizeof(smp->tk));
tk                926 net/bluetooth/smp.c 		put_unaligned_le32(passkey, smp->tk);
tk                954 net/bluetooth/smp.c 	ret = smp_c1(smp->tk, smp->prnd, smp->preq, smp->prsp,
tk                982 net/bluetooth/smp.c 	ret = smp_c1(smp->tk, smp->rrnd, smp->preq, smp->prsp,
tk                999 net/bluetooth/smp.c 		smp_s1(smp->tk, smp->rrnd, smp->prnd, stk);
tk               1015 net/bluetooth/smp.c 		smp_s1(smp->tk, smp->prnd, smp->rrnd, stk);
tk               1140 net/bluetooth/smp.c 			       key_type, auth, smp->tk, smp->enc_key_size,
tk               1157 net/bluetooth/smp.c 		if (smp_h7(smp->tfm_cmac, smp->tk, salt, smp->link_key)) {
tk               1166 net/bluetooth/smp.c 		if (smp_h6(smp->tfm_cmac, smp->tk, tmp1, smp->link_key)) {
tk               1215 net/bluetooth/smp.c 		if (smp_h7(smp->tfm_cmac, key->val, salt, smp->tk))
tk               1221 net/bluetooth/smp.c 		if (smp_h6(smp->tfm_cmac, key->val, tmp2, smp->tk))
tk               1225 net/bluetooth/smp.c 	if (smp_h6(smp->tfm_cmac, smp->tk, brle, smp->tk))
tk               1518 net/bluetooth/smp.c 			if (sc_mackey_and_ltk(smp, smp->mackey, smp->tk))
tk               1653 net/bluetooth/smp.c 		memset(smp->tk, 0, sizeof(smp->tk));
tk               1655 net/bluetooth/smp.c 		put_unaligned_le32(value, smp->tk);
tk               2181 net/bluetooth/smp.c 	err = sc_mackey_and_ltk(smp, smp->mackey, smp->tk);
tk               2466 net/bluetooth/smp.c 	memcpy(smp->tk, rp->ltk, sizeof(smp->tk));
tk               2498 net/bluetooth/smp.c 			  authenticated, smp->tk, smp->enc_key_size,
tk               2861 net/bluetooth/smp.c 		hci_le_start_enc(hcon, 0, 0, smp->tk, smp->enc_key_size);
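
In smp.c the 128-bit temporary key is zeroed and, for passkey entry, the 6-digit passkey is written into its low four bytes little-endian (put_unaligned_le32). A standalone sketch of exactly that encoding, with passkey_to_tk as an invented helper name:

#include <stdint.h>
#include <string.h>
#include <stdio.h>

static void passkey_to_tk(uint32_t passkey, uint8_t tk[16])
{
	memset(tk, 0, 16);		/* TK is all-zero except the passkey */
	tk[0] = passkey & 0xff;		/* little-endian, as put_unaligned_le32() */
	tk[1] = (passkey >> 8) & 0xff;
	tk[2] = (passkey >> 16) & 0xff;
	tk[3] = (passkey >> 24) & 0xff;
}

int main(void)
{
	uint8_t tk[16];

	passkey_to_tk(123456, tk);	/* a 6-digit passkey fits in 20 bits */
	for (int i = 0; i < 16; i++)
		printf("%02x", tk[i]);
	printf("\n");
	return 0;
}
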
tk                 82 net/mac80211/tkip.c static void tkip_mixing_phase1(const u8 *tk, struct tkip_ctx *ctx,
tk                 96 net/mac80211/tkip.c 		p1k[0] += tkipS(p1k[4] ^ get_unaligned_le16(tk + 0 + j));
tk                 97 net/mac80211/tkip.c 		p1k[1] += tkipS(p1k[0] ^ get_unaligned_le16(tk + 4 + j));
tk                 98 net/mac80211/tkip.c 		p1k[2] += tkipS(p1k[1] ^ get_unaligned_le16(tk + 8 + j));
tk                 99 net/mac80211/tkip.c 		p1k[3] += tkipS(p1k[2] ^ get_unaligned_le16(tk + 12 + j));
tk                100 net/mac80211/tkip.c 		p1k[4] += tkipS(p1k[3] ^ get_unaligned_le16(tk + 0 + j)) + i;
tk                106 net/mac80211/tkip.c static void tkip_mixing_phase2(const u8 *tk, struct tkip_ctx *ctx,
tk                120 net/mac80211/tkip.c 	ppk[0] += tkipS(ppk[5] ^ get_unaligned_le16(tk + 0));
tk                121 net/mac80211/tkip.c 	ppk[1] += tkipS(ppk[0] ^ get_unaligned_le16(tk + 2));
tk                122 net/mac80211/tkip.c 	ppk[2] += tkipS(ppk[1] ^ get_unaligned_le16(tk + 4));
tk                123 net/mac80211/tkip.c 	ppk[3] += tkipS(ppk[2] ^ get_unaligned_le16(tk + 6));
tk                124 net/mac80211/tkip.c 	ppk[4] += tkipS(ppk[3] ^ get_unaligned_le16(tk + 8));
tk                125 net/mac80211/tkip.c 	ppk[5] += tkipS(ppk[4] ^ get_unaligned_le16(tk + 10));
tk                126 net/mac80211/tkip.c 	ppk[0] += ror16(ppk[5] ^ get_unaligned_le16(tk + 12), 1);
tk                127 net/mac80211/tkip.c 	ppk[1] += ror16(ppk[0] ^ get_unaligned_le16(tk + 14), 1);
tk                134 net/mac80211/tkip.c 	*rc4key++ = ((ppk[5] ^ get_unaligned_le16(tk)) >> 1) & 0xFF;
tk                156 net/mac80211/tkip.c 	const u8 *tk = &key->conf.key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY];
tk                168 net/mac80211/tkip.c 		tkip_mixing_phase1(tk, ctx, sdata->vif.addr, iv32);
tk                188 net/mac80211/tkip.c 	const u8 *tk = &keyconf->key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY];
tk                191 net/mac80211/tkip.c 	tkip_mixing_phase1(tk, &ctx, ta, iv32);
tk                201 net/mac80211/tkip.c 	const u8 *tk = &key->conf.key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY];
tk                210 net/mac80211/tkip.c 	tkip_mixing_phase2(tk, ctx, iv16, p2k);
tk                249 net/mac80211/tkip.c 	const u8 *tk = &key->conf.key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY];
tk                292 net/mac80211/tkip.c 		tkip_mixing_phase1(tk, &rx_ctx->ctx, ta, iv32);
tk                307 net/mac80211/tkip.c 	tkip_mixing_phase2(tk, &rx_ctx->ctx, iv16, rc4key);
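
The mac80211 lines above split per-packet key derivation into a phase-1 mix over TK, the transmitter address and the 32-bit IV, which is cached until iv32 changes, and a cheap phase-2 mix run for every 16-bit IV. A structural sketch of that caching only: phase1_stub and phase2_stub are placeholders, not the TKIP S-box mixing itself.

#include <stdint.h>
#include <string.h>

struct tkip_cache {
	uint32_t iv32;		/* iv32 the cached p1k was computed for */
	int valid;
	uint16_t p1k[5];	/* cached phase-1 output */
};

/* placeholders: the real functions mix TK through the TKIP S-box */
static void phase1_stub(const uint8_t *tk, const uint8_t *ta,
			uint32_t iv32, uint16_t p1k[5])
{
	memset(p1k, 0, 5 * sizeof(uint16_t));
	(void)tk; (void)ta; (void)iv32;
}

static void phase2_stub(const uint8_t *tk, const uint16_t p1k[5],
			uint16_t iv16, uint8_t rc4key[16])
{
	memset(rc4key, 0, 16);
	(void)tk; (void)p1k; (void)iv16;
}

static void derive_packet_key(struct tkip_cache *c, const uint8_t tk[16],
			      const uint8_t ta[6], uint32_t iv32,
			      uint16_t iv16, uint8_t rc4key[16])
{
	if (!c->valid || c->iv32 != iv32) {	/* phase 1 only when iv32 changes */
		phase1_stub(tk, ta, iv32, c->p1k);
		c->iv32 = iv32;
		c->valid = 1;
	}
	phase2_stub(tk, c->p1k, iv16, rc4key);	/* phase 2 for every packet */
}
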