ja                 56 arch/x86/include/asm/mcsafe_test.h 	ja \target
ja                 62 arch/x86/include/asm/mcsafe_test.h 	ja \target
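The two hits above are not a C identifier at all: here ja is the x86 "jump if above" instruction, used inside the mcsafe_test.h assembler macros to branch to \target on an unsigned comparison. A minimal, x86-only sketch of that condition (illustrative only, not the macro bodies):

#include <stdio.h>

/* Returns 1 when a > b as unsigned values, using the same CF == 0 &&
 * ZF == 0 condition that "ja" tests; x86 inline asm, GCC/Clang syntax. */
static int is_above(unsigned long a, unsigned long b)
{
	int ret;

	asm("cmp %2, %1\n\t"	/* set flags from a - b (unsigned)    */
	    "ja 1f\n\t"		/* jump if above: CF == 0 and ZF == 0 */
	    "mov $0, %0\n\t"
	    "jmp 2f\n"
	    "1:\tmov $1, %0\n"
	    "2:"
	    : "=r" (ret)
	    : "r" (a), "r" (b)
	    : "cc");
	return ret;
}

int main(void)
{
	printf("%d %d\n", is_above(3, 2), is_above(2, 3));	/* 1 0 */
	return 0;
}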
ja                 38 drivers/md/bcache/journal.c 	struct journal_device *ja = &ca->journal;
ja                 39 drivers/md/bcache/journal.c 	struct bio *bio = &ja->bio;
ja                159 drivers/md/bcache/journal.c 			if (j->seq > ja->seq[bucket_index])
ja                160 drivers/md/bcache/journal.c 				ja->seq[bucket_index] = j->seq;
ja                187 drivers/md/bcache/journal.c 		struct journal_device *ja = &ca->journal;
ja                277 drivers/md/bcache/journal.c 			if (ja->seq[i] > seq) {
ja                278 drivers/md/bcache/journal.c 				seq = ja->seq[i];
ja                284 drivers/md/bcache/journal.c 				ja->cur_idx = i;
ja                285 drivers/md/bcache/journal.c 				ja->last_idx = ja->discard_idx = (i + 1) %
ja                580 drivers/md/bcache/journal.c 	struct journal_device *ja =
ja                582 drivers/md/bcache/journal.c 	struct cache *ca = container_of(ja, struct cache, journal);
ja                584 drivers/md/bcache/journal.c 	atomic_set(&ja->discard_in_flight, DISCARD_DONE);
ja                592 drivers/md/bcache/journal.c 	struct journal_device *ja =
ja                595 drivers/md/bcache/journal.c 	submit_bio(&ja->discard_bio);
ja                600 drivers/md/bcache/journal.c 	struct journal_device *ja = &ca->journal;
ja                601 drivers/md/bcache/journal.c 	struct bio *bio = &ja->discard_bio;
ja                604 drivers/md/bcache/journal.c 		ja->discard_idx = ja->last_idx;
ja                608 drivers/md/bcache/journal.c 	switch (atomic_read(&ja->discard_in_flight)) {
ja                613 drivers/md/bcache/journal.c 		ja->discard_idx = (ja->discard_idx + 1) %
ja                616 drivers/md/bcache/journal.c 		atomic_set(&ja->discard_in_flight, DISCARD_READY);
ja                620 drivers/md/bcache/journal.c 		if (ja->discard_idx == ja->last_idx)
ja                623 drivers/md/bcache/journal.c 		atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);
ja                628 drivers/md/bcache/journal.c 						ca->sb.d[ja->discard_idx]);
ja                634 drivers/md/bcache/journal.c 		INIT_WORK(&ja->discard_work, journal_discard_work);
ja                635 drivers/md/bcache/journal.c 		queue_work(bch_journal_wq, &ja->discard_work);
ja                657 drivers/md/bcache/journal.c 		struct journal_device *ja = &ca->journal;
ja                659 drivers/md/bcache/journal.c 		while (ja->last_idx != ja->cur_idx &&
ja                660 drivers/md/bcache/journal.c 		       ja->seq[ja->last_idx] < last_seq)
ja                661 drivers/md/bcache/journal.c 			ja->last_idx = (ja->last_idx + 1) %
ja                677 drivers/md/bcache/journal.c 		struct journal_device *ja = &ca->journal;
ja                678 drivers/md/bcache/journal.c 		unsigned int next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;
ja                681 drivers/md/bcache/journal.c 		if (next == ja->discard_idx)
ja                684 drivers/md/bcache/journal.c 		ja->cur_idx = next;
ja                686 drivers/md/bcache/journal.c 				  bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
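Every journal.c hit uses ja as shorthand for &ca->journal, the per-cache-device journal state: a small ring of journal buckets indexed by cur_idx (current write target), last_idx (oldest bucket still referenced), and discard_idx (next bucket to discard), all wrapping modulo ca->sb.njournal_buckets. A simplified, self-contained sketch of that ring arithmetic (names below are stand-ins, not the bcache API):

#include <stdio.h>

#define NJOURNAL_BUCKETS 8	/* stands in for ca->sb.njournal_buckets */

struct journal_device_sketch {
	unsigned int cur_idx;		/* bucket currently being written */
	unsigned int last_idx;		/* oldest bucket still needed     */
	unsigned int discard_idx;	/* next bucket to discard         */
	unsigned long long seq[NJOURNAL_BUCKETS]; /* newest seq per bucket */
};

/* Advance to the next journal bucket, refusing to overwrite a bucket
 * that has not been discarded yet (cf. the "next == ja->discard_idx"
 * test in the listing). */
static int next_bucket(struct journal_device_sketch *ja)
{
	unsigned int next = (ja->cur_idx + 1) % NJOURNAL_BUCKETS;

	if (next == ja->discard_idx)
		return -1;		/* ring full: wait for discards */
	ja->cur_idx = next;
	return 0;
}

/* Release buckets whose newest entry is older than last_seq
 * (cf. the while loop over ja->last_idx in the listing). */
static void reclaim(struct journal_device_sketch *ja, unsigned long long last_seq)
{
	while (ja->last_idx != ja->cur_idx &&
	       ja->seq[ja->last_idx] < last_seq)
		ja->last_idx = (ja->last_idx + 1) % NJOURNAL_BUCKETS;
}

int main(void)
{
	struct journal_device_sketch ja = { .seq = { 10, 20, 30 } };

	ja.cur_idx = 2;
	reclaim(&ja, 25);			/* frees buckets with seq < 25 */
	printf("last_idx=%u\n", ja.last_idx);	/* 2 */
	return next_bucket(&ja) ? 1 : 0;
}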
ja               8419 kernel/bpf/verifier.c 	struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
ja               8429 kernel/bpf/verifier.c 			ja.off = insn->off;
ja               8431 kernel/bpf/verifier.c 			ja.off = 0;
ja               8436 kernel/bpf/verifier.c 			bpf_prog_offload_replace_insn(env, i, &ja);
ja               8438 kernel/bpf/verifier.c 		memcpy(insn, &ja, sizeof(ja));
ja               8468 kernel/bpf/verifier.c 	const struct bpf_insn ja = BPF_JMP_IMM(BPF_JA, 0, 0, 0);
ja               8474 kernel/bpf/verifier.c 		if (memcmp(&insn[i], &ja, sizeof(ja)))
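In verifier.c, ja is a prebuilt BPF "jump always" instruction, BPF_JMP_IMM(BPF_JA, 0, 0, 0). The first group of hits rewrites conditional branches whose outcome the verifier has proven constant: the jump keeps the branch offset if it is always taken, or becomes a "goto +0" no-op if it is never taken; the second group (the memcmp) detects those no-ops so a later pass can drop them. A rough userspace sketch of both steps, with a local struct standing in for struct bpf_insn and the BPF_JMP_IMM() macro:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

/* Local stand-in for struct bpf_insn from <linux/bpf.h>. */
struct insn {
	uint8_t  code;
	uint8_t  regs;
	int16_t  off;
	int32_t  imm;
};

#define BPF_JMP		0x05
#define BPF_JA		0x00
#define BPF_JEQ		0x10
#define BPF_K		0x00

/* Rewrite a conditional branch whose outcome is known: keep its offset
 * if it is always taken, turn it into "goto +0" if it is never taken
 * (cf. the ja.off assignments and memcpy in the listing). */
static void hard_wire_branch(struct insn *insn, int always_taken)
{
	struct insn ja = { .code = BPF_JMP | BPF_JA | BPF_K };

	ja.off = always_taken ? insn->off : 0;
	memcpy(insn, &ja, sizeof(ja));
}

/* A later pass can drop the resulting no-ops: a "goto +0" compares
 * equal to the reference ja instruction (cf. the memcmp hit). */
static int is_nop(const struct insn *insn)
{
	const struct insn ja = { .code = BPF_JMP | BPF_JA | BPF_K };

	return !memcmp(insn, &ja, sizeof(ja));
}

int main(void)
{
	struct insn branch = { .code = BPF_JMP | BPF_JEQ | BPF_K, .off = 7 };

	hard_wire_branch(&branch, 0);		/* branch proven never taken */
	printf("nop? %d\n", is_nop(&branch));	/* 1 */
	return 0;
}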
ja                556 kernel/rcu/tree_stall.h 	unsigned long ja;
ja                563 kernel/rcu/tree_stall.h 	ja = j - READ_ONCE(rcu_state.gp_activity);
ja                570 kernel/rcu/tree_stall.h 		ja, jr, jw, (long)READ_ONCE(rcu_state.gp_wake_seq),
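In tree_stall.h, ja is just a jiffies delta: how long ago the RCU grace-period machinery last showed activity (j - rcu_state.gp_activity), printed alongside jr and jw in the stall report. Because jiffies counters are unsigned, plain subtraction yields the correct elapsed value even across a counter wrap; a tiny illustrative sketch:

#include <stdio.h>

int main(void)
{
	unsigned long j = 3;				/* "now", after a wrap      */
	unsigned long gp_activity = (unsigned long)-2;	/* recorded before the wrap */
	unsigned long ja = j - gp_activity;		/* elapsed ticks            */

	printf("ja = %lu\n", ja);			/* 5, despite the wrap      */
	return 0;
}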