nxt                31 arch/sparc/include/asm/switch_to_32.h #define SWITCH_DO_LAZY_FPU(nxt)	\
nxt                33 arch/sparc/include/asm/switch_to_32.h 	if (last_task_used_math != (nxt))		\
nxt                34 arch/sparc/include/asm/switch_to_32.h 		(nxt)->thread.kregs->psr&=~PSR_EF;	\
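The switch_to_32.h hits are the uniprocessor lazy-FPU handoff: if the incoming task is not the last FPU user, PSR_EF is cleared in its saved %psr so its first floating-point instruction traps and the FPU state can be restored on demand. A commented sketch of that macro shape, reconstructed around the two lines above (the do/while wrapper is an assumption):

	#define SWITCH_DO_LAZY_FPU(nxt) do {				\
		/* Incoming task did not own the FPU last: disable	\
		 * the FPU in its saved %psr so the first FP insn	\
		 * traps and state is faulted in lazily. */		\
		if (last_task_used_math != (nxt))			\
			(nxt)->thread.kregs->psr &= ~PSR_EF;		\
	} while (0)
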
nxt              1215 block/blk-mq.c 	struct request *rq, *nxt;
nxt              1271 block/blk-mq.c 			nxt = list_first_entry(list, struct request, queuelist);
nxt              1272 block/blk-mq.c 			bd.last = !blk_mq_get_driver_tag(nxt);
nxt              1283 block/blk-mq.c 				nxt = list_first_entry(list, struct request, queuelist);
nxt              1284 block/blk-mq.c 				blk_mq_put_driver_tag(nxt);
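In the blk-mq dispatch loop, nxt is a one-request look-ahead: before issuing the current request the code peeks at the next one and tries to take its driver tag, so bd.last can tell the driver whether more work follows; on requeue the speculatively taken tag is put back. A condensed sketch of that shape, with the surrounding loop and error handling stripped to the essentials:

	struct request *rq, *nxt;
	struct blk_mq_queue_data bd;

	rq = list_first_entry(list, struct request, queuelist);
	list_del_init(&rq->queuelist);

	bd.rq = rq;
	if (list_empty(list)) {
		bd.last = true;
	} else {
		/* Peek ahead: if the next request cannot get a driver
		 * tag now, the current one is effectively the last. */
		nxt = list_first_entry(list, struct request, queuelist);
		bd.last = !blk_mq_get_driver_tag(nxt);
	}
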
nxt              1932 drivers/atm/firestream.c 	struct FS_BPENTRY *fp, *nxt;
nxt              1974 drivers/atm/firestream.c 			     !(fp->flags & FP_FLAGS_EPI);fp = nxt) {
nxt              1977 drivers/atm/firestream.c 				nxt = bus_to_virt (fp->next);
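The firestream loop walks a hardware descriptor chain whose links are bus addresses: the successor is translated with bus_to_virt() and stashed in nxt before the current entry is consumed, and FP_FLAGS_EPI marks the end of the chain. A minimal sketch of the idiom (first_bus and consume_entry are hypothetical):

	struct FS_BPENTRY *fp, *nxt;

	for (fp = bus_to_virt(first_bus);	/* hypothetical start */
	     !(fp->flags & FP_FLAGS_EPI);	/* EPI = end of chain  */
	     fp = nxt) {
		nxt = bus_to_virt(fp->next);	/* save successor first */
		consume_entry(fp);		/* hypothetical consumer */
	}
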
nxt               237 drivers/infiniband/hw/cxgb3/cxio_hal.c 	struct list_head *pos, *nxt;
nxt               241 drivers/infiniband/hw/cxgb3/cxio_hal.c 	list_for_each_safe(pos, nxt, &uctx->qpids) {
nxt               209 drivers/infiniband/hw/cxgb3/iwch_provider.h 	struct list_head *pos, *nxt;
nxt               213 drivers/infiniband/hw/cxgb3/iwch_provider.h 	list_for_each_safe(pos, nxt, &ucontext->mmaps) {
nxt               750 drivers/infiniband/hw/cxgb4/device.c 	struct list_head *pos, *nxt;
nxt               754 drivers/infiniband/hw/cxgb4/device.c 	list_for_each_safe(pos, nxt, &uctx->qpids) {
nxt               767 drivers/infiniband/hw/cxgb4/device.c 	list_for_each_safe(pos, nxt, &uctx->cqids) {
nxt               551 drivers/infiniband/hw/cxgb4/iw_cxgb4.h 	struct list_head *pos, *nxt;
nxt               555 drivers/infiniband/hw/cxgb4/iw_cxgb4.h 	list_for_each_safe(pos, nxt, &ucontext->mmaps) {
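The cxgb3/cxgb4 hits above, like the snic ones further down, are the pointer-flavoured list_for_each_safe() teardown idiom: nxt caches pos->next so the node at pos can be unlinked and freed without derailing the walk. A minimal self-contained sketch, with a hypothetical entry type standing in for the drivers' own:

	#include <linux/list.h>
	#include <linux/slab.h>

	struct id_entry {			/* hypothetical */
		u32 id;
		struct list_head entry;
	};

	static void drain_ids(struct list_head *ids)
	{
		struct list_head *pos, *nxt;

		/* nxt already holds pos->next, so deleting and
		 * freeing the current node is safe mid-walk. */
		list_for_each_safe(pos, nxt, ids) {
			struct id_entry *e =
				list_entry(pos, struct id_entry, entry);
			list_del(&e->entry);
			kfree(e);
		}
	}
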
nxt              6051 drivers/infiniband/hw/qib/qib_iba7322.c 	char *nxt, *str;
nxt              6060 drivers/infiniband/hw/qib/qib_iba7322.c 	deflt = simple_strtoul(str, &nxt, 0);
nxt              6068 drivers/infiniband/hw/qib/qib_iba7322.c 	while (*nxt && nxt[1]) {
nxt              6069 drivers/infiniband/hw/qib/qib_iba7322.c 		str = ++nxt;
nxt              6070 drivers/infiniband/hw/qib/qib_iba7322.c 		unit = simple_strtoul(str, &nxt, 0);
nxt              6071 drivers/infiniband/hw/qib/qib_iba7322.c 		if (nxt == str || !*nxt || *nxt != ',') {
nxt              6072 drivers/infiniband/hw/qib/qib_iba7322.c 			while (*nxt && *nxt++ != ' ') /* skip to next, if any */
nxt              6076 drivers/infiniband/hw/qib/qib_iba7322.c 		str = ++nxt;
nxt              6077 drivers/infiniband/hw/qib/qib_iba7322.c 		port = simple_strtoul(str, &nxt, 0);
nxt              6078 drivers/infiniband/hw/qib/qib_iba7322.c 		if (nxt == str || *nxt != '=') {
nxt              6079 drivers/infiniband/hw/qib/qib_iba7322.c 			while (*nxt && *nxt++ != ' ') /* skip to next, if any */
nxt              6083 drivers/infiniband/hw/qib/qib_iba7322.c 		str = ++nxt;
nxt              6084 drivers/infiniband/hw/qib/qib_iba7322.c 		val = simple_strtoul(str, &nxt, 0);
nxt              6085 drivers/infiniband/hw/qib/qib_iba7322.c 		if (nxt == str) {
nxt              6086 drivers/infiniband/hw/qib/qib_iba7322.c 			while (*nxt && *nxt++ != ' ') /* skip to next, if any */
nxt              6094 drivers/infiniband/hw/qib/qib_iba7322.c 		if (*nxt == ',' && nxt[1]) {
nxt              6095 drivers/infiniband/hw/qib/qib_iba7322.c 			str = ++nxt;
nxt              6096 drivers/infiniband/hw/qib/qib_iba7322.c 			h1 = (u32)simple_strtoul(str, &nxt, 0);
nxt              6097 drivers/infiniband/hw/qib/qib_iba7322.c 			if (nxt == str)
nxt              6098 drivers/infiniband/hw/qib/qib_iba7322.c 				while (*nxt && *nxt++ != ' ') /* skip */
nxt              6122 drivers/infiniband/hw/qib/qib_iba7322.c 		if (*nxt == '\n')
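The qib_iba7322.c block parses a module-parameter string of "unit,port=val[,h1]" records with the classic simple_strtoul() end-pointer protocol: nxt lands on the first unconsumed character, nxt == str means no digits were read, and on a malformed field the code fast-forwards past the next space to resynchronise. A condensed sketch of one record (resync-on-error and the optional h1 suffix omitted; parse_record is hypothetical):

	#include <linux/kernel.h>
	#include <linux/errno.h>

	static int parse_record(const char *str, unsigned long *unit,
				unsigned long *port, unsigned long *val)
	{
		char *nxt;

		*unit = simple_strtoul(str, &nxt, 0);
		if (nxt == str || *nxt != ',')	/* no digits or bad sep */
			return -EINVAL;
		str = nxt + 1;
		*port = simple_strtoul(str, &nxt, 0);
		if (nxt == str || *nxt != '=')
			return -EINVAL;
		str = nxt + 1;
		*val = simple_strtoul(str, &nxt, 0);
		if (nxt == str)			/* value missing */
			return -EINVAL;
		return 0;
	}
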
nxt               524 drivers/net/wan/dlci.c 	struct dlci_local	*dlp, *nxt;
nxt               530 drivers/net/wan/dlci.c 	list_for_each_entry_safe(dlp, nxt, &dlci_devs, list) {
nxt              1255 drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c 	struct ibmvscsis_cmd *cmd, *nxt;
nxt              1261 drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c 	list_for_each_entry_safe(cmd, nxt, &vscsi->waiting_rsp, list) {
nxt              1879 drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c 	struct ibmvscsis_cmd *cmd, *nxt;
nxt              1887 drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c 			list_for_each_entry_safe(cmd, nxt, &vscsi->waiting_rsp,
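dlci.c and ibmvscsi_tgt.c (and qla_os.c, uwbd.c, ip_vs_app.c, ip_vs_ctl.c, and sch_teql.c below) use the typed variant, list_for_each_entry_safe(), where nxt is the prefetched next entry rather than a raw list_head. Same safety property, less boilerplate; a minimal sketch with a hypothetical command type:

	#include <linux/list.h>
	#include <linux/slab.h>

	struct cmd {				/* hypothetical */
		struct list_head list;
	};

	static void flush_waiting(struct list_head *waiting)
	{
		struct cmd *c, *nxt;

		/* nxt is already the next *entry*, so the body may
		 * unlink and free c without breaking iteration. */
		list_for_each_entry_safe(c, nxt, waiting, list) {
			list_del(&c->list);
			kfree(c);
		}
	}
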
nxt               112 drivers/scsi/qla2xxx/qla_dbg.c 	uint32_t ram_dwords, void **nxt)
nxt               168 drivers/scsi/qla2xxx/qla_dbg.c 		*nxt = ram + i;
nxt               185 drivers/scsi/qla2xxx/qla_dbg.c 	*nxt = ram + i;
nxt               191 drivers/scsi/qla2xxx/qla_dbg.c     uint32_t ram_dwords, void **nxt)
nxt               244 drivers/scsi/qla2xxx/qla_dbg.c 		*nxt = ram + i;
nxt               261 drivers/scsi/qla2xxx/qla_dbg.c 	*nxt = ram + i;
nxt               267 drivers/scsi/qla2xxx/qla_dbg.c     uint32_t cram_size, void **nxt)
nxt               272 drivers/scsi/qla2xxx/qla_dbg.c 	rval = qla24xx_dump_ram(ha, 0x20000, code_ram, cram_size / 4, nxt);
nxt               279 drivers/scsi/qla2xxx/qla_dbg.c 	rval = qla24xx_dump_ram(ha, 0x100000, *nxt,
nxt               280 drivers/scsi/qla2xxx/qla_dbg.c 	    ha->fw_memory_size - 0x100000 + 1, nxt);
nxt               370 drivers/scsi/qla2xxx/qla_dbg.c     uint32_t ram_words, void **nxt)
nxt               448 drivers/scsi/qla2xxx/qla_dbg.c 	*nxt = rval == QLA_SUCCESS ? &ram[cnt] : NULL;
nxt               732 drivers/scsi/qla2xxx/qla_dbg.c 	void		*nxt;
nxt               861 drivers/scsi/qla2xxx/qla_dbg.c 		    sizeof(fw->risc_ram) / 2, &nxt);
nxt               866 drivers/scsi/qla2xxx/qla_dbg.c 		    sizeof(fw->stack_ram) / 2, &nxt);
nxt               871 drivers/scsi/qla2xxx/qla_dbg.c 		    ha->fw_memory_size - 0x11000 + 1, &nxt);
nxt               874 drivers/scsi/qla2xxx/qla_dbg.c 		qla2xxx_copy_queues(ha, nxt);
nxt              1103 drivers/scsi/qla2xxx/qla_dbg.c 	void		*nxt;
nxt              1320 drivers/scsi/qla2xxx/qla_dbg.c 	    &nxt);
nxt              1324 drivers/scsi/qla2xxx/qla_dbg.c 	nxt = qla2xxx_copy_queues(ha, nxt);
nxt              1326 drivers/scsi/qla2xxx/qla_dbg.c 	qla24xx_copy_eft(ha, nxt);
nxt              1362 drivers/scsi/qla2xxx/qla_dbg.c 	void		*nxt, *nxt_chain;
nxt              1643 drivers/scsi/qla2xxx/qla_dbg.c 	    &nxt);
nxt              1647 drivers/scsi/qla2xxx/qla_dbg.c 	nxt = qla2xxx_copy_queues(ha, nxt);
nxt              1649 drivers/scsi/qla2xxx/qla_dbg.c 	qla24xx_copy_eft(ha, nxt);
nxt              1688 drivers/scsi/qla2xxx/qla_dbg.c 	void		*nxt, *nxt_chain;
nxt              1970 drivers/scsi/qla2xxx/qla_dbg.c 	    &nxt);
nxt              1974 drivers/scsi/qla2xxx/qla_dbg.c 	nxt = qla2xxx_copy_queues(ha, nxt);
nxt              1976 drivers/scsi/qla2xxx/qla_dbg.c 	qla24xx_copy_eft(ha, nxt);
nxt              2016 drivers/scsi/qla2xxx/qla_dbg.c 	void		*nxt, *nxt_chain;
nxt              2471 drivers/scsi/qla2xxx/qla_dbg.c 			nxt = fw->code_ram;
nxt              2472 drivers/scsi/qla2xxx/qla_dbg.c 			nxt += sizeof(fw->code_ram);
nxt              2473 drivers/scsi/qla2xxx/qla_dbg.c 			nxt += (ha->fw_memory_size - 0x100000 + 1);
nxt              2483 drivers/scsi/qla2xxx/qla_dbg.c 	    &nxt);
nxt              2488 drivers/scsi/qla2xxx/qla_dbg.c 	nxt = qla2xxx_copy_queues(ha, nxt);
nxt              2490 drivers/scsi/qla2xxx/qla_dbg.c 	qla24xx_copy_eft(ha, nxt);
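Throughout qla_dbg.c, nxt is a resume cursor threaded through the firmware-dump stages: each dump helper reports where it stopped writing via the void **nxt out-parameter, and the caller feeds that position into the next stage (and finally into qla2xxx_copy_queues() / qla24xx_copy_eft()). A stripped-down sketch of the protocol (dump_stage and dump_all are hypothetical; memset stands in for the real register/RAM copy):

	#include <linux/string.h>

	/* Each stage fills part of the buffer and publishes its end
	 * position through *nxt so the next stage can continue. */
	static int dump_stage(void *dst, size_t len, void **nxt)
	{
		memset(dst, 0, len);		/* stand-in for the copy */
		*nxt = (char *)dst + len;	/* caller's resume point  */
		return 0;
	}

	static void dump_all(void *buf, size_t a, size_t b)
	{
		void *nxt;

		dump_stage(buf, a, &nxt);	/* first region          */
		dump_stage(nxt, b, &nxt);	/* continues where a end */
	}
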
nxt              4045 drivers/scsi/qla2xxx/qla_os.c 			struct dsd_dma *dsd, *nxt;
nxt              4102 drivers/scsi/qla2xxx/qla_os.c 			list_for_each_entry_safe(dsd, nxt,
nxt              4300 drivers/scsi/qla2xxx/qla_os.c 		struct dsd_dma *dsd, *nxt;
nxt              4302 drivers/scsi/qla2xxx/qla_os.c 		list_for_each_entry_safe(dsd, nxt, &ha->pool.unusable.head,
nxt              4733 drivers/scsi/qla2xxx/qla_os.c 		struct dsd_dma *dsd, *nxt;
nxt              4735 drivers/scsi/qla2xxx/qla_os.c 		list_for_each_entry_safe(dsd, nxt, &ha->pool.unusable.head,
nxt              4745 drivers/scsi/qla2xxx/qla_os.c 		list_for_each_entry_safe(dsd, nxt, &ha->pool.good.head, list) {
nxt               188 drivers/scsi/snic/snic_disc.c 	struct list_head *cur, *nxt;
nxt               191 drivers/scsi/snic/snic_disc.c 	list_for_each_safe(cur, nxt, &snic->disc.tgt_list) {
nxt               545 drivers/scsi/snic/snic_disc.c 	struct list_head *cur, *nxt;
nxt               553 drivers/scsi/snic/snic_disc.c 	list_for_each_safe(cur, nxt, &snic->disc.tgt_list) {
nxt               401 drivers/scsi/snic/snic_io.c 	struct list_head *cur, *nxt;
nxt               405 drivers/scsi/snic/snic_io.c 	list_for_each_safe(cur, nxt, &snic->spl_cmd_list) {
nxt               343 drivers/staging/uwb/uwbd.c 	struct uwb_event *evt, *nxt;
nxt               346 drivers/staging/uwb/uwbd.c 	list_for_each_entry_safe(evt, nxt, &rc->uwbd.event_list, list_node) {
nxt               685 fs/io_uring.c  	struct io_kiocb *nxt;
nxt               692 fs/io_uring.c  	nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb, list);
nxt               693 fs/io_uring.c  	if (nxt) {
nxt               694 fs/io_uring.c  		list_del(&nxt->list);
nxt               696 fs/io_uring.c  			INIT_LIST_HEAD(&nxt->link_list);
nxt               697 fs/io_uring.c  			list_splice(&req->link_list, &nxt->link_list);
nxt               698 fs/io_uring.c  			nxt->flags |= REQ_F_LINK;
nxt               701 fs/io_uring.c  		nxt->flags |= REQ_F_LINK_DONE;
nxt               702 fs/io_uring.c  		INIT_WORK(&nxt->work, io_sq_wq_submit_work);
nxt               703 fs/io_uring.c  		io_queue_async_work(req->ctx, nxt);
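The first io_uring block is the link-advance step: pop the next request off a completed request's link chain, splice the remaining links onto the new head, flag it, and punt it to the async workqueue. The hits reassemble into roughly the following (error paths elided; the guard around the splice is reconstructed as a non-empty check, which is an assumption):

	struct io_kiocb *nxt;

	nxt = list_first_entry_or_null(&req->link_list, struct io_kiocb, list);
	if (nxt) {
		list_del(&nxt->list);
		if (!list_empty(&req->link_list)) {
			/* Hand the rest of the chain to the new head. */
			INIT_LIST_HEAD(&nxt->link_list);
			list_splice(&req->link_list, &nxt->link_list);
			nxt->flags |= REQ_F_LINK;
		}
		/* Mark the continued link and queue it for async work. */
		nxt->flags |= REQ_F_LINK_DONE;
		INIT_WORK(&nxt->work, io_sq_wq_submit_work);
		io_queue_async_work(req->ctx, nxt);
	}
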
nxt              2019 fs/io_uring.c  		struct io_kiocb *nxt = list_entry(entry, struct io_kiocb, list);
nxt              2023 fs/io_uring.c  		if (nxt->flags & REQ_F_TIMEOUT_NOSEQ)
nxt              2031 fs/io_uring.c  		nxt_sq_head = nxt->sequence - nxt->submit.sequence + 1;
nxt              2032 fs/io_uring.c  		tmp_nxt = (long long)nxt_sq_head + nxt->submit.sequence - 1;
nxt              2049 fs/io_uring.c  		nxt->sequence++;
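The second io_uring block orders a new timeout against existing ones by absolute submission sequence, widening to long long (tmp_nxt) so the comparison stays correct across u32 wraparound. For reference, the generic form of a wraparound-safe ordering test (an illustration of the same idea, not io_uring's exact arithmetic):

	/* "a happens before b" for u32 sequence counters; correct
	 * across the 2^32 wrap as long as |a - b| < 2^31. */
	static inline bool seq_before(u32 a, u32 b)
	{
		return (s32)(a - b) < 0;
	}
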
nxt              1332 fs/jfs/jfs_dtree.c 	int skip, nextindex, half, left, nxt, off, si;
nxt              1504 fs/jfs/jfs_dtree.c 	for (nxt = off = 0; nxt < nextindex; ++off) {
nxt              1509 fs/jfs/jfs_dtree.c 			si = stbl[nxt];
nxt              1529 fs/jfs/jfs_dtree.c 			++nxt;	/* advance to next entry to move in sp */
nxt              1553 fs/jfs/jfs_dtree.c 	dtMoveEntry(sp, nxt, rp, &sdtlck, &rdtlck, DO_INDEX(ip));
nxt              1555 fs/jfs/jfs_dtree.c 	sp->header.nextindex = nxt;
nxt              1605 fs/jfs/jfs_dtree.c 		skip -= nxt;
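In dtSplitPage(), nxt scans the source page's sorted index table (stbl), accumulating entry sizes until about half the payload is accounted for; it exits as the split index, entries stbl[nxt..nextindex-1] move to the right page, and the insertion point is rebased with skip -= nxt. A toy sketch of picking such a split index (entry_size is hypothetical; the real loop also tracks slot offsets via off):

	static int split_index(const s8 *stbl, int nextindex, int half)
	{
		int nxt, left = 0;

		for (nxt = 0; nxt < nextindex; nxt++) {
			left += entry_size(stbl[nxt]);	/* hypothetical */
			if (left >= half)
				break;
		}
		return nxt;	/* first entry that moves right */
	}
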
nxt              1582 include/linux/hyperv.h 	struct vmpacket_descriptor *nxt;
nxt              1584 include/linux/hyperv.h 	nxt = __hv_pkt_iter_next(channel, pkt);
nxt              1585 include/linux/hyperv.h 	if (!nxt)
nxt              1588 include/linux/hyperv.h 	return nxt;
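The hyperv.h hit is a thin inline iterator step: __hv_pkt_iter_next() advances the ring-buffer cursor, and a NULL result (ring drained) is propagated so the caller's loop terminates. The body of the NULL branch is not in the hits; the cleanup call shown below is an assumption about what it does. Reconstructed shape:

	static inline struct vmpacket_descriptor *
	hv_pkt_iter_next(struct vmbus_channel *channel,
			 const struct vmpacket_descriptor *pkt)
	{
		struct vmpacket_descriptor *nxt;

		nxt = __hv_pkt_iter_next(channel, pkt);
		if (!nxt)
			hv_pkt_iter_close(channel);	/* assumed cleanup */

		return nxt;
	}
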
nxt              2333 kernel/locking/lockdep.c print_deadlock_scenario(struct held_lock *nxt, struct held_lock *prv)
nxt              2335 kernel/locking/lockdep.c 	struct lock_class *next = hlock_class(nxt);
nxt               217 net/core/net-procfs.c 	struct list_head *nxt;
nxt               225 net/core/net-procfs.c 	nxt = pt->list.next;
nxt               227 net/core/net-procfs.c 		if (nxt != &ptype_all)
nxt               230 net/core/net-procfs.c 		nxt = ptype_base[0].next;
nxt               234 net/core/net-procfs.c 	while (nxt == &ptype_base[hash]) {
nxt               237 net/core/net-procfs.c 		nxt = ptype_base[hash].next;
nxt               240 net/core/net-procfs.c 	return list_entry(nxt, struct packet_type, list);
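The net-procfs hits advance a seq_file cursor first through ptype_all and then through the ptype_base[] hash buckets: a cursor equal to a bucket's head sentinel means that bucket is exhausted, so the code steps to the next non-empty bucket. The bucket-skipping core as a small sketch (assuming PTYPE_HASH_SIZE and the tables from the listing; next_bucket_entry is hypothetical):

	static struct list_head *next_bucket_entry(struct list_head *nxt,
						   unsigned int *hash)
	{
		/* Reaching the bucket head means this bucket is done;
		 * try the following buckets until one has entries. */
		while (nxt == &ptype_base[*hash]) {
			if (++*hash >= PTYPE_HASH_SIZE)
				return NULL;
			nxt = ptype_base[*hash].next;
		}
		return nxt;
	}
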
nxt               234 net/netfilter/ipvs/ip_vs_app.c 	struct ip_vs_app *a, *anxt, *inc, *nxt;
nxt               241 net/netfilter/ipvs/ip_vs_app.c 		list_for_each_entry_safe(inc, nxt, &a->incs_list, a_list) {
nxt               791 net/netfilter/ipvs/ip_vs_ctl.c 	struct ip_vs_dest *dest, *nxt;
nxt               795 net/netfilter/ipvs/ip_vs_ctl.c 	list_for_each_entry_safe(dest, nxt, &ipvs->dest_trash, t_list) {
nxt              1498 net/netfilter/ipvs/ip_vs_ctl.c 	struct ip_vs_dest *dest, *nxt;
nxt              1523 net/netfilter/ipvs/ip_vs_ctl.c 	list_for_each_entry_safe(dest, nxt, &svc->destinations, n_list) {
nxt               508 net/sched/sch_teql.c 	struct teql_master *master, *nxt;
nxt               510 net/sched/sch_teql.c 	list_for_each_entry_safe(master, nxt, &master_dev_list, master_list) {