xchg 203 arch/alpha/include/asm/atomic.h #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
xchg 206 arch/alpha/include/asm/atomic.h #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
xchg 536 arch/alpha/kernel/smp.c while ((ops = xchg(pending_ipis, 0)) != 0) {
xchg 222 arch/arc/include/asm/cmpxchg.h #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
xchg 380 arch/arc/kernel/smp.c copy = pending = xchg(this_cpu_ptr(&ipi_data), 0);
xchg 245 arch/arm/include/asm/atomic.h #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
xchg 62 arch/arm/kernel/dma.c if (xchg(&dma->lock, 1) != 0)
xchg 74 arch/arm/kernel/dma.c xchg(&dma->lock, 0);
xchg 105 arch/arm/kernel/dma.c if (xchg(&dma->lock, 0) != 0) {
xchg 278 arch/arm/mach-rpc/ecard.c req = xchg(&ecard_req, NULL);
xchg 20 arch/arm64/include/asm/xen/events.h #define xchg_xen_ulong(ptr, val) xchg((ptr), (val))
xchg 46 arch/csky/kernel/smp.c ops = xchg(&this_cpu_ptr(&ipi_data)->bits, 0);
xchg 64 arch/h8300/include/asm/cmpxchg.h #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
xchg 46 arch/hexagon/include/asm/atomic.h #define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
xchg 88 arch/hexagon/kernel/smp.c while ((ops = xchg(&ipi->bits, 0)) != 0)
xchg 212 arch/ia64/include/asm/atomic.h #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
xchg 216 arch/ia64/include/asm/atomic.h #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
xchg 429 arch/ia64/include/asm/pgtable.h return __pte(xchg((long *) ptep, 0));
xchg 104 arch/ia64/kernel/smp.c while ((ops = xchg(pending_ipis, 0)) != 0) {
xchg 85 arch/ia64/mm/tlb.c flush_bit = xchg(&ia64_ctx.flushmap[i], 0);
xchg 166 arch/m68k/include/asm/atomic.h #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
xchg 231 arch/mips/include/asm/atomic.h #define atomic_xchg(v, new) (xchg(&((v)->counter), (new)))
xchg 431 arch/mips/include/asm/atomic.h #define atomic64_xchg(v, new) (xchg(&((v)->counter), (new)))
xchg 167 arch/mips/kernel/rtlx.c state = xchg(&chan->lx_state, RTLX_STATE_OPENED);
xchg 1946 arch/mips/kernel/traps.c old_handler = xchg(&exception_handlers[n], handler);
xchg 786 arch/mips/kernel/vpe.c state = xchg(&v->state, VPE_STATE_INUSE);
xchg 258 arch/mips/sibyte/common/sb_tbprof.c if (xchg(&sbp.tb_enable, 1))
xchg 397 arch/mips/sibyte/common/sb_tbprof.c if (xchg(&sbp.open, SB_OPENING) != SB_CLOSED)
xchg 78 arch/parisc/include/asm/atomic.h #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
xchg 224 arch/parisc/include/asm/atomic.h #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
xchg 201 arch/powerpc/include/asm/atomic.h #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
xchg 493 arch/powerpc/include/asm/atomic.h #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
xchg 290 arch/powerpc/kernel/smp.c all = xchg(&info->messages, 0);
xchg 4437 arch/powerpc/kvm/book3s_hv.c buf[i] |= xchg(&p[i], 0);
xchg 147 arch/powerpc/platforms/powernv/pci-ioda-tce.c oldtce = be64_to_cpu(xchg(ptce, cpu_to_be64(newtce)));
xchg 152 arch/riscv/include/asm/cmpxchg.h xchg((ptr), (x)); \
xchg 158 arch/riscv/include/asm/cmpxchg.h xchg((ptr), (x)); \
xchg 127 arch/riscv/kernel/smp.c ops = xchg(pending_ipis, 0);
xchg 78 arch/s390/include/asm/atomic.h #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
xchg 125 arch/s390/include/asm/atomic.h #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
xchg 157 arch/s390/include/asm/percpu.h ret__ = xchg(ptr__, nval); \
xchg 37 arch/s390/kernel/kprobes.c if (xchg(&insn_page_in_use, 1) == 1)
xchg 46 arch/s390/kernel/kprobes.c xchg(&insn_page_in_use, 0);
xchg 171 arch/s390/kernel/nmi.c if (xchg(&mchchk_wng_posted, 1) == 0)
xchg 496 arch/s390/kernel/smp.c bits = xchg(&pcpu_devices[smp_processor_id()].ec_mask, 0);
xchg 602 arch/s390/kernel/time.c if (xchg(&first, 1) == 0) {
xchg 622 arch/s390/kernel/time.c xchg(&first, 0);
xchg 3051 arch/s390/kvm/interrupt.c origin = xchg(&gib->alert_list_origin,
xchg 35 arch/sh/include/asm/atomic.h #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
xchg 41 arch/sh/include/asm/barrier.h #define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
xchg 57 arch/sparc/include/asm/atomic_64.h return xchg(&v->counter, new);
xchg 62 arch/sparc/include/asm/atomic_64.h #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
xchg 52 arch/sparc/include/asm/parport.h if (xchg(&sparc_ebus_dmas[dmanr].lock, 1) != 0)
xchg 63 arch/sparc/include/asm/parport.h if (xchg(&sparc_ebus_dmas[dmanr].lock, 0) == 0) {
xchg 544 arch/um/kernel/irq.c mask = xchg(&pending_mask, *mask_out);
xchg 558 arch/um/kernel/irq.c mask = xchg(&pending_mask, old);
xchg 577 arch/um/kernel/irq.c mask = xchg(&pending_mask, 0);
xchg 596 arch/um/kernel/irq.c mask = xchg(&pending_mask, 0);
xchg 91 arch/x86/hyperv/hv_apic.c if (hvp && (xchg(&hvp->apic_assist, 0) & 0x1))
xchg 50 arch/x86/include/asm/atomic64_32.h ATOMIC64_DECL(xchg);
xchg 92 arch/x86/include/asm/atomic64_32.h alternative_atomic64(xchg, "=&A" (o),
xchg 64 arch/x86/include/asm/barrier.h #define __smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
xchg 78 arch/x86/include/asm/cmpxchg.h #define arch_xchg(ptr, v) __xchg_op((ptr), (v), xchg, "")
xchg 126 arch/x86/include/asm/local.h #define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
xchg 52 arch/x86/include/asm/pgtable-2level.h return __pte(xchg(&xp->pte_low, 0));
xchg 61 arch/x86/include/asm/pgtable-2level.h return __pmd(xchg((pmdval_t *)xp, 0));
xchg 70 arch/x86/include/asm/pgtable-2level.h return __pud(xchg((pudval_t *)xp, 0));
xchg 177 arch/x86/include/asm/pgtable-3level.h res.pmd_low = xchg(&orig->pmd_low, 0);
xchg 207 arch/x86/include/asm/pgtable-3level.h old.pmd_low = xchg(&ptr->pmd_low, new.pmd_low);
xchg 239 arch/x86/include/asm/pgtable-3level.h res.pud_low = xchg(&orig->pud_low, 0);
xchg 1218 arch/x86/include/asm/pgtable.h return xchg(pmdp, pmd);
xchg 88 arch/x86/include/asm/pgtable_64.h return native_make_pte(xchg(&xp->pte, 0));
xchg 101 arch/x86/include/asm/pgtable_64.h return native_make_pmd(xchg(&xp->pmd, 0));
xchg 124 arch/x86/include/asm/pgtable_64.h return native_make_pud(xchg(&xp->pud, 0));
xchg 24 arch/x86/include/asm/xen/events.h #define xchg_xen_ulong(ptr, val) xchg((ptr), (val))
xchg 399 arch/x86/kvm/lapic.c irr_val |= xchg(&pir[i], 0);
xchg 2723 arch/x86/kvm/lapic.c pe = xchg(&apic->pending_events, 0);
xchg 658 arch/x86/kvm/mmu.c return xchg(sptep, spte);
xchg 732 arch/x86/kvm/mmu.c orig.spte_low = xchg(&ssptep->spte_low, sspte.spte_low);
xchg 2669 arch/x86/kvm/x86.c if (xchg(&st->preempted, 0) & KVM_VCPU_FLUSH_TLB)
xchg 259 arch/xtensa/include/asm/atomic.h #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
xchg 1537 drivers/atm/eni.c events = xchg(&eni_dev->events,0);
xchg 668 drivers/block/ataflop.c handler = xchg(&FloppyIRQHandler, NULL);
xchg 127 drivers/gpu/drm/armada/armada_crtc.c event = xchg(&crtc->state->event, NULL);
xchg 295 drivers/gpu/drm/armada/armada_crtc.c event = xchg(&dcrtc->event, NULL);
xchg 65 drivers/gpu/drm/i915/gt/intel_reset.c prev_hang = xchg(&file_priv->hang_timestamp, jiffies);
xchg 1587 drivers/gpu/drm/i915/gt/selftest_hangcheck.c error = xchg(&global->first_error, (void *)-1);
xchg 1591 drivers/gpu/drm/i915/gt/selftest_hangcheck.c xchg(&global->first_error, error);
xchg 52 drivers/gpu/drm/i915/gt/selftest_timeline.c tl = xchg(&state->history[idx], tl);
xchg 457 drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c WARN_ON_ONCE(xchg(&db->cookie, cookie + 1 ?: cookie + 2) != cookie);
xchg 238 drivers/gpu/drm/i915/i915_gem_fence_reg.c old = xchg(&fence->vma, NULL);
xchg 1768 drivers/gpu/drm/i915/i915_gpu_error.c if (!xchg(&warned, true) &&
xchg 395 drivers/gpu/drm/i915/i915_sw_fence.c fence = xchg(&cb->base.fence, NULL);
xchg 416 drivers/gpu/drm/i915/i915_sw_fence.c fence = xchg(&cb->base.fence, NULL);
xchg 380 drivers/gpu/drm/i915/intel_uncore.c if (xchg(&domain->active, false))
xchg 38 drivers/gpu/drm/qxl/qxl_irq.c pending = xchg(&qdev->ram_header->int_pending, 0);
xchg 842 drivers/hv/channel_mgmt.c if (xchg(&vmbus_connection.conn_state, DISCONNECTED) == DISCONNECTED)
xchg 7134 drivers/infiniband/hw/hfi1/chip.c xchg(&ppd->is_link_down_queued, 0);
xchg 7890 drivers/infiniband/hw/hfi1/chip.c if (xchg(&ppd->is_link_down_queued, 1) == 1)
xchg 142 drivers/infiniband/hw/hfi1/user_sdma.c xchg(&pq->state, SDMA_PKT_Q_DEFERRED);
xchg 160 drivers/infiniband/hw/hfi1/user_sdma.c xchg(&pq->state, SDMA_PKT_Q_ACTIVE);
xchg 276 drivers/infiniband/sw/rdmavt/qp.c bits = xchg(&wss->entries[entry], 0);
xchg 126 drivers/irqchip/irq-ompic.c while ((ops = xchg(pending_ops, 0)) != 0) {
xchg 1295 drivers/md/dm-bufio.c a = xchg(&c->async_write_error, 0);
xchg 1502 drivers/md/raid5-cache.c sector_t reclaim_target = xchg(&log->reclaim_target, 0);
xchg 153 drivers/misc/sgi-gru/grufault.c k = xchg(&tfm->fault_bits[i], 0UL);
xchg 157 drivers/misc/sgi-gru/grufault.c k = xchg(&tfm->done_bits[i], 0UL);
xchg 295 drivers/net/ethernet/broadcom/bnxt/bnxt_xdp.c old = xchg(&bp->xdp_prog, prog);
xchg 1872 drivers/net/ethernet/cavium/thunder/nicvf_main.c old_prog = xchg(&nic->xdp_prog, prog);
xchg 1801 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c old = xchg(&priv->xdp_prog, prog);
xchg 1807 drivers/net/ethernet/freescale/dpaa2/dpaa2-eth.c old = xchg(&ch->xdp.prog, prog);
xchg 12528 drivers/net/ethernet/intel/i40e/i40e_main.c old_prog = xchg(&vsi->xdp_prog, prog);
xchg 10263 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c old_prog = xchg(&adapter->xdp_prog, prog);
xchg 10281 drivers/net/ethernet/intel/ixgbe/ixgbe_main.c (void)xchg(&adapter->rx_ring[i]->xdp_prog,
xchg 4459 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c old_prog = xchg(&adapter->xdp_prog, prog);
xchg 4477 drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c xchg(&adapter->rx_ring[i]->xdp_prog, adapter->xdp_prog);
xchg 4460 drivers/net/ethernet/mellanox/mlx5/core/en_main.c old_prog = xchg(&priv->channels.params.xdp_prog, prog);
xchg 4485 drivers/net/ethernet/mellanox/mlx5/core/en_main.c old_prog = xchg(&c->rq.xdp_prog, prog);
xchg 4490 drivers/net/ethernet/mellanox/mlx5/core/en_main.c old_prog = xchg(&c->xskrq.xdp_prog, prog);
xchg 1097 drivers/net/ethernet/qlogic/qede/qede_filter.c old = xchg(&edev->xdp_prog, args->u.new_prog);
xchg 1294 drivers/net/ethernet/sfc/mcdi.c if (xchg(&mcdi->mode, MCDI_MODE_FAIL) == MCDI_MODE_FAIL)
xchg 1789 drivers/net/ethernet/socionext/netsec.c old_prog = xchg(&priv->xdp_prog, prog);
xchg 180 drivers/net/slip/slip.c rbuff = xchg(&sl->rbuff, rbuff);
xchg 181 drivers/net/slip/slip.c xbuff = xchg(&sl->xbuff, xbuff);
xchg 183 drivers/net/slip/slip.c cbuff = xchg(&sl->cbuff, cbuff);
xchg 184 drivers/net/slip/slip.c slcomp = xchg(&sl->slcomp, slcomp);
xchg 208 drivers/net/slip/slip.c kfree(xchg(&sl->rbuff, NULL));
xchg 209 drivers/net/slip/slip.c kfree(xchg(&sl->xbuff, NULL));
xchg 211 drivers/net/slip/slip.c kfree(xchg(&sl->cbuff, NULL));
xchg 212 drivers/net/slip/slip.c slhc_free(xchg(&sl->slcomp, NULL));
xchg 263 drivers/net/slip/slip.c xbuff = xchg(&sl->xbuff, xbuff);
xchg 264 drivers/net/slip/slip.c rbuff = xchg(&sl->rbuff, rbuff);
xchg 266 drivers/net/slip/slip.c cbuff = xchg(&sl->cbuff, cbuff);
xchg 138 drivers/net/wan/x25_asy.c xbuff = xchg(&sl->xbuff, xbuff);
xchg 149 drivers/net/wan/x25_asy.c rbuff = xchg(&sl->rbuff, rbuff);
xchg 375 drivers/net/wireless/intersil/prism54/islpci_mgt.c if ((frame = xchg(&priv->mgmt_received, frame)) != NULL) {
xchg 457 drivers/net/wireless/intersil/prism54/islpci_mgt.c frame = xchg(&priv->mgmt_received, NULL);
xchg 162 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c int i, j, xchg;
xchg 180 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c xchg = tbl->win_size - i;
xchg 181 drivers/net/wireless/marvell/mwifiex/11n_rxreorder.c for (j = 0; j < xchg; ++j) {
xchg 1429 drivers/nvme/target/rdma.c if (xchg(&port->priv, NULL) != cm_id)
xchg 1582 drivers/nvme/target/rdma.c struct rdma_cm_id *cm_id = xchg(&port->priv, NULL);
xchg 1595 drivers/pci/controller/pci-hyperv.c event = xchg(&hbus->survey_event, NULL);
xchg 201 drivers/s390/char/con3270.c xchg(&((struct con3270 *) rq->view)->write, rq);
xchg 223 drivers/s390/char/con3270.c wrq = xchg(&cp->write, 0);
xchg 285 drivers/s390/char/con3270.c xchg(&cp->write, wrq);
xchg 349 drivers/s390/char/con3270.c xchg(&cp->read, rrq);
xchg 373 drivers/s390/char/con3270.c rrq = xchg(&cp->read, 0);
xchg 358 drivers/s390/char/tty3270.c xchg(&tp->write, rq);
xchg 375 drivers/s390/char/tty3270.c wrq = xchg(&tp->write, 0);
xchg 448 drivers/s390/char/tty3270.c xchg(&tp->write, wrq);
xchg 607 drivers/s390/char/tty3270.c xchg(&tp->read, rrq);
xchg 632 drivers/s390/char/tty3270.c rrq = xchg(&tp->read, 0);
xchg 647 drivers/s390/char/tty3270.c xchg(&tp->read, rrq);
xchg 258 drivers/s390/cio/qdio_main.c xchg(&q->slsb.val[bufnr], state);
xchg 121 drivers/s390/cio/qdio_thinint.c xchg(irq_ptr->dsci, 0);
xchg 140 drivers/s390/cio/qdio_thinint.c return xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0);
xchg 150 drivers/s390/cio/qdio_thinint.c xchg(irq->dsci, 0);
xchg 166 drivers/s390/cio/qdio_thinint.c xchg(irq->dsci, 0);
xchg 437 drivers/s390/crypto/ap_bus.c xchg(ap_airq.lsi_ptr, 0);
xchg 671 drivers/s390/crypto/ap_bus.c xchg(ap_airq.lsi_ptr, 0);
xchg 51 drivers/staging/speakup/devsynth.c if (xchg(&dev_opened, 1))
xchg 46 drivers/staging/speakup/selection.c tty = xchg(&ssw->tty, NULL);
xchg 104 drivers/staging/speakup/selection.c tty = xchg(&speakup_sel_work.tty, NULL);
xchg 113 drivers/staging/speakup/selection.c struct tty_struct *tty = xchg(&ssw->tty, NULL);
xchg 141 drivers/staging/speakup/selection.c tty = xchg(&speakup_paste_work.tty, NULL);
xchg 539 drivers/thunderbolt/xdomain.c const struct tb_xdp_properties_changed *xchg =
xchg 550 drivers/thunderbolt/xdomain.c xd = tb_xdomain_find_by_uuid_locked(tb, &xchg->src_uuid);
xchg 683 drivers/tty/serial/8250/8250_port.c rpm_active = xchg(&p->rpm_tx_active, 1);
xchg 697 drivers/tty/serial/8250/8250_port.c rpm_active = xchg(&p->rpm_tx_active, 0);
xchg 115 drivers/tty/tty_audit.c buf = xchg(&current->signal->tty_audit_buf, ERR_PTR(-ESRCH));
xchg 2935 drivers/tty/vt/vt.c return xchg(&kmsg_con, new);
xchg 867 drivers/usb/gadget/function/f_fs.c struct ffs_buffer *buf = xchg(&epfile->read_buffer, READ_BUFFER_DROP);
xchg 881 drivers/usb/gadget/function/f_fs.c struct ffs_buffer *buf = xchg(&epfile->read_buffer, NULL);
xchg 1643 drivers/video/fbdev/pxafb.c u_int state = xchg(&fbi->task_state, -1);
xchg 964 drivers/video/fbdev/sa1100fb.c u_int state = xchg(&fbi->task_state, -1);
xchg 335 drivers/xen/events/events_fifo.c ready = xchg(&control_block->ready, 0);
xchg 340 drivers/xen/events/events_fifo.c ready |= xchg(&control_block->ready, 0);
xchg 2397 fs/btrfs/transaction.c prev = xchg(&fs_info->pending_changes, 0);
xchg 94 fs/cachefiles/daemon.c if (xchg(&cachefiles_open, 1) == 1)
xchg 1308 fs/cifs/connect.c task_to_wake = xchg(&server->tsk, NULL);
xchg 2746 fs/cifs/connect.c task = xchg(&server->tsk, NULL);
xchg 1156 fs/eventpoll.c prev = xchg(&head->prev, new);
xchg 1188 fs/eventpoll.c epi->next = xchg(&ep->ovflist, epi);
xchg 326 fs/ext4/page-io.c bio->bi_private = xchg(&io_end->bio, bio);
xchg 386 fs/file.c struct file * file = xchg(&fdt->fd[i], NULL);
xchg 1537 fs/namespace.c if (!xchg(&mnt->mnt_expiry_mark, 1))
xchg 2898 fs/namespace.c if (!xchg(&mnt->mnt_expiry_mark, 1) ||
xchg 519 fs/nfs/flexfilelayout/flexfilelayout.c cred = xchg(&mirror->ro_cred, cred);
xchg 522 fs/nfs/flexfilelayout/flexfilelayout.c cred = xchg(&mirror->rw_cred, cred);
xchg 2275 fs/nfs/nfs4state.c clnt = xchg(&clp->cl_rpcclient, clnt);
xchg 196 fs/nfs/read.c xchg(&nfs_req_openctx(req)->error, error);
xchg 352 fs/nfs/read.c xchg(&ctx->error, 0);
xchg 357 fs/nfs/read.c error = xchg(&ctx->error, 0);
xchg 110 fs/nfs/sysfs.c old = xchg(&c->identifier, p);
xchg 2371 fs/ocfs2/file.c saved_ki_complete = xchg(&iocb->ki_complete, NULL);
xchg 2419 fs/ocfs2/file.c xchg(&iocb->ki_complete, saved_ki_complete);
xchg 68 fs/posix_acl.c old = xchg(p, posix_acl_dup(acl));
xchg 78 fs/posix_acl.c old = xchg(p, ACL_NOT_CACHED);
xchg 268 fs/proc/kcore.c if (!xchg(&kcore_need_update, 0))
xchg 195 include/asm-generic/atomic.h #define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v)))
xchg 17 include/asm-generic/cmpxchg.h #ifndef xchg
xchg 10 include/linux/atomic-fallback.h #define xchg_relaxed xchg
xchg 11 include/linux/atomic-fallback.h #define xchg_acquire xchg
xchg 12 include/linux/atomic-fallback.h #define xchg_release xchg
xchg 17 include/linux/atomic-fallback.h __atomic_op_acquire(xchg, __VA_ARGS__)
xchg 22 include/linux/atomic-fallback.h __atomic_op_release(xchg, __VA_ARGS__)
xchg 25 include/linux/atomic-fallback.h #ifndef xchg
xchg 27 include/linux/atomic-fallback.h __atomic_op_fence(xchg, __VA_ARGS__)
xchg 17 include/linux/debug_locks.h return xchg(&debug_locks, 0);
xchg 222 include/linux/llist.h return xchg(&head->first, NULL);
xchg 1158 include/linux/mm.h return xchg(&page->_last_cpupid, cpupid & LAST_CPUPID_MASK);
xchg 448 include/linux/skmsg.h prog = xchg(pprog, prog);
xchg 141 include/net/pkt_cls.h return xchg(clp, cl);
xchg 1926 include/net/sock.h old_dst = xchg((__force struct dst_entry **)&sk->sk_dst_cache, dst);
xchg 2174 include/net/sock.h err = xchg(&sk->sk_err, 0);
xchg 252 kernel/acct.c old = xchg(&ns->bacct, &acct->pin);
xchg 1473 kernel/audit.c t = xchg(&current->signal->audit_tty, t);
xchg 543 kernel/bpf/arraymap.c old_ptr = xchg(array->ptrs + index, new_ptr);
xchg 559 kernel/bpf/arraymap.c old_ptr = xchg(array->ptrs + index, NULL);
xchg 447 kernel/bpf/cpumap.c old_rcpu = xchg(&cmap->cpu_map[key_cpu], rcpu);
xchg 556 kernel/bpf/devmap.c old_dev = xchg(&dtab->netdev_map[k], NULL);
xchg 650 kernel/bpf/devmap.c old_dev = xchg(&dtab->netdev_map[i], dev);
xchg 164 kernel/bpf/local_storage.c new = xchg(&storage->buf, new);
xchg 431 kernel/bpf/stackmap.c old_bucket = xchg(&smap->buckets[id], new_bucket);
xchg 529 kernel/bpf/stackmap.c bucket = xchg(&smap->buckets[id], NULL);
xchg 537 kernel/bpf/stackmap.c old_bucket = xchg(&smap->buckets[id], bucket);
xchg 588 kernel/bpf/stackmap.c old_bucket = xchg(&smap->buckets[id], NULL);
xchg 291 kernel/bpf/xskmap.c old_xs = xchg(map_entry, NULL);
xchg 75 kernel/dma.c if (xchg(&dma_chan_busy[dmanr].lock, 1) != 0)
xchg 95 kernel/dma.c if (xchg(&dma_chan_busy[dmanr].lock, 0) == 0) {
xchg 459 kernel/exit.c self.next = xchg(&core_state->dumper.next, &self);
xchg 163 kernel/irq_work.c xchg(&work->flags, flags);
xchg 123 kernel/kexec.c kimage_free(xchg(dest_image, NULL));
xchg 132 kernel/kexec.c kimage_free(xchg(&kexec_crash_image, NULL));
xchg 163 kernel/kexec.c image = xchg(dest_image, image);
xchg 406 kernel/kexec_file.c kimage_free(xchg(&kexec_crash_image, NULL));
xchg 450 kernel/kexec_file.c image = xchg(dest_image, image);
xchg 228 kernel/kthread.c done = xchg(&create->done, NULL);
xchg 281 kernel/kthread.c struct completion *done = xchg(&create->done, NULL);
xchg 326 kernel/kthread.c if (xchg(&create->done, NULL))
xchg 79 kernel/locking/mcs_spinlock.h prev = xchg(lock, node);
xchg 79 kernel/locking/osq_lock.c next = xchg(&node->next, NULL);
xchg 222 kernel/locking/osq_lock.c next = xchg(&node->next, NULL);
xchg 456 kernel/locking/qspinlock_paravirt.h if (xchg(&lock->locked, _Q_SLOW_VAL) == 0) {
xchg 1315 kernel/rcu/rcutorture.c if ((pipe_count > 1 || completed > 1) && !xchg(&err_segs_recorded, 1)) {
xchg 1526 kernel/sched/fair.c if (xchg(&rq->numa_migrate_on, 1))
xchg 1808 kernel/sys.c mask = xchg(&current->fs->umask, mask & S_IRWXUGO);
xchg 1862 kernel/sys.c old_exe = xchg(&mm->exe_file, exe.file);
xchg 3145 kernel/sysctl.c put_pid(xchg(&cad_pid, new_pid));
xchg 348 kernel/trace/blktrace.c bt = xchg(&q->blk_trace, NULL);
xchg 1632 kernel/trace/blktrace.c bt = xchg(&q->blk_trace, NULL);
xchg 53 kernel/umh.c struct completion *comp = xchg(&sub_info->complete, NULL);
xchg 600 kernel/umh.c if (xchg(&sub_info->complete, NULL))
xchg 81 lib/atomic64_test.c FAMILY_TEST(TEST_ARGS, bit, xchg, init, init, new, new); \
xchg 232 lib/generic-radix-tree.c struct genradix_root *r = xchg(&radix->root, NULL);
xchg 29 lib/sbitmap.c mask = xchg(&sb->map[index].cleared, 0);
xchg 147 mm/huge_memory.c struct page *zero_page = xchg(&huge_zero_page, NULL);
xchg 6177 mm/memcontrol.c xchg(&memcg->memory.max, max);
xchg 7248 mm/memcontrol.c xchg(&memcg->swap.max, max);
xchg 195 mm/page_counter.c old = xchg(&counter->max, nr_pages);
xchg 259 net/atm/clip.c old = xchg(&clip_vcc->xoff, 0);
xchg 388 net/atm/clip.c old = xchg(&entry->vccs->xoff, 1); /* assume XOFF ... */
xchg 550 net/atm/svc.c error = -xchg(&sk->sk_err_soft, 0);
xchg 577 net/atm/svc.c error = -xchg(&sk->sk_err_soft, 0);
xchg 42 net/bridge/netfilter/ebt_limit.c info->credit += (now - xchg(&info->prev, now)) * CREDITS_PER_JIFFY;
xchg 200 net/core/gen_estimator.c est = xchg((__force struct net_rate_estimator **)rate_est, NULL);
xchg 1289 net/core/sock.c v.val = xchg(&sk->sk_err_soft, 0);
xchg 245 net/core/sock_map.c sk = xchg(psk, NULL);
xchg 292 net/core/sock_map.c sk = xchg(psk, NULL);
xchg 671 net/dccp/ipv6.c opt_skb = xchg(&np->pktoptions, opt_skb);
xchg 674 net/dccp/ipv6.c opt_skb = xchg(&np->pktoptions, NULL);
xchg 430 net/decnet/dn_neigh.c neigh_release(xchg(&dn_db->router, neigh_clone(neigh)));
xchg 362 net/decnet/dn_route.c if ((rt = xchg((struct dn_route **)&dn_rt_hash_table[i].chain, NULL)) == NULL)
xchg 1964 net/ipv4/cipso_ipv4.c opt = xchg((__force struct ip_options_rcu **)&req_inet->ireq_opt, opt);
xchg 346 net/ipv4/tcp.c val = xchg(&tcp_memory_pressure, 0);
xchg 228 net/ipv4/tcp_cong.c prev = xchg(&net->ipv4.tcp_congestion_control, ca);
xchg 6504 net/ipv4/tcp_input.c xchg(&queue->synflood_warned, 1) == 0)
xchg 2130 net/ipv4/udp.c old = xchg(&sk->sk_rx_dst, dst);
xchg 482 net/ipv6/af_inet6.c skb = xchg(&np->pktoptions, NULL);
xchg 485 net/ipv6/af_inet6.c skb = xchg(&np->rxpmtu, NULL);
xchg 493 net/ipv6/af_inet6.c opt = xchg((__force struct ipv6_txoptions **)&np->opt, NULL);
xchg 1218 net/ipv6/calipso.c txopts = xchg(&req_inet->ipv6_opt, txopts);
xchg 1251 net/ipv6/calipso.c txopts = xchg(&req_inet->ipv6_opt, txopts);
xchg 393 net/ipv6/datagram.c skb = xchg(&np->rxpmtu, skb);
xchg 536 net/ipv6/datagram.c skb = xchg(&np->rxpmtu, NULL);
xchg 927 net/ipv6/ip6_fib.c from = xchg((__force struct fib6_info **)&pcpu_rt->from, NULL);
xchg 112 net/ipv6/ipv6_sockglue.c opt = xchg((__force struct ipv6_txoptions **)&inet6_sk(sk)->opt,
xchg 240 net/ipv6/ipv6_sockglue.c opt = xchg((__force struct ipv6_txoptions **)&np->opt,
xchg 246 net/ipv6/ipv6_sockglue.c pktopt = xchg(&np->pktoptions, NULL);
xchg 382 net/ipv6/route.c from = xchg((__force struct fib6_info **)&rt->from, NULL);
xchg 1414 net/ipv6/route.c prev = xchg(p, NULL);
xchg 1442 net/ipv6/route.c from = xchg((__force struct fib6_info **)&pcpu_rt->from, NULL);
xchg 1471 net/ipv6/route.c from = xchg((__force struct fib6_info **)&rt6_ex->rt6i->from, NULL);
xchg 1444 net/ipv6/tcp_ipv6.c opt_skb = xchg(&np->pktoptions, opt_skb);
xchg 1447 net/ipv6/tcp_ipv6.c opt_skb = xchg(&np->pktoptions, NULL);
xchg 196 net/netfilter/nf_conntrack_ecache.c events = xchg(&e->cache, 0);
xchg 303 net/netfilter/nf_log.c m = xchg(&emergency_ptr, NULL);
xchg 72 net/netfilter/xt_limit.c priv->credit += (now - xchg(&priv->prev, now)) * CREDITS_PER_JIFFY;
xchg 377 net/netlabel/netlabel_calipso.c return xchg(&calipso_ops, ops);
xchg 92 net/rds/ib_recv.c tmp = xchg(&cache->xfer, NULL);
xchg 504 net/rds/ib_recv.c old = xchg(&cache->xfer, NULL);
xchg 46 net/sched/act_api.c old = xchg((__force struct tc_cookie **)old_cookie, new_cookie);
xchg 441 net/sunrpc/xprtmultipath.c oldswitch = xchg(&xpi->xpi_xpswitch, RCU_INITIALIZER(newswitch));
xchg 2489 net/sunrpc/xprtsock.c sockerr = xchg(&transport->xprt_err, 0);
xchg 220 sound/usb/mixer_quirks.c (rc_code = xchg(&mixer->rc_code, 0)) != 0);
xchg 282 tools/testing/selftests/powerpc/benchmarks/context_switch.c c = xchg(m, 2);
xchg 286 tools/testing/selftests/powerpc/benchmarks/context_switch.c c = xchg(m, 2);
xchg 300 tools/testing/selftests/powerpc/benchmarks/context_switch.c else if (xchg(m, 0) == 1)
xchg 1278 virt/kvm/kvm_main.c mask = xchg(&dirty_bitmap[i], 0);
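
Three usage patterns recur throughout the listing above: a test-and-set flag where the returned old value decides who wins (arch/arm/kernel/dma.c, fs/cachefiles/daemon.c), atomically draining a pending-work word to zero (the smp.c IPI handlers, virt/kvm/kvm_main.c), and a pointer hand-off where the old object is reclaimed after the swap (the xdp_prog updates, kfree(xchg(...)) in drivers/net/slip/slip.c). The sketch below is a userspace illustration of those three shapes using C11 atomics as a stand-in for the kernel's xchg() macro; the variable names are hypothetical and it is not taken from any of the files listed.

/* Minimal userspace sketch of the xchg() patterns above (C11 atomics). */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

static atomic_int lock_flag;          /* cf. xchg(&dma->lock, 1)            */
static atomic_ulong pending_work;     /* cf. ops = xchg(pending_ipis, 0)    */
static _Atomic(char *) current_buf;   /* cf. kfree(xchg(&sl->rbuff, NULL))  */

int main(void)
{
	/* 1. Test-and-set: the returned old value says whether we got it. */
	if (atomic_exchange(&lock_flag, 1) != 0)
		printf("already claimed by someone else\n");

	/* 2. Drain pending bits atomically; concurrent setters are not lost,
	 *    they simply land in the word for the next drain.
	 */
	atomic_fetch_or(&pending_work, 0x5UL);
	unsigned long ops = atomic_exchange(&pending_work, 0);
	printf("handling ops mask 0x%lx\n", ops);

	/* 3. Pointer hand-off: install a new buffer, reclaim the old one. */
	char *newbuf = malloc(64);
	char *old = atomic_exchange(&current_buf, newbuf);
	free(old);                        /* NULL-safe, like the kernel kfree() */

	free(atomic_exchange(&current_buf, NULL));
	return 0;
}

In all three cases the value of the primitive is that the swap and the read of the previous contents happen as one atomic step, so exactly one caller observes the old flag, the old work mask, or the old pointer.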