Searched refs:preempt (Results 1 - 170 of 170) sorted by relevance

/linux-4.1.27/include/linux/
vermagic.h:10 #define MODULE_VERMAGIC_PREEMPT "preempt "
bottom_half.h:4 #include <linux/preempt.h>
preempt.h:5 * include/linux/preempt.h - macros for accessing and manipulating
18 #include <asm/preempt.h>
111 * Even if we don't have any preemption, we need preempt disable/enable
113 * that can cause faults and scheduling migrate into our preempt-protected
hardirq.h:31 * because NMI handlers may not preempt and the ops are
bit_spinlock.h:5 #include <linux/preempt.h>
preempt_mask.h:4 #include <linux/preempt.h>
spinlock_api_up.h:23 * only thing we have to do is to keep the preempt counts and irq
uaccess.h:4 #include <linux/preempt.h>
percpu.h:5 #include <linux/preempt.h>
smp.h:55 #include <linux/preempt.h>
spinlock_api_smp.h:102 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
stop_machine.h:110 * holding a spinlock or inside any other preempt-disabled region when
spinlock.h:38 * builds. (which are NOPs on non-debug, non-preempt
50 #include <linux/preempt.h>
rwlock_api_smp.h:142 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
ftrace_event.h:502 * @pc: The state of the preempt count at the start of the event.
530 * @pc: The state of the preempt count at the start of the event.
seqlock.h:36 #include <linux/preempt.h>
pagemap.h:157 * disabling preempt, and hence no need for the "speculative get" that page_cache_get_speculative()
parport.h:141 int (*preempt)(void *); member in struct:pardevice
percpu-defs.h:401 * If there is no other protection through preempt disable and/or disabling
radix-tree.h:24 #include <linux/preempt.h>
bio.h:489 * might not be a highmem page, but the preempt/irq count bvec_kmap_irq()
interrupt.h:8 #include <linux/preempt.h>
kvm_host.h:19 #include <linux/preempt.h>
fs.h:709 * with respect to the local cpu (unlike with preempt disabled),
714 * and 64bit archs it makes no difference if preempt is enabled or not.
sched.h:2268 extern int yield_to(struct task_struct *p, bool preempt); sched_autogroup_exit()
/linux-4.1.27/arch/x86/include/asm/
preempt.h:18 * that think a non-zero value indicates we cannot preempt.
43 * We fold the NEED_RESCHED bit into the preempt count such that
48 * need to resched (the bit is cleared) and can resched (no preempt count).
i387.h:29 * Careful: __kernel_fpu_begin/end() must be called with preempt disabled
30 * and they don't touch the preempt state on their own.
31 * If you enable preemption after __kernel_fpu_begin(), preempt notifier
54 /* Must be called with preempt disabled */
mc146818rtc.h:37 * All of these below must be called with interrupts off, preempt
kvm_emulate.h:203 void (*get_fpu)(struct x86_emulate_ctxt *ctxt); /* disables preempt */
204 void (*put_fpu)(struct x86_emulate_ctxt *ctxt); /* reenables preempt */
fpu-internal.h:267 * These must be called with preempt disabled. Returns
/linux-4.1.27/arch/sh/kernel/
idle.c:15 #include <linux/preempt.h>
kprobes.c:14 #include <linux/preempt.h>
/linux-4.1.27/drivers/xen/
Makefile:5 obj-y += grant-table.o features.o balloon.o manage.o preempt.o
/linux-4.1.27/arch/x86/kernel/kprobes/
ftrace.c:23 #include <linux/preempt.h>
65 /* Disable irq for emulating a breakpoint and avoiding preempt */ kprobe_ftrace_handler()
opt.c:26 #include <linux/preempt.h>
core.c:47 #include <linux/preempt.h>
/linux-4.1.27/kernel/locking/
rtmutex-debug.c:9 * This code is based on the rt.c implementation in the preempt-rt tree.
17 * See rt.c in preempt-rt for proper credits and further information
spinlock.c:18 #include <linux/preempt.h>
27 * not re-enabled during lock-acquire (which the preempt-spin-ops do):
percpu-rwsem.c:36 * serialize with the preempt-disabled section below.
/linux-4.1.27/arch/x86/lib/
msr.c:2 #include <linux/preempt.h>
delay.c:17 #include <linux/preempt.h>
msr-smp.c:2 #include <linux/preempt.h>
/linux-4.1.27/arch/s390/include/asm/
facility.h:11 #include <linux/preempt.h>
percpu.h:4 #include <linux/preempt.h>
/linux-4.1.27/arch/parisc/lib/
delay.c:15 #include <linux/preempt.h>
/linux-4.1.27/arch/m68k/include/asm/
irqflags.h:8 #include <linux/preempt.h>
/linux-4.1.27/arch/metag/include/asm/
irqflags.h:82 /* Avoid circular include dependencies through <linux/preempt.h> */
/linux-4.1.27/arch/arm/mach-orion5x/
common.h:80 * These are not preempt-safe. Locks, if needed, must be taken
/linux-4.1.27/drivers/staging/speakup/
fakekey.c:22 #include <linux/preempt.h>
/linux-4.1.27/arch/mips/lib/
mips-atomic.c:14 #include <linux/preempt.h>
/linux-4.1.27/tools/perf/scripts/python/Perf-Trace-Util/
Context.c:77 "Get the common preempt count event field value."},
/linux-4.1.27/arch/arm/include/asm/
assembler.h:191 * Increment/decrement the preempt count.
195 ldr \tmp, [\ti, #TI_PREEMPT] @ get preempt count
201 ldr \tmp, [\ti, #TI_PREEMPT] @ get preempt count
futex.h:72 #include <linux/preempt.h>
/linux-4.1.27/drivers/gpu/drm/amd/amdkfd/
kfd_mqd_manager.h:39 * @destroy_mqd: Destroys the HQD slot and by that preempt the relevant queue.
kfd_mqd_manager_cik.c:254 * preempt type here is ignored because there is only one way
255 * to preempt sdma queue
/linux-4.1.27/arch/x86/xen/
irq.c:68 /* There's a one instruction preempt window here. We need to xen_irq_disable()
enlighten.c:18 #include <linux/preempt.h>
mmu.c:768 * traverse, because we may wrap the preempt count (8 xen_pin_page()
/linux-4.1.27/arch/powerpc/lib/
xor_vmx.c:22 #include <linux/preempt.h>
/linux-4.1.27/arch/powerpc/mm/
mmu_context_hash32.c:90 * Free a context ID. Make sure to call this with preempt disabled!
tlb_nohash.c:36 #include <linux/preempt.h>
247 * We snapshot the PID with preempt disabled. At this point, it can still
407 * while preempt & CPU mask mucking around, or even the IPI but tlb_flush_pgtable()
tlb_hash64.c:139 * Must be called from within some kind of spinlock/non-preempt region...
tlb_nohash_low.S:45 * the PID and I don't want to preempt when that happens.
/linux-4.1.27/arch/arm/kernel/
smp_tlb.c:10 #include <linux/preempt.h>
entry-armv.S:210 ldr r8, [tsk, #TI_PREEMPT] @ get preempt count
212 teq r8, #0 @ if preempt count != 0
/linux-4.1.27/arch/arc/include/asm/
futex.h:15 #include <linux/preempt.h>
smp.h:89 * asm/bitops.h -> linux/spinlock.h -> linux/preempt.h
/linux-4.1.27/kernel/trace/
trace_irqsoff.c:89 * Prologue for the preempt and irqs off function tracers.
109 * Does not matter if we preempt. We test the flags func_prolog_dec()
111 * If we preempt and get a false positive, the flags func_prolog_dec()
338 /* Skip 5 functions to get to the irq/preempt enable function */ check_critical_timing()
ring_buffer_benchmark.c:255 * If we are a non preempt kernel, the 10 second run will ring_buffer_producer()
trace_selftest.c:898 * in case of parallels max preempt off latencies. trace_selftest_startup_preemptoff()
957 /* reverse the order of preempt vs irqs */ trace_selftest_startup_preemptirqsoff()
964 * in case of parallels max irqs/preempt off latencies. trace_selftest_startup_preemptirqsoff()
993 /* reverse the order of preempt vs irqs */ trace_selftest_startup_preemptirqsoff()
trace_output.c:422 * trace_print_lat_fmt - print the irq, preempt and lockdep fields
426 * Prints the generic fields of irqs off, in hard or softirq, preempt
trace_functions_graph.c:1244 seq_printf(s, "#%.*s|| / _--=> preempt-depth \n", size, spaces); print_lat_header()
ring_buffer.c:2643 * The lock and unlock are done within a preempt disable section.
2658 * preempt other contexts. A SoftIRQ never preempts an IRQ
4852 /* No sleep, but for non preempt, let others run */ rb_hammer_test()
trace.c:2565 "# ||| / _--=> preempt-depth \n" print_lat_help_header()
2595 "# || / _--=> preempt-depth\n" print_func_help_header_irq()
2631 "preempt", print_trace_header()
/linux-4.1.27/arch/blackfin/mach-common/
interrupt.S:126 r7 = [p5 + TI_PREEMPT]; /* get preempt count */
134 [p5 + TI_PREEMPT] = r7; /* restore preempt count */
entry.S:627 * preempt the current domain while the pipeline log is being played
/linux-4.1.27/arch/x86/kernel/cpu/mcheck/
mce-inject.c:20 #include <linux/preempt.h>
mce_amd.c:208 /* cpu init entry point, called from mce.c with preempt off */ mce_amd_feature_init()
mce.c:501 /* Always runs in MCE context with preempt off */ mce_ring_add()
1687 * Must be called with preempt off:
/linux-4.1.27/arch/sparc/mm/
tlb.c:10 #include <linux/preempt.h>
tsb.c:7 #include <linux/preempt.h>
/linux-4.1.27/crypto/
xor.c:24 #include <linux/preempt.h>
/linux-4.1.27/fs/xfs/
xfs_stats.h:221 * We don't disable preempt, not too worried about poking the
/linux-4.1.27/arch/powerpc/platforms/cell/spufs/
fault.c:162 * time slicing will not preempt the context while the page fault spufs_handle_class1()
run.c:19 * It should be impossible to preempt a context while an exception spufs_stop_callback()
sched.c:624 * find_victim - find a lower priority context to preempt
/linux-4.1.27/arch/mips/kernel/
entry.S:31 local_irq_disable # preempt stop
kprobes.c:27 #include <linux/preempt.h>
/linux-4.1.27/arch/powerpc/include/asm/
lppaca.h:85 * hypervisor preempt). An even value implies that the processor is
/linux-4.1.27/arch/ia64/sn/kernel/
mca.c:42 * code. SAL calls are run with preempt disabled so this routine must not
/linux-4.1.27/kernel/
panic.c:90 * not have preempt disabled. Some functions called from here want panic()
91 * preempt to be disabled. No point enabling it later though... panic()
cpu.c:378 * By now we've cleared cpu_active_mask, wait for all preempt-disabled _cpu_down()
395 * So now all preempt/rcu users must observe !cpu_active(). _cpu_down()
stop_machine.c:224 * our local migrate thread gets the chance to preempt us.
480 "cpu_stop: %s(%p) leaked preempt count\n", cpu_stopper_thread()
reboot.c:126 * preempt all other restart handlers
softirq.c:103 * The preempt tracer hooks into preempt_count_add and will break __local_bh_disable_ip()
module.c:431 * (optional) module which owns it. Needs preempt disabled or module_mutex. */ find_symbol()
3152 * walking this with preempt disabled. In all the failure paths, do_init_module()
3861 * Must be called with preempt disabled or module mutex held so that
3904 * Must be called with preempt disabled or module mutex held so that
3927 /* Most callers should already have preempt disabled, but make sure */ print_modules()
/linux-4.1.27/block/
blk-lib.c:126 * us to schedule out to avoid softlocking if preempt blkdev_issue_discard()
cfq-iosched.c:3235 * preempt async queue, limiting the sync queue doesn't make cfq_may_dispatch()
3783 * Check if new_cfqq should preempt the currently active queue. Return 0 for
3784 * no or if we aren't sure, a 1 will cause a preempt.
3803 * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice. cfq_should_preempt()
3850 * current cfqq, let it preempt cfq_should_preempt()
3859 * cfqq preempts the active queue. if we allowed preempt with no slice left,
3866 cfq_log_cfqq(cfqd, cfqq, "preempt"); cfq_preempt_queue()
3870 * workload type is changed, don't save slice, otherwise preempt cfq_preempt_queue()
blk-core.c:3048 * preempt will imply a full memory barrier blk_start_plug()
/linux-4.1.27/arch/powerpc/kvm/
book3s_hv_builtin.c:11 #include <linux/preempt.h>
e500.c:117 /* Invalidate all id mappings on local core -- call with preempt disabled */ local_sid_destroy_all()
book3s_pr.c:193 * our preempt notifiers. Don't bother touching this svcpu then. kvmppc_copy_from_svcpu()
book3s_hv.c:24 #include <linux/preempt.h>
/linux-4.1.27/arch/arm64/kernel/
cpuinfo.c:27 #include <linux/preempt.h>
efi.c:23 #include <linux/preempt.h>
entry.S:369 ldr w24, [tsk, #TI_PREEMPT] // get preempt count
370 cbnz w24, 1f // preempt count != 0
/linux-4.1.27/arch/m68k/coldfire/
entry.S:124 jmp preempt_schedule_irq /* preempt the kernel */
/linux-4.1.27/arch/arm/kvm/
psci.c:18 #include <linux/preempt.h>
/linux-4.1.27/include/asm-generic/
tlb.h:42 * We use the sched RCU variant, as that guarantees that IRQ/preempt disabling
/linux-4.1.27/arch/arm/vfp/
vfpmodule.c:156 * - we could be preempted if tree preempt rcu is enabled, so
163 * - we could be preempted if tree preempt rcu is enabled, so
/linux-4.1.27/arch/arc/kernel/
entry.S:624 ; Can't preempt if preemption disabled
694 ; paranoid check, given A1 was active when A2 happened, preempt count
/linux-4.1.27/fs/ocfs2/cluster/
quorum.c:72 /* panic spins with interrupts enabled. with preempt o2quo_fence_self()
/linux-4.1.27/arch/score/kernel/
entry.S:238 disable_irq # preempt stop
/linux-4.1.27/drivers/parport/
share.c:571 tmp->preempt = pf; parport_register_device()
787 if (oldcad->preempt) { parport_claim()
788 if (oldcad->preempt(oldcad->private)) parport_claim()
/linux-4.1.27/kernel/sched/
sched.h:112 * Tells if entity @a should preempt entity @b.
774 * preempt-disabled sections.
1169 bool (*yield_to_task) (struct rq *rq, struct task_struct *p, bool preempt);
deadline.c:234 * If we cannot preempt any rq, fall back to pick any dl_task_offline_migration()
1389 * can be sent to some other CPU where they can preempt
1522 * - it will preempt the last one we pulled (if any). pull_dl_task()
core.c:2894 * we do not want to preempt the current task. Just return.. preempt_schedule()
2909 * recursion and tracing preempt enabling caused by the tracing
2911 * from userspace or just about to enter userspace, a preempt enable
2929 * Needs preempt disabled in case user_exit() is traced preempt_schedule_context()
3042 * --> -dl task blocks on mutex A and could preempt the rt_mutex_setprio()
4221 * no need to preempt or enable interrupts: SYSCALL_DEFINE0()
4319 * @preempt: whether task preemption is allowed or not
4329 int __sched yield_to(struct task_struct *p, bool preempt) yield_to() argument
4365 yielded = curr->sched_class->yield_to_task(rq, p, preempt); yield_to()
4372 if (preempt && rq != p_rq) yield_to()
4636 /* Set the preempt count _outside_ the spinlocks! */ init_idle()
fair.c:4826 * preempt must be disabled.
4958 * Should 'se' preempt 'curr'.
5054 goto preempt; check_preempt_wakeup()
5057 * Batch and idle tasks do not preempt non-idle tasks (their preemption check_preempt_wakeup()
5073 goto preempt; check_preempt_wakeup()
5078 preempt: check_preempt_wakeup()
5269 static bool yield_to_task_fair(struct rq *rq, struct task_struct *p, bool preempt) yield_to_task_fair() argument
7233 * Drop the rq->lock, but keep IRQ/preempt disabled. idle_balance()
7881 * Priority of the task has changed. Check to see if we preempt
7957 * if we can still preempt the current task. switched_to_fair()
rt.c:1589 * "this_cpu" is cheaper to preempt than a for_each_domain()
2166 * we may need to preempt the current running task. switched_to_rt()
/linux-4.1.27/drivers/rtc/
rtc-bfin.c:125 * shouldn't be an issue on an SMP or preempt system as this function should
/linux-4.1.27/arch/x86/mm/
kmmio.c:18 #include <linux/preempt.h>
/linux-4.1.27/arch/mn10300/kernel/
kprobes.c:23 #include <linux/preempt.h>
/linux-4.1.27/arch/powerpc/kernel/
kprobes.c:31 #include <linux/preempt.h>
vector.S:297 * with preempt disabled.
irq.c:212 * From this point onward, we can take interrupts, preempt, arch_local_irq_restore()
setup-common.c:220 /* We only show online cpus: disable preempt (overzealous, I show_cpuinfo()
entry_64.S:700 /* Check if we need to preempt */
/linux-4.1.27/arch/microblaze/kernel/
entry.S:731 preempt: label
738 bnei r5, preempt /* if non zero jump to resched */
/linux-4.1.27/drivers/md/
dm-stats.c:6 #include <linux/preempt.h>
/linux-4.1.27/arch/x86/kernel/
nmi.c:433 * If an NMI executes an iret, another NMI can preempt it. We do not
/linux-4.1.27/arch/tile/kernel/
process.c:16 #include <linux/preempt.h>
/linux-4.1.27/drivers/char/hw_random/
n2-drv.c:12 #include <linux/preempt.h>
/linux-4.1.27/drivers/acpi/acpica/
dswexec.c:551 * Tell the walk loop to preempt this running method and acpi_ds_exec_end_op()
/linux-4.1.27/drivers/virtio/
virtio_balloon.c:376 * and always have work to do. Be nice if preempt disabled. balloon()
/linux-4.1.27/init/
main.c:408 /* Call into cpu_idle with preempt disabled */ rest_init()
/linux-4.1.27/arch/s390/kernel/
kprobes.c:25 #include <linux/preempt.h>
/linux-4.1.27/mm/
zbud.c:50 #include <linux/preempt.h>
huge_memory.c:2238 * ptl mostly unnecessary, but preempt has to __collapse_huge_page_copy()
slab.c:61 * and local interrupts are disabled so slab code is preempt-safe.
memcontrol.c:2111 * This must be called under preempt disabled or must be called by
/linux-4.1.27/sound/drivers/
mts64.c:965 NULL, /* preempt */ snd_mts64_probe()
portman2x4.c:764 NULL, /* preempt */ snd_portman_probe()
/linux-4.1.27/drivers/scsi/
constants.c:171 {0x4, "Persistent reserve out, preempt"},
172 {0x5, "Persistent reserve out, preempt and abort"},
/linux-4.1.27/kernel/time/
timer.c:1159 WARN_ONCE(1, "timer: %pF preempt leak: %08x -> %08x\n", call_timer_fn()
1162 * Restore the preempt count. That gives us a decent call_timer_fn()
/linux-4.1.27/drivers/ide/
ide-io.c:535 * We let requests forced at head of queue with ide-preempt do_ide_request()
/linux-4.1.27/drivers/target/
target_core_pr.c:2836 * server shall perform a preempt by doing the following in an core_scsi3_pro_preempt()
2926 * with a zero SA rservation key, preempt the existing core_scsi3_pro_preempt()
2956 * reservation holder to preempt itself (i.e., a core_scsi3_pro_preempt()
2971 * server shall perform a preempt by doing the following as core_scsi3_pro_preempt()
/linux-4.1.27/drivers/input/misc/
wistron_btns.c:30 #include <linux/preempt.h>
/linux-4.1.27/arch/sparc/kernel/
smp_64.c:132 /* idle thread is expected to have preempt disabled */ smp_callin()
/linux-4.1.27/arch/metag/kernel/
traps.c:19 #include <linux/preempt.h>
/linux-4.1.27/drivers/usb/host/
ohci-hcd.c:577 /* 2msec timelimit here means no irqs/preempt */ ohci_run()
/linux-4.1.27/arch/ia64/kernel/
kprobes.c:30 #include <linux/preempt.h>
/linux-4.1.27/net/mac80211/
mesh_hwmp.c:1115 * this function is considered "using" the associated mpath, so preempt a path
/linux-4.1.27/kernel/rcu/
srcu.c:32 #include <linux/preempt.h>
tree_plugin.h:702 * Wait for an RCU-preempt grace period, but expedite it. The basic
906 * Wait for an rcu-preempt grace period, but make it happen quickly.
/linux-4.1.27/drivers/net/ethernet/sis/
sis190.c:690 * again too early (hint: think preempt and unclocked smp systems). sis190_tx_interrupt()
/linux-4.1.27/arch/mips/mm/
c-r4k.c:16 #include <linux/preempt.h>
/linux-4.1.27/drivers/infiniband/hw/qib/
qib_init.c:1044 * This is only roughly accurate, since even with preempt we qib_verify_pioperf()
/linux-4.1.27/ipc/
sem.c:695 * Hold preempt off so that we don't get preempted and have the wake_up_sem_queue_prepare()
/linux-4.1.27/arch/parisc/kernel/
entry.S:968 /* preempt the current task on returning to kernel
/linux-4.1.27/kernel/irq/
manage.c:816 * interrupts rely on the implicit bh/preempt disable of the hard irq irq_thread_check_affinity()
/linux-4.1.27/drivers/net/ethernet/3com/
typhoon.c:637 * preempt or do anything other than take interrupts. So, don't typhoon_issue_command()
/linux-4.1.27/drivers/infiniband/hw/ipath/
ipath_driver.c:359 * this is only roughly accurate, since even with preempt we ipath_verify_pioperf()
/linux-4.1.27/drivers/usb/gadget/function/
f_mass_storage.c:415 * If a lower-or-equal priority exception is in progress, preempt it raise_exception()
/linux-4.1.27/net/core/
sock.c:1948 * disabled, use cond_resched_softirq() to preempt.
/linux-4.1.27/drivers/net/bonding/
bond_main.c:73 #include <linux/preempt.h>
/linux-4.1.27/drivers/net/ethernet/neterion/vxge/
vxge-main.c:1838 * away which can preempt this NAPI thread */ vxge_poll_msix()
/linux-4.1.27/drivers/media/pci/bt8xx/
bttv-driver.c:763 capturing is off, it shall no longer "preempt" VBI capturing, disclaim_video_lines()
/linux-4.1.27/fs/
buffer.c:1418 * or with preempt disabled.
/linux-4.1.27/tools/lib/traceevent/
event-parse.c:4944 * need rescheduling, in hard/soft interrupt, preempt count
/linux-4.1.27/fs/dlm/
lock.c:1544 in the cancel reply should be 0). We preempt the cancel reply _remove_from_waiters()

Completed in 5790 milliseconds