/linux-4.4.14/tools/perf/ui/

progress.c | 17 p->curr += adv; ui_progress__update() 19 if (p->curr >= p->next) { ui_progress__update() 27 p->curr = 0; ui_progress__init()

progress.h | 10 u64 curr, next, step, total; member in struct:ui_progress
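Taken together, the two perf entries above describe a small throttled-progress helper: ui_progress__update() advances curr but only acts when curr crosses next, which moves forward in step-sized increments. Below is a minimal self-contained sketch of that pattern; the printf redraw and the step chosen at init time are illustrative stand-ins, not the perf UI API (the GTK variant later in this listing computes the same curr/total fraction).

    #include <stdint.h>
    #include <stdio.h>

    struct progress {
            uint64_t curr, next, step, total;
    };

    static void progress_init(struct progress *p, uint64_t total)
    {
            p->curr = 0;
            p->total = total;
            p->step = total / 16 ? total / 16 : 1;  /* redraw ~16 times */
            p->next = p->step;
    }

    static void progress_update(struct progress *p, uint64_t adv)
    {
            p->curr += adv;
            if (p->curr >= p->next) {
                    p->next += p->step;
                    /* as in the GTK variant: guard against total == 0 */
                    double frac = p->total ? 1.0 * p->curr / p->total : 0.0;
                    printf("\r%3.0f%%", 100.0 * frac);
                    fflush(stdout);
            }
    }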
/linux-4.4.14/fs/hfsplus/

bitmap.c | 24 __be32 *pptr, *curr, *end; hfsplus_block_allocate() local 42 curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32; hfsplus_block_allocate() 51 val = *curr; hfsplus_block_allocate() 60 curr++; hfsplus_block_allocate() 64 while (curr < end) { hfsplus_block_allocate() 65 val = *curr; hfsplus_block_allocate() 74 curr++; hfsplus_block_allocate() 86 curr = pptr = kmap(page); hfsplus_block_allocate() 97 start = offset + (curr - pptr) * 32 + i; hfsplus_block_allocate() 114 *curr++ = cpu_to_be32(n); hfsplus_block_allocate() 117 while (curr < end) { hfsplus_block_allocate() 118 n = be32_to_cpu(*curr); hfsplus_block_allocate() 125 *curr++ = cpu_to_be32(0xffffffff); hfsplus_block_allocate() 138 curr = pptr; hfsplus_block_allocate() 151 *curr = cpu_to_be32(n); hfsplus_block_allocate() 154 *max = offset + (curr - pptr) * 32 + i - start; hfsplus_block_allocate() 168 __be32 *pptr, *curr, *end; hfsplus_block_free() local 188 curr = pptr + (offset & (PAGE_CACHE_BITS - 1)) / 32; hfsplus_block_free() 199 *curr++ &= cpu_to_be32(mask); hfsplus_block_free() 202 *curr++ &= cpu_to_be32(mask); hfsplus_block_free() 208 while (curr < end) { hfsplus_block_free() 211 *curr++ = 0; hfsplus_block_free() 222 curr = pptr; hfsplus_block_free() 229 *curr &= cpu_to_be32(mask); hfsplus_block_free()
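The hfsplus_block_allocate() matches above show a word-at-a-time bitmap scan: walk 32-bit words, skip full ones (0xffffffff), then locate a free bit inside the first non-full word. A simplified sketch of that scan follows; real HFS+ bitmaps are big-endian on disk (hence the be32 conversions above), while this sketch uses host-endian words to stay self-contained.

    #include <stdint.h>
    #include <stddef.h>

    /* Return index of first zero bit (MSB-first within a word), or -1. */
    static long find_zero_bit32(const uint32_t *map, size_t nwords)
    {
            for (size_t w = 0; w < nwords; w++) {
                    uint32_t val = map[w];

                    if (val == 0xffffffffu)
                            continue;               /* word fully allocated */
                    for (int i = 0; i < 32; i++)
                            if (!(val & (0x80000000u >> i)))
                                    return (long)(w * 32 + i);
            }
            return -1;
    }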
/linux-4.4.14/kernel/sched/

stop_task.c | 60 struct task_struct *curr = rq->curr; put_prev_task_stop() local 63 delta_exec = rq_clock_task(rq) - curr->se.exec_start; put_prev_task_stop() 67 schedstat_set(curr->se.statistics.exec_max, put_prev_task_stop() 68 max(curr->se.statistics.exec_max, delta_exec)); put_prev_task_stop() 70 curr->se.sum_exec_runtime += delta_exec; put_prev_task_stop() 71 account_group_exec_runtime(curr, delta_exec); put_prev_task_stop() 73 curr->se.exec_start = rq_clock_task(rq); put_prev_task_stop() 74 cpuacct_charge(curr, delta_exec); put_prev_task_stop() 77 static void task_tick_stop(struct rq *rq, struct task_struct *curr, int queued) task_tick_stop() argument
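put_prev_task_stop() above and the update_curr_rt()/update_curr_dl() entries below all repeat one accounting pattern: charge the time elapsed since the last checkpoint to the running entity, then move the checkpoint. A reduced sketch, with rq_clock_task() replaced by a caller-supplied now value and the stats fields flattened into one struct:

    #include <stdint.h>

    struct entity {
            uint64_t exec_start;        /* last checkpoint, ns */
            uint64_t sum_exec_runtime;  /* total charged, ns */
            uint64_t exec_max;          /* longest single slice, ns */
    };

    static void update_curr(struct entity *curr, uint64_t now)
    {
            int64_t delta_exec = (int64_t)(now - curr->exec_start);

            if (delta_exec <= 0)
                    return;             /* clock went sideways; charge nothing */

            if ((uint64_t)delta_exec > curr->exec_max)
                    curr->exec_max = (uint64_t)delta_exec;

            curr->sum_exec_runtime += (uint64_t)delta_exec;
            curr->exec_start = now;     /* restart the slice */
    }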
rt.c | 85 rt_rq->highest_prio.curr = MAX_RT_PRIO; init_rt_rq() 164 rt_rq->highest_prio.curr = MAX_RT_PRIO; init_tg_rt_entry() 268 return rq->rt.highest_prio.curr > prev->prio; need_pull_rt_task() 490 struct task_struct *curr = rq_of_rt_rq(rt_rq)->curr; sched_rt_rq_enqueue() local 504 if (rt_rq->highest_prio.curr < curr->prio) sched_rt_rq_enqueue() 855 if (rt_rq->rt_nr_running && rq->curr == rq->idle) for_each_cpu() 886 return rt_rq->highest_prio.curr; rt_se_prio() 941 struct task_struct *curr = rq->curr; update_curr_rt() local 942 struct sched_rt_entity *rt_se = &curr->rt; update_curr_rt() 945 if (curr->sched_class != &rt_sched_class) update_curr_rt() 948 delta_exec = rq_clock_task(rq) - curr->se.exec_start; update_curr_rt() 952 schedstat_set(curr->se.statistics.exec_max, update_curr_rt() 953 max(curr->se.statistics.exec_max, delta_exec)); update_curr_rt() 955 curr->se.sum_exec_runtime += delta_exec; update_curr_rt() 956 account_group_exec_runtime(curr, delta_exec); update_curr_rt() 958 curr->se.exec_start = rq_clock_task(rq); update_curr_rt() 959 cpuacct_charge(curr, delta_exec); update_curr_rt() 1041 if (rq->online && rt_rq->highest_prio.curr != prev_prio) dec_rt_prio_smp() 1042 cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr); dec_rt_prio_smp() 1058 int prev_prio = rt_rq->highest_prio.curr; inc_rt_prio() 1061 rt_rq->highest_prio.curr = prio; inc_rt_prio() 1069 int prev_prio = rt_rq->highest_prio.curr; dec_rt_prio() 1082 rt_rq->highest_prio.curr = dec_rt_prio() 1087 rt_rq->highest_prio.curr = MAX_RT_PRIO; dec_rt_prio() 1310 requeue_task_rt(rq, rq->curr, 0); yield_task_rt() 1319 struct task_struct *curr; select_task_rq_rt() local 1329 curr = READ_ONCE(rq->curr); /* unlocked access */ select_task_rq_rt() 1353 if (curr && unlikely(rt_task(curr)) && select_task_rq_rt() 1354 (curr->nr_cpus_allowed < 2 || select_task_rq_rt() 1355 curr->prio <= p->prio)) { select_task_rq_rt() 1363 p->prio < cpu_rq(target)->rt.highest_prio.curr) select_task_rq_rt() 1378 if (rq->curr->nr_cpus_allowed == 1 || check_preempt_equal_prio() 1379 !cpupri_find(&rq->rd->cpupri, rq->curr, NULL)) check_preempt_equal_prio() 1406 if (p->prio < rq->curr->prio) { check_preempt_curr_rt() 1424 if (p->prio == rq->curr->prio && !test_tsk_need_resched(rq->curr)) check_preempt_curr_rt() 1648 if (lowest_rq->rt.highest_prio.curr <= task->prio) { find_lock_lowest_rq() 1679 if (lowest_rq->rt.highest_prio.curr > task->prio) find_lock_lowest_rq() 1729 if (unlikely(next_task == rq->curr)) { push_rt_task() 1739 if (unlikely(next_task->prio < rq->curr->prio)) { push_rt_task() 1858 if (next_rq->rt.highest_prio.next < rq->rt.highest_prio.curr) find_next_push_cpu() 1999 this_rq->rt.highest_prio.curr) pull_rt_task() 2019 if (p && (p->prio < this_rq->rt.highest_prio.curr)) { pull_rt_task() 2020 WARN_ON(p == src_rq->curr); pull_rt_task() 2031 if (p->prio < src_rq->curr->prio) pull_rt_task() 2061 !test_tsk_need_resched(rq->curr) && task_woken_rt() 2063 (dl_task(rq->curr) || rt_task(rq->curr)) && task_woken_rt() 2064 (rq->curr->nr_cpus_allowed < 2 || task_woken_rt() 2065 rq->curr->prio <= p->prio) task_woken_rt() 2077 cpupri_set(&rq->rd->cpupri, rq->cpu, rq->rt.highest_prio.curr); rq_online_rt() 2135 if (task_on_rq_queued(p) && rq->curr != p) { switched_to_rt() 2140 if (p->prio < rq->curr->prio) switched_to_rt() 2156 if (rq->curr == p) { prio_changed_rt() 2169 if (p->prio > rq->rt.highest_prio.curr) prio_changed_rt() 2182 if (p->prio < rq->curr->prio) prio_changed_rt() 2244 struct task_struct *p = rq->curr; set_curr_task_rt()
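The inc_rt_prio()/dec_rt_prio() matches above maintain rt_rq->highest_prio.curr as a cached extreme: enqueue can improve the cache in O(1), but dequeue of the current best forces a rescan, with MAX_RT_PRIO as the "empty" sentinel. A sketch of that shape follows; the kernel rescans a priority bitmap, whereas this sketch rescans a plain per-priority count array (initialise curr = MAX_PRIO and count[] = {0} before use; lower value means higher priority).

    #define MAX_PRIO 100

    struct prio_cache {
            int count[MAX_PRIO];  /* tasks queued per priority */
            int curr;             /* highest (numerically lowest) queued prio */
    };

    static void inc_prio(struct prio_cache *c, int prio)
    {
            c->count[prio]++;
            if (prio < c->curr)
                    c->curr = prio;           /* new best, O(1) */
    }

    static void dec_prio(struct prio_cache *c, int prio)
    {
            c->count[prio]--;
            if (prio == c->curr && c->count[prio] == 0) {
                    int p;

                    for (p = prio + 1; p < MAX_PRIO; p++)   /* rescan */
                            if (c->count[p])
                                    break;
                    c->curr = p;              /* MAX_PRIO when queue is empty */
            }
    }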
deadline.c | 78 dl_rq->earliest_dl.curr = dl_rq->earliest_dl.next = 0; init_dl_rq() 648 if (dl_task(rq->curr)) dl_task_timer() 716 struct task_struct *curr = rq->curr; update_curr_dl() local 717 struct sched_dl_entity *dl_se = &curr->dl; update_curr_dl() 720 if (!dl_task(curr) || !on_dl_rq(dl_se)) update_curr_dl() 731 delta_exec = rq_clock_task(rq) - curr->se.exec_start; update_curr_dl() 735 schedstat_set(curr->se.statistics.exec_max, update_curr_dl() 736 max(curr->se.statistics.exec_max, delta_exec)); update_curr_dl() 738 curr->se.sum_exec_runtime += delta_exec; update_curr_dl() 739 account_group_exec_runtime(curr, delta_exec); update_curr_dl() 741 curr->se.exec_start = rq_clock_task(rq); update_curr_dl() 742 cpuacct_charge(curr, delta_exec); update_curr_dl() 749 __dequeue_task_dl(rq, curr, 0); update_curr_dl() 750 if (unlikely(dl_se->dl_boosted || !start_dl_timer(curr))) update_curr_dl() 751 enqueue_task_dl(rq, curr, ENQUEUE_REPLENISH); update_curr_dl() 753 if (!is_leftmost(curr, &rq->dl)) update_curr_dl() 801 if (dl_rq->earliest_dl.curr == 0 || inc_dl_deadline() 802 dl_time_before(deadline, dl_rq->earliest_dl.curr)) { inc_dl_deadline() 809 dl_rq->earliest_dl.next = dl_rq->earliest_dl.curr; inc_dl_deadline() 810 dl_rq->earliest_dl.curr = deadline; inc_dl_deadline() 833 dl_rq->earliest_dl.curr = 0; dec_dl_deadline() 841 dl_rq->earliest_dl.curr = entry->deadline; dec_dl_deadline() 1020 struct task_struct *p = rq->curr; yield_task_dl() 1029 rq->curr->dl.dl_yielded = 1; yield_task_dl() 1049 struct task_struct *curr; select_task_rq_dl() local 1058 curr = READ_ONCE(rq->curr); /* unlocked access */ select_task_rq_dl() 1069 if (unlikely(dl_task(curr)) && select_task_rq_dl() 1070 (curr->nr_cpus_allowed < 2 || select_task_rq_dl() 1071 !dl_entity_preempt(&p->dl, &curr->dl)) && select_task_rq_dl() 1077 cpu_rq(target)->dl.earliest_dl.curr) || select_task_rq_dl() 1093 if (rq->curr->nr_cpus_allowed == 1 || check_preempt_equal_dl() 1094 cpudl_find(&rq->rd->cpudl, rq->curr, NULL) == -1) check_preempt_equal_dl() 1117 if (dl_entity_preempt(&p->dl, &rq->curr->dl)) { check_preempt_curr_dl() 1127 if ((p->dl.deadline == rq->curr->dl.deadline) && check_preempt_curr_dl() 1128 !test_tsk_need_resched(rq->curr)) check_preempt_curr_dl() 1256 struct task_struct *p = rq->curr; set_curr_task_dl() 1430 later_rq->dl.earliest_dl.curr)) { find_lock_later_rq() 1460 later_rq->dl.earliest_dl.curr)) find_lock_later_rq() 1510 if (unlikely(next_task == rq->curr)) { push_dl_task() 1516 * If next_task preempts rq->curr, and rq->curr push_dl_task() 1520 if (dl_task(rq->curr) && push_dl_task() 1521 dl_time_before(next_task->dl.deadline, rq->curr->dl.deadline) && push_dl_task() 1522 rq->curr->nr_cpus_allowed > 1) { push_dl_task() 1608 dl_time_before(this_rq->dl.earliest_dl.curr, pull_dl_task() 1632 this_rq->dl.earliest_dl.curr))) { pull_dl_task() 1633 WARN_ON(p == src_rq->curr); pull_dl_task() 1641 src_rq->curr->dl.deadline)) pull_dl_task() 1668 !test_tsk_need_resched(rq->curr) && task_woken_dl() 1670 dl_task(rq->curr) && task_woken_dl() 1671 (rq->curr->nr_cpus_allowed < 2 || task_woken_dl() 1672 !dl_entity_preempt(&p->dl, &rq->curr->dl))) { task_woken_dl() 1718 cpudl_set(&rq->rd->cpudl, rq->cpu, rq->dl.earliest_dl.curr, 1); rq_online_dl() 1770 if (task_on_rq_queued(p) && rq->curr != p) { switched_to_dl() 1775 if (dl_task(rq->curr)) switched_to_dl() 1790 if (task_on_rq_queued(p) || rq->curr == p) { prio_changed_dl() 1806 if (dl_time_before(rq->dl.earliest_dl.curr, p->dl.deadline)) prio_changed_dl()
fair.c | 461 if (cfs_rq->curr) update_min_vruntime() 462 vruntime = cfs_rq->curr->vruntime; update_min_vruntime() 469 if (!cfs_rq->curr) update_min_vruntime() 703 struct sched_entity *curr = cfs_rq->curr; update_curr() local 707 if (unlikely(!curr)) update_curr() 710 delta_exec = now - curr->exec_start; update_curr() 714 curr->exec_start = now; update_curr() 716 schedstat_set(curr->statistics.exec_max, update_curr() 717 max(delta_exec, curr->statistics.exec_max)); update_curr() 719 curr->sum_exec_runtime += delta_exec; update_curr() 722 curr->vruntime += calc_delta_fair(delta_exec, curr); update_curr() 725 if (entity_is_task(curr)) { update_curr() 726 struct task_struct *curtask = task_of(curr); update_curr() 728 trace_sched_stat_runtime(curtask, delta_exec, curr->vruntime); update_curr() 738 update_curr(cfs_rq_of(&rq->curr->se)); update_curr_fair() 756 if (se != cfs_rq->curr) update_stats_enqueue() 784 if (se != cfs_rq->curr) update_stats_dequeue() 1269 cur = dst_rq->curr; task_numa_compare() 1271 * No need to move the exiting task, and this ensures that ->curr task_numa_compare() 1968 tsk = READ_ONCE(cpu_rq(cpu)->curr); task_numa_group() 2285 void task_tick_numa(struct rq *rq, struct task_struct *curr) task_tick_numa() argument 2287 struct callback_head *work = &curr->numa_work; task_tick_numa() 2293 if (!curr->mm || (curr->flags & PF_EXITING) || work->next != work) task_tick_numa() 2302 now = curr->se.sum_exec_runtime; task_tick_numa() 2303 period = (u64)curr->numa_scan_period * NSEC_PER_MSEC; task_tick_numa() 2305 if (now > curr->node_stamp + period) { task_tick_numa() 2306 if (!curr->node_stamp) task_tick_numa() 2307 curr->numa_scan_period = task_scan_min(curr); task_tick_numa() 2308 curr->node_stamp += period; task_tick_numa() 2310 if (!time_before(jiffies, curr->mm->numa_next_scan)) { task_tick_numa() 2312 task_work_add(curr, work, true); task_tick_numa() 2317 static void task_tick_numa(struct rq *rq, struct task_struct *curr) task_tick_numa() argument 2407 if (cfs_rq->curr == se) reweight_entity() 2705 scale_load_down(cfs_rq->load.weight), cfs_rq->curr != NULL, cfs_rq); update_cfs_rq_load_avg() 2728 cfs_rq->curr == se, NULL); update_load_avg() 2765 cfs_rq->curr == se, NULL); detach_entity_load_avg() 2785 cfs_rq->curr == se, NULL); enqueue_entity_load_avg() 3023 if (se != cfs_rq->curr) enqueue_entity() 3105 if (se != cfs_rq->curr) dequeue_entity() 3112 * update can refer to the ->curr item and we need to reflect this dequeue_entity() 3129 check_preempt_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr) check_preempt_tick() argument 3135 ideal_runtime = sched_slice(cfs_rq, curr); check_preempt_tick() 3136 delta_exec = curr->sum_exec_runtime - curr->prev_sum_exec_runtime; check_preempt_tick() 3143 clear_buddies(cfs_rq, curr); check_preempt_tick() 3156 delta = curr->vruntime - se->vruntime; check_preempt_tick() 3181 cfs_rq->curr = se; set_next_entity() 3197 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se); 3207 pick_next_entity(struct cfs_rq *cfs_rq, struct sched_entity *curr) pick_next_entity() argument 3213 * If curr is set we have to see if its left of the leftmost entity pick_next_entity() 3216 if (!left || (curr && entity_before(curr, left))) pick_next_entity() 3217 left = curr; pick_next_entity() 3228 if (se == curr) { pick_next_entity() 3232 if (!second || (curr && entity_before(curr, second))) pick_next_entity() 3233 second = curr; pick_next_entity() 3279 cfs_rq->curr = NULL; put_prev_entity() 3283 entity_tick(struct cfs_rq *cfs_rq, struct sched_entity *curr, int queued) entity_tick() argument 3293 update_load_avg(curr, 1); entity_tick() 3314 check_preempt_tick(cfs_rq, curr); entity_tick() 3483 if (!assign_cfs_rq_runtime(cfs_rq) && likely(cfs_rq->curr)) __account_cfs_rq_runtime() 3652 if (rq->curr == rq->idle && rq->cfs.nr_running) 3891 if (!cfs_rq->runtime_enabled || cfs_rq->curr) check_enqueue_throttle() 4095 if (rq->curr == p) hrtick_start_fair() 4110 struct task_struct *curr = rq->curr; hrtick_update() local 4112 if (!hrtick_enabled(rq) || curr->sched_class != &fair_sched_class) hrtick_update() 4115 if (cfs_rq_of(&curr->se)->nr_running < sched_nr_latency) hrtick_update() 4116 hrtick_start_fair(rq, curr); hrtick_update() 5038 wakeup_gran(struct sched_entity *curr, struct sched_entity *se) wakeup_gran() argument 5043 * Since its curr running now, convert the gran from real-time wakeup_gran() 5046 * By using 'se' instead of 'curr' we penalize light tasks, so wakeup_gran() 5047 * they get preempted easier. That is, if 'se' < 'curr' then wakeup_gran() 5049 * lighter, if otoh 'se' > 'curr' then the resulting gran will wakeup_gran() 5059 * Should 'se' preempt 'curr'. 5073 wakeup_preempt_entity(struct sched_entity *curr, struct sched_entity *se) wakeup_preempt_entity() argument 5075 s64 gran, vdiff = curr->vruntime - se->vruntime; wakeup_preempt_entity() 5080 gran = wakeup_gran(curr, se); wakeup_preempt_entity() 5116 struct task_struct *curr = rq->curr; check_preempt_wakeup() local 5117 struct sched_entity *se = &curr->se, *pse = &p->se; check_preempt_wakeup() 5118 struct cfs_rq *cfs_rq = task_cfs_rq(curr); check_preempt_wakeup() 5143 * Note: this also catches the edge-case of curr being in a throttled check_preempt_wakeup() 5145 * enqueue of curr) will have resulted in resched being set. This check_preempt_wakeup() 5149 if (test_tsk_need_resched(curr)) check_preempt_wakeup() 5153 if (unlikely(curr->policy == SCHED_IDLE) && check_preempt_wakeup() 5190 if (unlikely(!se->on_rq || curr == rq->idle)) check_preempt_wakeup() 5222 struct sched_entity *curr = cfs_rq->curr; pick_next_task_fair() local 5226 * have to consider cfs_rq->curr. If it is still a runnable pick_next_task_fair() 5230 if (curr) { pick_next_task_fair() 5231 if (curr->on_rq) pick_next_task_fair() 5234 curr = NULL; pick_next_task_fair() 5246 se = pick_next_entity(cfs_rq, curr); pick_next_task_fair() 5349 struct task_struct *curr = rq->curr; yield_task_fair() local 5350 struct cfs_rq *cfs_rq = task_cfs_rq(curr); yield_task_fair() 5351 struct sched_entity *se = &curr->se; yield_task_fair() 5361 if (curr->policy != SCHED_BATCH) { yield_task_fair() 7124 * if the curr task on busiest cpu can't be load_balance() 7128 tsk_cpus_allowed(busiest->curr))) { load_balance() 7875 static void task_tick_fair(struct rq *rq, struct task_struct *curr, int queued) task_tick_fair() argument 7878 struct sched_entity *se = &curr->se; task_tick_fair() 7886 task_tick_numa(rq, curr); 7897 struct sched_entity *se = &p->se, *curr; task_fork_fair() local 7907 curr = cfs_rq->curr; task_fork_fair() 7921 if (curr) task_fork_fair() 7922 se->vruntime = curr->vruntime; task_fork_fair() 7925 if (sysctl_sched_child_runs_first && curr && entity_before(curr, se)) { task_fork_fair() 7930 swap(curr->vruntime, se->vruntime); task_fork_fair() 7954 if (rq->curr == p) { prio_changed_fair() 8041 if (rq->curr == p) switched_to_fair() 8050 * This routine is mostly called to set cfs_rq->curr field when a task 8055 struct sched_entity *se = &rq->curr->se; set_curr_task_fair()
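The wakeup_preempt_entity() matches above (lines 5073-5080) contain CFS's core wakeup-preemption decision: the waking entity preempts curr only if it is behind in virtual runtime by more than a granularity, which damps over-eager preemption. A sketch of that decision follows; the fixed granularity constant is illustrative only, whereas the kernel derives it in wakeup_gran() from sysctl_sched_wakeup_granularity scaled by the waking task's weight.

    #include <stdint.h>

    typedef int64_t s64;

    struct sched_entity { s64 vruntime; };

    #define WAKEUP_GRAN 1000000   /* 1 ms in ns, illustrative only */

    /* <0: don't preempt, 0: preempt only buddies, >0: preempt */
    static int wakeup_preempt_entity(struct sched_entity *curr,
                                     struct sched_entity *se)
    {
            s64 vdiff = curr->vruntime - se->vruntime;

            if (vdiff <= 0)
                    return -1;          /* curr is not ahead; keep running it */

            if (vdiff > WAKEUP_GRAN)
                    return 1;           /* se is far enough behind to preempt */

            return 0;
    }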
idle_task.c | 54 static void task_tick_idle(struct rq *rq, struct task_struct *curr, int queued) task_tick_idle() argument
cputime.c | 47 void irqtime_account_irq(struct task_struct *curr) irqtime_account_irq() argument 71 else if (in_serving_softirq() && curr != this_cpu_ksoftirqd()) irqtime_account_irq() 578 static void cputime_adjust(struct task_cputime *curr, cputime_adjust() argument 587 rtime = nsecs_to_cputime(curr->sum_exec_runtime); cputime_adjust() 600 stime = curr->stime; cputime_adjust() 601 utime = curr->utime; cputime_adjust()
wait.c | 68 wait_queue_t *curr, *next; __wake_up_common() local 70 list_for_each_entry_safe(curr, next, &q->task_list, task_list) { __wake_up_common() 71 unsigned flags = curr->flags; __wake_up_common() 73 if (curr->func(curr, mode, wake_flags, key) && __wake_up_common()
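The __wake_up_common() matches above show the classic safe-iteration wakeup loop: both next and flags are sampled before the callback runs, because a woken waiter may remove itself (or be freed). A sketch with the kernel's list machinery replaced by a hand-rolled singly linked list; waiter layout and the func() return convention follow the listing, the rest is scaffolding.

    #include <stddef.h>

    #define WQ_FLAG_EXCLUSIVE 0x01

    struct waiter {
            struct waiter *next;
            unsigned int flags;                        /* e.g. EXCLUSIVE bit */
            int (*func)(struct waiter *w, void *key);  /* returns 1 if woken */
    };

    static void wake_up_common(struct waiter *head, int nr_exclusive, void *key)
    {
            struct waiter *curr, *next;

            for (curr = head; curr; curr = next) {
                    unsigned int flags = curr->flags;

                    next = curr->next;    /* sample before func() can unlink curr */
                    if (curr->func(curr, key) &&
                        (flags & WQ_FLAG_EXCLUSIVE) && !--nr_exclusive)
                            break;        /* enough exclusive waiters woken */
            }
    }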
debug.c | 114 if (rq->curr == p) print_task() 301 SEQ_printf(m, " .%-30s: %ld\n", "curr->pid", (long)(task_pid_nr(rq->curr))); print_cpu()
core.c | 328 rq->curr->sched_class->task_tick(rq, rq->curr, 1); hrtick() 576 struct task_struct *curr = rq->curr; resched_curr() local 581 if (test_tsk_need_resched(curr)) resched_curr() 587 set_tsk_need_resched(curr); resched_curr() 592 if (set_nr_and_not_polling(curr)) resched_curr() 1029 if (p->sched_class == rq->curr->sched_class) { check_preempt_curr() 1030 rq->curr->sched_class->check_preempt_curr(rq, p, flags); check_preempt_curr() 1033 if (class == rq->curr->sched_class) for_each_class() 1046 if (task_on_rq_queued(rq->curr) && test_tsk_need_resched(rq->curr)) 1866 if (!is_idle_task(rcu_dereference(rq->curr))) wake_up_if_idle() 1873 if (is_idle_task(rq->curr)) wake_up_if_idle() 2449 static void __fire_sched_in_preempt_notifiers(struct task_struct *curr) __fire_sched_in_preempt_notifiers() argument 2453 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link) __fire_sched_in_preempt_notifiers() 2457 static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr) fire_sched_in_preempt_notifiers() argument 2460 __fire_sched_in_preempt_notifiers(curr); fire_sched_in_preempt_notifiers() 2464 __fire_sched_out_preempt_notifiers(struct task_struct *curr, __fire_sched_out_preempt_notifiers() argument 2469 hlist_for_each_entry(notifier, &curr->preempt_notifiers, link) __fire_sched_out_preempt_notifiers() 2474 fire_sched_out_preempt_notifiers(struct task_struct *curr, fire_sched_out_preempt_notifiers() argument 2478 __fire_sched_out_preempt_notifiers(curr, next); fire_sched_out_preempt_notifiers() 2483 static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr) fire_sched_in_preempt_notifiers() argument 2488 fire_sched_out_preempt_notifiers(struct task_struct *curr, fire_sched_out_preempt_notifiers() argument 2843 * Must be ->curr _and_ ->on_rq. If dequeued, we would task_sched_runtime() 2865 struct task_struct *curr = rq->curr; scheduler_tick() local 2871 curr->sched_class->task_tick(rq, curr, 0); scheduler_tick() 3114 prev = rq->curr; __schedule() 3177 rq->curr = next; __schedule() 3346 int default_wake_function(wait_queue_t *curr, unsigned mode, int wake_flags, default_wake_function() argument 3349 return try_to_wake_up(curr->private, mode, wake_flags); default_wake_function() 3389 WARN_ON(p != rq->curr); rt_mutex_setprio() 3572 if (rq->curr != rq->idle) idle_cpu() 4714 struct task_struct *curr = current; yield_to() local 4739 if (!curr->sched_class->yield_to_task) yield_to() 4742 if (curr->sched_class != p->sched_class) yield_to() 4748 yielded = curr->sched_class->yield_to_task(rq, p, preempt); yield_to() 5021 rq->curr = rq->idle = idle; init_idle()
sched.h | 361 * 'curr' points to currently running entity on this cfs_rq. 364 struct sched_entity *curr, *next, *last, *skip; member in struct:cfs_rq 442 int curr; /* highest queued rt task prio */ member in struct:rt_rq::__anon14821 492 u64 curr; member in struct:dl_rq::__anon14822 604 struct task_struct *curr, *idle, *stop; member in struct:rq 707 #define cpu_curr(cpu) (cpu_rq(cpu)->curr) 1027 return rq->curr == p; task_current()
idle.c | 216 * rq->curr != rq->idle). This means that, if rq->idle has cpu_idle_loop()
/linux-4.4.14/sound/firewire/oxfw/

oxfw-proc.c | 15 struct snd_oxfw_stream_formation formation, curr; proc_read_formation() local 23 &curr); proc_read_formation() 38 if (memcmp(&formation, &curr, sizeof(curr)) == 0) proc_read_formation() 53 &curr); proc_read_formation() 68 if (memcmp(&formation, &curr, sizeof(curr)) == 0) proc_read_formation()
/linux-4.4.14/include/linux/

futex.h | 15 handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi); 56 extern void exit_robust_list(struct task_struct *curr); 57 extern void exit_pi_state_list(struct task_struct *curr); 64 static inline void exit_robust_list(struct task_struct *curr) exit_robust_list() argument 67 static inline void exit_pi_state_list(struct task_struct *curr) exit_pi_state_list() argument
lockdep.h | 477 extern void print_irqtrace_events(struct task_struct *curr); 479 static inline void print_irqtrace_events(struct task_struct *curr) print_irqtrace_events() argument
/linux-4.4.14/lib/

sg_split.c | 29 struct sg_splitter *curr = splitters; sg_calculate_split() local 45 if (!curr->in_sg0) { for_each_sg() 46 curr->in_sg0 = sg; for_each_sg() 47 curr->skip_sg0 = skip; for_each_sg() 50 curr->nents++; for_each_sg() 51 curr->length_last_sg = len; for_each_sg() 54 curr++; for_each_sg() 59 curr->in_sg0 = sg; for_each_sg() 60 curr->skip_sg0 = skip; for_each_sg() 61 curr->nents = 1; for_each_sg() 62 curr->length_last_sg = len; for_each_sg() 68 curr++; for_each_sg()
/linux-4.4.14/tools/vm/

page_owner_sort.c | 33 char *curr = buf, *const buf_end = buf + buf_size; read_block() local 35 while (buf_end - curr > 1 && fgets(curr, buf_end - curr, fin)) { read_block() 36 if (*curr == '\n') /* empty line */ read_block() 37 return curr - buf; read_block() 38 curr += strlen(curr); read_block()
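The read_block() loop above accumulates lines into buf until an empty line or EOF. Completed into a standalone helper below; the loop body is taken from the listing, while the long return type and the -1 end-of-input convention are assumptions.

    #include <stdio.h>
    #include <string.h>

    /* Returns bytes placed in buf for one block, or -1 at EOF / full buffer. */
    static long read_block(char *buf, size_t buf_size, FILE *fin)
    {
            char *curr = buf, *const buf_end = buf + buf_size;

            while (buf_end - curr > 1 && fgets(curr, buf_end - curr, fin)) {
                    if (*curr == '\n')          /* empty line ends the block */
                            return curr - buf;
                    curr += strlen(curr);       /* append next line after this */
            }
            return -1;                          /* EOF or buffer exhausted */
    }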
/linux-4.4.14/drivers/input/serio/

hp_sdc.c | 183 hp_sdc_transaction *curr; hp_sdc_take() local 190 curr = hp_sdc.tq[hp_sdc.rcurr]; hp_sdc_take() 193 curr->seq[curr->idx++] = status; hp_sdc_take() 194 curr->seq[curr->idx++] = data; hp_sdc_take() 200 if (curr->seq[curr->actidx] & HP_SDC_ACT_SEMAPHORE) hp_sdc_take() 201 if (curr->act.semaphore) hp_sdc_take() 202 up(curr->act.semaphore); hp_sdc_take() 204 if (curr->seq[curr->actidx] & HP_SDC_ACT_CALLBACK) hp_sdc_take() 205 if (curr->act.irqhook) hp_sdc_take() 206 curr->act.irqhook(irq, dev_id, status, data); hp_sdc_take() 208 curr->actidx = curr->idx; hp_sdc_take() 209 curr->idx++; hp_sdc_take() 316 hp_sdc_transaction *curr; hp_sdc_tasklet() local 319 curr = hp_sdc.tq[hp_sdc.rcurr]; hp_sdc_tasklet() 326 curr->idx += hp_sdc.rqty; hp_sdc_tasklet() 328 tmp = curr->seq[curr->actidx]; hp_sdc_tasklet() 329 curr->seq[curr->actidx] |= HP_SDC_ACT_DEAD; hp_sdc_tasklet() 331 if (curr->act.semaphore) hp_sdc_tasklet() 332 up(curr->act.semaphore); hp_sdc_tasklet() 338 if (curr->act.irqhook) hp_sdc_tasklet() 339 curr->act.irqhook(0, NULL, 0, 0); hp_sdc_tasklet() 342 curr->actidx = curr->idx; hp_sdc_tasklet() 343 curr->idx++; hp_sdc_tasklet() 353 hp_sdc_transaction *curr; hp_sdc_put() local 415 curr = hp_sdc.tq[curridx]; hp_sdc_put() 416 idx = curr->actidx; hp_sdc_put() 418 if (curr->actidx >= curr->endidx) { hp_sdc_put() 427 act = curr->seq[idx]; hp_sdc_put() 430 if (curr->idx >= curr->endidx) { hp_sdc_put() 432 kfree(curr); hp_sdc_put() 442 if (curr->idx != idx) { hp_sdc_put() 447 hp_sdc_status_out8(curr->seq[idx]); hp_sdc_put() 448 curr->idx++; hp_sdc_put() 454 curr->idx++; hp_sdc_put() 460 qty = curr->seq[idx]; hp_sdc_put() 462 if (curr->idx - idx < qty) { hp_sdc_put() 463 hp_sdc_data_out8(curr->seq[curr->idx]); hp_sdc_put() 464 curr->idx++; hp_sdc_put() 466 if (curr->idx - idx >= qty && hp_sdc_put() 478 mask = curr->seq[idx]; hp_sdc_put() 479 if (idx != curr->idx) { hp_sdc_put() 489 w7[0] = (mask & 1) ? curr->seq[++idx] : hp_sdc.r7[0]; hp_sdc_put() 490 w7[1] = (mask & 2) ? curr->seq[++idx] : hp_sdc.r7[1]; hp_sdc_put() 491 w7[2] = (mask & 4) ? curr->seq[++idx] : hp_sdc.r7[2]; hp_sdc_put() 492 w7[3] = (mask & 8) ? curr->seq[++idx] : hp_sdc.r7[3]; hp_sdc_put() 512 curr->idx = idx; hp_sdc_put() 526 curr->idx = idx + 1; hp_sdc_put() 547 /* curr->idx should == idx at this point. */ hp_sdc_put() 548 postcmd = curr->seq[idx]; hp_sdc_put() 549 curr->idx++; hp_sdc_put() 553 hp_sdc.rqty = curr->seq[curr->idx]; hp_sdc_put() 555 curr->idx++; hp_sdc_put() 569 up(curr->act.semaphore); hp_sdc_put() 571 curr->act.irqhook(0,NULL,0,0); hp_sdc_put() 573 if (curr->idx >= curr->endidx) { /* This transaction is over. */ hp_sdc_put() 575 kfree(curr); hp_sdc_put() 578 curr->actidx = idx + 1; hp_sdc_put() 579 curr->idx = idx + 2; hp_sdc_put()
/linux-4.4.14/kernel/time/

timer_stats.c | 169 struct entry **head, *curr, *prev; tstat_lookup() local 172 curr = *head; tstat_lookup() 179 while (curr) { tstat_lookup() 180 if (match_entries(curr, entry)) tstat_lookup() 181 return curr; tstat_lookup() 183 curr = curr->next; tstat_lookup() 189 curr = *head; tstat_lookup() 195 while (curr) { tstat_lookup() 196 if (match_entries(curr, entry)) tstat_lookup() 199 prev = curr; tstat_lookup() 200 curr = curr->next; tstat_lookup() 203 curr = alloc_entry(); tstat_lookup() 204 if (curr) { tstat_lookup() 205 *curr = *entry; tstat_lookup() 206 curr->count = 0; tstat_lookup() 207 curr->next = NULL; tstat_lookup() 208 memcpy(curr->comm, comm, TASK_COMM_LEN); tstat_lookup() 210 smp_mb(); /* Ensure that curr is initialized before insert */ tstat_lookup() 213 prev->next = curr; tstat_lookup() 215 *head = curr; tstat_lookup() 220 return curr; tstat_lookup()
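tstat_lookup() above is a lookup-or-insert over a hash chain: search the chain, and on a miss append a freshly initialised entry. The smp_mb() in the original orders initialisation before the entry becomes reachable by lock-free readers; this single-threaded sketch keeps only a comment in its place, and the key field plus malloc() stand in for match_entries() and alloc_entry().

    #include <stdlib.h>

    struct entry {
            struct entry *next;
            unsigned long key;
            unsigned long count;
    };

    static struct entry *lookup_or_insert(struct entry **head, unsigned long key)
    {
            struct entry *curr = *head, *prev = NULL;

            while (curr) {
                    if (curr->key == key)
                            return curr;        /* hit */
                    prev = curr;
                    curr = curr->next;
            }

            curr = malloc(sizeof(*curr));       /* miss: build a new entry */
            if (curr) {
                    curr->key = key;
                    curr->count = 0;
                    curr->next = NULL;
                    /* original: smp_mb() so curr is initialised before insert */
                    if (prev)
                            prev->next = curr;
                    else
                            *head = curr;
            }
            return curr;
    }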
timer_list.c | 94 struct timerqueue_node *curr; print_active_timers() local 101 curr = timerqueue_getnext(&base->active); print_active_timers() 106 while (curr && i < next) { print_active_timers() 107 curr = timerqueue_iterate_next(curr); print_active_timers() 111 if (curr) { print_active_timers() 113 timer = container_of(curr, struct hrtimer, node); print_active_timers()
/linux-4.4.14/fs/hfs/

bitmap.c | 31 __be32 *curr, *end; hfs_find_set_zero_bits() local 40 curr = bitmap + (offset / 32); hfs_find_set_zero_bits() 44 val = *curr; hfs_find_set_zero_bits() 56 while (++curr < end) { hfs_find_set_zero_bits() 57 val = *curr; hfs_find_set_zero_bits() 70 start = (curr - bitmap) * 32 + i; hfs_find_set_zero_bits() 85 *curr++ = cpu_to_be32(n); hfs_find_set_zero_bits() 88 n = be32_to_cpu(*curr); hfs_find_set_zero_bits() 95 *curr++ = cpu_to_be32(0xffffffff); hfs_find_set_zero_bits() 107 *curr = cpu_to_be32(n); hfs_find_set_zero_bits() 108 *max = (curr - bitmap) * 32 + i - start; hfs_find_set_zero_bits() 195 __be32 *curr; hfs_clear_vbm_bits() local 210 curr = HFS_SB(sb)->bitmap + (start / 32); hfs_clear_vbm_bits() 220 *curr &= cpu_to_be32(mask); hfs_clear_vbm_bits() 223 *curr++ &= cpu_to_be32(mask); hfs_clear_vbm_bits() 229 *curr++ = 0; hfs_clear_vbm_bits() 235 *curr &= cpu_to_be32(mask); hfs_clear_vbm_bits()
/linux-4.4.14/arch/mips/kernel/

csrc-r4k.c | 47 unsigned int prev, curr, i; rdhwr_count_usable() local 56 curr = rdhwr_count(); rdhwr_count_usable() 58 if (curr != prev) rdhwr_count_usable() 61 prev = curr; rdhwr_count_usable()
/linux-4.4.14/kernel/locking/

osq_lock.c | 41 int curr = encode_cpu(smp_processor_id()); osq_wait_next() local 52 if (atomic_read(&lock->tail) == curr && osq_wait_next() 53 atomic_cmpxchg_acquire(&lock->tail, curr, old) == curr) { osq_wait_next() 88 int curr = encode_cpu(smp_processor_id()); osq_lock() local 93 node->cpu = curr; osq_lock() 101 old = atomic_xchg(&lock->tail, curr); osq_lock() 188 int curr = encode_cpu(smp_processor_id()); osq_unlock() local 193 if (likely(atomic_cmpxchg_release(&lock->tail, curr, osq_unlock() 194 OSQ_UNLOCKED_VAL) == curr)) osq_unlock()
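In osq_lock.c above, curr is the current CPU's encoded queue-node id, and the unlock fast path is a single compare-and-swap: if the queue tail still encodes this CPU, nobody queued behind us and one CAS releases the lock. A sketch of that fast path using C11 atomics in place of the kernel's atomic_cmpxchg_release(); the slow path (handing off to the next waiter) is elided.

    #include <stdatomic.h>
    #include <stdbool.h>

    #define OSQ_UNLOCKED_VAL 0

    /* the kernel encodes cpu_nr + 1 so that 0 can mean "unlocked" */
    static int encode_cpu(int cpu_nr) { return cpu_nr + 1; }

    static bool osq_unlock_fastpath(atomic_int *tail, int this_cpu)
    {
            int curr = encode_cpu(this_cpu);

            /* release ordering: prior critical-section stores become visible
             * before the lock reads as free */
            return atomic_compare_exchange_strong_explicit(tail, &curr,
                            OSQ_UNLOCKED_VAL,
                            memory_order_release, memory_order_relaxed);
    }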
lockdep.c | 573 static void lockdep_print_held_locks(struct task_struct *curr) lockdep_print_held_locks() argument 575 int i, depth = curr->lockdep_depth; lockdep_print_held_locks() 578 printk("no locks held by %s/%d.\n", curr->comm, task_pid_nr(curr)); lockdep_print_held_locks() 582 depth, depth > 1 ? "s" : "", curr->comm, task_pid_nr(curr)); lockdep_print_held_locks() 586 print_lock(curr->held_locks + i); lockdep_print_held_locks() 1163 struct task_struct *curr = current; print_circular_bug_header() local 1174 curr->comm, task_pid_nr(curr)); print_circular_bug_header() 1196 struct task_struct *curr = current; print_circular_bug() local 1223 lockdep_print_held_locks(curr); print_circular_bug() 1492 print_bad_irq_dependency(struct task_struct *curr, print_bad_irq_dependency() argument 1513 curr->comm, task_pid_nr(curr), print_bad_irq_dependency() 1514 curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT, print_bad_irq_dependency() 1515 curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT, print_bad_irq_dependency() 1516 curr->hardirqs_enabled, print_bad_irq_dependency() 1517 curr->softirqs_enabled); print_bad_irq_dependency() 1546 lockdep_print_held_locks(curr); print_bad_irq_dependency() 1567 check_usage(struct task_struct *curr, struct held_lock *prev, check_usage() argument 1593 return print_bad_irq_dependency(curr, &this, &that, check_usage() 1640 static int check_irq_usage(struct task_struct *curr, struct held_lock *prev, check_irq_usage() argument 1649 if (!check_usage(curr, prev, next, bit, check_irq_usage() 1661 if (!check_usage(curr, prev, next, bit, check_irq_usage() 1669 check_prev_add_irq(struct task_struct *curr, struct held_lock *prev, check_prev_add_irq() argument 1673 if (!check_irq_usage(curr, prev, next, LOCK_USED_IN_##__STATE)) \ check_prev_add_irq() 1696 check_prev_add_irq(struct task_struct *curr, struct held_lock *prev, check_prev_add_irq() argument 1730 print_deadlock_bug(struct task_struct *curr, struct held_lock *prev, print_deadlock_bug() argument 1742 curr->comm, task_pid_nr(curr)); print_deadlock_bug() 1749 lockdep_print_held_locks(curr); print_deadlock_bug() 1766 check_deadlock(struct task_struct *curr, struct held_lock *next, check_deadlock() argument 1773 for (i = 0; i < curr->lockdep_depth; i++) { check_deadlock() 1774 prev = curr->held_locks + i; check_deadlock() 1796 return print_deadlock_bug(curr, prev, next); check_deadlock() 1824 check_prev_add(struct task_struct *curr, struct held_lock *prev, check_prev_add() argument 1857 if (!check_prev_add_irq(curr, prev, next)) check_prev_add() 1924 * The ones that are relevant are (in increasing distance from curr): 1929 check_prevs_add(struct task_struct *curr, struct held_lock *next) check_prevs_add() argument 1931 int depth = curr->lockdep_depth; check_prevs_add() 1946 if (curr->held_locks[depth].irq_context != check_prevs_add() 1947 curr->held_locks[depth-1].irq_context) check_prevs_add() 1951 int distance = curr->lockdep_depth - depth + 1; check_prevs_add() 1952 hlock = curr->held_locks + depth - 1; check_prevs_add() 1958 if (!check_prev_add(curr, hlock, next, check_prevs_add() 1979 if (curr->held_locks[depth].irq_context != check_prevs_add() 1980 curr->held_locks[depth-1].irq_context) check_prevs_add() 2015 static inline int lookup_chain_cache(struct task_struct *curr, lookup_chain_cache() argument 2078 for (i = curr->lockdep_depth - 1; i >= 0; i--) { 2079 hlock_curr = curr->held_locks + i; 2084 chain->depth = curr->lockdep_depth + 1 - i; 2089 int lock_id = curr->held_locks[i].class_idx - 1; 2101 static int validate_chain(struct task_struct *curr, struct lockdep_map *lock, validate_chain() argument 2115 lookup_chain_cache(curr, hlock, chain_key)) { validate_chain() 2128 int ret = check_deadlock(curr, hlock, lock, hlock->read); validate_chain() 2144 if (!check_prevs_add(curr, hlock)) validate_chain() 2155 static inline int validate_chain(struct task_struct *curr, validate_chain() argument 2167 static void check_chain_key(struct task_struct *curr) check_chain_key() argument 2174 for (i = 0; i < curr->lockdep_depth; i++) { check_chain_key() 2175 hlock = curr->held_locks + i; check_chain_key() 2183 curr->lockdep_depth, i, check_chain_key() 2201 if (chain_key != curr->curr_chain_key) { check_chain_key() 2208 curr->lockdep_depth, i, check_chain_key() 2210 (unsigned long long)curr->curr_chain_key); check_chain_key() 2234 print_usage_bug(struct task_struct *curr, struct held_lock *this, print_usage_bug() argument 2250 curr->comm, task_pid_nr(curr), print_usage_bug() 2251 trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT, print_usage_bug() 2252 trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT, print_usage_bug() 2253 trace_hardirqs_enabled(curr), print_usage_bug() 2254 trace_softirqs_enabled(curr)); print_usage_bug() 2260 print_irqtrace_events(curr); print_usage_bug() 2264 lockdep_print_held_locks(curr); print_usage_bug() 2276 valid_state(struct task_struct *curr, struct held_lock *this, valid_state() argument 2280 return print_usage_bug(curr, this, bad_bit, new_bit); valid_state() 2284 static int mark_lock(struct task_struct *curr, struct held_lock *this, 2293 print_irq_inversion_bug(struct task_struct *curr, print_irq_inversion_bug() argument 2311 curr->comm, task_pid_nr(curr)); print_irq_inversion_bug() 2340 lockdep_print_held_locks(curr); print_irq_inversion_bug() 2358 check_usage_forwards(struct task_struct *curr, struct held_lock *this, check_usage_forwards() argument 2373 return print_irq_inversion_bug(curr, &root, target_entry, check_usage_forwards() 2382 check_usage_backwards(struct task_struct *curr, struct held_lock *this, check_usage_backwards() argument 2397 return print_irq_inversion_bug(curr, &root, target_entry, check_usage_backwards() 2401 void print_irqtrace_events(struct task_struct *curr) print_irqtrace_events() argument 2403 printk("irq event stamp: %u\n", curr->irq_events); print_irqtrace_events() 2404 printk("hardirqs last enabled at (%u): ", curr->hardirq_enable_event); print_irqtrace_events() 2405 print_ip_sym(curr->hardirq_enable_ip); print_irqtrace_events() 2406 printk("hardirqs last disabled at (%u): ", curr->hardirq_disable_event); print_irqtrace_events() 2407 print_ip_sym(curr->hardirq_disable_ip); print_irqtrace_events() 2408 printk("softirqs last enabled at (%u): ", curr->softirq_enable_event); print_irqtrace_events() 2409 print_ip_sym(curr->softirq_enable_ip); print_irqtrace_events() 2410 printk("softirqs last disabled at (%u): ", curr->softirq_disable_event); print_irqtrace_events() 2411 print_ip_sym(curr->softirq_disable_ip); print_irqtrace_events() 2457 mark_lock_irq(struct task_struct *curr, struct held_lock *this, mark_lock_irq() argument 2478 if (!valid_state(curr, this, new_bit, excl_bit)) mark_lock_irq() 2486 !usage(curr, this, excl_bit, state_name(new_bit & ~1))) mark_lock_irq() 2493 if (!valid_state(curr, this, new_bit, excl_bit + 1)) mark_lock_irq() 2497 !usage(curr, this, excl_bit + 1, mark_lock_irq() 2518 mark_held_locks(struct task_struct *curr, enum mark_type mark) mark_held_locks() argument 2524 for (i = 0; i < curr->lockdep_depth; i++) { mark_held_locks() 2525 hlock = curr->held_locks + i; mark_held_locks() 2536 if (!mark_lock(curr, hlock, usage_bit)) mark_held_locks() 2548 struct task_struct *curr = current; __trace_hardirqs_on_caller() local 2551 curr->hardirqs_enabled = 1; __trace_hardirqs_on_caller() 2557 if (!mark_held_locks(curr, HARDIRQ)) __trace_hardirqs_on_caller() 2564 if (curr->softirqs_enabled) __trace_hardirqs_on_caller() 2565 if (!mark_held_locks(curr, SOFTIRQ)) __trace_hardirqs_on_caller() 2568 curr->hardirq_enable_ip = ip; __trace_hardirqs_on_caller() 2569 curr->hardirq_enable_event = ++curr->irq_events; __trace_hardirqs_on_caller() 2628 struct task_struct *curr = current; trace_hardirqs_off_caller() local 2642 if (curr->hardirqs_enabled) { trace_hardirqs_off_caller() 2646 curr->hardirqs_enabled = 0; trace_hardirqs_off_caller() 2647 curr->hardirq_disable_ip = ip; trace_hardirqs_off_caller() 2648 curr->hardirq_disable_event = ++curr->irq_events; trace_hardirqs_off_caller() 2666 struct task_struct *curr = current; trace_softirqs_on() local 2678 if (curr->softirqs_enabled) { trace_softirqs_on() 2687 curr->softirqs_enabled = 1; trace_softirqs_on() 2688 curr->softirq_enable_ip = ip; trace_softirqs_on() 2689 curr->softirq_enable_event = ++curr->irq_events; trace_softirqs_on() 2696 if (curr->hardirqs_enabled) trace_softirqs_on() 2697 mark_held_locks(curr, SOFTIRQ); trace_softirqs_on() 2706 struct task_struct *curr = current; trace_softirqs_off() local 2717 if (curr->softirqs_enabled) { trace_softirqs_off() 2721 curr->softirqs_enabled = 0; trace_softirqs_off() 2722 curr->softirq_disable_ip = ip; trace_softirqs_off() 2723 curr->softirq_disable_event = ++curr->irq_events; trace_softirqs_off() 2735 struct task_struct *curr = current; __lockdep_trace_alloc() local 2745 if ((curr->flags & PF_MEMALLOC) && !(gfp_mask & __GFP_NOMEMALLOC)) __lockdep_trace_alloc() 2758 mark_held_locks(curr, RECLAIM_FS); __lockdep_trace_alloc() 2778 static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock) mark_irqflags() argument 2786 if (curr->hardirq_context) mark_irqflags() 2787 if (!mark_lock(curr, hlock, mark_irqflags() 2790 if (curr->softirq_context) mark_irqflags() 2791 if (!mark_lock(curr, hlock, mark_irqflags() 2795 if (curr->hardirq_context) mark_irqflags() 2796 if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ)) mark_irqflags() 2798 if (curr->softirq_context) mark_irqflags() 2799 if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ)) mark_irqflags() 2805 if (!mark_lock(curr, hlock, mark_irqflags() 2808 if (curr->softirqs_enabled) mark_irqflags() 2809 if (!mark_lock(curr, hlock, mark_irqflags() 2813 if (!mark_lock(curr, hlock, mark_irqflags() 2816 if (curr->softirqs_enabled) mark_irqflags() 2817 if (!mark_lock(curr, hlock, mark_irqflags() 2829 if (!hlock->trylock && (curr->lockdep_reclaim_gfp & __GFP_FS)) { mark_irqflags() 2831 if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS_READ)) mark_irqflags() 2834 if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS)) mark_irqflags() 2842 static int separate_irq_context(struct task_struct *curr, separate_irq_context() argument 2845 unsigned int depth = curr->lockdep_depth; separate_irq_context() 2850 hlock->irq_context = 2*(curr->hardirq_context ? 1 : 0) + separate_irq_context() 2851 curr->softirq_context; separate_irq_context() 2855 prev_hlock = curr->held_locks + depth-1; separate_irq_context() 2870 int mark_lock_irq(struct task_struct *curr, struct held_lock *this, mark_lock_irq() argument 2877 static inline int mark_irqflags(struct task_struct *curr, mark_irqflags() argument 2883 static inline int separate_irq_context(struct task_struct *curr, separate_irq_context() argument 2898 static int mark_lock(struct task_struct *curr, struct held_lock *this, mark_lock() argument 2933 ret = mark_lock_irq(curr, this, new_bit); mark_lock() 2955 print_irqtrace_events(curr); mark_lock() 3029 print_lock_nested_lock_not_held(struct task_struct *curr, print_lock_nested_lock_not_held() argument 3044 printk("%s/%d is trying to lock:\n", curr->comm, task_pid_nr(curr)); print_lock_nested_lock_not_held() 3054 lockdep_print_held_locks(curr); print_lock_nested_lock_not_held() 3073 struct task_struct *curr = current; __lock_acquire() local 3119 depth = curr->lockdep_depth; __lock_acquire() 3129 hlock = curr->held_locks + depth - 1; __lock_acquire() 3140 hlock = curr->held_locks + depth; __lock_acquire() 3162 if (check && !mark_irqflags(curr, hlock)) __lock_acquire() 3166 if (!mark_lock(curr, hlock, LOCK_USED)) __lock_acquire() 3186 chain_key = curr->curr_chain_key; __lock_acquire() 3197 if (separate_irq_context(curr, hlock)) { __lock_acquire() 3204 return print_lock_nested_lock_not_held(curr, hlock, ip); __lock_acquire() 3206 if (!validate_chain(curr, lock, hlock, chain_head, chain_key)) __lock_acquire() 3209 curr->curr_chain_key = chain_key; __lock_acquire() 3210 curr->lockdep_depth++; __lock_acquire() 3211 check_chain_key(curr); __lock_acquire() 3216 if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) { __lock_acquire() 3220 curr->lockdep_depth, MAX_LOCK_DEPTH); __lock_acquire() 3229 if (unlikely(curr->lockdep_depth > max_lockdep_depth)) __lock_acquire() 3230 max_lockdep_depth = curr->lockdep_depth; __lock_acquire() 3236 print_unlock_imbalance_bug(struct task_struct *curr, struct lockdep_map *lock, print_unlock_imbalance_bug() argument 3250 curr->comm, task_pid_nr(curr)); print_unlock_imbalance_bug() 3256 lockdep_print_held_locks(curr); print_unlock_imbalance_bug() 3304 struct task_struct *curr = current; __lock_set_class() local 3310 depth = curr->lockdep_depth; __lock_set_class() 3320 hlock = curr->held_locks + i; __lock_set_class() 3330 return print_unlock_imbalance_bug(curr, lock, ip); __lock_set_class() 3337 curr->lockdep_depth = i; __lock_set_class() 3338 curr->curr_chain_key = hlock->prev_chain_key; __lock_set_class() 3341 hlock = curr->held_locks + i; __lock_set_class() 3354 if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth)) __lock_set_class() 3369 struct task_struct *curr = current; __lock_release() local 3377 depth = curr->lockdep_depth; __lock_release() 3383 return print_unlock_imbalance_bug(curr, lock, ip); __lock_release() 3391 hlock = curr->held_locks + i; __lock_release() 3401 return print_unlock_imbalance_bug(curr, lock, ip); __lock_release() 3427 curr->lockdep_depth = i; __lock_release() 3428 curr->curr_chain_key = hlock->prev_chain_key; __lock_release() 3431 hlock = curr->held_locks + i; __lock_release() 3444 if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1)) __lock_release() 3452 struct task_struct *curr = current; __lock_is_held() local 3455 for (i = 0; i < curr->lockdep_depth; i++) { __lock_is_held() 3456 struct held_lock *hlock = curr->held_locks + i; __lock_is_held() 3467 struct task_struct *curr = current; __lock_pin_lock() local 3473 for (i = 0; i < curr->lockdep_depth; i++) { __lock_pin_lock() 3474 struct held_lock *hlock = curr->held_locks + i; __lock_pin_lock() 3487 struct task_struct *curr = current; __lock_unpin_lock() local 3493 for (i = 0; i < curr->lockdep_depth; i++) { __lock_unpin_lock() 3494 struct held_lock *hlock = curr->held_locks + i; __lock_unpin_lock() 3677 print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock, print_lock_contention_bug() argument 3691 curr->comm, task_pid_nr(curr)); print_lock_contention_bug() 3697 lockdep_print_held_locks(curr); print_lock_contention_bug() 3708 struct task_struct *curr = current; __lock_contended() local 3714 depth = curr->lockdep_depth; __lock_contended() 3724 hlock = curr->held_locks + i; __lock_contended() 3734 print_lock_contention_bug(curr, lock, ip); __lock_contended() 3760 struct task_struct *curr = current; __lock_acquired() local 3767 depth = curr->lockdep_depth; __lock_acquired() 3777 hlock = curr->held_locks + i; __lock_acquired() 3787 print_lock_contention_bug(curr, lock, _RET_IP_); __lock_acquired() 4075 print_freed_lock_bug(struct task_struct *curr, const void *mem_from, print_freed_lock_bug() argument 4089 curr->comm, task_pid_nr(curr), mem_from, mem_to-1); print_freed_lock_bug() 4091 lockdep_print_held_locks(curr); print_freed_lock_bug() 4111 struct task_struct *curr = current; debug_check_no_locks_freed() local 4120 for (i = 0; i < curr->lockdep_depth; i++) { debug_check_no_locks_freed() 4121 hlock = curr->held_locks + i; debug_check_no_locks_freed() 4127 print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock); debug_check_no_locks_freed() 4235 struct task_struct *curr = current; lockdep_sys_exit() local 4237 if (unlikely(curr->lockdep_depth)) { lockdep_sys_exit() 4246 curr->comm, curr->pid); lockdep_sys_exit() 4247 lockdep_print_held_locks(curr); lockdep_sys_exit() 4253 struct task_struct *curr = current; lockdep_rcu_suspicious() local 4296 lockdep_print_held_locks(curr); lockdep_rcu_suspicious()
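Nearly every lockdep routine in this file walks the same per-task array, curr->held_locks[0 .. lockdep_depth-1]. A skeleton of lockdep_print_held_locks() above, with struct layout and print formatting as stand-ins for the kernel's held_lock and print_lock():

    #include <stdio.h>

    struct held_lock { const char *name; };

    struct task {
            const char *comm;
            int pid;
            int lockdep_depth;               /* locks currently held */
            struct held_lock held_locks[48]; /* kernel: MAX_LOCK_DEPTH */
    };

    static void print_held_locks(const struct task *curr)
    {
            int depth = curr->lockdep_depth;

            if (!depth) {
                    printf("no locks held by %s/%d.\n", curr->comm, curr->pid);
                    return;
            }
            printf("%d lock%s held by %s/%d:\n",
                   depth, depth > 1 ? "s" : "", curr->comm, curr->pid);
            for (int i = 0; i < depth; i++)
                    printf(" #%d: %s\n", i, curr->held_locks[i].name);
    }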
/linux-4.4.14/drivers/char/agp/

generic.c | 162 * @curr: agp_memory pointer to be freed. 167 void agp_free_memory(struct agp_memory *curr) agp_free_memory() argument 171 if (curr == NULL) agp_free_memory() 174 if (curr->is_bound) agp_free_memory() 175 agp_unbind_memory(curr); agp_free_memory() 177 if (curr->type >= AGP_USER_TYPES) { agp_free_memory() 178 agp_generic_free_by_type(curr); agp_free_memory() 182 if (curr->type != 0) { agp_free_memory() 183 curr->bridge->driver->free_by_type(curr); agp_free_memory() 186 if (curr->page_count != 0) { agp_free_memory() 187 if (curr->bridge->driver->agp_destroy_pages) { agp_free_memory() 188 curr->bridge->driver->agp_destroy_pages(curr); agp_free_memory() 191 for (i = 0; i < curr->page_count; i++) { agp_free_memory() 192 curr->bridge->driver->agp_destroy_page( agp_free_memory() 193 curr->pages[i], agp_free_memory() 196 for (i = 0; i < curr->page_count; i++) { agp_free_memory() 197 curr->bridge->driver->agp_destroy_page( agp_free_memory() 198 curr->pages[i], agp_free_memory() 203 agp_free_key(curr->key); agp_free_memory() 204 agp_free_page_array(curr); agp_free_memory() 205 kfree(curr); agp_free_memory() 405 * @curr: agp_memory pointer 411 int agp_bind_memory(struct agp_memory *curr, off_t pg_start) agp_bind_memory() argument 415 if (curr == NULL) agp_bind_memory() 418 if (curr->is_bound) { agp_bind_memory() 419 printk(KERN_INFO PFX "memory %p is already bound!\n", curr); agp_bind_memory() 422 if (!curr->is_flushed) { agp_bind_memory() 423 curr->bridge->driver->cache_flush(); agp_bind_memory() 424 curr->is_flushed = true; agp_bind_memory() 427 ret_val = curr->bridge->driver->insert_memory(curr, pg_start, curr->type); agp_bind_memory() 432 curr->is_bound = true; agp_bind_memory() 433 curr->pg_start = pg_start; agp_bind_memory() 435 list_add(&curr->mapped_list, &agp_bridge->mapped_list); agp_bind_memory() 446 * @curr: agp_memory pointer to be removed from the GATT. 451 int agp_unbind_memory(struct agp_memory *curr) agp_unbind_memory() argument 455 if (curr == NULL) agp_unbind_memory() 458 if (!curr->is_bound) { agp_unbind_memory() 459 printk(KERN_INFO PFX "memory %p was not bound!\n", curr); agp_unbind_memory() 463 ret_val = curr->bridge->driver->remove_memory(curr, curr->pg_start, curr->type); agp_unbind_memory() 468 curr->is_bound = false; agp_unbind_memory() 469 curr->pg_start = 0; agp_unbind_memory() 470 spin_lock(&curr->bridge->mapped_lock); agp_unbind_memory() 471 list_del(&curr->mapped_list); agp_unbind_memory() 472 spin_unlock(&curr->bridge->mapped_lock); agp_unbind_memory() 1159 void agp_generic_free_by_type(struct agp_memory *curr) agp_generic_free_by_type() argument 1161 agp_free_page_array(curr); agp_generic_free_by_type() 1162 agp_free_key(curr->key); agp_generic_free_by_type() 1163 kfree(curr); agp_generic_free_by_type()
frontend.c | 49 struct agp_memory *curr; agp_find_mem_by_key() local 54 curr = agp_fe.current_controller->pool; agp_find_mem_by_key() 56 while (curr != NULL) { agp_find_mem_by_key() 57 if (curr->key == key) agp_find_mem_by_key() 59 curr = curr->next; agp_find_mem_by_key() 62 DBG("key=%d -> mem=%p", key, curr); agp_find_mem_by_key() 63 return curr; agp_find_mem_by_key() 217 struct agp_file_private *curr; agp_find_private() local 219 curr = agp_fe.file_priv_list; agp_find_private() 221 while (curr != NULL) { agp_find_private() 222 if (curr->my_pid == pid) agp_find_private() 223 return curr; agp_find_private() 224 curr = curr->next; agp_find_private()
intel-gtt.c | 275 static void intel_i810_free_by_type(struct agp_memory *curr) intel_i810_free_by_type() argument 277 agp_free_key(curr->key); intel_i810_free_by_type() 278 if (curr->type == AGP_PHYS_MEMORY) { intel_i810_free_by_type() 279 if (curr->page_count == 4) intel_i810_free_by_type() 280 i8xx_destroy_pages(curr->pages[0]); intel_i810_free_by_type() 282 agp_bridge->driver->agp_destroy_page(curr->pages[0], intel_i810_free_by_type() 284 agp_bridge->driver->agp_destroy_page(curr->pages[0], intel_i810_free_by_type() 287 agp_free_page_array(curr); intel_i810_free_by_type() 289 kfree(curr); intel_i810_free_by_type()
agp.h | 200 void agp_generic_free_by_type(struct agp_memory *curr);
/linux-4.4.14/mm/

vmacache.c | 36 * mm seqnum is already set and curr's will for_each_process_thread() 68 struct task_struct *curr; vmacache_valid() local 73 curr = current; vmacache_valid() 74 if (mm->vmacache_seqnum != curr->vmacache_seqnum) { vmacache_valid() 79 curr->vmacache_seqnum = mm->vmacache_seqnum; vmacache_valid() 80 vmacache_flush(curr); vmacache_valid()
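The vmacache_valid() matches above are a generation-number check: the shared mm bumps a sequence number on every change, each task caches the number it last saw, and a mismatch forces the task to resync and drop its private cache. Field names below follow the listing; the fixed-size slot array and memset() stand in for the kernel's per-task cache and vmacache_flush().

    #include <stdbool.h>
    #include <string.h>

    struct mm { unsigned int vmacache_seqnum; };

    struct task {
            unsigned int vmacache_seqnum;
            void *vmacache[4];              /* per-task VMA cache slots */
    };

    static bool vmacache_valid(struct task *curr, const struct mm *mm)
    {
            if (mm->vmacache_seqnum != curr->vmacache_seqnum) {
                    /* mm changed under us: resync and invalidate the cache */
                    curr->vmacache_seqnum = mm->vmacache_seqnum;
                    memset(curr->vmacache, 0, sizeof(curr->vmacache));
                    return false;
            }
            return true;
    }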
/linux-4.4.14/tools/perf/ui/gtk/

progress.c | 12 double fraction = p->total ? 1.0 * p->curr / p->total : 0.0; gtk_ui_progress__update() 35 snprintf(buf, sizeof(buf), "%"PRIu64" / %"PRIu64, p->curr, p->total); gtk_ui_progress__update()
/linux-4.4.14/drivers/dma/

img-mdc-dma.c | 265 struct mdc_hw_list_desc *curr, *next; mdc_list_desc_free() local 268 curr = mdesc->list; mdc_list_desc_free() 270 while (curr) { mdc_list_desc_free() 271 next = curr->next_desc; mdc_list_desc_free() 272 next_phys = curr->node_addr; mdc_list_desc_free() 273 dma_pool_free(mdma->desc_pool, curr, curr_phys); mdc_list_desc_free() 274 curr = next; mdc_list_desc_free() 294 struct mdc_hw_list_desc *curr, *prev = NULL; mdc_prep_dma_memcpy() local 309 curr = dma_pool_alloc(mdma->desc_pool, GFP_NOWAIT, &curr_phys); mdc_prep_dma_memcpy() 310 if (!curr) mdc_prep_dma_memcpy() 315 prev->next_desc = curr; mdc_prep_dma_memcpy() 318 mdesc->list = curr; mdc_prep_dma_memcpy() 323 mdc_list_desc_config(mchan, curr, DMA_MEM_TO_MEM, src, dest, mdc_prep_dma_memcpy() 326 prev = curr; mdc_prep_dma_memcpy() 377 struct mdc_hw_list_desc *curr, *prev = NULL; mdc_prep_dma_cyclic() local 404 curr = dma_pool_alloc(mdma->desc_pool, GFP_NOWAIT, mdc_prep_dma_cyclic() 406 if (!curr) mdc_prep_dma_cyclic() 411 mdesc->list = curr; mdc_prep_dma_cyclic() 414 prev->next_desc = curr; mdc_prep_dma_cyclic() 421 mdc_list_desc_config(mchan, curr, dir, mdc_prep_dma_cyclic() 426 mdc_list_desc_config(mchan, curr, dir, mdc_prep_dma_cyclic() 432 prev = curr; mdc_prep_dma_cyclic() 460 struct mdc_hw_list_desc *curr, *prev = NULL; mdc_prep_slave_sg() local 485 curr = dma_pool_alloc(mdma->desc_pool, GFP_NOWAIT, for_each_sg() 487 if (!curr) for_each_sg() 492 mdesc->list = curr; for_each_sg() 495 prev->next_desc = curr; for_each_sg() 502 mdc_list_desc_config(mchan, curr, dir, buf, for_each_sg() 506 mdc_list_desc_config(mchan, curr, dir, for_each_sg() 511 prev = curr; for_each_sg()
pxa_dma.c | 1175 u32 curr, start, len, end, residue = 0; pxad_residue() local 1195 curr = phy_readl_relaxed(chan->phy, DSADR); pxad_residue() 1197 curr = phy_readl_relaxed(chan->phy, DTADR); pxad_residue() 1200 * curr has to be actually read before checking descriptor pxad_residue() 1201 * completion, so that a curr inside a status updater pxad_residue() 1203 * preventing reordering of curr load and the test. pxad_residue() 1220 * which lies inside the boundaries of the curr pxad_residue() 1229 } else if (curr >= start && curr <= end) { pxad_residue() 1230 residue += end - curr; pxad_residue()
mmp_pdma.c | 759 u32 curr, residue = 0; mmp_pdma_residue() local 771 curr = readl(chan->phy->base + DTADR(chan->phy->idx)); mmp_pdma_residue() 773 curr = readl(chan->phy->base + DSADR(chan->phy->idx)); mmp_pdma_residue() 788 * lies inside the boundaries of the curr pointer. All mmp_pdma_residue() 796 } else if (curr >= start && curr <= end) { mmp_pdma_residue() 797 residue += end - curr; mmp_pdma_residue()
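pxad_residue() and mmp_pdma_residue() above share one piece of arithmetic: the hardware exposes the address it is currently transferring (curr); walking the descriptor chain, the descriptor containing curr contributes its untransferred tail, and every descriptor after it contributes its full length. A sketch of just that arithmetic, with an illustrative descriptor layout in place of the drivers' hardware descriptors:

    #include <stdint.h>
    #include <stddef.h>

    struct desc { uint32_t start, len; };

    static uint32_t dma_residue(const struct desc *d, size_t n, uint32_t curr)
    {
            uint32_t residue = 0;
            int passed = 0;                 /* set once curr's desc was found */

            for (size_t i = 0; i < n; i++) {
                    uint32_t start = d[i].start, end = start + d[i].len;

                    if (passed)
                            residue += d[i].len;        /* entirely pending */
                    else if (curr >= start && curr <= end) {
                            residue += end - curr;      /* partial descriptor */
                            passed = 1;
                    }
            }
            return residue;
    }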
/linux-4.4.14/fs/btrfs/

delayed-inode.c | 727 struct btrfs_delayed_item *curr, *next; btrfs_batch_insert_items() local 759 curr = next; btrfs_batch_insert_items() 760 next = __btrfs_next_delayed_item(curr); btrfs_batch_insert_items() 764 if (!btrfs_is_continuous_delayed_item(curr, next)) btrfs_batch_insert_items() 809 list_for_each_entry_safe(curr, next, &head, tree_list) { btrfs_batch_insert_items() 811 write_extent_buffer(leaf, &curr->data, btrfs_batch_insert_items() 813 curr->data_len); btrfs_batch_insert_items() 816 btrfs_delayed_item_release_metadata(root, curr); btrfs_batch_insert_items() 818 list_del(&curr->tree_list); btrfs_batch_insert_items() 819 btrfs_release_delayed_item(curr); btrfs_batch_insert_items() 868 struct btrfs_delayed_item *curr, *prev; btrfs_insert_delayed_items() local 873 curr = __btrfs_first_delayed_insertion_item(node); btrfs_insert_delayed_items() 874 if (!curr) btrfs_insert_delayed_items() 877 ret = btrfs_insert_delayed_item(trans, root, path, curr); btrfs_insert_delayed_items() 883 prev = curr; btrfs_insert_delayed_items() 884 curr = __btrfs_next_delayed_item(prev); btrfs_insert_delayed_items() 885 if (curr && btrfs_is_continuous_delayed_item(prev, curr)) { btrfs_insert_delayed_items() 888 btrfs_batch_insert_items(root, path, curr); btrfs_insert_delayed_items() 907 struct btrfs_delayed_item *curr, *next; btrfs_batch_delete_items() local 934 curr = next; btrfs_batch_delete_items() 935 next = __btrfs_next_delayed_item(curr); btrfs_batch_delete_items() 939 if (!btrfs_is_continuous_delayed_item(curr, next)) btrfs_batch_delete_items() 955 list_for_each_entry_safe(curr, next, &head, tree_list) { btrfs_batch_delete_items() 956 btrfs_delayed_item_release_metadata(root, curr); btrfs_batch_delete_items() 957 list_del(&curr->tree_list); btrfs_batch_delete_items() 958 btrfs_release_delayed_item(curr); btrfs_batch_delete_items() 970 struct btrfs_delayed_item *curr, *prev; btrfs_delete_delayed_items() local 975 curr = __btrfs_first_delayed_deletion_item(node); btrfs_delete_delayed_items() 976 if (!curr) btrfs_delete_delayed_items() 979 ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1); btrfs_delete_delayed_items() 987 prev = curr; btrfs_delete_delayed_items() 988 curr = __btrfs_next_delayed_item(prev); btrfs_delete_delayed_items() 992 if (curr) { btrfs_delete_delayed_items() 999 btrfs_batch_delete_items(trans, root, path, curr); btrfs_delete_delayed_items() 1650 struct btrfs_delayed_item *curr, *next; btrfs_put_delayed_items() local 1652 list_for_each_entry_safe(curr, next, ins_list, readdir_list) { list_for_each_entry_safe() 1653 list_del(&curr->readdir_list); list_for_each_entry_safe() 1654 if (atomic_dec_and_test(&curr->refs)) list_for_each_entry_safe() 1655 kfree(curr); list_for_each_entry_safe() 1658 list_for_each_entry_safe(curr, next, del_list, readdir_list) { list_for_each_entry_safe() 1659 list_del(&curr->readdir_list); list_for_each_entry_safe() 1660 if (atomic_dec_and_test(&curr->refs)) list_for_each_entry_safe() 1661 kfree(curr); list_for_each_entry_safe() 1668 struct btrfs_delayed_item *curr, *next; btrfs_should_delete_dir_index() local 1674 list_for_each_entry_safe(curr, next, del_list, readdir_list) { list_for_each_entry_safe() 1675 if (curr->key.offset > index) list_for_each_entry_safe() 1678 list_del(&curr->readdir_list); list_for_each_entry_safe() 1679 ret = (curr->key.offset == index); list_for_each_entry_safe() 1681 if (atomic_dec_and_test(&curr->refs)) list_for_each_entry_safe() 1682 kfree(curr); list_for_each_entry_safe() 1700 struct btrfs_delayed_item *curr, *next; btrfs_readdir_delayed_dir_index() local 1715 list_for_each_entry_safe(curr, next, ins_list, readdir_list) { list_for_each_entry_safe() 1716 list_del(&curr->readdir_list); list_for_each_entry_safe() 1718 if (curr->key.offset < ctx->pos) { list_for_each_entry_safe() 1719 if (atomic_dec_and_test(&curr->refs)) list_for_each_entry_safe() 1720 kfree(curr); list_for_each_entry_safe() 1724 ctx->pos = curr->key.offset; list_for_each_entry_safe() 1726 di = (struct btrfs_dir_item *)curr->data; list_for_each_entry_safe() 1736 if (atomic_dec_and_test(&curr->refs)) list_for_each_entry_safe() 1737 kfree(curr); list_for_each_entry_safe()
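The btrfs_put_delayed_items() matches above show a teardown idiom: unlink each node with the safe iterator, drop a reference, and free only when the count hits zero. Shown here with a plain singly linked list and a non-atomic refcount standing in for list_for_each_entry_safe() and atomic_dec_and_test():

    #include <stdlib.h>

    struct item {
            struct item *next;
            int refs;                       /* kernel: atomic_t */
    };

    static void put_items(struct item **head)
    {
            struct item *curr, *next;

            for (curr = *head; curr; curr = next) {
                    next = curr->next;      /* curr may be freed below */
                    curr->next = NULL;      /* unlink (list_del) */
                    if (--curr->refs == 0)  /* atomic_dec_and_test() */
                            free(curr);
            }
            *head = NULL;
    }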
scrub.c | 176 int curr; member in struct:scrub_ctx 428 if (sctx->curr != -1) { scrub_free_ctx() 429 struct scrub_bio *sbio = sctx->bios[sctx->curr]; scrub_free_ctx() 470 sctx->curr = -1; scrub_setup_ctx() 2039 if (sctx->curr == -1) scrub_submit() 2042 sbio = sctx->bios[sctx->curr]; scrub_submit() 2043 sctx->curr = -1; scrub_submit() 2059 while (sctx->curr == -1) { scrub_add_page_to_rd_bio() 2061 sctx->curr = sctx->first_free; scrub_add_page_to_rd_bio() 2062 if (sctx->curr != -1) { scrub_add_page_to_rd_bio() 2063 sctx->first_free = sctx->bios[sctx->curr]->next_free; scrub_add_page_to_rd_bio() 2064 sctx->bios[sctx->curr]->next_free = -1; scrub_add_page_to_rd_bio() 2065 sctx->bios[sctx->curr]->page_count = 0; scrub_add_page_to_rd_bio() 2072 sbio = sctx->bios[sctx->curr]; scrub_add_page_to_rd_bio() 2715 struct scrub_page *curr, *next; scrub_free_parity() local 2726 list_for_each_entry_safe(curr, next, &sparity->spages, list) { scrub_free_parity() 2727 list_del_init(&curr->list); scrub_free_parity() 2728 scrub_page_put(curr); scrub_free_parity()
/linux-4.4.14/arch/x86/um/ |
H A D | tls_32.c | 98 struct uml_tls_struct* curr = load_TLS() local 105 if (!curr->present) { load_TLS() 106 if (!curr->flushed) { load_TLS() 107 clear_user_desc(&curr->tls); load_TLS() 108 curr->tls.entry_number = idx; load_TLS() 110 WARN_ON(!LDT_empty(&curr->tls)); load_TLS() 115 if (!(flags & O_FORCE) && curr->flushed) load_TLS() 118 ret = do_set_thread_area(&curr->tls); load_TLS() 122 curr->flushed = 1; load_TLS() 138 struct uml_tls_struct* curr = needs_TLS_update() local 142 * Can't test curr->present, we may need to clear a descriptor needs_TLS_update() 145 if (curr->flushed) needs_TLS_update() 162 struct uml_tls_struct* curr = clear_flushed_tls() local 169 if (!curr->present) clear_flushed_tls() 172 curr->flushed = 0; clear_flushed_tls()
|
/linux-4.4.14/drivers/misc/mic/scif/ |
H A D | scif_rma_list.c | 30 struct scif_window *curr = NULL; scif_insert_tcw() local 37 curr = list_entry(head->prev, struct scif_window, list); scif_insert_tcw() 38 if (curr->va_for_temp < window->va_for_temp) { scif_insert_tcw() 44 curr = list_entry(item, struct scif_window, list); list_for_each() 45 if (curr->va_for_temp > window->va_for_temp) list_for_each() 47 prev = curr; list_for_each() 60 struct scif_window *curr = NULL, *prev = NULL; scif_insert_window() local 65 curr = list_entry(item, struct scif_window, list); list_for_each() 66 if (curr->offset > window->offset) list_for_each() 68 prev = curr; list_for_each()
|
/linux-4.4.14/arch/ia64/kernel/ |
H A D | unwind.c | 625 memcpy(rs, &sr->curr, sizeof(*rs)); push() 626 sr->curr.next = rs; 632 struct unw_reg_state *rs = sr->curr.next; pop() 638 memcpy(&sr->curr, rs, sizeof(*rs)); pop() 758 reg = sr->curr.reg + unw.save_order[i]; finish_prologue() 779 regs[0] = sr->curr.reg + UNW_REG_F2; finish_prologue() 780 regs[1] = sr->curr.reg + UNW_REG_R4; finish_prologue() 781 regs[2] = sr->curr.reg + UNW_REG_B1; finish_prologue() 788 spill_next_when(&regs[kind - 1], sr->curr.reg + limit[kind - 1], finish_prologue() 797 alloc_spill_area(&off, 16, sr->curr.reg + UNW_REG_F2, sr->curr.reg + UNW_REG_F31); finish_prologue() 798 alloc_spill_area(&off, 8, sr->curr.reg + UNW_REG_B1, sr->curr.reg + UNW_REG_B5); finish_prologue() 799 alloc_spill_area(&off, 8, sr->curr.reg + UNW_REG_R4, sr->curr.reg + UNW_REG_R7); 839 set_reg(sr->curr.reg + unw.save_order[i], UNW_WHERE_GR, desc_prologue() 873 set_reg(sr->curr.reg + UNW_REG_B1 + i, UNW_WHERE_GR, desc_br_gr() 886 set_reg(sr->curr.reg + UNW_REG_B1 + i, UNW_WHERE_SPILL_HOME, desc_br_mem() 901 set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_SPILL_HOME, desc_frgr_mem() 910 set_reg(sr->curr.reg + base + i, UNW_WHERE_SPILL_HOME, desc_frgr_mem() 925 set_reg(sr->curr.reg + UNW_REG_F2 + i, UNW_WHERE_SPILL_HOME, desc_fr_mem() 940 set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_GR, desc_gr_gr() 953 set_reg(sr->curr.reg + UNW_REG_R4 + i, UNW_WHERE_SPILL_HOME, desc_gr_mem() 964 set_reg(sr->curr.reg + UNW_REG_PSP, UNW_WHERE_NONE, 971 sr->curr.reg[UNW_REG_PSP].when = sr->region_start + min_t(int, t, sr->region_len - 1); 977 set_reg(sr->curr.reg + reg, UNW_WHERE_GR, sr->region_start + sr->region_len - 1, dst); 983 set_reg(sr->curr.reg + reg, UNW_WHERE_PSPREL, sr->region_start + sr->region_len - 1, 990 set_reg(sr->curr.reg + reg, UNW_WHERE_SPREL, sr->region_start + sr->region_len - 1, 1003 struct unw_reg_info *reg = sr->curr.reg + regnum; desc_reg_when() 1040 free_state_stack(&sr->curr); desc_copy_state() 1041 memcpy(&sr->curr, &ls->saved_state, sizeof(sr->curr)); desc_copy_state() 1042 sr->curr.next = dup_state_stack(ls->saved_state.next); desc_copy_state() 1060 memcpy(&ls->saved_state, &sr->curr, sizeof(ls->saved_state)); desc_label_state() 1061 ls->saved_state.next = dup_state_stack(sr->curr.next); desc_label_state() 1093 r = sr->curr.reg + decode_abreg(abreg, 0); desc_restore_p() 1114 r = sr->curr.reg + decode_abreg(abreg, 0); desc_spill_reg_p() 1129 r = sr->curr.reg + decode_abreg(abreg, 1); desc_spill_psprel_p() 1144 r = sr->curr.reg + decode_abreg(abreg, 1); desc_spill_sprel_p() 1361 struct unw_reg_info *r = sr->curr.reg + i; emit_nat_info() 1406 struct unw_reg_info *r = sr->curr.reg + i; compile_reg() 1548 for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r) build_script() 1590 sr.curr.reg[UNW_REG_RP].where = UNW_WHERE_BR; build_script() 1591 sr.curr.reg[UNW_REG_RP].when = -1; build_script() 1592 sr.curr.reg[UNW_REG_RP].val = 0; build_script() 1614 sr.curr.reg[UNW_REG_PSP].val = 0; build_script() 1615 sr.curr.reg[UNW_REG_PSP].where = UNW_WHERE_NONE; build_script() 1616 sr.curr.reg[UNW_REG_PSP].when = UNW_WHEN_NEVER; build_script() 1617 for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r) build_script() 1633 if (sr.curr.reg[UNW_REG_RP].when >= sr.when_target) { build_script() 1634 sr.curr.reg[UNW_REG_RP].where = UNW_WHERE_BR; build_script() 1635 sr.curr.reg[UNW_REG_RP].when = -1; build_script() 1636 sr.curr.reg[UNW_REG_RP].val = sr.return_link_reg; build_script() 1638 __func__, ip, sr.curr.reg[UNW_REG_RP].where, build_script() 1639 sr.curr.reg[UNW_REG_RP].val); build_script() 1645 for (r = sr.curr.reg; r < sr.curr.reg + UNW_NUM_REGS; ++r) { build_script() 1647 UNW_DPRINT(1, " %s <- ", unw.preg_name[r - sr.curr.reg]); build_script() 1655 UNW_DPRINT(1, "%s+0x%lx", unw.preg_name[r - sr.curr.reg], r->val); build_script() 1675 if (sr.when_target > sr.curr.reg[UNW_REG_PSP].when build_script() 1676 && (sr.curr.reg[UNW_REG_PSP].where == UNW_WHERE_NONE) build_script() 1677 && sr.curr.reg[UNW_REG_PSP].val != 0) { build_script() 1681 insn.val = sr.curr.reg[UNW_REG_PSP].val; /* frame size */ build_script() 1686 if (sr.when_target < sr.curr.reg[UNW_REG_PRI_UNAT_GR].when) build_script() 1688 else if (sr.when_target < sr.curr.reg[UNW_REG_PRI_UNAT_MEM].when) build_script() 1690 else if (sr.curr.reg[UNW_REG_PRI_UNAT_MEM].when > sr.curr.reg[UNW_REG_PRI_UNAT_GR].when) build_script() 1708 free_state_stack(&sr.curr); build_script()
|
H A D | unwind_i.h | 114 struct unw_reg_state curr; /* current state */ member in struct:unw_state_record
|
H A D | mca.c | 500 const struct mca_table_entry *curr; search_mca_table() local 503 curr = first; search_mca_table() 504 while (curr <= last) { search_mca_table() 505 curr_start = (u64) &curr->start_addr + curr->start_addr; search_mca_table() 506 curr_end = (u64) &curr->end_addr + curr->end_addr; search_mca_table() 511 curr++; search_mca_table()
|
/linux-4.4.14/tools/perf/util/ |
H A D | callchain.h | 126 struct callchain_cursor_node *curr; member in struct:callchain_cursor 171 cursor->curr = cursor->first; callchain_cursor_commit() 182 return cursor->curr; callchain_cursor_current() 187 cursor->curr = cursor->curr->next; callchain_cursor_advance() 216 dest->first = src->curr; callchain_cursor_snapshot()
|
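The callchain cursor above is a simple replay interface: commit rewinds curr to first, current returns the node under the cursor, and advance moves it one link. A self-contained sketch of the same three operations (the node and cursor types here are stand-ins, not perf's own):

#include <stdio.h>
#include <stddef.h>

struct node {
    unsigned long ip;
    struct node *next;
};

struct cursor {
    struct node *first;
    struct node *curr;   /* read position, reset by commit() */
};

static void cursor_commit(struct cursor *c)          { c->curr = c->first; }
static struct node *cursor_current(struct cursor *c) { return c->curr; }
static void cursor_advance(struct cursor *c)         { c->curr = c->curr->next; }

int main(void)
{
    struct node n2 = { 0x2000, NULL }, n1 = { 0x1000, &n2 };
    struct cursor c = { &n1, NULL };

    cursor_commit(&c);   /* rewind to the first entry */
    for (struct node *n = cursor_current(&c); n; n = cursor_current(&c)) {
        printf("ip = 0x%lx\n", n->ip);
        cursor_advance(&c);
    }
    return 0;
}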
H A D | symbol.c | 152 struct symbol *curr, *next; symbols__fixup_duplicate() local 157 curr = rb_entry(nd, struct symbol, rb_node); symbols__fixup_duplicate() 159 nd = rb_next(&curr->rb_node); symbols__fixup_duplicate() 165 if (curr->start != next->start) symbols__fixup_duplicate() 168 if (choose_best_symbol(curr, next) == SYMBOL_A) { symbols__fixup_duplicate() 173 nd = rb_next(&curr->rb_node); symbols__fixup_duplicate() 174 rb_erase(&curr->rb_node, symbols); symbols__fixup_duplicate() 175 symbol__delete(curr); symbols__fixup_duplicate() 183 struct symbol *curr, *prev; symbols__fixup_end() local 188 curr = rb_entry(prevnd, struct symbol, rb_node); symbols__fixup_end() 191 prev = curr; symbols__fixup_end() 192 curr = rb_entry(nd, struct symbol, rb_node); symbols__fixup_end() 194 if (prev->end == prev->start && prev->end != curr->start) symbols__fixup_end() 195 prev->end = curr->start; symbols__fixup_end() 199 if (curr->end == curr->start) symbols__fixup_end() 200 curr->end = roundup(curr->start, 4096); symbols__fixup_end() 206 struct map *next, *curr; __map_groups__fixup_end() local 210 curr = maps__first(maps); __map_groups__fixup_end() 211 if (curr == NULL) __map_groups__fixup_end() 214 for (next = map__next(curr); next; next = map__next(curr)) { __map_groups__fixup_end() 215 curr->end = next->start; __map_groups__fixup_end() 216 curr = next; __map_groups__fixup_end() 223 curr->end = ~0ULL; __map_groups__fixup_end()
|
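symbols__fixup_end() gives zero-length symbols an end address by borrowing the next symbol's start, and rounds the last one up to a page. A hedged userspace sketch of that pass, using an array sorted by start in place of the rb-tree walk:

#include <stdio.h>

struct sym { unsigned long start, end; };

/* Zero-length symbols get an end: the next symbol's start, or a 4 KiB
 * roundup for the last one, mirroring the rb-tree pass above. */
static void fixup_end(struct sym *syms, int n)
{
    for (int i = 0; i + 1 < n; i++)
        if (syms[i].end == syms[i].start &&
            syms[i].end != syms[i + 1].start)
            syms[i].end = syms[i + 1].start;

    if (n > 0 && syms[n - 1].end == syms[n - 1].start)
        syms[n - 1].end = (syms[n - 1].start + 4095UL) & ~4095UL;
}

int main(void)
{
    struct sym syms[] = {
        { 0x1000, 0x1000 }, { 0x1040, 0x1080 }, { 0x2010, 0x2010 },
    };

    fixup_end(syms, 3);
    for (int i = 0; i < 3; i++)
        printf("0x%lx-0x%lx\n", syms[i].start, syms[i].end);
    return 0;
}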
H A D | thread.c | 127 struct comm *new, *curr = thread__comm(thread); __thread__set_comm() local 132 err = comm__override(curr, str, timestamp, exec); __thread__set_comm()
|
H A D | hist.c | 590 iter->curr = 0; iter_prepare_branch_entry() 611 int i = iter->curr; iter_next_branch_entry() 616 if (iter->curr >= iter->total) iter_next_branch_entry() 632 int i = iter->curr; iter_add_next_branch_entry() 654 iter->curr++; iter_add_next_branch_entry() 665 return iter->curr >= iter->total ? 0 : -1; iter_finish_branch_entry() 728 iter->curr = 0; iter_prepare_cumulative_entry() 751 he_cache[iter->curr++] = he; iter_add_single_cumulative_entry() 810 for (i = 0; i < iter->curr; i++) { iter_add_next_cumulative_entry() 825 he_cache[iter->curr++] = he; iter_add_next_cumulative_entry()
|
/linux-4.4.14/fs/udf/ |
H A D | unicode.c | 436 uint8_t curr; udf_translate_to_linux() local 445 curr = udfName[index]; udf_translate_to_linux() 446 if (curr == '/' || curr == 0) { udf_translate_to_linux() 448 curr = ILLEGAL_CHAR_MARK; udf_translate_to_linux() 454 if (curr == EXT_MARK && udf_translate_to_linux() 465 newName[newIndex++] = curr; udf_translate_to_linux() 479 curr = udfName[extIndex + index + 1]; udf_translate_to_linux() 481 if (curr == '/' || curr == 0) { udf_translate_to_linux() 483 curr = ILLEGAL_CHAR_MARK; udf_translate_to_linux() 490 ext[localExtIndex++] = curr; udf_translate_to_linux()
|
H A D | inode.c | 902 int curr = *c; udf_split_extents() local 903 int blen = ((laarr[curr].extLength & UDF_EXTENT_LENGTH_MASK) + udf_split_extents() 905 int8_t etype = (laarr[curr].extLength >> 30); udf_split_extents() 910 laarr[curr + 2] = laarr[curr + 1]; udf_split_extents() 911 laarr[curr + 1] = laarr[curr]; udf_split_extents() 913 laarr[curr + 3] = laarr[curr + 1]; udf_split_extents() 914 laarr[curr + 2] = laarr[curr + 1] = laarr[curr]; udf_split_extents() 920 &laarr[curr].extLocation, udf_split_extents() 922 laarr[curr].extLength = udf_split_extents() 925 laarr[curr].extLocation.logicalBlockNum = 0; udf_split_extents() 926 laarr[curr].extLocation. udf_split_extents() 929 laarr[curr].extLength = (etype << 30) | udf_split_extents() 931 curr++; udf_split_extents() 936 laarr[curr].extLocation.logicalBlockNum = newblocknum; udf_split_extents() 938 laarr[curr].extLocation.partitionReferenceNum = udf_split_extents() 940 laarr[curr].extLength = EXT_RECORDED_ALLOCATED | udf_split_extents() 942 curr++; udf_split_extents() 946 laarr[curr].extLocation.logicalBlockNum += udf_split_extents() 948 laarr[curr].extLength = (etype << 30) | udf_split_extents() 950 curr++; udf_split_extents()
|
H A D | super.c | 1604 struct udf_vds_record *curr; udf_process_sequence() local 1634 curr = &vds[VDS_POS_PRIMARY_VOL_DESC]; udf_process_sequence() 1635 if (vdsn >= curr->volDescSeqNum) { udf_process_sequence() 1636 curr->volDescSeqNum = vdsn; udf_process_sequence() 1637 curr->block = block; udf_process_sequence() 1641 curr = &vds[VDS_POS_VOL_DESC_PTR]; udf_process_sequence() 1642 if (vdsn >= curr->volDescSeqNum) { udf_process_sequence() 1643 curr->volDescSeqNum = vdsn; udf_process_sequence() 1644 curr->block = block; udf_process_sequence() 1656 curr = &vds[VDS_POS_IMP_USE_VOL_DESC]; udf_process_sequence() 1657 if (vdsn >= curr->volDescSeqNum) { udf_process_sequence() 1658 curr->volDescSeqNum = vdsn; udf_process_sequence() 1659 curr->block = block; udf_process_sequence() 1663 curr = &vds[VDS_POS_PARTITION_DESC]; udf_process_sequence() 1664 if (!curr->block) udf_process_sequence() 1665 curr->block = block; udf_process_sequence() 1668 curr = &vds[VDS_POS_LOGICAL_VOL_DESC]; udf_process_sequence() 1669 if (vdsn >= curr->volDescSeqNum) { udf_process_sequence() 1670 curr->volDescSeqNum = vdsn; udf_process_sequence() 1671 curr->block = block; udf_process_sequence() 1675 curr = &vds[VDS_POS_UNALLOC_SPACE_DESC]; udf_process_sequence() 1676 if (vdsn >= curr->volDescSeqNum) { udf_process_sequence() 1677 curr->volDescSeqNum = vdsn; udf_process_sequence() 1678 curr->block = block; udf_process_sequence()
|
/linux-4.4.14/tools/perf/ui/tui/ |
H A D | progress.c | 30 bar = ((SLtt_Screen_Cols - 2) * p->curr) / p->total; tui_progress__update()
|
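The TUI progress bar scales curr into screen columns with a single integer expression, bar = ((cols - 2) * curr) / total. A runnable sketch of the same scaling (width 40 is an arbitrary stand-in for SLtt_Screen_Cols):

#include <stdio.h>

/* Draw an ASCII bar: fill is curr scaled into the printable width. */
static void draw_bar(unsigned long curr, unsigned long total, int width)
{
    int fill = (int)(((width - 2) * curr) / total);

    putchar('[');
    for (int i = 0; i < width - 2; i++)
        putchar(i < fill ? '#' : ' ');
    printf("] %lu/%lu\n", curr, total);
}

int main(void)
{
    for (unsigned long c = 0; c <= 100; c += 25)
        draw_bar(c, 100, 40);
    return 0;
}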
/linux-4.4.14/lib/zlib_inflate/ |
H A D | inftrees.c | 30 unsigned curr; /* number of index bits for current table */ zlib_inflate_table() local 137 filled is at next and has curr index bits. The code being used is huff zlib_inflate_table() 139 bits off of the bottom. For codes where len is less than drop + curr, zlib_inflate_table() 140 those top drop + curr - len bits are incremented through all values to zlib_inflate_table() 191 curr = root; /* current table index bits */ zlib_inflate_table() 220 fill = 1U << curr; zlib_inflate_table() 252 next += min; /* here min is 1 << curr */ zlib_inflate_table() 255 curr = len - drop; zlib_inflate_table() 256 left = (int)(1 << curr); zlib_inflate_table() 257 while (curr + drop < max) { zlib_inflate_table() 258 left -= count[curr + drop]; zlib_inflate_table() 260 curr++; zlib_inflate_table() 265 used += 1U << curr; zlib_inflate_table() 271 (*table)[low].op = (unsigned char)curr; zlib_inflate_table() 280 len is equal to curr + drop, so there is no loop needed to increment zlib_inflate_table()
|
/linux-4.4.14/sound/firewire/digi00x/ |
H A D | digi00x-stream.c | 138 u32 curr; begin_session() local 146 curr = be32_to_cpu(data); begin_session() 148 if (curr == 0) begin_session() 149 curr = 2; begin_session() 151 curr--; begin_session() 152 while (curr > 0) { begin_session() 153 data = cpu_to_be32(curr); begin_session() 163 curr--; begin_session()
|
/linux-4.4.14/kernel/ |
H A D | futex_compat.c | 45 * Walk curr->robust_list (very carefully, it's a userspace list!) 50 void compat_exit_robust_list(struct task_struct *curr) compat_exit_robust_list() argument 52 struct compat_robust_list_head __user *head = curr->compat_robust_list; compat_exit_robust_list() 97 if (handle_futex_death(uaddr, curr, pi)) compat_exit_robust_list() 116 handle_futex_death(uaddr, curr, pip); compat_exit_robust_list()
|
H A D | futex.c | 787 void exit_pi_state_list(struct task_struct *curr) exit_pi_state_list() argument 789 struct list_head *next, *head = &curr->pi_state_list; exit_pi_state_list() 801 raw_spin_lock_irq(&curr->pi_lock); exit_pi_state_list() 808 raw_spin_unlock_irq(&curr->pi_lock); exit_pi_state_list() 812 raw_spin_lock_irq(&curr->pi_lock); exit_pi_state_list() 822 WARN_ON(pi_state->owner != curr); exit_pi_state_list() 826 raw_spin_unlock_irq(&curr->pi_lock); exit_pi_state_list() 832 raw_spin_lock_irq(&curr->pi_lock); exit_pi_state_list() 834 raw_spin_unlock_irq(&curr->pi_lock); exit_pi_state_list() 2928 int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi) handle_futex_death() argument 2936 if ((uval & FUTEX_TID_MASK) == task_pid_vnr(curr)) { handle_futex_death() 2994 * Walk curr->robust_list (very carefully, it's a userspace list!) 2999 void exit_robust_list(struct task_struct *curr) exit_robust_list() argument 3001 struct robust_list_head __user *head = curr->robust_list; exit_robust_list() 3042 curr, pi)) exit_robust_list() 3059 curr, pip); exit_robust_list()
|
/linux-4.4.14/drivers/media/common/saa7146/ |
H A D | saa7146_fops.c | 78 if (NULL == q->curr) { saa7146_buffer_queue() 79 q->curr = buf; saa7146_buffer_queue() 97 DEB_EE("q->curr:%p\n", q->curr); saa7146_buffer_finish() 99 BUG_ON(!q->curr); saa7146_buffer_finish() 102 if (NULL == q->curr) { saa7146_buffer_finish() 107 q->curr->vb.state = state; saa7146_buffer_finish() 108 v4l2_get_timestamp(&q->curr->vb.ts); saa7146_buffer_finish() 109 wake_up(&q->curr->vb.done); saa7146_buffer_finish() 111 q->curr = NULL; saa7146_buffer_finish() 130 q->curr = buf; saa7146_buffer_next() 175 if (q->curr) { saa7146_buffer_timeout() 176 DEB_D("timeout on %p\n", q->curr); saa7146_buffer_timeout()
|
H A D | saa7146_vbi.c | 338 if (vv->vbi_dmaq.curr) vbi_stop() 442 if (vv->vbi_dmaq.curr) { vbi_irq_done() 443 DEB_VBI("dev:%p, curr:%p\n", dev, vv->vbi_dmaq.curr); vbi_irq_done() 446 vv->vbi_dmaq.curr->vb.field_count = vv->vbi_fieldcount; vbi_irq_done()
|
/linux-4.4.14/drivers/iommu/ |
H A D | iova.c | 72 struct rb_node *curr; __cached_rbnode_delete_update() local 76 curr = iovad->cached32_node; __cached_rbnode_delete_update() 77 cached_iova = container_of(curr, struct iova, node); __cached_rbnode_delete_update() 105 struct rb_node *prev, *curr = NULL; __alloc_and_insert_iova_range() local 113 curr = __get_cached_rbnode(iovad, &limit_pfn); __alloc_and_insert_iova_range() 114 prev = curr; __alloc_and_insert_iova_range() 115 while (curr) { __alloc_and_insert_iova_range() 116 struct iova *curr_iova = container_of(curr, struct iova, node); __alloc_and_insert_iova_range() 131 prev = curr; __alloc_and_insert_iova_range() 132 curr = rb_prev(curr); __alloc_and_insert_iova_range() 135 if (!curr) { __alloc_and_insert_iova_range()
|
/linux-4.4.14/drivers/net/wireless/cw1200/ |
H A D | scan.c | 112 priv->scan.curr = priv->scan.begin; cw1200_hw_scan() 140 bool first_run = (priv->scan.begin == priv->scan.curr && cw1200_scan_work() 169 if (!priv->scan.req || (priv->scan.curr == priv->scan.end)) { cw1200_scan_work() 195 struct ieee80211_channel *first = *priv->scan.curr; cw1200_scan_work() 196 for (it = priv->scan.curr + 1, i = 1; cw1200_scan_work() 218 scan.num_channels = it - priv->scan.curr; cw1200_scan_work() 230 sizeof(struct wsm_scan_ch) * (it - priv->scan.curr), cw1200_scan_work() 237 scan.ch[i].number = priv->scan.curr[i]->hw_value; cw1200_scan_work() 238 if (priv->scan.curr[i]->flags & IEEE80211_CHAN_NO_IR) { cw1200_scan_work() 256 priv->scan.curr = it; cw1200_scan_work() 262 priv->scan.curr = priv->scan.end; cw1200_scan_work() 346 priv->scan.curr = priv->scan.end; cw1200_scan_timeout()
|
H A D | scan.h | 30 struct ieee80211_channel **curr; member in struct:cw1200_scan
|
/linux-4.4.14/arch/tile/lib/ |
H A D | spinlock_64.c | 69 u32 curr = arch_spin_current(val); arch_spin_unlock_wait() local 72 if (arch_spin_next(val) == curr) arch_spin_unlock_wait() 78 } while (arch_spin_current(READ_ONCE(lock->lock)) == curr); arch_spin_unlock_wait()
|
H A D | spinlock_32.c | 68 int curr = READ_ONCE(lock->current_ticket); arch_spin_unlock_wait() local 72 if (next == curr) arch_spin_unlock_wait() 78 } while (READ_ONCE(lock->current_ticket) == curr); arch_spin_unlock_wait()
|
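Both tile variants express "locked" as a ticket mismatch: the lock is busy while the next ticket to hand out differs from the one currently being served. A userspace model of that invariant using C11 atomics (the lock/unlock helpers are added here for completeness; only the checks appear in the excerpts above):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct ticket_lock {
    atomic_uint next;   /* next ticket to give out */
    atomic_uint curr;   /* ticket currently holding the lock */
};

static bool is_locked(struct ticket_lock *l)
{
    return atomic_load(&l->next) != atomic_load(&l->curr);
}

static void lock(struct ticket_lock *l)
{
    unsigned int me = atomic_fetch_add(&l->next, 1);  /* take a ticket */
    while (atomic_load(&l->curr) != me)
        ;  /* spin until our ticket is served */
}

static void unlock(struct ticket_lock *l)
{
    atomic_fetch_add(&l->curr, 1);  /* serve the next ticket */
}

int main(void)
{
    struct ticket_lock l = { 0, 0 };

    lock(&l);
    printf("locked: %d\n", is_locked(&l));   /* 1 */
    unlock(&l);
    printf("locked: %d\n", is_locked(&l));   /* 0 */
    return 0;
}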
/linux-4.4.14/scripts/kconfig/ |
H A D | symbol.c | 16 .curr = { "y", yes }, 20 .curr = { "m", mod }, 24 .curr = { "n", no }, 28 .curr = { "", no }, 152 return strtoll(sym->curr.val, NULL, base); sym_get_range_val() 175 val = strtoll(sym->curr.val, NULL, base); sym_validate_range() 186 sym->curr.val = strdup(str); sym_validate_range() 308 sym->curr.tri = no; sym_calc_choice() 334 oldval = sym->curr; sym_calc_value() 340 newval = symbol_empty.curr; sym_calc_value() 344 newval = symbol_no.curr; sym_calc_value() 347 sym->curr.val = sym->name; sym_calc_value() 348 sym->curr.tri = no; sym_calc_value() 357 sym->curr = newval; sym_calc_value() 364 newval.tri = (prop_get_symbol(prop)->curr.val == sym) ? yes : no; sym_calc_value() 421 newval.val = ds->curr.val; sym_calc_value() 429 sym->curr = newval; sym_calc_value() 431 sym->curr.val = sym_calc_choice(sym); sym_calc_value() 434 if (memcmp(&oldval, &sym->curr, sizeof(oldval))) { sym_calc_value() 438 modules_val = modules_sym->curr.tri; sym_calc_value() 701 val = symbol_no.curr.tri; sym_get_string_default() 702 str = symbol_empty.curr.val; sym_get_string_default() 722 str = (const char *)ds->curr.val; sym_get_string_default() 732 if (!sym_is_choice_value(sym) && modules_sym->curr.tri == no) sym_get_string_default() 772 return (modules_sym->curr.tri == no) ? "n" : "m"; sym_get_string_value() 780 return (const char *)sym->curr.val; sym_get_string_value()
|
H A D | lkc.h | 148 return sym->curr.tri; sym_get_tristate_value() 154 return (struct symbol *)sym->curr.val; sym_get_choice_value()
|
H A D | expr.h | 81 struct symbol_value curr; member in struct:symbol 96 #define SYMBOL_VALID 0x0080 /* set when symbol.curr is calculated */
|
H A D | confdata.c | 436 if (!strcmp(sym->curr.val, sym->def[S_DEF_USER].val)) for_all_symbols() 1065 if (csym->curr.tri != yes) randomize_choice_values() 1218 * If curr.tri equals to mod then we can select several 1221 * If curr.tri equals yes then only one symbol can be
|
/linux-4.4.14/kernel/power/ |
H A D | process.c | 186 struct task_struct *curr = current; thaw_processes() local 204 WARN_ON((p != curr) && (p->flags & PF_SUSPEND_TASK)); for_each_process_thread() 209 WARN_ON(!(curr->flags & PF_SUSPEND_TASK)); 210 curr->flags &= ~PF_SUSPEND_TASK;
|
/linux-4.4.14/drivers/hwmon/ |
H A D | ltc4245.c | 215 unsigned int curr; ltc4245_get_current() local 235 curr = voltage / 50; /* sense resistor 50 mOhm */ ltc4245_get_current() 239 curr = (voltage * 10) / 35; /* sense resistor 3.5 mOhm */ ltc4245_get_current() 243 curr = (voltage * 10) / 25; /* sense resistor 2.5 mOhm */ ltc4245_get_current() 247 curr = voltage / 100; /* sense resistor 100 mOhm */ ltc4245_get_current() 252 curr = 0; ltc4245_get_current() 256 return curr; ltc4245_get_current() 274 const unsigned int curr = ltc4245_get_current(dev, attr->index); ltc4245_show_current() local 276 return snprintf(buf, PAGE_SIZE, "%u\n", curr); ltc4245_show_current() 284 const unsigned int curr = ltc4245_get_current(dev, attr->index); ltc4245_show_power() local 288 const unsigned int power = abs(output_voltage * curr); ltc4245_show_power()
|
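The hwmon conversions above are plain Ohm's law: a sense voltage across a known shunt resistor yields current, I = V / R. A minimal sketch with assumed units of microvolts and milliohms, which divide directly to milliamps:

#include <stdio.h>

/* uV / mOhm == mA; the real driver picks the shunt value per rail. */
static unsigned int sense_to_ma(unsigned int sense_uv, unsigned int shunt_mohm)
{
    return sense_uv / shunt_mohm;
}

int main(void)
{
    /* 2500 uV across a 50 mOhm shunt -> 50 mA */
    printf("%u mA\n", sense_to_ma(2500, 50));
    return 0;
}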
H A D | ltc4215.c | 134 const unsigned int curr = voltage / 4; ltc4215_get_current() local 136 return curr; ltc4215_get_current() 153 const unsigned int curr = ltc4215_get_current(dev); ltc4215_show_current() local 155 return snprintf(buf, PAGE_SIZE, "%u\n", curr); ltc4215_show_current() 162 const unsigned int curr = ltc4215_get_current(dev); ltc4215_show_power() local 166 const unsigned int power = abs(output_voltage * curr); ltc4215_show_power()
|
H A D | scpi-hwmon.c | 173 "curr%d_input", num_current + 1); scpi_hwmon_probe() 175 "curr%d_label", num_current + 1); scpi_hwmon_probe()
|
H A D | iio_hwmon.c | 123 "curr%d_input", iio_hwmon_probe()
|
/linux-4.4.14/drivers/media/dvb-frontends/ |
H A D | lnbp21.h | 54 0=pulsed (dynamic) curr limiting 55 1=static curr limiting */
|
/linux-4.4.14/drivers/pci/hotplug/ |
H A D | ibmphp_res.c | 49 static struct bus_node * __init alloc_error_bus (struct ebda_pci_rsrc *curr, u8 busno, int flag) alloc_error_bus() argument 53 if (!(curr) && !(flag)) { alloc_error_bus() 67 newbus->busno = curr->bus_num; alloc_error_bus() 72 static struct resource_node * __init alloc_resources (struct ebda_pci_rsrc *curr) alloc_resources() argument 76 if (!curr) { alloc_resources() 86 rs->busno = curr->bus_num; alloc_resources() 87 rs->devfunc = curr->dev_fun; alloc_resources() 88 rs->start = curr->start_addr; alloc_resources() 89 rs->end = curr->end_addr; alloc_resources() 90 rs->len = curr->end_addr - curr->start_addr + 1; alloc_resources() 94 static int __init alloc_bus_range (struct bus_node **new_bus, struct range_node **new_range, struct ebda_pci_rsrc *curr, int flag, u8 first_bus) alloc_bus_range() argument 106 newbus->busno = curr->bus_num; alloc_bus_range() 129 newrange->start = curr->start_addr; alloc_bus_range() 130 newrange->end = curr->end_addr; alloc_bus_range() 201 struct ebda_pci_rsrc *curr; ibmphp_rsrc_init() local 214 curr = list_entry (tmp_ebda, struct ebda_pci_rsrc, ebda_pci_rsrc_list); ibmphp_rsrc_init() 215 if (!(curr->rsrc_type & PCIDEVMASK)) { ibmphp_rsrc_init() 222 if (curr->rsrc_type & PRIMARYBUSMASK) { ibmphp_rsrc_init() 224 if ((curr->rsrc_type & RESTYPE) == MMASK) { ibmphp_rsrc_init() 227 rc = alloc_bus_range(&newbus, &newrange, curr, MEM, 1); ibmphp_rsrc_init() 233 bus_cur = find_bus_wprev (curr->bus_num, &bus_prev, 1); ibmphp_rsrc_init() 236 rc = alloc_bus_range (&bus_cur, &newrange, curr, MEM, 0); ibmphp_rsrc_init() 241 rc = alloc_bus_range(&newbus, &newrange, curr, MEM, 1); ibmphp_rsrc_init() 249 } else if ((curr->rsrc_type & RESTYPE) == PFMASK) { ibmphp_rsrc_init() 253 rc = alloc_bus_range(&newbus, &newrange, curr, PFMEM, 1); ibmphp_rsrc_init() 259 bus_cur = find_bus_wprev (curr->bus_num, &bus_prev, 1); ibmphp_rsrc_init() 262 rc = alloc_bus_range (&bus_cur, &newrange, curr, PFMEM, 0); ibmphp_rsrc_init() 267 rc = alloc_bus_range(&newbus, &newrange, curr, PFMEM, 1); ibmphp_rsrc_init() 274 } else if ((curr->rsrc_type & RESTYPE) == IOMASK) { ibmphp_rsrc_init() 278 rc = alloc_bus_range(&newbus, &newrange, curr, IO, 1); ibmphp_rsrc_init() 284 bus_cur = find_bus_wprev (curr->bus_num, &bus_prev, 1); ibmphp_rsrc_init() 286 rc = alloc_bus_range (&bus_cur, &newrange, curr, IO, 0); ibmphp_rsrc_init() 291 rc = alloc_bus_range(&newbus, &newrange, curr, IO, 1); ibmphp_rsrc_init() 305 if ((curr->rsrc_type & RESTYPE) == MMASK) { ibmphp_rsrc_init() 307 new_mem = alloc_resources (curr); ibmphp_rsrc_init() 319 newbus = alloc_error_bus (curr, 0, 0); ibmphp_rsrc_init() 328 } else if ((curr->rsrc_type & RESTYPE) == PFMASK) { ibmphp_rsrc_init() 330 new_pfmem = alloc_resources (curr); ibmphp_rsrc_init() 336 newbus = alloc_error_bus (curr, 0, 0); ibmphp_rsrc_init() 345 } else if ((curr->rsrc_type & RESTYPE) == IOMASK) { ibmphp_rsrc_init() 347 new_io = alloc_resources (curr); ibmphp_rsrc_init() 360 newbus = alloc_error_bus (curr, 0, 0); ibmphp_rsrc_init()
|
H A D | cpqphp_core.c | 200 * @curr: %NULL or pointer to previously returned structure 210 void __iomem *curr) get_subsequent_smbios_entry() 217 if (!smbios_table || !curr) get_subsequent_smbios_entry() 223 p_temp = curr; get_subsequent_smbios_entry() 224 p_temp += readb(curr + SMBIOS_GENERIC_LENGTH); get_subsequent_smbios_entry() 229 * and the second is the curr get_subsequent_smbios_entry() 208 get_subsequent_smbios_entry(void __iomem *smbios_start, void __iomem *smbios_table, void __iomem *curr) get_subsequent_smbios_entry() argument
|
/linux-4.4.14/tools/lib/traceevent/ |
H A D | kbuffer-parse.c | 47 * @index - index from @data to the @curr event data 48 * @curr - offset from @data to the start of current event 65 unsigned int curr; member in struct:kbuffer 298 void *ptr = kbuf->data + kbuf->curr; old_update_pointers() 322 kbuf->curr = kbuf->size; old_update_pointers() 349 kbuf->curr = kbuf->next; __old_next_event() 410 void *ptr = kbuf->data + kbuf->curr; update_pointers() 467 kbuf->curr = kbuf->next; __next_event() 534 kbuf->curr = 0; kbuffer_load_subbuffer() 576 if (kbuf->curr >= kbuf->size) kbuffer_read_event() 626 while (kbuf->curr < offset) { kbuffer_read_at_offset() 660 return kbuf->curr; kbuffer_curr_index() 672 return kbuf->curr + kbuf->start; kbuffer_curr_offset() 696 return kbuf->next - kbuf->curr; kbuffer_curr_size() 711 if (kbuf->curr) kbuffer_missed_events()
|
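kbuffer tracks two offsets into a sub-buffer: curr marks the event being read, next the byte just past it, and reading stops once curr reaches the sub-buffer size. A simplified model with fixed-size records standing in for the real variable-length ring-buffer events:

#include <stdio.h>

struct kbuf {
    const unsigned char *data;
    unsigned int size, curr, next;
};

/* Return the event at curr, or NULL past the end of the sub-buffer. */
static const unsigned char *read_event(struct kbuf *k, unsigned int rec_len)
{
    if (k->curr >= k->size)
        return NULL;
    k->next = k->curr + rec_len;   /* remember where this event ends */
    return k->data + k->curr;
}

static void advance(struct kbuf *k)
{
    k->curr = k->next;             /* step to the following event */
}

int main(void)
{
    unsigned char buf[12] = "abcdefghijkl";
    struct kbuf k = { buf, sizeof(buf), 0, 0 };
    const unsigned char *ev;

    while ((ev = read_event(&k, 4))) {
        printf("event at offset %u: %.4s\n", k.curr, (const char *)ev);
        advance(&k);
    }
    return 0;
}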
/linux-4.4.14/drivers/pnp/ |
H A D | interface.c | 26 char *curr; /* current position in buffer */ member in struct:pnp_info_buffer 43 res = vsnprintf(buffer->curr, buffer->len - buffer->size, fmt, args); pnp_printf() 49 buffer->curr += res; pnp_printf() 221 buffer->curr = buffer->buffer; options_show() 240 ret = (buffer->curr - buf); options_show() 264 buffer->curr = buffer->buffer; resources_show() 296 ret = (buffer->curr - buf); resources_show()
|
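pnp_printf() is an append-style formatter: each call writes at curr, then advances curr and the running size so the next call continues where the last one stopped. A self-contained sketch (the real helper additionally rejects output that would overflow the remaining space):

#include <stdarg.h>
#include <stdio.h>

struct info_buffer {
    char *buffer;   /* start of the buffer */
    char *curr;     /* current write position */
    int   size;     /* bytes written so far */
    int   len;      /* total capacity */
};

static int buf_printf(struct info_buffer *b, const char *fmt, ...)
{
    va_list args;
    int res;

    va_start(args, fmt);
    res = vsnprintf(b->curr, b->len - b->size, fmt, args);
    va_end(args);

    b->curr += res;    /* next call appends here */
    b->size += res;
    return res;
}

int main(void)
{
    char raw[64];
    struct info_buffer b = { raw, raw, 0, sizeof(raw) };

    buf_printf(&b, "io %#x", 0x3f8);
    buf_printf(&b, ", irq %d\n", 4);
    fputs(b.buffer, stdout);
    return 0;
}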
/linux-4.4.14/fs/nilfs2/ |
H A D | cpfile.c | 65 __u64 curr, nilfs_cpfile_checkpoints_in_block() 70 nilfs_cpfile_get_offset(cpfile, curr), nilfs_cpfile_checkpoints_in_block() 71 max - curr); nilfs_cpfile_checkpoints_in_block() 493 __u64 curr = *cnop, next; nilfs_cpfile_do_get_ssinfo() local 500 if (curr == 0) { nilfs_cpfile_do_get_ssinfo() 506 curr = le64_to_cpu(header->ch_snapshot_list.ssl_next); nilfs_cpfile_do_get_ssinfo() 509 if (curr == 0) { nilfs_cpfile_do_get_ssinfo() 513 } else if (unlikely(curr == ~(__u64)0)) { nilfs_cpfile_do_get_ssinfo() 518 curr_blkoff = nilfs_cpfile_get_blkoff(cpfile, curr); nilfs_cpfile_do_get_ssinfo() 519 ret = nilfs_cpfile_get_checkpoint_block(cpfile, curr, 0, &bh); nilfs_cpfile_do_get_ssinfo() 527 cp = nilfs_cpfile_block_get_checkpoint(cpfile, curr, bh, kaddr); nilfs_cpfile_do_get_ssinfo() 528 curr = ~(__u64)0; /* Terminator */ nilfs_cpfile_do_get_ssinfo() 551 curr = next; nilfs_cpfile_do_get_ssinfo() 556 *cnop = curr; nilfs_cpfile_do_get_ssinfo() 633 __u64 curr, prev; nilfs_cpfile_set_snapshot() local 667 curr = 0; nilfs_cpfile_set_snapshot() 672 curr = prev; nilfs_cpfile_set_snapshot() 676 ret = nilfs_cpfile_get_checkpoint_block(cpfile, curr, nilfs_cpfile_set_snapshot() 684 cpfile, curr, curr_bh, kaddr); nilfs_cpfile_set_snapshot() 702 cpfile, curr, curr_bh, kaddr); nilfs_cpfile_set_snapshot() 708 cp->cp_snapshot_list.ssl_next = cpu_to_le64(curr); nilfs_cpfile_set_snapshot() 64 nilfs_cpfile_checkpoints_in_block(const struct inode *cpfile, __u64 curr, __u64 max) nilfs_cpfile_checkpoints_in_block() argument
|
H A D | alloc.c | 426 * @curr: current group number 431 unsigned long curr, unsigned long max) nilfs_palloc_rest_groups_in_desc_block() 435 curr % nilfs_palloc_groups_per_desc_block(inode), nilfs_palloc_rest_groups_in_desc_block() 436 max - curr + 1); nilfs_palloc_rest_groups_in_desc_block() 430 nilfs_palloc_rest_groups_in_desc_block(const struct inode *inode, unsigned long curr, unsigned long max) nilfs_palloc_rest_groups_in_desc_block() argument
|
/linux-4.4.14/drivers/media/pci/saa7134/ |
H A D | saa7134-ts.c | 86 dmaq->curr = NULL; saa7134_ts_buffer_init() 156 if (dmaq->curr) { saa7134_ts_start_streaming() 157 vb2_buffer_done(&dmaq->curr->vb2.vb2_buf, saa7134_ts_start_streaming() 159 dmaq->curr = NULL; saa7134_ts_start_streaming() 328 if (dev->ts_q.curr) { saa7134_irq_ts_done()
|
H A D | saa7134-core.c | 286 if (NULL == q->curr) { saa7134_buffer_queue() 288 q->curr = buf; saa7134_buffer_queue() 295 q->curr = buf; saa7134_buffer_queue() 309 core_dbg("buffer_finish %p\n", q->curr); saa7134_buffer_finish() 312 v4l2_get_timestamp(&q->curr->vb2.timestamp); saa7134_buffer_finish() 313 q->curr->vb2.sequence = q->seq_nr++; saa7134_buffer_finish() 314 vb2_buffer_done(&q->curr->vb2.vb2_buf, state); saa7134_buffer_finish() 315 q->curr = NULL; saa7134_buffer_finish() 324 BUG_ON(NULL != q->curr); saa7134_buffer_next() 334 q->curr = buf; saa7134_buffer_next() 361 if (q->curr) { saa7134_buffer_timeout() 362 core_dbg("timeout on %p\n", q->curr); saa7134_buffer_timeout() 404 if (dev->video_q.curr) { saa7134_set_dmabits() 413 if (dev->video_q.curr && dev->fmt->planar) { saa7134_set_dmabits() 426 if (dev->vbi_q.curr) { saa7134_set_dmabits() 444 if (dev->ts_q.curr) { saa7134_set_dmabits() 1208 buf = q->curr; saa7134_buffer_requeue()
|
H A D | saa7134-vbi.c | 168 dmaq->curr = NULL; buffer_init() 210 if (dev->vbi_q.curr) { saa7134_irq_vbi_done() 213 dev->vbi_q.curr->top_seen = 1; saa7134_irq_vbi_done() 216 if (!dev->vbi_q.curr->top_seen) saa7134_irq_vbi_done()
|
H A D | saa7134-video.c | 878 dmaq->curr = NULL; buffer_init() 962 if (dmaq->curr) { saa7134_vb2_start_streaming() 963 vb2_buffer_done(&dmaq->curr->vb2.vb2_buf, saa7134_vb2_start_streaming() 965 dmaq->curr = NULL; saa7134_vb2_start_streaming() 2232 if (dev->video_q.curr) { saa7134_irq_video_done() 2237 dev->video_q.curr->top_seen = 1; saa7134_irq_video_done() 2240 if (!dev->video_q.curr->top_seen) saa7134_irq_video_done()
|
/linux-4.4.14/drivers/net/wireless/mwifiex/ |
H A D | uap_event.c | 30 u8 *curr; mwifiex_check_uap_capabilties() local 39 curr = event->data; mwifiex_check_uap_capabilties() 47 tlv_hdr = (struct mwifiex_ie_types_data *)curr; mwifiex_check_uap_capabilties() 66 wmm_param_ie = (void *)(curr + 2); mwifiex_check_uap_capabilties() 84 curr += (tlv_len + sizeof(tlv_hdr->header)); mwifiex_check_uap_capabilties()
|
H A D | wmm.c | 916 u8 *curr = (u8 *) &resp->params.get_wmm_status; mwifiex_ret_wmm_get_status() local 931 tlv_hdr = (struct mwifiex_ie_types_data *) curr; mwifiex_ret_wmm_get_status() 964 (struct ieee_types_wmm_parameter *) (curr + mwifiex_ret_wmm_get_status() 986 curr += (tlv_len + sizeof(tlv_hdr->header)); mwifiex_ret_wmm_get_status() 1173 * curr bss node. imagine list to stay fixed while head is moved mwifiex_rotate_priolists()
|
/linux-4.4.14/sound/soc/blackfin/ |
H A D | bf5xx-ac97-pcm.c | 224 unsigned int curr; bf5xx_pcm_pointer() local 228 curr = sport->tx_delay_pos; bf5xx_pcm_pointer() 230 curr = sport->rx_pos; bf5xx_pcm_pointer() 234 curr = sport_curr_offset_tx(sport) / sizeof(struct ac97_frame); bf5xx_pcm_pointer() 236 curr = sport_curr_offset_rx(sport) / sizeof(struct ac97_frame); bf5xx_pcm_pointer() 239 return curr; bf5xx_pcm_pointer()
|
H A D | bf6xx-sport.c | 209 unsigned long curr = get_dma_curr_addr(sport->tx_dma_chan); sport_curr_offset_tx() local 211 return (unsigned char *)curr - sport->tx_buf; sport_curr_offset_tx() 217 unsigned long curr = get_dma_curr_addr(sport->rx_dma_chan); sport_curr_offset_rx() local 219 return (unsigned char *)curr - sport->rx_buf; sport_curr_offset_rx()
|
H A D | bf5xx-sport.c | 592 unsigned long curr = get_dma_curr_addr(sport->dma_rx_chan); sport_curr_offset_rx() local 594 return (unsigned char *)curr - sport->rx_buf; sport_curr_offset_rx() 600 unsigned long curr = get_dma_curr_addr(sport->dma_tx_chan); sport_curr_offset_tx() local 602 return (unsigned char *)curr - sport->tx_buf; sport_curr_offset_tx()
|
/linux-4.4.14/drivers/scsi/esas2r/ |
H A D | esas2r_io.c | 222 if (unlikely(sgc->sge.a64.curr > sgc->sge.a64.limit)) { esas2r_build_sg_list_sge() 237 sgelen = (u8)((u8 *)sgc->sge.a64.curr esas2r_build_sg_list_sge() 246 /* Figure out the new curr pointer in the new segment */ esas2r_build_sg_list_sge() 247 sgc->sge.a64.curr = esas2r_build_sg_list_sge() 306 sgc->sge.a64.last = sgc->sge.a64.curr; esas2r_build_sg_list_sge() 309 sgc->sge.a64.curr->length = cpu_to_le32(SGE_ADDR_64 | len); esas2r_build_sg_list_sge() 310 sgc->sge.a64.curr->address = cpu_to_le32(addr); esas2r_build_sg_list_sge() 311 sgc->sge.a64.curr++; esas2r_build_sg_list_sge() 336 ((u8 *)(sgc->sge.a64.curr) - esas2r_build_sg_list_sge() 418 sgc->sge.prd.curr->ctl_len = cpu_to_le32( esas2r_build_prd_iblk() 420 sgc->sge.prd.curr->address = cpu_to_le64(addr); esas2r_build_prd_iblk() 467 sgc->sge.prd.chain = sgc->sge.prd.curr; esas2r_build_prd_iblk() 478 sgc->sge.prd.curr = esas2r_build_prd_iblk() 487 sgc->sge.prd.curr->ctl_len = cpu_to_le32(PRD_DATA | len); esas2r_build_prd_iblk() 488 sgc->sge.prd.curr->address = cpu_to_le64(addr); esas2r_build_prd_iblk() 492 sgc->sge.prd.curr++; esas2r_build_prd_iblk() 614 (struct atto_physical_region_description *)sgc->sge.a64.curr; esas2r_build_sg_list_prd() 625 sgc->sge.prd.curr = curr_iblk_chn; esas2r_build_sg_list_prd()
|
H A D | esas2r.h | 608 struct atto_vda_sge *curr; member in struct:esas2r_sg_context::__anon9364::__anon9365 614 struct atto_physical_region_description *curr; member in struct:esas2r_sg_context::__anon9364::__anon9366 1188 sgc->sge.a64.curr = first; esas2r_sgc_init() 1194 sgc->sge.a64.curr = &rq->vrq->scsi.u.sge[0]; esas2r_sgc_init()
|
/linux-4.4.14/arch/tile/include/asm/ |
H A D | spinlock_32.h | 46 int curr = READ_ONCE(lock->current_ticket); arch_spin_is_locked() local 49 return next != curr; arch_spin_is_locked()
|
/linux-4.4.14/net/irda/ |
H A D | discovery.c | 153 discovery_t * curr; irlmp_expire_discoveries() local 165 curr = discovery; irlmp_expire_discoveries() 169 if ((curr->data.saddr == saddr) && irlmp_expire_discoveries() 171 ((jiffies - curr->timestamp) > DISCOVERY_EXPIRE_TIMEOUT))) irlmp_expire_discoveries() 190 memcpy(&(buffer[i]), &(curr->data), irlmp_expire_discoveries() 195 curr = hashbin_remove_this(log, (irda_queue_t *) curr); irlmp_expire_discoveries() 196 kfree(curr); irlmp_expire_discoveries()
|
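irlmp_expire_discoveries() sweeps the log and drops entries whose timestamp has aged past a timeout. A userspace sketch of that sweep, with time() standing in for jiffies and a singly linked list for the hashbin:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define EXPIRE_SECONDS 6   /* arbitrary stand-in for the discovery timeout */

struct discovery {
    time_t timestamp;
    struct discovery *next;
};

/* Unlink and free every entry older than the timeout. */
static struct discovery *expire(struct discovery *head, time_t now)
{
    struct discovery **pp = &head, *curr;

    while ((curr = *pp)) {
        if (now - curr->timestamp > EXPIRE_SECONDS) {
            *pp = curr->next;   /* unlink the stale entry */
            free(curr);
        } else {
            pp = &curr->next;
        }
    }
    return head;
}

int main(void)
{
    time_t now = time(NULL);
    struct discovery *head = calloc(1, sizeof(*head));

    head->timestamp = now - 10;               /* stale */
    head->next = calloc(1, sizeof(*head));
    head->next->timestamp = now;              /* fresh */

    head = expire(head, now);
    printf("%s\n", head && !head->next ? "one entry kept" : "unexpected");
    return 0;
}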
H A D | irlmp.c | 1274 struct lsap_cb *curr; irlmp_status_indication() local 1277 curr = (struct lsap_cb *) hashbin_get_first( self->lsaps); irlmp_status_indication() 1278 while (NULL != hashbin_find_next(self->lsaps, (long) curr, NULL, irlmp_status_indication() 1280 IRDA_ASSERT(curr->magic == LMP_LSAP_MAGIC, return;); irlmp_status_indication() 1284 if (curr->notify.status_indication != NULL) irlmp_status_indication() 1285 curr->notify.status_indication(curr->notify.instance, irlmp_status_indication() 1290 curr = next; irlmp_status_indication() 1307 struct lsap_cb *curr; irlmp_flow_indication() local 1330 curr = hashbin_find_next(self->lsaps, (long) next, NULL, irlmp_flow_indication() 1333 if(curr == NULL) irlmp_flow_indication() 1335 pr_debug("%s() : curr is %p, next was %p and is now %p, still %d to go - queue len = %d\n", irlmp_flow_indication() 1336 __func__, curr, next, self->flow_next, lsap_todo, irlmp_flow_indication() 1340 if (curr->notify.flow_indication != NULL) irlmp_flow_indication() 1341 curr->notify.flow_indication(curr->notify.instance, irlmp_flow_indication() 1342 curr, flow); irlmp_flow_indication()
|
/linux-4.4.14/arch/s390/oprofile/ |
H A D | hwsampler.c | 335 unsigned long *curr; for_each_online_cpu() local 344 curr = (unsigned long *) sdbt; for_each_online_cpu() 349 if (!*curr || !sdbt) for_each_online_cpu() 353 if (is_link_entry(curr)) { for_each_online_cpu() 354 curr = get_next_sdbt(curr); for_each_online_cpu() 359 if ((unsigned long) curr == start) for_each_online_cpu() 362 sdbt = (unsigned long) curr; for_each_online_cpu() 365 if (*curr) { for_each_online_cpu() 366 free_page(*curr); for_each_online_cpu() 367 curr++; for_each_online_cpu()
|
/linux-4.4.14/drivers/power/ |
H A D | ab8500_btemp.c | 207 int curr; ab8500_btemp_curr_source_enable() local 222 curr = BAT_CTRL_60U_ENA; ab8500_btemp_curr_source_enable() 224 curr = BAT_CTRL_120U_ENA; ab8500_btemp_curr_source_enable() 227 curr = BAT_CTRL_16U_ENA; ab8500_btemp_curr_source_enable() 229 curr = BAT_CTRL_18U_ENA; ab8500_btemp_curr_source_enable() 232 curr = BAT_CTRL_7U_ENA; ab8500_btemp_curr_source_enable() 234 curr = BAT_CTRL_20U_ENA; ab8500_btemp_curr_source_enable() 257 FORCE_BAT_CTRL_CMP_HIGH | curr); ab8500_btemp_curr_source_enable() 264 dev_dbg(di->dev, "Disable BATCTRL curr source\n"); ab8500_btemp_curr_source_enable() 267 /* Write 0 to the curr bits */ ab8500_btemp_curr_source_enable() 274 /* Write 0 to the curr bits */ ab8500_btemp_curr_source_enable() 281 /* Write 0 to the curr bits */ ab8500_btemp_curr_source_enable() 331 /* Write 0 to the curr bits */ ab8500_btemp_curr_source_enable() 337 /* Write 0 to the curr bits */ ab8500_btemp_curr_source_enable() 343 /* Write 0 to the curr bits */ ab8500_btemp_curr_source_enable() 409 dev_err(di->dev, "%s curr source enabled failed\n", __func__); ab8500_btemp_get_batctrl_res() 450 dev_err(di->dev, "%s curr source disable failed\n", __func__); ab8500_btemp_get_batctrl_res()
|
H A D | bq27xxx_battery.c | 788 int curr; bq27xxx_battery_current() local 791 curr = bq27xxx_read(di, BQ27XXX_REG_AI, false); bq27xxx_battery_current() 792 if (curr < 0) { bq27xxx_battery_current() 794 return curr; bq27xxx_battery_current() 801 curr = -curr; bq27xxx_battery_current() 804 val->intval = curr * BQ27XXX_CURRENT_CONSTANT / BQ27XXX_RS; bq27xxx_battery_current() 807 val->intval = (int)((s16)curr) * 1000; bq27xxx_battery_current()
|
H A D | pm2301_charger.c | 530 static int pm2xxx_current_to_regval(int curr) pm2xxx_current_to_regval() argument 534 if (curr < pm2xxx_charger_current_map[0]) pm2xxx_current_to_regval() 538 if (curr < pm2xxx_charger_current_map[i]) pm2xxx_current_to_regval() 543 if (curr == pm2xxx_charger_current_map[i]) pm2xxx_current_to_regval() 549 static int pm2xxx_voltage_to_regval(int curr) pm2xxx_voltage_to_regval() argument 553 if (curr < pm2xxx_charger_voltage_map[0]) pm2xxx_voltage_to_regval() 557 if (curr < pm2xxx_charger_voltage_map[i]) pm2xxx_voltage_to_regval() 562 if (curr == pm2xxx_charger_voltage_map[i]) pm2xxx_voltage_to_regval()
|
H A D | twl4030_charger.c | 434 int v, curr; twl4030_current_worker() local 445 curr = twl4030_charger_get_current(); twl4030_current_worker() 447 dev_dbg(bci->dev, "v=%d cur=%d limit=%d target=%d\n", v, curr, twl4030_current_worker() 810 int curr; twl4030_charger_get_current() local 814 curr = twl4030bci_read_adc_val(TWL4030_BCIICHG); twl4030_charger_get_current() 815 if (curr < 0) twl4030_charger_get_current() 816 return curr; twl4030_charger_get_current() 822 return regval2ua(curr, bcictl1 & TWL4030_CGAIN); twl4030_charger_get_current()
|
H A D | bq24190_charger.c | 717 int curr, ret; bq24190_charger_get_current() local 722 ARRAY_SIZE(bq24190_ccc_ichg_values), &curr); bq24190_charger_get_current() 734 curr /= 5; bq24190_charger_get_current() 736 val->intval = curr; bq24190_charger_get_current() 753 int ret, curr = val->intval; bq24190_charger_set_current() local 763 curr *= 5; bq24190_charger_set_current() 768 ARRAY_SIZE(bq24190_ccc_ichg_values), curr); bq24190_charger_set_current()
|
H A D | ab8500_charger.c | 664 * ab8500_charger_max_usb_curr() - get the max curr for the USB type 1005 static int ab8500_current_to_regval(struct ab8500_charger *di, int curr) ab8500_current_to_regval() argument 1009 if (curr < di->bm->chg_output_curr[0]) ab8500_current_to_regval() 1013 if (curr < di->bm->chg_output_curr[i]) ab8500_current_to_regval() 1019 if (curr == di->bm->chg_output_curr[i]) ab8500_current_to_regval() 1025 static int ab8500_vbus_in_curr_to_regval(struct ab8500_charger *di, int curr) ab8500_vbus_in_curr_to_regval() argument 1029 if (curr < di->bm->chg_input_curr[0]) ab8500_vbus_in_curr_to_regval() 1033 if (curr < di->bm->chg_input_curr[i]) ab8500_vbus_in_curr_to_regval() 1039 if (curr == di->bm->chg_input_curr[i]) ab8500_vbus_in_curr_to_regval() 1194 dev_dbg(di->dev, "curr change_1 to: %x for 0x%02x\n", ab8500_charger_set_current() 1208 dev_dbg(di->dev, "curr change_2 to: %x for 0x%02x\n", ab8500_charger_set_current() 2007 dev_dbg(di->dev, "Vbat did cross threshold, curr: %d, new: %d," ab8500_charger_check_vbat_work() 2704 int ret, curr; ab8500_charger_vbus_drop_end_work() local 2725 curr = di->bm->chg_input_curr[ ab8500_charger_vbus_drop_end_work() 2728 curr = di->bm->chg_input_curr[ ab8500_charger_vbus_drop_end_work() 2731 if (di->max_usb_in_curr.calculated_max != curr) { ab8500_charger_vbus_drop_end_work() 2733 di->max_usb_in_curr.calculated_max = curr; ab8500_charger_vbus_drop_end_work()
|
H A D | axp288_fuel_gauge.c | 306 ret = pmic_read_adc_val("axp288-chrg-curr", &raw_val, info); fuel_gauge_debug_show() 309 ret = pmic_read_adc_val("axp288-chrg-d-curr", &raw_val, info); fuel_gauge_debug_show() 361 ret = pmic_read_adc_val("axp288-chrg-curr", &charge, info); fuel_gauge_get_status() 367 ret = pmic_read_adc_val("axp288-chrg-d-curr", &discharge, info); fuel_gauge_get_status() 404 ret = pmic_read_adc_val("axp288-chrg-curr", &charge, info); fuel_gauge_get_current() 407 ret = pmic_read_adc_val("axp288-chrg-d-curr", &discharge, info); fuel_gauge_get_current()
|
/linux-4.4.14/drivers/net/bonding/ |
H A D | bond_procfs.c | 60 struct slave *curr, *primary; bond_info_show_master() local 63 curr = rcu_dereference(bond->curr_active_slave); bond_info_show_master() 96 (curr) ? curr->dev->name : "None"); bond_info_show_master()
|
H A D | bond_3ad.c | 1473 struct aggregator *curr) ad_agg_selection_test() 1500 return curr; ad_agg_selection_test() 1502 if (!curr->is_individual && best->is_individual) ad_agg_selection_test() 1503 return curr; ad_agg_selection_test() 1505 if (curr->is_individual && !best->is_individual) ad_agg_selection_test() 1508 if (__agg_has_partner(curr) && !__agg_has_partner(best)) ad_agg_selection_test() 1509 return curr; ad_agg_selection_test() 1511 if (!__agg_has_partner(curr) && __agg_has_partner(best)) ad_agg_selection_test() 1514 switch (__get_agg_selection_mode(curr->lag_ports)) { ad_agg_selection_test() 1516 if (curr->num_of_ports > best->num_of_ports) ad_agg_selection_test() 1517 return curr; ad_agg_selection_test() 1519 if (curr->num_of_ports < best->num_of_ports) ad_agg_selection_test() 1525 if (__get_agg_bandwidth(curr) > __get_agg_bandwidth(best)) ad_agg_selection_test() 1526 return curr; ad_agg_selection_test() 1532 curr->slave->bond->dev->name, ad_agg_selection_test() 1533 __get_agg_selection_mode(curr->lag_ports)); ad_agg_selection_test() 1472 ad_agg_selection_test(struct aggregator *best, struct aggregator *curr) ad_agg_selection_test() argument
|
H A D | bond_main.c | 717 struct slave *curr = rtnl_dereference(bond->curr_active_slave); bond_choose_primary_or_current() local 720 if (!curr || curr->link != BOND_LINK_UP) bond_choose_primary_or_current() 722 return curr; bond_choose_primary_or_current() 730 if (!curr || curr->link != BOND_LINK_UP) bond_choose_primary_or_current() 733 /* At this point, prim and curr are both up */ bond_choose_primary_or_current() 738 if (prim->speed < curr->speed) bond_choose_primary_or_current() 739 return curr; bond_choose_primary_or_current() 740 if (prim->speed == curr->speed && prim->duplex <= curr->duplex) bond_choose_primary_or_current() 741 return curr; bond_choose_primary_or_current() 744 return curr; bond_choose_primary_or_current() 748 return curr; bond_choose_primary_or_current()
|
/linux-4.4.14/include/drm/bridge/ |
H A D | dw_hdmi.h | 40 u16 curr[DW_HDMI_RES_MAX]; member in struct:dw_hdmi_curr_ctrl
|
/linux-4.4.14/tools/perf/tests/ |
H A D | builtin-test.c | 182 static bool perf_test__matches(struct test *test, int curr, int argc, const char *argv[]) perf_test__matches() argument 194 if (nr == curr + 1) perf_test__matches() 255 int curr = i++, err; for_each_test() local 257 if (!perf_test__matches(t, curr, argc, argv)) for_each_test()
|
H A D | perf-record.c | 182 pr_debug("%s going backwards in time, prev=%" PRIu64 ", curr=%" PRIu64 "\n", test__PERF_RECORD()
|
/linux-4.4.14/drivers/staging/lustre/lustre/fld/ |
H A D | fld_cache.c | 218 struct list_head *curr; fld_cache_shrink() local 226 curr = cache->fci_lru.prev; fld_cache_shrink() 229 cache->fci_cache_size && curr != &cache->fci_lru) { fld_cache_shrink() 231 flde = list_entry(curr, struct fld_cache_entry, fce_lru); fld_cache_shrink() 232 curr = curr->prev; fld_cache_shrink() 355 CERROR("NEW range ="DRANGE" curr = "DRANGE"\n", fld_cache_overlap_handle()
|
/linux-4.4.14/drivers/pinctrl/mvebu/ |
H A D | pinctrl-mvebu.c | 177 struct mvebu_mpp_ctrl_setting *curr; mvebu_pinconf_group_dbg_show() local 184 curr = mvebu_pinctrl_find_setting_by_val(pctl, grp, config); mvebu_pinconf_group_dbg_show() 186 if (curr) { mvebu_pinconf_group_dbg_show() 187 seq_printf(s, "current: %s", curr->name); mvebu_pinconf_group_dbg_show() 188 if (curr->subname) mvebu_pinconf_group_dbg_show() 189 seq_printf(s, "(%s)", curr->subname); mvebu_pinconf_group_dbg_show() 190 if (curr->flags & (MVEBU_SETTING_GPO | MVEBU_SETTING_GPI)) { mvebu_pinconf_group_dbg_show() 192 if (curr->flags & MVEBU_SETTING_GPI) mvebu_pinconf_group_dbg_show() 194 if (curr->flags & MVEBU_SETTING_GPO) mvebu_pinconf_group_dbg_show() 204 if (curr == &grp->settings[n]) mvebu_pinconf_group_dbg_show()
|
/linux-4.4.14/kernel/gcov/ |
H A D | fs.c | 574 char *curr; add_node() local 584 for (curr = filename; (next = strchr(curr, '/')); curr = next + 1) { add_node() 585 if (curr == next) add_node() 588 if (strcmp(curr, ".") == 0) add_node() 590 if (strcmp(curr, "..") == 0) { add_node() 596 node = get_child_by_name(parent, curr); add_node() 598 node = new_node(parent, NULL, curr); add_node() 605 node = new_node(parent, info, curr); add_node()
|
/linux-4.4.14/net/atm/ |
H A D | clip.c | 790 struct clip_vcc *curr) clip_seq_next_vcc() 792 if (!curr) { clip_seq_next_vcc() 793 curr = e->vccs; clip_seq_next_vcc() 794 if (!curr) clip_seq_next_vcc() 796 return curr; clip_seq_next_vcc() 798 if (curr == SEQ_NO_VCC_TOKEN) clip_seq_next_vcc() 801 curr = curr->next; clip_seq_next_vcc() 803 return curr; clip_seq_next_vcc() 789 clip_seq_next_vcc(struct atmarp_entry *e, struct clip_vcc *curr) clip_seq_next_vcc() argument
|
H A D | mpc.c | 220 struct atm_mpoa_qos *curr; atm_mpoa_delete_qos() local 230 curr = qos_head; atm_mpoa_delete_qos() 231 while (curr != NULL) { atm_mpoa_delete_qos() 232 if (curr->next == entry) { atm_mpoa_delete_qos() 233 curr->next = entry->next; atm_mpoa_delete_qos() 237 curr = curr->next; atm_mpoa_delete_qos()
|
/linux-4.4.14/security/apparmor/ |
H A D | policy.c | 224 * aa_ns_visible - test if @view is visible from @curr 225 * @curr: namespace to treat as the parent (NOT NULL) 226 * @view: namespace to test if visible from @curr (NOT NULL) 228 * Returns: true if @view is visible from @curr else false 230 bool aa_ns_visible(struct aa_namespace *curr, struct aa_namespace *view) aa_ns_visible() argument 232 if (curr == view) aa_ns_visible() 236 if (view->parent == curr) aa_ns_visible() 243 * aa_na_name - Find the ns name to display for @view from @curr 244 * @curr - current namespace (NOT NULL) 247 * Returns: name of @view visible from @curr 249 const char *aa_ns_name(struct aa_namespace *curr, struct aa_namespace *view) aa_ns_name() argument 251 /* if view == curr then the namespace name isn't displayed */ aa_ns_name() 252 if (curr == view) aa_ns_name() 255 if (aa_ns_visible(curr, view)) { aa_ns_name() 257 * thus the curr ns.hname is a prefix of its name. aa_ns_name() 259 * Add + 2 to skip over // separating curr hname prefix aa_ns_name() 262 return view->base.hname + strlen(curr->base.hname) + 2; aa_ns_name()
|
/linux-4.4.14/drivers/iio/adc/ |
H A D | axp288_adc.c | 108 AXP288_ADC_MAP("BATT_CHG_I", "axp288-chrg", "axp288-chrg-curr"), 109 AXP288_ADC_MAP("BATT_DISCHRG_I", "axp288-chrg", "axp288-chrg-d-curr"),
|
H A D | twl4030-madc.c | 235 int temp, curr, volt, res, ret; twl4030battery_temperature() local 244 curr = ((val & TWL4030_BCI_ITHSENS) + 1) * 10; twl4030battery_temperature() 246 res = volt * 1000 / curr; twl4030battery_temperature()
|
/linux-4.4.14/arch/x86/kernel/cpu/mtrr/ |
H A D | generic.c | 82 static int check_type_overlap(u8 *prev, u8 *curr) check_type_overlap() argument 84 if (*prev == MTRR_TYPE_UNCACHABLE || *curr == MTRR_TYPE_UNCACHABLE) { check_type_overlap() 86 *curr = MTRR_TYPE_UNCACHABLE; check_type_overlap() 90 if ((*prev == MTRR_TYPE_WRBACK && *curr == MTRR_TYPE_WRTHROUGH) || check_type_overlap() 91 (*prev == MTRR_TYPE_WRTHROUGH && *curr == MTRR_TYPE_WRBACK)) { check_type_overlap() 93 *curr = MTRR_TYPE_WRTHROUGH; check_type_overlap() 96 if (*prev != *curr) { check_type_overlap() 98 *curr = MTRR_TYPE_UNCACHABLE; check_type_overlap()
|
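check_type_overlap() resolves two overlapping MTRR types with three rules: UC always wins, WB meeting WT degrades to WT, and any other mismatch falls back to UC; a return of 1 means the effective type can no longer change. A standalone restatement (the MTRR_TYPE_* numeric values below are the kernel's):

#include <stdio.h>

enum { UC = 0, WT = 4, WB = 6 };  /* MTRR_TYPE_UNCACHABLE/WRTHROUGH/WRBACK */

static int type_overlap(unsigned char *prev, unsigned char *curr)
{
    if (*prev == UC || *curr == UC) {
        *prev = UC;
        *curr = UC;
        return 1;   /* uncachable dominates everything */
    }
    if ((*prev == WB && *curr == WT) || (*prev == WT && *curr == WB)) {
        *prev = WT;
        *curr = WT;
        return 1;   /* write-back meeting write-through degrades to WT */
    }
    if (*prev != *curr) {
        *prev = UC;
        *curr = UC;
        return 1;   /* any other conflict falls back to UC */
    }
    return 0;       /* same type: nothing final yet */
}

int main(void)
{
    unsigned char prev = WB, curr = WT;

    type_overlap(&prev, &curr);
    printf("effective type: %u (WT)\n", prev);
    return 0;
}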
/linux-4.4.14/drivers/usb/chipidea/ |
H A D | udc.h | 55 u32 curr; member in struct:ci_hw_qh
|
/linux-4.4.14/drivers/media/platform/s5p-g2d/ |
H A D | g2d.h | 32 struct g2d_ctx *curr; member in struct:g2d_dev
|
H A D | g2d.c | 488 if (dev->curr == NULL) /* No job currently running */ job_abort() 492 dev->curr == NULL, job_abort() 504 dev->curr = ctx; device_run() 540 struct g2d_ctx *ctx = dev->curr; g2d_isr() 564 dev->curr = NULL; g2d_isr()
|
/linux-4.4.14/drivers/acpi/ |
H A D | ec.c | 465 (ec->curr && ec->curr->command == ACPI_EC_COMMAND_QUERY)) acpi_ec_guard_event() 477 if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_POLL)) ec_transaction_polled() 489 if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_COMPLETE)) ec_transaction_completed() 497 ec->curr->flags |= flag; ec_transaction_transition() 498 if (ec->curr->command == ACPI_EC_COMMAND_QUERY) { ec_transaction_transition() 526 t = ec->curr; advance_transaction() 605 ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0; start_transaction() 606 ec->curr->flags = 0; start_transaction() 682 ec->curr = t; acpi_ec_transaction_unlocked() 693 ec->curr = NULL; acpi_ec_transaction_unlocked() 1124 if (!ec->curr) acpi_ec_check_event()
|
H A D | acpi_video.c | 194 int curr; member in struct:acpi_video_device_brightness 371 device->brightness->curr = level; acpi_video_device_lcd_set_level() 575 device->brightness->curr = *level; acpi_video_device_lcd_get_level_current() 590 * dev->brightness->curr is a cached value which stores acpi_video_device_lcd_get_level_current() 600 *level = device->brightness->curr; acpi_video_device_lcd_get_level_current() 842 br->curr = level = max_level; acpi_video_init_brightness() 1597 video_device->brightness->curr); acpi_video_resume()
|
H A D | internal.h | 153 struct transaction *curr; acpi_early_processor_osc() member in struct:acpi_ec
|
/linux-4.4.14/arch/mips/include/asm/ |
H A D | sgialib.h | 40 extern struct linux_mdesc *prom_getmdesc(struct linux_mdesc *curr);
|
/linux-4.4.14/drivers/net/ethernet/stmicro/stmmac/ |
H A D | stmmac_mdio.c | 42 unsigned long curr; stmmac_mdio_busy_wait() local 46 curr = jiffies; stmmac_mdio_busy_wait() 51 } while (!time_after_eq(curr, finish)); stmmac_mdio_busy_wait()
|
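stmmac_mdio_busy_wait() is the classic poll-until-deadline loop: re-read a busy flag until it clears or the deadline passes. A userspace sketch with clock_gettime() in place of jiffies/time_after_eq() and a stub device that reports ready on the fifth poll:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static int polls;

static bool read_busy(void)
{
    return ++polls < 5;   /* stub device: ready on the 5th poll */
}

static int busy_wait(long timeout_ms)
{
    struct timespec now, finish;

    clock_gettime(CLOCK_MONOTONIC, &finish);
    finish.tv_sec  += timeout_ms / 1000;
    finish.tv_nsec += (timeout_ms % 1000) * 1000000L;
    if (finish.tv_nsec >= 1000000000L) {       /* normalize the deadline */
        finish.tv_sec++;
        finish.tv_nsec -= 1000000000L;
    }

    do {
        if (!read_busy())
            return 0;                          /* device became ready */
        clock_gettime(CLOCK_MONOTONIC, &now);
    } while (now.tv_sec < finish.tv_sec ||
             (now.tv_sec == finish.tv_sec && now.tv_nsec < finish.tv_nsec));

    return -1;                                 /* timed out, like -EBUSY */
}

int main(void)
{
    printf("busy_wait -> %d after %d polls\n", busy_wait(10), polls);
    return 0;
}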
/linux-4.4.14/drivers/media/platform/ |
H A D | rcar_jpu.c | 200 * @curr: pointer to current context 216 struct jpu_ctx *curr; member in struct:jpu 296 * @curr: current position in the buffer 300 void *curr; member in struct:jpeg_buffer 587 if (buf->curr >= buf->end) get_byte() 590 return *(u8 *)buf->curr++; get_byte() 595 if (buf->end - buf->curr < 2) get_word_be() 598 *word = get_unaligned_be16(buf->curr); get_word_be() 599 buf->curr += 2; get_word_be() 606 buf->curr += min((unsigned long)(buf->end - buf->curr), len); skip() 617 jpeg_buffer.curr = buffer; jpu_parse_hdr() 1373 jpu->curr = ctx; jpu_device_run() 1502 if (!wait_event_timeout(ctx->jpu->irq_queue, !ctx->jpu->curr, jpu_job_abort() 1586 jpu->curr = NULL; jpu_irq_handler()
|
/linux-4.4.14/drivers/net/wireless/prism54/ |
H A D | islpci_mgt.c | 112 u32 curr = le32_to_cpu(cb->driver_curr_frag[ISL38XX_CB_RX_MGMTQ]); islpci_mgmt_rx_fill() local 118 while (curr - priv->index_mgmt_rx < ISL38XX_CB_MGMT_QSIZE) { islpci_mgmt_rx_fill() 119 u32 index = curr % ISL38XX_CB_MGMT_QSIZE; islpci_mgmt_rx_fill() 144 curr++; islpci_mgmt_rx_fill() 150 cb->driver_curr_frag[ISL38XX_CB_RX_MGMTQ] = cpu_to_le32(curr); islpci_mgmt_rx_fill()
|
/linux-4.4.14/drivers/net/ethernet/sun/ |
H A D | sunvnet.c | 1211 struct sk_buff *curr = segs; vnet_handle_offloads() local 1214 curr->next = NULL; vnet_handle_offloads() 1215 if (port->tso && curr->len > dev->mtu) { vnet_handle_offloads() 1216 skb_shinfo(curr)->gso_size = gso_size; vnet_handle_offloads() 1217 skb_shinfo(curr)->gso_type = gso_type; vnet_handle_offloads() 1218 skb_shinfo(curr)->gso_segs = vnet_handle_offloads() 1219 DIV_ROUND_UP(curr->len - hlen, gso_size); vnet_handle_offloads() 1221 skb_shinfo(curr)->gso_size = 0; vnet_handle_offloads() 1223 skb_push(curr, maclen); vnet_handle_offloads() 1224 skb_reset_mac_header(curr); vnet_handle_offloads() 1225 memcpy(skb_mac_header(curr), skb_mac_header(skb), vnet_handle_offloads() 1227 curr->csum_start = skb_transport_header(curr) - curr->head; vnet_handle_offloads() 1228 if (ip_hdr(curr)->protocol == IPPROTO_TCP) vnet_handle_offloads() 1229 curr->csum_offset = offsetof(struct tcphdr, check); vnet_handle_offloads() 1230 else if (ip_hdr(curr)->protocol == IPPROTO_UDP) vnet_handle_offloads() 1231 curr->csum_offset = offsetof(struct udphdr, check); vnet_handle_offloads() 1234 status = vnet_start_xmit(curr, dev); vnet_handle_offloads() 1236 dev_kfree_skb_any(curr); vnet_handle_offloads()
|
/linux-4.4.14/net/ipv6/ |
H A D | exthdrs.c | 99 const struct tlvtype_proc *curr; ip6_parse_tlv() local 145 for (curr = procs; curr->type >= 0; curr++) { ip6_parse_tlv() 146 if (curr->type == nh[off]) { ip6_parse_tlv() 150 if (curr->func(skb, off) == false) ip6_parse_tlv() 155 if (curr->type < 0) { ip6_parse_tlv()
|
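ip6_parse_tlv() dispatches each option through a table of {type, handler} pairs terminated by a negative type. A compact sketch of that dispatch shape (the handler names and type values here are made up for illustration):

#include <stdbool.h>
#include <stdio.h>

struct tlv_proc {
    int type;                        /* option type, -1 terminates */
    bool (*func)(unsigned char v);   /* returns false to abort parsing */
};

static bool handle_padn(unsigned char v) { printf("padn %u\n", v); return true; }
static bool handle_ra(unsigned char v)   { printf("ra %u\n", v);   return true; }

static const struct tlv_proc procs[] = {
    { 1, handle_padn },
    { 5, handle_ra },
    { -1, NULL },
};

static bool parse_tlv(unsigned char type, unsigned char val)
{
    const struct tlv_proc *curr;

    for (curr = procs; curr->type >= 0; curr++)
        if (curr->type == type)
            return curr->func(val);   /* matched: run the handler */

    printf("unknown option %u\n", type);  /* real code may drop the packet */
    return true;
}

int main(void)
{
    parse_tlv(5, 0);
    parse_tlv(7, 0);
    return 0;
}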
/linux-4.4.14/drivers/staging/vt6655/ |
H A D | device_main.c | 547 dma_addr_t curr = priv->rd0_pool_dma; device_init_rd0_ring() local 552 i ++, curr += sizeof(struct vnt_rx_desc)) { device_init_rd0_ring() 560 desc->next_desc = cpu_to_le32(curr + sizeof(struct vnt_rx_desc)); device_init_rd0_ring() 571 dma_addr_t curr = priv->rd1_pool_dma; device_init_rd1_ring() local 576 i ++, curr += sizeof(struct vnt_rx_desc)) { device_init_rd1_ring() 584 desc->next_desc = cpu_to_le32(curr + sizeof(struct vnt_rx_desc)); device_init_rd1_ring() 629 dma_addr_t curr; device_init_td0_ring() local 632 curr = priv->td0_pool_dma; device_init_td0_ring() 634 i++, curr += sizeof(struct vnt_tx_desc)) { device_init_td0_ring() 642 desc->next_desc = cpu_to_le32(curr + sizeof(struct vnt_tx_desc)); device_init_td0_ring() 653 dma_addr_t curr; device_init_td1_ring() local 657 curr = priv->td1_pool_dma; device_init_td1_ring() 659 i++, curr += sizeof(struct vnt_tx_desc)) { device_init_td1_ring() 667 desc->next_desc = cpu_to_le32(curr + sizeof(struct vnt_tx_desc)); device_init_td1_ring()
|
/linux-4.4.14/ipc/ |
H A D | sem.c | 219 struct sem *curr; unmerge_queues() local 220 curr = &sma->sem_base[q->sops[0].sem_num]; unmerge_queues() 222 list_add_tail(&q->list, &curr->pending_alter); unmerge_queues() 617 struct sem *curr; perform_atomic_semop() local 626 curr = sma->sem_base + sop->sem_num; perform_atomic_semop() 628 result = curr->semval; perform_atomic_semop() 647 curr->semval = result; perform_atomic_semop() 1272 struct sem *curr; semctl_setval() local 1321 curr = &sma->sem_base[semnum]; semctl_setval() 1327 curr->semval = val; semctl_setval() 1328 curr->sempid = task_tgid_vnr(current); semctl_setval() 1342 struct sem *curr; semctl_main() local 1472 curr = &sma->sem_base[semnum]; semctl_main() 1476 err = curr->semval; semctl_main() 1479 err = curr->sempid; semctl_main() 1923 struct sem *curr; SYSCALL_DEFINE4() local 1924 curr = &sma->sem_base[sops->sem_num]; SYSCALL_DEFINE4() 1933 &curr->pending_alter); SYSCALL_DEFINE4() 1936 list_add_tail(&queue.list, &curr->pending_const); SYSCALL_DEFINE4()
|
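perform_atomic_semop() must apply the whole sops array or none of it: every operation is checked against the current values, and the semaphores are only updated once all of them can proceed. A rough two-pass rendering of that all-or-nothing rule (wait-for-zero, sempid bookkeeping, and the undo machinery are omitted):

    #include <stdbool.h>

    #define SEMVMX 32767              /* kernel's maximum semaphore value */

    struct sem    { int semval; };
    struct sembuf { unsigned short sem_num; short sem_op; };

    static bool would_succeed(const struct sem *base,
                              const struct sembuf *sops, int n)
    {
        for (int i = 0; i < n; i++) {
            int result = base[sops[i].sem_num].semval + sops[i].sem_op;

            if (result < 0 || result > SEMVMX)
                return false;         /* would block or overflow */
        }
        return true;
    }

    static bool perform_atomic_semop(struct sem *base,
                                     const struct sembuf *sops, int n)
    {
        if (!would_succeed(base, sops, n))
            return false;             /* leave every semaphore untouched */
        for (int i = 0; i < n; i++)
            base[sops[i].sem_num].semval += sops[i].sem_op;
        return true;
    }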
/linux-4.4.14/net/netfilter/ |
H A D | x_tables.c | 1220 struct list_head *head, *curr; member in struct:nf_mttg_trav 1244 trav->head = trav->curr = is_target ? xt_mttg_seq_next() 1248 trav->curr = trav->curr->next; xt_mttg_seq_next() 1249 if (trav->curr != trav->head) xt_mttg_seq_next() 1253 trav->head = trav->curr = is_target ? xt_mttg_seq_next() 1258 trav->curr = trav->curr->next; xt_mttg_seq_next() 1259 if (trav->curr != trav->head) xt_mttg_seq_next() 1316 if (trav->curr == trav->head) xt_match_seq_show() 1318 match = list_entry(trav->curr, struct xt_match, list); xt_match_seq_show() 1369 if (trav->curr == trav->head) xt_target_seq_show() 1371 target = list_entry(trav->curr, struct xt_target, list); xt_target_seq_show()
|
/linux-4.4.14/fs/ |
H A D | coredump.c | 420 struct core_thread *curr, *next; coredump_finish() local 431 while ((curr = next) != NULL) { coredump_finish() 432 next = curr->next; coredump_finish() 433 task = curr->task; coredump_finish() 435 * see exit_mm(), curr->task must not see coredump_finish() 439 curr->task = NULL; coredump_finish()
|
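coredump_finish() walks a singly linked list of parked threads, and the order matters: curr->next is loaded before curr->task is cleared, because the woken thread frees its own node the moment it observes the cleared field (the exit_mm() comment in the hunk). A loose userspace analogue, with a printf standing in for the wakeup and an integer id for the task pointer:

    #include <stdio.h>

    struct core_thread {
        int tid;                      /* stand-in for the task pointer */
        struct core_thread *next;
    };

    static void wake(int tid)         /* stand-in for wake_up_process() */
    {
        printf("waking thread %d\n", tid);
    }

    static void coredump_finish(struct core_thread *next)
    {
        struct core_thread *curr;

        while ((curr = next) != NULL) {
            next = curr->next;        /* read before the node is released */
            int tid = curr->tid;

            /* in the kernel a barrier orders this store before the wakeup;
             * the waiter spins until it sees the field cleared */
            curr->tid = 0;
            wake(tid);
        }
    }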
/linux-4.4.14/sound/pci/hda/ |
H A D | hda_proc.c | 617 int c, curr = -1; print_conn_list() local 625 curr = snd_hda_codec_read(codec, nid, 0, print_conn_list() 632 if (c == curr) print_conn_list() 699 int i, curr = -1; print_device_list() local 709 curr = snd_hda_codec_read(codec, nid, 0, print_device_list() 713 if (i == curr) print_device_list()
|
/linux-4.4.14/drivers/mmc/host/ |
H A D | sdhci-st.c | 234 unsigned long curr, value; st_mmcss_lock_dll() local 239 curr = jiffies; st_mmcss_lock_dll() 245 } while (!time_after_eq(curr, finish)); st_mmcss_lock_dll()
|
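st_mmcss_lock_dll() waits for the DLL to lock with the stock jiffies polling idiom: resample the clock on every pass and give up once the deadline passes. A userspace analogue, with time() standing in for jiffies and a simulated status read:

    #include <stdbool.h>
    #include <time.h>

    static bool dll_locked(void)      /* stand-in for the status-register read */
    {
        return false;
    }

    static bool wait_for_lock(int timeout_secs)
    {
        time_t curr, finish = time(NULL) + timeout_secs;

        do {
            if (dll_locked())
                return true;
            curr = time(NULL);        /* jiffies in the driver */
        } while (curr < finish);      /* !time_after_eq(curr, finish) */

        return false;                 /* timed out */
    }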
H A D | sdhci.c | 117 pr_debug(DRIVER_NAME ": Cmd: 0x%08x | Max curr: 0x%08x\n", sdhci_dumpregs() 3250 int curr = regulator_get_current_limit(mmc->supply.vmmc); sdhci_add_host() local 3251 if (curr > 0) { sdhci_add_host() 3254 curr = curr/1000; /* convert to mA */ sdhci_add_host() 3255 curr = curr/SDHCI_MAX_CURRENT_MULTIPLIER; sdhci_add_host() 3257 curr = min_t(u32, curr, SDHCI_MAX_CURRENT_LIMIT); sdhci_add_host() 3259 (curr << SDHCI_MAX_CURRENT_330_SHIFT) | sdhci_add_host() 3260 (curr << SDHCI_MAX_CURRENT_300_SHIFT) | sdhci_add_host() 3261 (curr << SDHCI_MAX_CURRENT_180_SHIFT); sdhci_add_host()
|
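The sdhci_add_host() hunk turns a regulator limit in microamps into the SDHCI maximum-current register encoding: milliamps in 4 mA steps, clamped to the 8-bit field, then replicated across the 3.3 V / 3.0 V / 1.8 V shifts. The arithmetic for one rail, with constants matching the driver's:

    #include <stdint.h>

    #define SDHCI_MAX_CURRENT_MULTIPLIER 4    /* register counts 4 mA units */
    #define SDHCI_MAX_CURRENT_LIMIT      0xFF /* 8-bit field per voltage rail */

    static uint32_t encode_max_current(int limit_uA)
    {
        uint32_t curr = limit_uA / 1000;              /* uA -> mA */

        curr /= SDHCI_MAX_CURRENT_MULTIPLIER;         /* mA -> 4 mA steps */
        if (curr > SDHCI_MAX_CURRENT_LIMIT)
            curr = SDHCI_MAX_CURRENT_LIMIT;
        return curr;                                  /* one rail's field */
    }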
/linux-4.4.14/drivers/net/ethernet/mellanox/mlx4/ |
H A D | en_port.c | 137 __be64 *curr = start; en_stats_adder() local 143 ret += be64_to_cpu(*curr); en_stats_adder() 144 curr += offset; en_stats_adder()
|
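en_stats_adder() sums a column of big-endian 64-bit counters spaced a fixed pointer stride apart (the driver computes the stride as next - start). A userspace equivalent with be64toh() in place of be64_to_cpu():

    #include <endian.h>
    #include <stddef.h>
    #include <stdint.h>

    static uint64_t stats_adder(const uint64_t *start, ptrdiff_t stride, int num)
    {
        const uint64_t *curr = start;
        uint64_t ret = 0;

        for (int i = 0; i < num; i++) {
            ret += be64toh(*curr);    /* counters are stored big-endian */
            curr += stride;           /* stride in 64-bit elements */
        }
        return ret;
    }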
/linux-4.4.14/drivers/atm/ |
H A D | zatm.c | 1263 unsigned long curr; zatm_start() local 1289 curr = rx*RX_SIZE/4; zatm_start() 1290 DPRINTK("RX pool 0x%08lx\n",curr); zatm_start() 1291 zpokel(zatm_dev,curr,uPD98401_PMA); /* receive pool */ zatm_start() 1292 zatm_dev->pool_base = curr; zatm_start() 1293 curr += pools*POOL_SIZE/4; zatm_start() 1294 DPRINTK("Shapers 0x%08lx\n",curr); zatm_start() 1295 zpokel(zatm_dev,curr,uPD98401_SMA); /* shapers */ zatm_start() 1296 curr += NR_SHAPERS*SHAPER_SIZE/4; zatm_start() 1297 DPRINTK("Free 0x%08lx\n",curr); zatm_start() 1298 zpokel(zatm_dev,curr,uPD98401_TOS); /* free pool */ zatm_start() 1301 (zatm_dev->mem-curr*4)/VC_SIZE); zatm_start()
|
H A D | eni.c | 658 struct atm_vcc *curr; poll_rx() local 661 while ((curr = eni_dev->fast)) { poll_rx() 663 if (rx_vcc(curr)) return; poll_rx() 664 eni_dev->fast = ENI_VCC(curr)->next; poll_rx() 665 ENI_VCC(curr)->next = ENI_VCC_NOS; poll_rx() 667 ENI_VCC(curr)->servicing--; poll_rx() 669 while ((curr = eni_dev->slow)) { poll_rx() 671 if (rx_vcc(curr)) return; poll_rx() 672 eni_dev->slow = ENI_VCC(curr)->next; poll_rx() 673 ENI_VCC(curr)->next = ENI_VCC_NOS; poll_rx() 675 ENI_VCC(curr)->servicing--; poll_rx() 1221 DPRINTK("dequeue_tx: next 0x%lx curr 0x%x\n",ENI_PRV_POS(skb), dequeue_tx()
|
/linux-4.4.14/drivers/leds/ |
H A D | leds-lp55xx-common.c | 99 unsigned long curr; lp55xx_store_current() local 101 if (kstrtoul(buf, 0, &curr)) lp55xx_store_current() 104 if (curr > led->max_current) lp55xx_store_current() 111 chip->cfg->set_led_current(led, (u8)curr); lp55xx_store_current()
|
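lp55xx_store_current() is the standard sysfs store shape: parse the string, range-check against the LED's maximum, then hand the value to the chip callback. A userspace sketch with strtoul() standing in for kstrtoul():

    #include <errno.h>
    #include <stdlib.h>

    struct led { unsigned long max_current; };

    static int store_current(struct led *led, const char *buf)
    {
        char *end;
        unsigned long curr = strtoul(buf, &end, 0);

        if (end == buf)
            return -EINVAL;           /* kstrtoul() would reject this */
        if (curr > led->max_current)
            return -EINVAL;

        /* chip->cfg->set_led_current(led, (u8)curr) in the driver */
        return 0;
    }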
/linux-4.4.14/drivers/i2c/busses/ |
H A D | i2c-ocores.c | 321 u32 curr, wr; oc_setreg_grlib() local 326 curr = ioread32be(i2c->base + (rreg << i2c->reg_shift)); oc_setreg_grlib() 328 wr = (curr & 0xff00) | value; oc_setreg_grlib() 330 wr = (((u32)value) << 8) | (curr & 0xff); oc_setreg_grlib()
|
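On GRLIB hardware two 8-bit registers share one big-endian 32-bit word, so oc_setreg_grlib() must read-modify-write: fetch the word, splice the new byte into the right half, write it back. The splice, with plain memory standing in for ioread32be()/iowrite32be():

    #include <stdint.h>

    static void setreg_packed(uint32_t *reg, int high_byte, uint8_t value)
    {
        uint32_t curr = *reg, wr;

        if (high_byte)
            wr = ((uint32_t)value << 8) | (curr & 0xff);   /* keep low byte  */
        else
            wr = (curr & 0xff00) | value;                  /* keep high byte */
        *reg = wr;
    }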
/linux-4.4.14/drivers/cpufreq/ |
H A D | powernow-k8.c | 186 pr_err("fid trans failed, fid 0x%x, curr 0x%x\n", fid, write_new_fid() 228 pr_err("vid trans failed, vid 0x%x, curr 0x%x\n", write_new_vid() 272 pr_err("failed (cpu%d): req 0x%x 0x%x, curr 0x%x 0x%x\n", transition_fid_vid() 306 pr_debug("ph1: curr 0x%x, req vid 0x%x\n", core_voltage_pre_transition() 393 pr_err("ph2: mismatch, failed fid transition, curr 0x%x, req 0x%x\n", core_frequency_transition() 399 pr_err("ph2: vid changed, save 0x%x, curr 0x%x\n", core_frequency_transition() 426 pr_err("ph3: bad fid change, save 0x%x, curr 0x%x\n", core_voltage_post_transition() 432 pr_err("ph3: failed vid transition\n, req 0x%x, curr 0x%x", core_voltage_post_transition() 967 pr_debug("targ: curr fid 0x%x, vid 0x%x\n", powernowk8_target_fn()
|
/linux-4.4.14/arch/frv/include/asm/ |
H A D | processor.h | 44 struct task_struct *curr; /* [GR29] current pointer for this thread */ member in struct:thread_struct
|
/linux-4.4.14/arch/frv/kernel/ |
H A D | asm-offsets.c | 89 OFFSET(__THREAD_CURR, thread_struct, curr); foo()
|
H A D | process.c | 141 p->thread.curr = p; copy_thread()
|
/linux-4.4.14/drivers/scsi/aic7xxx/ |
H A D | aic79xx_osm.c | 845 if ((tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0) { ahd_linux_dev_reset() 1579 || (tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0) { ahd_linux_run_command() 1609 if ((tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ) != 0) ahd_linux_run_command() 1707 if (tinfo->curr.period != tinfo->goal.period ahd_send_async() 1708 || tinfo->curr.width != tinfo->goal.width ahd_send_async() 1709 || tinfo->curr.offset != tinfo->goal.offset ahd_send_async() 1710 || tinfo->curr.ppr_options != tinfo->goal.ppr_options) ahd_send_async() 1732 if (tinfo->curr.period == spi_period(starget) ahd_send_async() 1733 && tinfo->curr.width == spi_width(starget) ahd_send_async() 1734 && tinfo->curr.offset == spi_offset(starget) ahd_send_async() 1735 && tinfo->curr.ppr_options == target_ppr_options) ahd_send_async() 1739 spi_period(starget) = tinfo->curr.period; ahd_send_async() 1740 spi_width(starget) = tinfo->curr.width; ahd_send_async() 1741 spi_offset(starget) = tinfo->curr.offset; ahd_send_async() 1742 spi_dt(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_DT_REQ ? 1 : 0; ahd_send_async() 1743 spi_qas(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_QAS_REQ ? 1 : 0; ahd_send_async() 1744 spi_iu(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ ? 1 : 0; ahd_send_async() 1745 spi_rd_strm(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_RD_STRM ? 1 : 0; ahd_send_async() 1746 spi_pcomp_en(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_PCOMP_EN ? 1 : 0; ahd_send_async() 1747 spi_rti(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_RTI ? 1 : 0; ahd_send_async() 1748 spi_wr_flow(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_WR_FLOW ? 1 : 0; ahd_send_async() 1749 spi_hold_mcs(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_HOLD_MCS ? 1 : 0; ahd_send_async()
|
H A D | aic7xxx_osm.c | 1487 hscb->scsioffset = tinfo->curr.offset; ahc_linux_run_command() 1631 if (tinfo->curr.period != tinfo->goal.period ahc_send_async() 1632 || tinfo->curr.width != tinfo->goal.width ahc_send_async() 1633 || tinfo->curr.offset != tinfo->goal.offset ahc_send_async() 1634 || tinfo->curr.ppr_options != tinfo->goal.ppr_options) ahc_send_async() 1655 if (tinfo->curr.period == spi_period(starget) ahc_send_async() 1656 && tinfo->curr.width == spi_width(starget) ahc_send_async() 1657 && tinfo->curr.offset == spi_offset(starget) ahc_send_async() 1658 && tinfo->curr.ppr_options == target_ppr_options) ahc_send_async() 1662 spi_period(starget) = tinfo->curr.period; ahc_send_async() 1663 spi_width(starget) = tinfo->curr.width; ahc_send_async() 1664 spi_offset(starget) = tinfo->curr.offset; ahc_send_async() 1665 spi_dt(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_DT_REQ ? 1 : 0; ahc_send_async() 1666 spi_qas(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_QAS_REQ ? 1 : 0; ahc_send_async() 1667 spi_iu(starget) = tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ ? 1 : 0; ahc_send_async()
|
H A D | aic7xxx_core.c | 1079 tinfo = &targ_info->curr; ahc_handle_seqint() 1884 tinfo->curr.transport_version = 2; ahc_handle_scsiint() 2170 memset(&tstate->transinfo[i].curr, 0, ahc_alloc_tstate() 2171 sizeof(tstate->transinfo[i].curr)); ahc_alloc_tstate() 2465 tinfo->curr.width = AHC_WIDTH_UNKNOWN; ahc_update_neg_request() 2466 tinfo->curr.period = AHC_PERIOD_UNKNOWN; ahc_update_neg_request() 2467 tinfo->curr.offset = AHC_OFFSET_UNKNOWN; ahc_update_neg_request() 2469 if (tinfo->curr.period != tinfo->goal.period ahc_update_neg_request() 2470 || tinfo->curr.width != tinfo->goal.width ahc_update_neg_request() 2471 || tinfo->curr.offset != tinfo->goal.offset ahc_update_neg_request() 2472 || tinfo->curr.ppr_options != tinfo->goal.ppr_options ahc_update_neg_request() 2485 * Update the user/goal/curr tables of synchronous negotiation 2528 old_period = tinfo->curr.period; ahc_set_syncrate() 2529 old_offset = tinfo->curr.offset; ahc_set_syncrate() 2530 old_ppr = tinfo->curr.ppr_options; ahc_set_syncrate() 2583 tinfo->curr.period = period; ahc_set_syncrate() 2584 tinfo->curr.offset = offset; ahc_set_syncrate() 2585 tinfo->curr.ppr_options = ppr_options; ahc_set_syncrate() 2612 * Update the user/goal/curr tables of wide negotiation 2640 oldwidth = tinfo->curr.width; ahc_set_width() 2655 tinfo->curr.width = width; ahc_set_width() 2720 pending_hscb->scsioffset = tinfo->curr.offset; ahc_update_pending_scbs() 2995 dowide = tinfo->curr.width != tinfo->goal.width; ahc_build_transfer_msg() 2996 dosync = tinfo->curr.offset != offset || tinfo->curr.period != period; ahc_build_transfer_msg() 3042 : tinfo->curr.width, ahc_build_transfer_msg() 4041 tinfo->curr.transport_version = 2; ahc_handle_msg_reject() 4064 if (tinfo->goal.offset != tinfo->curr.offset) { ahc_handle_msg_reject() 5579 tinfo->curr.protocol_version = 2; ahc_init() 5580 tinfo->curr.transport_version = 2; ahc_init()
|
H A D | aic79xx_proc.c | 177 ahd_format_transinfo(m, &tinfo->curr); ahd_dump_target_state()
|
H A D | aic7xxx_proc.c | 157 ahc_format_transinfo(m, &tinfo->curr); ahc_dump_target_state()
|
H A D | aic79xx_core.c | 1952 tinfo = &targ_info->curr; ahd_handle_seqint() 3155 if ((tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ)!=0) { ahd_handle_nonpkt_busfree() 3171 tinfo->curr.transport_version = 2; ahd_handle_nonpkt_busfree() 3654 memset(&tstate->transinfo[i].curr, 0, ahd_alloc_tstate() 3655 sizeof(tstate->transinfo[i].curr)); ahd_alloc_tstate() 3861 tinfo->curr.width = AHD_WIDTH_UNKNOWN; ahd_update_neg_request() 3862 tinfo->curr.period = AHD_PERIOD_UNKNOWN; ahd_update_neg_request() 3863 tinfo->curr.offset = AHD_OFFSET_UNKNOWN; ahd_update_neg_request() 3865 if (tinfo->curr.period != tinfo->goal.period ahd_update_neg_request() 3866 || tinfo->curr.width != tinfo->goal.width ahd_update_neg_request() 3867 || tinfo->curr.offset != tinfo->goal.offset ahd_update_neg_request() 3868 || tinfo->curr.ppr_options != tinfo->goal.ppr_options ahd_update_neg_request() 3881 * Update the user/goal/curr tables of synchronous negotiation 3924 old_period = tinfo->curr.period; ahd_set_syncrate() 3925 old_offset = tinfo->curr.offset; ahd_set_syncrate() 3926 old_ppr = tinfo->curr.ppr_options; ahd_set_syncrate() 3935 tinfo->curr.period = period; ahd_set_syncrate() 3936 tinfo->curr.offset = offset; ahd_set_syncrate() 3937 tinfo->curr.ppr_options = ppr_options; ahd_set_syncrate() 3994 ahd_update_neg_table(ahd, devinfo, &tinfo->curr); ahd_set_syncrate() 4027 * Update the user/goal/curr tables of wide negotiation 4055 oldwidth = tinfo->curr.width; ahd_set_width() 4060 tinfo->curr.width = width; ahd_set_width() 4073 ahd_update_neg_table(ahd, devinfo, &tinfo->curr); ahd_set_width() 4554 dowide = tinfo->curr.width != tinfo->goal.width; ahd_build_transfer_msg() 4555 dosync = tinfo->curr.offset != offset || tinfo->curr.period != period; ahd_build_transfer_msg() 4600 : tinfo->curr.width, ahd_build_transfer_msg() 5179 tinfo->curr.width, devinfo->role); ahd_parse_msg() 5573 tinfo->curr.transport_version = 2; ahd_handle_msg_reject() 5597 if (tinfo->goal.offset != tinfo->curr.offset) { ahd_handle_msg_reject() 7478 ahd_update_neg_table(ahd, &devinfo, &tinfo->curr); ahd_chip_init() 7691 tinfo->curr.protocol_version = 2; ahd_default_config() 7692 tinfo->curr.transport_version = 2; ahd_default_config() 7805 tinfo->curr.protocol_version = 2; ahd_parse_cfgdata() 7806 tinfo->curr.transport_version = 2; ahd_parse_cfgdata() 9054 tinfo = &targ_info->curr; ahd_handle_scsi_status()
|
/linux-4.4.14/arch/s390/kernel/ |
H A D | perf_cpum_sf.c | 118 unsigned long *sdbt, *curr; free_sampling_buffer() local 124 curr = sdbt; free_sampling_buffer() 128 if (!*curr || !sdbt) free_sampling_buffer() 132 if (is_link_entry(curr)) { free_sampling_buffer() 133 curr = get_next_sdbt(curr); free_sampling_buffer() 138 if (curr == sfb->sdbt) free_sampling_buffer() 141 sdbt = curr; free_sampling_buffer() 144 if (*curr) { free_sampling_buffer() 145 free_page(*curr); free_sampling_buffer() 146 curr++; free_sampling_buffer()
|
/linux-4.4.14/drivers/net/can/c_can/ |
H A D | c_can.c | 1005 u16 curr, last = priv->last_status; c_can_poll() local 1008 priv->last_status = curr = priv->read_reg(priv, C_CAN_STS_REG); c_can_poll() 1014 if ((curr & STATUS_EWARN) && (!(last & STATUS_EWARN))) { c_can_poll() 1019 if ((curr & STATUS_EPASS) && (!(last & STATUS_EPASS))) { c_can_poll() 1024 if ((curr & STATUS_BOFF) && (!(last & STATUS_BOFF))) { c_can_poll() 1031 if ((!(curr & STATUS_BOFF)) && (last & STATUS_BOFF)) { c_can_poll() 1035 if ((!(curr & STATUS_EPASS)) && (last & STATUS_EPASS)) { c_can_poll() 1041 work_done += c_can_handle_bus_err(dev, curr & LEC_MASK); c_can_poll()
|
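c_can_poll() reports state transitions, not levels: a condition is signalled only when its status bit differs from the previous poll. A generic rendering of that edge detection (the bit values are taken to match the driver's STATUS register):

    #include <stdint.h>
    #include <stdio.h>

    #define STATUS_EWARN 0x40
    #define STATUS_BOFF  0x80

    static void poll_status(uint16_t curr, uint16_t *last)
    {
        if ((curr & STATUS_EWARN) && !(*last & STATUS_EWARN))
            printf("entered error-warning\n");       /* rising edge  */
        if (!(curr & STATUS_BOFF) && (*last & STATUS_BOFF))
            printf("left bus-off\n");                /* falling edge */
        *last = curr;
    }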
/linux-4.4.14/drivers/media/platform/omap3isp/ |
H A D | ispstat.c | 193 struct ispstat_buffer *curr = &stat->buf[i]; __isp_stat_buf_find() local 199 if (curr == stat->locked_buf || curr == stat->active_buf) __isp_stat_buf_find() 203 if (!look_empty && curr->empty) __isp_stat_buf_find() 207 if (curr->empty) { __isp_stat_buf_find() 208 found = curr; __isp_stat_buf_find() 214 (s32)curr->frame_number - (s32)found->frame_number < 0) __isp_stat_buf_find() 215 found = curr; __isp_stat_buf_find()
|
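__isp_stat_buf_find() prefers an empty buffer and otherwise returns the oldest one, comparing frame numbers with a signed difference so sequence wrap-around is tolerated. A sketch under those assumptions (the locked/active-buffer checks are omitted):

    #include <stddef.h>
    #include <stdint.h>

    struct stat_buf { uint32_t frame_number; int empty; };

    static struct stat_buf *find_oldest(struct stat_buf *buf, int n)
    {
        struct stat_buf *found = NULL;

        for (int i = 0; i < n; i++) {
            struct stat_buf *curr = &buf[i];

            if (curr->empty)
                return curr;              /* an empty buffer wins outright */
            if (!found ||
                (int32_t)(curr->frame_number - found->frame_number) < 0)
                found = curr;             /* older, wrap-safe comparison */
        }
        return found;
    }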
/linux-4.4.14/drivers/media/platform/s5p-jpeg/ |
H A D | jpeg-core.h | 245 * @curr: current position in the buffer 250 unsigned long curr; member in struct:s5p_jpeg_buffer
|
H A D | jpeg-core.c | 799 jpeg_buffer.curr = 0; exynos4_jpeg_parse_decode_h_tbl() 807 jpeg_buffer.curr = 0; exynos4_jpeg_parse_decode_h_tbl() 837 jpeg_buffer.curr = 0; exynos4_jpeg_parse_huff_tbl() 840 while (jpeg_buffer.curr < jpeg_buffer.size) { exynos4_jpeg_parse_huff_tbl() 894 jpeg_buffer.curr = 0; exynos4_jpeg_parse_decode_q_tbl() 927 jpeg_buffer.curr = 0; exynos4_jpeg_parse_q_tbl() 930 while (jpeg_buffer.size - jpeg_buffer.curr >= 65) { exynos4_jpeg_parse_q_tbl() 1065 if (buf->curr >= buf->size) get_byte() 1068 return ((unsigned char *)buf->data)[buf->curr++]; get_byte() 1110 jpeg_buffer.curr = 0; s5p_jpeg_parse_hdr() 1135 sof = jpeg_buffer.curr; /* after 0xffc0 */ s5p_jpeg_parse_hdr() 1168 dqt[n_dqt] = jpeg_buffer.curr; /* after 0xffdb */ s5p_jpeg_parse_hdr() 1181 dht[n_dht] = jpeg_buffer.curr; /* after 0xffc4 */ s5p_jpeg_parse_hdr() 1187 sos = jpeg_buffer.curr - 2; /* 0xffda */ s5p_jpeg_parse_hdr()
|
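The jpeg-core parser is driven by a byte cursor (@curr in struct s5p_jpeg_buffer); get_byte() is the entire access discipline, small enough to reproduce as a self-contained snippet:

    struct jpeg_buffer {
        unsigned long size;
        unsigned long curr;       /* current position in the buffer */
        void *data;
    };

    static int get_byte(struct jpeg_buffer *buf)
    {
        if (buf->curr >= buf->size)
            return -1;            /* end of buffer */
        return ((unsigned char *)buf->data)[buf->curr++];
    }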
/linux-4.4.14/drivers/net/fddi/skfp/h/ |
H A D | fplustm.h | 101 HW_PTR tx_bmu_dsc ; /* BMU addr for curr dsc. */ 114 HW_PTR rx_bmu_dsc ; /* BMU addr for curr dsc. */
|
/linux-4.4.14/drivers/gpu/drm/i915/ |
H A D | i915_cmd_parser.c | 571 u32 curr = desc->cmd.value & desc->cmd.mask; validate_cmds_sorted() local 573 if (curr < previous) { validate_cmds_sorted() 575 ring->id, i, j, curr, previous); validate_cmds_sorted() 579 previous = curr; validate_cmds_sorted() 595 u32 curr = reg_table[i].addr; check_sorted() local 597 if (curr < previous) { check_sorted() 599 ring_id, i, curr, previous); check_sorted() 603 previous = curr; check_sorted()
|
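Both validate_cmds_sorted() and check_sorted() are the same monotonicity check: walk the table once and fail if any entry is smaller than its predecessor. Distilled:

    #include <stdbool.h>
    #include <stdint.h>

    static bool is_sorted(const uint32_t *tab, int n)
    {
        uint32_t previous = 0;

        for (int i = 0; i < n; i++) {
            uint32_t curr = tab[i];

            if (curr < previous)
                return false;     /* out of order */
            previous = curr;
        }
        return true;
    }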
/linux-4.4.14/arch/arc/include/asm/ |
H A D | entry.h | 240 * 1. Determine curr CPU id. 250 * 1. Determine curr CPU id.
|
/linux-4.4.14/drivers/char/ipmi/ |
H A D | ipmi_si_intf.c | 1746 static int parse_str(struct hotmod_vals *v, int *val, char *name, char **curr) parse_str() argument 1751 s = strchr(*curr, ','); parse_str() 1759 if (strcmp(*curr, v[i].name) == 0) { parse_str() 1761 *curr = s; parse_str() 1766 printk(KERN_WARNING PFX "Invalid hotmod %s '%s'\n", name, *curr); parse_str() 1770 static int check_hotmod_int_op(const char *curr, const char *option, check_hotmod_int_op() argument 1775 if (strcmp(curr, name) == 0) { check_hotmod_int_op() 1779 curr); check_hotmod_int_op() 1786 curr); check_hotmod_int_op() 1807 char *next, *curr, *s, *n, *o; hotmod_handler() local 1832 for (curr = str; curr; curr = next) { hotmod_handler() 1839 next = strchr(curr, ':'); hotmod_handler() 1845 rv = parse_str(hotmod_ops, &ival, "operation", &curr); hotmod_handler() 1850 rv = parse_str(hotmod_si, &ival, "interface type", &curr); hotmod_handler() 1855 rv = parse_str(hotmod_as, &addr_space, "address space", &curr); hotmod_handler() 1859 s = strchr(curr, ','); hotmod_handler() 1864 addr = simple_strtoul(curr, &n, 0); hotmod_handler() 1865 if ((*n != '\0') || (*curr == '\0')) { hotmod_handler() 1867 " '%s'\n", curr); hotmod_handler() 1872 curr = s; hotmod_handler() 1873 s = strchr(curr, ','); hotmod_handler() 1878 o = strchr(curr, '='); hotmod_handler() 1883 rv = check_hotmod_int_op(curr, o, "rsp", ®spacing); hotmod_handler() 1888 rv = check_hotmod_int_op(curr, o, "rsi", ®size); hotmod_handler() 1893 rv = check_hotmod_int_op(curr, o, "rsh", ®shift); hotmod_handler() 1898 rv = check_hotmod_int_op(curr, o, "irq", &irq); hotmod_handler() 1903 rv = check_hotmod_int_op(curr, o, "ipmb", &ipmb); hotmod_handler() 1912 curr); hotmod_handler()
|
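The hotmod parser splits its option string on ':' and ',' with strchr() and then matches name=value pairs; check_hotmod_int_op() is roughly the following (a userspace rendering with strtoul(), error handling condensed):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    /* curr is the option name (already split at '='), option the value */
    static int check_int_op(const char *curr, const char *option,
                            const char *name, int *val)
    {
        if (strcmp(curr, name) != 0)
            return 0;                   /* not this option */
        if (!option) {
            fprintf(stderr, "no value for '%s'\n", curr);
            return -1;
        }
        char *end;
        *val = (int)strtoul(option, &end, 0);
        if (*end != '\0' || *option == '\0') {
            fprintf(stderr, "bad value for '%s'\n", curr);
            return -1;
        }
        return 1;                       /* option consumed */
    }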
/linux-4.4.14/drivers/media/pci/bt8xx/ |
H A D | bttv-risc.c | 469 if (NULL != btv->curr.top) btv->cap_ctl |= 0x02; bttv_set_dma() 470 if (NULL != btv->curr.bottom) btv->cap_ctl |= 0x01; bttv_set_dma() 481 btv->curr.top ? (unsigned long long)btv->curr.top->top.dma : 0, bttv_set_dma() 483 btv->curr.bottom ? (unsigned long long)btv->curr.bottom->bottom.dma : 0); bttv_set_dma() 491 if (btv->curr.frame_irq || btv->loop_irq || btv->cvbi) { bttv_set_dma()
|
H A D | bttv-driver.c | 1199 if (btv->curr.frame_irq) { set_input() 1692 if (!btv->curr.frame_irq) { buffer_queue() 3445 btv->curr.top bttv_print_riscaddr() 3446 ? (unsigned long long)btv->curr.top->top.dma : 0, bttv_print_riscaddr() 3447 btv->curr.bottom bttv_print_riscaddr() 3448 ? (unsigned long long)btv->curr.bottom->bottom.dma : 0); bttv_print_riscaddr() 3600 struct bttv_buffer_set *curr, unsigned int state) bttv_irq_wakeup_video() 3607 if (NULL != wakeup->top && curr->top != wakeup->top) { bttv_irq_wakeup_video() 3617 if (NULL != wakeup->top && curr->top != wakeup->top) { bttv_irq_wakeup_video() 3626 if (NULL != wakeup->bottom && curr->bottom != wakeup->bottom) { bttv_irq_wakeup_video() 3671 old = btv->curr; bttv_irq_timeout() 3673 btv->curr = new; bttv_irq_timeout() 3705 struct bttv_buffer *wakeup = btv->curr.top; bttv_irq_wakeup_top() 3711 btv->curr.top_irq = 0; bttv_irq_wakeup_top() 3712 btv->curr.top = NULL; bttv_irq_wakeup_top() 3743 if ((btv->curr.top && is_active(&btv->curr.top->top, rc)) || bttv_irq_switch_video() 3744 (btv->curr.bottom && is_active(&btv->curr.bottom->bottom, rc))) { bttv_irq_switch_video() 3753 old = btv->curr; bttv_irq_switch_video() 3754 btv->curr = new; bttv_irq_switch_video() 4319 btv->state.video = btv->curr; bttv_suspend() 4322 btv->curr = idle; bttv_suspend() 4378 btv->curr = btv->state.video; bttv_resume() 4381 bttv_buffer_activate_video(btv, &btv->curr); bttv_resume() 3599 bttv_irq_wakeup_video(struct bttv *btv, struct bttv_buffer_set *wakeup, struct bttv_buffer_set *curr, unsigned int state) bttv_irq_wakeup_video() argument
|
/linux-4.4.14/drivers/net/wireless/ti/wl1251/ |
H A D | rx.c | 161 wl1251_warning("curr ID:%d, last ID inc:%d", wl1251_rx_body()
|
/linux-4.4.14/kernel/trace/ |
H A D | trace_sched_wakeup.c | 401 struct task_struct *curr, tracing_sched_wakeup_trace() 414 entry->prev_pid = curr->pid; tracing_sched_wakeup_trace() 415 entry->prev_prio = curr->prio; tracing_sched_wakeup_trace() 416 entry->prev_state = curr->state; tracing_sched_wakeup_trace() 399 tracing_sched_wakeup_trace(struct trace_array *tr, struct task_struct *wakee, struct task_struct *curr, unsigned long flags, int pc) tracing_sched_wakeup_trace() argument
|
H A D | trace_functions_graph.c | 571 struct ftrace_graph_ent_entry *curr) get_return_for_leaf() 583 curr = &data->ent; get_return_for_leaf() 613 data->ent = *curr; get_return_for_leaf() 629 if (curr->ent.pid != next->ent.pid || get_return_for_leaf() 630 curr->graph_ent.func != next->ret.func) get_return_for_leaf() 570 get_return_for_leaf(struct trace_iterator *iter, struct ftrace_graph_ent_entry *curr) get_return_for_leaf() argument
|
/linux-4.4.14/include/scsi/ |
H A D | libiscsi_tcp.h | 91 ISCSI_TCP_SEGMENT_DONE, /* curr seg has been processed */
|
/linux-4.4.14/drivers/usb/isp1760/ |
H A D | isp1760-hcd.c | 305 int i, curr; init_memory() local 316 curr = i; init_memory() 318 priv->memory_pool[curr + i].start = payload_addr; init_memory() 319 priv->memory_pool[curr + i].size = BLOCK_2_SIZE; init_memory() 320 priv->memory_pool[curr + i].free = 1; init_memory() 321 payload_addr += priv->memory_pool[curr + i].size; init_memory() 324 curr = i; init_memory() 326 priv->memory_pool[curr + i].start = payload_addr; init_memory() 327 priv->memory_pool[curr + i].size = BLOCK_3_SIZE; init_memory() 328 priv->memory_pool[curr + i].free = 1; init_memory() 329 payload_addr += priv->memory_pool[curr + i].size; init_memory()
|
/linux-4.4.14/drivers/usb/gadget/udc/ |
H A D | goku_udc.c | 638 u32 curr, master; abort_dma() local 659 curr = readl(®s->in_dma_current); abort_dma() 661 writel(curr, ®s->in_dma_end); abort_dma() 662 writel(curr, ®s->in_dma_start); abort_dma() 674 curr = readl(®s->out_dma_current); abort_dma() 676 writel(curr, ®s->out_dma_end); abort_dma() 677 writel(curr, ®s->out_dma_start); abort_dma() 686 req->req.actual = (curr - req->req.dma) + 1; abort_dma()
|
/linux-4.4.14/drivers/video/backlight/ |
H A D | as3711_bl.c | 313 if (of_find_property(bl, "su2-feedback-curr-auto", NULL)) { as3711_backlight_parse_dt() 355 * At least one su2-auto-curr* must be specified iff as3711_backlight_parse_dt()
|
/linux-4.4.14/net/batman-adv/ |
H A D | bridge_loop_avoidance.c | 1304 int i, curr, ret = 0; batadv_bla_check_bcast_duplist() local 1317 curr = (bat_priv->bla.bcast_duplist_curr + i); batadv_bla_check_bcast_duplist() 1318 curr %= BATADV_DUPLIST_SIZE; batadv_bla_check_bcast_duplist() 1319 entry = &bat_priv->bla.bcast_duplist[curr]; batadv_bla_check_bcast_duplist() 1343 curr = (bat_priv->bla.bcast_duplist_curr + BATADV_DUPLIST_SIZE - 1); batadv_bla_check_bcast_duplist() 1344 curr %= BATADV_DUPLIST_SIZE; batadv_bla_check_bcast_duplist() 1345 entry = &bat_priv->bla.bcast_duplist[curr]; batadv_bla_check_bcast_duplist() 1349 bat_priv->bla.bcast_duplist_curr = curr; batadv_bla_check_bcast_duplist()
|
H A D | main.c | 504 int (*curr)(struct sk_buff *, batadv_recv_handler_register() 506 curr = batadv_rx_handler[packet_type]; batadv_recv_handler_register() 508 if ((curr != batadv_recv_unhandled_packet) && batadv_recv_handler_register() 509 (curr != batadv_recv_unhandled_unicast_packet)) batadv_recv_handler_register()
|
/linux-4.4.14/drivers/tty/serial/ |
H A D | bfin_uart.c | 250 struct timeval curr; bfin_serial_rx_chars() local 256 do_gettimeofday(&curr); bfin_serial_rx_chars() 257 if (curr.tv_sec - anomaly_start.tv_sec > 1) bfin_serial_rx_chars() 261 if (curr.tv_sec != anomaly_start.tv_sec) bfin_serial_rx_chars() 263 usecs += curr.tv_usec - anomaly_start.tv_usec; bfin_serial_rx_chars() 271 anomaly_start = curr; bfin_serial_rx_chars()
|
/linux-4.4.14/drivers/scsi/ |
H A D | xen-scsifront.c | 126 struct task_struct *curr; member in struct:vscsifrnt_info 680 if (info && current == info->curr) scsifront_sdev_configure() 691 if (info && current == info->curr) scsifront_sdev_destroy() 995 BUG_ON(info->curr); scsifront_do_lun_hotplug() 996 info->curr = current; scsifront_do_lun_hotplug() 1054 info->curr = NULL; scsifront_do_lun_hotplug()
|
/linux-4.4.14/include/uapi/linux/ |
H A D | cdrom.h | 761 __u8 curr : 1; member in struct:mrw_feature_desc 763 __u8 curr : 1; member in struct:mrw_feature_desc 788 __u8 curr : 1; member in struct:rwrt_feature_desc 790 __u8 curr : 1; member in struct:rwrt_feature_desc 918 __u8 curr:1; member in struct:rm_feature_desc 920 __u8 curr:1; member in struct:rm_feature_desc
|
/linux-4.4.14/net/netfilter/ipset/ |
H A D | ip_set_hash_gen.h | 47 tune_ahash_max(u8 curr, u32 multi) tune_ahash_max() argument 51 if (multi < curr) tune_ahash_max() 52 return curr; tune_ahash_max() 54 n = curr + AHASH_INIT_SIZE; tune_ahash_max() 58 return n > curr && n <= AHASH_MAX_TUNED ? n : curr; tune_ahash_max()
|
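tune_ahash_max() nudges the per-bucket element limit up by one AHASH_INIT_SIZE step, but only within the hard AHASH_MAX_TUNED cap (one bucket must still fit in a netlink message when listing). Restated as a standalone function, with constants as in the header:

    typedef unsigned char u8;
    typedef unsigned int u32;

    #define AHASH_INIT_SIZE 4
    #define AHASH_MAX_TUNED 64

    static u8 tune_ahash_max(u8 curr, u32 multi)
    {
        u32 n;

        if (multi < curr)
            return curr;
        n = curr + AHASH_INIT_SIZE;   /* grow by one step */
        /* hard cap: one bucket must still fit into a message */
        return n > curr && n <= AHASH_MAX_TUNED ? n : curr;
    }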
/linux-4.4.14/sound/isa/gus/ |
H A D | gus_pcm.c | 114 unsigned int curr, begin, end; snd_gf1_pcm_trigger_up() local 138 curr = begin + (pcmp->bpos * pcmp->block_size) / runtime->channels; snd_gf1_pcm_trigger_up() 139 end = curr + (pcmp->block_size / runtime->channels); snd_gf1_pcm_trigger_up() 142 snd_printk(KERN_DEBUG "init: curr=0x%x, begin=0x%x, end=0x%x, " snd_gf1_pcm_trigger_up() 144 curr, begin, end, voice_ctrl, ramp_ctrl, rate); snd_gf1_pcm_trigger_up() 154 snd_gf1_write_addr(gus, SNDRV_GF1_VA_CURRENT, curr << 4, voice_ctrl & 4); snd_gf1_pcm_trigger_up()
|
/linux-4.4.14/kernel/events/ |
H A D | uprobes.c | 715 struct map_info *curr = NULL; build_map_info() local 746 info->next = curr; build_map_info() 747 curr = info; build_map_info() 757 prev = curr; build_map_info() 758 while (curr) { build_map_info() 759 mmput(curr->mm); build_map_info() 760 curr = curr->next; build_map_info() 766 curr = ERR_PTR(-ENOMEM); build_map_info() 777 return curr; build_map_info()
|
/linux-4.4.14/drivers/video/fbdev/matrox/ |
H A D | matroxfb_base.c | 324 pos = (minfo->fbcon.var.yoffset * minfo->fbcon.var.xres_virtual + minfo->fbcon.var.xoffset) * minfo->curr.final_bppShift / 32; matrox_pan_var() 325 pos += minfo->curr.ydstorg.chunks; matrox_pan_var() 657 if (regno >= minfo->curr.cmap_len) matroxfb_setcolreg() 725 fix->smem_start = minfo->video.base + minfo->curr.ydstorg.bytes; matroxfb_update_fix() 726 fix->smem_len = minfo->video.len_usable - minfo->curr.ydstorg.bytes; matroxfb_update_fix() 773 minfo->curr.cmap_len = cmap_len; 775 minfo->curr.ydstorg.bytes = ydstorg; 776 minfo->curr.ydstorg.chunks = ydstorg >> (isInterleave(minfo) ? 3 : 2); 778 minfo->curr.ydstorg.pixels = ydstorg; 780 minfo->curr.ydstorg.pixels = (ydstorg * 8) / var->bits_per_pixel; 781 minfo->curr.final_bppShift = matroxfb_get_final_bppShift(minfo, var->bits_per_pixel); 810 pos = (var->yoffset * var->xres_virtual + var->xoffset) * minfo->curr.final_bppShift / 32; 811 pos += minfo->curr.ydstorg.chunks;
|
H A D | matroxfb_misc.c | 246 divider = minfo->curr.final_bppShift; matroxfb_vgaHWinit() 276 wd = minfo->fbcon.var.xres_virtual * minfo->curr.final_bppShift / 64; matroxfb_vgaHWinit()
|
/linux-4.4.14/drivers/staging/unisys/visornic/ |
H A D | visornic_main.c | 1160 struct sk_buff *skb, *prev, *curr; visornic_rx() local 1258 curr = (struct sk_buff *)cmdrsp->net.rcv.rcvbuf[cc]; visornic_rx() 1259 curr->next = NULL; visornic_rx() 1261 skb_shinfo(skb)->frag_list = curr; visornic_rx() 1263 prev->next = curr; visornic_rx() 1264 prev = curr; visornic_rx() 1271 curr->len = currsize; visornic_rx() 1272 curr->tail += currsize; visornic_rx() 1273 curr->data_len = 0; visornic_rx()
|
/linux-4.4.14/drivers/net/wan/ |
H A D | cosa.c | 1524 int i=0, id=0, prev=0, curr=0; cosa_reset_and_read_id() 1542 for (i=0; i<COSA_MAX_ID_STRING-1; i++, prev=curr) { cosa_reset_and_read_id() 1543 if ((curr = get_wait_data(cosa)) == -1) { cosa_reset_and_read_id() 1546 curr &= 0xff; cosa_reset_and_read_id() 1547 if (curr != '\r' && curr != '\n' && curr != 0x2e) cosa_reset_and_read_id() 1548 idstring[id++] = curr; cosa_reset_and_read_id() 1549 if (curr == 0x2e && prev == '\n') cosa_reset_and_read_id() 1520 int i=0, id=0, prev=0, curr=0; cosa_reset_and_read_id() local
|
/linux-4.4.14/arch/x86/kernel/cpu/ |
H A D | common.c | 1430 struct task_struct *curr = current; cpu_init() local 1432 struct thread_struct *thread = &curr->thread; cpu_init() 1456 curr->active_mm = &init_mm; cpu_init() 1457 BUG_ON(curr->mm); cpu_init() 1458 enter_lazy_tlb(&init_mm, curr); cpu_init()
|
/linux-4.4.14/fs/afs/ |
H A D | dir.c | 236 unsigned offset, next, curr; afs_dir_iterate_block() local 242 curr = (ctx->pos - blkoff) / sizeof(union afs_dirent); afs_dir_iterate_block() 256 if (offset >= curr) afs_dir_iterate_block() 270 (offset < curr ? "skip" : "fill"), afs_dir_iterate_block() 299 if (offset < curr) afs_dir_iterate_block()
|
/linux-4.4.14/drivers/hv/ |
H A D | channel.c | 379 struct list_head *curr; vmbus_establish_gpadl() local 411 list_for_each(curr, &msginfo->submsglist) { vmbus_establish_gpadl() 413 submsginfo = (struct vmbus_channel_msginfo *)curr; vmbus_establish_gpadl()
|
/linux-4.4.14/fs/ocfs2/ |
H A D | suballoc.c | 422 u16 curr, best; ocfs2_find_smallest_chain() local 424 best = curr = 0; ocfs2_find_smallest_chain() 425 while (curr < le16_to_cpu(cl->cl_count)) { ocfs2_find_smallest_chain() 427 le32_to_cpu(cl->cl_recs[curr].c_total)) ocfs2_find_smallest_chain() 428 best = curr; ocfs2_find_smallest_chain() 429 curr++; ocfs2_find_smallest_chain() 1375 u16 curr, best; ocfs2_find_victim_chain() local 1379 best = curr = 0; ocfs2_find_victim_chain() 1380 while (curr < le16_to_cpu(cl->cl_next_free_rec)) { ocfs2_find_victim_chain() 1381 if (le32_to_cpu(cl->cl_recs[curr].c_free) > ocfs2_find_victim_chain() 1383 best = curr; ocfs2_find_victim_chain() 1384 curr++; ocfs2_find_victim_chain()
|
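ocfs2_find_smallest_chain() and ocfs2_find_victim_chain() are plain argmin/argmax scans over the chain records. The victim scan, reduced to its shape (a bare free-count array stands in for the chain list):

    static int find_victim_chain(const unsigned int *free, int nr)
    {
        int best = 0, curr = 0;

        while (curr < nr) {
            if (free[curr] > free[best])
                best = curr;          /* chain with the most free bits */
            curr++;
        }
        return best;
    }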
/linux-4.4.14/drivers/hwmon/pmbus/ |
H A D | pmbus_core.c | 995 struct pmbus_sensor *curr; pmbus_add_limit_attrs() local 999 curr = pmbus_add_sensor(data, name, l->attr, index, pmbus_add_limit_attrs() 1003 if (!curr) pmbus_add_limit_attrs() 1008 attr->compare ? l->low ? curr : base pmbus_add_limit_attrs() 1010 attr->compare ? l->low ? base : curr pmbus_add_limit_attrs() 1669 ret = pmbus_add_sensor_attrs(client, data, "curr", current_attributes, pmbus_find_attributes()
|
/linux-4.4.14/drivers/net/ethernet/marvell/ |
H A D | pxa168_eth.c | 1078 int curr; rxq_deinit() local 1081 for (curr = 0; pep->rx_desc_count && curr < pep->rx_ring_size; curr++) { rxq_deinit() 1082 if (pep->rx_skb[curr]) { rxq_deinit() 1083 dev_kfree_skb(pep->rx_skb[curr]); rxq_deinit()
|
/linux-4.4.14/drivers/staging/android/ |
H A D | sync.c | 315 int sync_fence_wake_up_wq(wait_queue_t *curr, unsigned mode, sync_fence_wake_up_wq() argument 320 wait = container_of(curr, struct sync_fence_waiter, work); sync_fence_wake_up_wq()
|
H A D | sync.h | 353 int sync_fence_wake_up_wq(wait_queue_t *curr, unsigned mode,
|
/linux-4.4.14/arch/powerpc/kernel/ |
H A D | vio.c | 87 * @curr: bytes currently allocated 100 size_t curr; member in struct:vio_cmo 165 vio_cmo.curr += size; vio_cmo_alloc() 166 if (vio_cmo.curr > vio_cmo.high) vio_cmo_alloc() 167 vio_cmo.high = vio_cmo.curr; vio_cmo_alloc() 200 vio_cmo.curr -= size; vio_cmo_dealloc() 1030 viobus_cmo_rd_attr(curr); variable 1046 vio_cmo.high = vio_cmo.curr; cmo_high_store()
|
/linux-4.4.14/drivers/input/mouse/ |
H A D | hgpk.c | 109 static int approx_half(int curr, int prev) approx_half() argument 113 if (curr < 5 || prev < 5) approx_half() 119 return belowhalf < curr && curr <= abovehalf; approx_half()
|
/linux-4.4.14/drivers/message/fusion/ |
H A D | mptlan.c | 1158 u32 curr, buckets, count, max; mpt_lan_post_receive_buckets() local 1163 curr = atomic_read(&priv->buckets_out); mpt_lan_post_receive_buckets() 1164 buckets = (priv->max_buckets_out - curr); mpt_lan_post_receive_buckets() 1168 __func__, buckets, curr)); mpt_lan_post_receive_buckets()
|
/linux-4.4.14/include/media/ |
H A D | saa7146_vv.h | 74 struct saa7146_buf *curr; member in struct:saa7146_dmaqueue
|
/linux-4.4.14/include/sound/ |
H A D | info.h | 32 unsigned int curr; /* current position in buffer */ member in struct:snd_info_buffer
|
/linux-4.4.14/include/uapi/linux/can/ |
H A D | gw.h | 93 /* CAN frame elements that are affected by curr. 3 CAN frame modifications */
|
/linux-4.4.14/drivers/staging/unisys/visorbus/ |
H A D | visorchipset.c | 99 u8 *curr; member in struct:parser_context 390 ctx->curr = NULL; parser_init_byte_stream() 468 ctx->curr = ctx->data + phdr->initiator_offset; parser_param_start() 472 ctx->curr = ctx->data + phdr->target_offset; parser_param_start() 476 ctx->curr = ctx->data + phdr->connection_offset; parser_param_start() 480 ctx->curr = ctx->data + phdr->name_offset; parser_param_start() 510 pscan = ctx->curr; parser_string_get()
|
/linux-4.4.14/drivers/staging/lustre/lustre/obdclass/ |
H A D | cl_io.c | 289 struct cl_io_lock_link *curr; cl_io_locks_sort() local 296 list_for_each_entry_safe(curr, temp, cl_io_locks_sort() 301 &curr->cill_descr)) { cl_io_locks_sort() 311 list_move_tail(&curr->cill_linkage, cl_io_locks_sort() 320 prev = curr; cl_io_locks_sort()
|
/linux-4.4.14/drivers/net/wireless/brcm80211/brcmsmac/ |
H A D | dma.c | 871 uint i, curr; dma64_getnextrxp() local 881 curr = dma64_getnextrxp() 886 /* ignore curr if forceall */ dma64_getnextrxp() 887 if (!forceall && (i == curr)) dma64_getnextrxp() 1536 brcms_dbg_dma(di->core, "bogus curr: start %d end %d txout %d\n", dma_getnexttxp()
|
/linux-4.4.14/net/packet/ |
H A D | af_packet.c | 1023 static void prb_fill_curr_block(char *curr, prb_fill_curr_block() argument 1030 ppd = (struct tpacket3_hdr *)curr; prb_fill_curr_block() 1032 pkc->prev = curr; prb_fill_curr_block() 1049 char *curr, *end; __packet_lookup_frame_in_block() local 1075 curr = pkc->nxt_offset; __packet_lookup_frame_in_block() 1080 if (curr+TOTAL_PKT_LEN_INCL_ALIGN(len) < end) { __packet_lookup_frame_in_block() 1081 prb_fill_curr_block(curr, pkc, pbd, len); __packet_lookup_frame_in_block() 1082 return (void *)curr; __packet_lookup_frame_in_block() 1089 curr = (char *)prb_dispatch_next_block(pkc, po); __packet_lookup_frame_in_block() 1090 if (curr) { __packet_lookup_frame_in_block() 1092 prb_fill_curr_block(curr, pkc, pbd, len); __packet_lookup_frame_in_block() 1093 return (void *)curr; __packet_lookup_frame_in_block() 1107 char *curr = NULL; packet_current_rx_frame() local 1111 curr = packet_lookup_frame(po, &po->rx_ring, packet_current_rx_frame() 1113 return curr; packet_current_rx_frame()
|
/linux-4.4.14/net/sctp/ |
H A D | associola.c | 1243 static struct sctp_transport *sctp_trans_elect_best(struct sctp_transport *curr, sctp_trans_elect_best() argument 1248 if (best == NULL || curr == best) sctp_trans_elect_best() 1249 return curr; sctp_trans_elect_best() 1251 score_curr = sctp_trans_score(curr); sctp_trans_elect_best() 1259 return curr; sctp_trans_elect_best() 1261 return sctp_trans_elect_tie(curr, best); sctp_trans_elect_best()
|
/linux-4.4.14/sound/core/ |
H A D | info.c | 635 c = buffer->buffer[buffer->curr++]; snd_info_get_line() 636 if (buffer->curr >= buffer->size) snd_info_get_line()
|
/linux-4.4.14/drivers/video/fbdev/ |
H A D | pm2fb.c | 255 s32 curr; pm2_mnp() local 264 curr = (clk > f) ? clk - f : f - clk; pm2_mnp() 265 if (curr < delta) { pm2_mnp() 266 delta = curr; pm2_mnp()
|
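pm2_mnp() keeps whichever divider setting yields a pixel clock closest to the request, tracking the best absolute distance seen so far. The bookkeeping, extracted (the candidate clocks are assumed precomputed here rather than derived from m/n/p values):

    static int closest(int f, const int *clocks, int n)
    {
        int delta = 1 << 30, best = 0;

        for (int i = 0; i < n; i++) {
            int curr = clocks[i] > f ? clocks[i] - f : f - clocks[i];

            if (curr < delta) {
                delta = curr;         /* new closest match */
                best = i;
            }
        }
        return best;
    }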
/linux-4.4.14/drivers/infiniband/core/ |
H A D | sa_query.c | 620 const struct nlattr *head, *curr; ib_nl_process_good_resolve_rsp() local 641 nla_for_each_attr(curr, head, len, rem) { nla_for_each_attr() 642 if (curr->nla_type == LS_NLA_TYPE_PATH_RECORD) { nla_for_each_attr() 643 rec = nla_data(curr); nla_for_each_attr()
|