Lines Matching refs:curr
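Cross-reference search output: each row below pairs a source line that references the identifier curr (the task_struct being checked, usually the running task obtained via current) with that file's own line number and the enclosing function. The trailing "argument" and "local" tags mark whether curr is a function parameter or a local variable at that site. All of the functions belong to the kernel's lockdep engine (kernel/locking/lockdep.c in trees of this vintage). A simplified sketch of the recurring held-lock walk follows the listing.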

573 static void lockdep_print_held_locks(struct task_struct *curr)  in lockdep_print_held_locks()  argument
575 int i, depth = curr->lockdep_depth; in lockdep_print_held_locks()
578 printk("no locks held by %s/%d.\n", curr->comm, task_pid_nr(curr)); in lockdep_print_held_locks()
582 depth, depth > 1 ? "s" : "", curr->comm, task_pid_nr(curr)); in lockdep_print_held_locks()
586 print_lock(curr->held_locks + i); in lockdep_print_held_locks()
1163 struct task_struct *curr = current; in print_circular_bug_header() local
1174 curr->comm, task_pid_nr(curr)); in print_circular_bug_header()
1196 struct task_struct *curr = current; in print_circular_bug() local
1223 lockdep_print_held_locks(curr); in print_circular_bug()
1492 print_bad_irq_dependency(struct task_struct *curr, in print_bad_irq_dependency() argument
1513 curr->comm, task_pid_nr(curr), in print_bad_irq_dependency()
1514 curr->hardirq_context, hardirq_count() >> HARDIRQ_SHIFT, in print_bad_irq_dependency()
1515 curr->softirq_context, softirq_count() >> SOFTIRQ_SHIFT, in print_bad_irq_dependency()
1516 curr->hardirqs_enabled, in print_bad_irq_dependency()
1517 curr->softirqs_enabled); in print_bad_irq_dependency()
1546 lockdep_print_held_locks(curr); in print_bad_irq_dependency()
1567 check_usage(struct task_struct *curr, struct held_lock *prev, in check_usage() argument
1593 return print_bad_irq_dependency(curr, &this, &that, in check_usage()
1640 static int check_irq_usage(struct task_struct *curr, struct held_lock *prev, in check_irq_usage() argument
1649 if (!check_usage(curr, prev, next, bit, in check_irq_usage()
1661 if (!check_usage(curr, prev, next, bit, in check_irq_usage()
1669 check_prev_add_irq(struct task_struct *curr, struct held_lock *prev, in check_prev_add_irq() argument
1673 if (!check_irq_usage(curr, prev, next, LOCK_USED_IN_##__STATE)) \ in check_prev_add_irq()
1696 check_prev_add_irq(struct task_struct *curr, struct held_lock *prev, in check_prev_add_irq() argument
1730 print_deadlock_bug(struct task_struct *curr, struct held_lock *prev, in print_deadlock_bug() argument
1742 curr->comm, task_pid_nr(curr)); in print_deadlock_bug()
1749 lockdep_print_held_locks(curr); in print_deadlock_bug()
1766 check_deadlock(struct task_struct *curr, struct held_lock *next, in check_deadlock() argument
1773 for (i = 0; i < curr->lockdep_depth; i++) { in check_deadlock()
1774 prev = curr->held_locks + i; in check_deadlock()
1796 return print_deadlock_bug(curr, prev, next); in check_deadlock()
1824 check_prev_add(struct task_struct *curr, struct held_lock *prev, in check_prev_add() argument
1857 if (!check_prev_add_irq(curr, prev, next)) in check_prev_add()
1929 check_prevs_add(struct task_struct *curr, struct held_lock *next) in check_prevs_add() argument
1931 int depth = curr->lockdep_depth; in check_prevs_add()
1946 if (curr->held_locks[depth].irq_context != in check_prevs_add()
1947 curr->held_locks[depth-1].irq_context) in check_prevs_add()
1951 int distance = curr->lockdep_depth - depth + 1; in check_prevs_add()
1952 hlock = curr->held_locks + depth - 1; in check_prevs_add()
1958 if (!check_prev_add(curr, hlock, next, in check_prevs_add()
1979 if (curr->held_locks[depth].irq_context != in check_prevs_add()
1980 curr->held_locks[depth-1].irq_context) in check_prevs_add()
2015 static inline int lookup_chain_cache(struct task_struct *curr, in lookup_chain_cache() argument
2078 for (i = curr->lockdep_depth - 1; i >= 0; i--) { in lookup_chain_cache()
2079 hlock_curr = curr->held_locks + i; in lookup_chain_cache()
2084 chain->depth = curr->lockdep_depth + 1 - i; in lookup_chain_cache()
2089 int lock_id = curr->held_locks[i].class_idx - 1; in lookup_chain_cache()
2101 static int validate_chain(struct task_struct *curr, struct lockdep_map *lock, in validate_chain() argument
2115 lookup_chain_cache(curr, hlock, chain_key)) { in validate_chain()
2128 int ret = check_deadlock(curr, hlock, lock, hlock->read); in validate_chain()
2144 if (!check_prevs_add(curr, hlock)) in validate_chain()
2155 static inline int validate_chain(struct task_struct *curr, in validate_chain() argument
2167 static void check_chain_key(struct task_struct *curr) in check_chain_key() argument
2174 for (i = 0; i < curr->lockdep_depth; i++) { in check_chain_key()
2175 hlock = curr->held_locks + i; in check_chain_key()
2183 curr->lockdep_depth, i, in check_chain_key()
2201 if (chain_key != curr->curr_chain_key) { in check_chain_key()
2208 curr->lockdep_depth, i, in check_chain_key()
2210 (unsigned long long)curr->curr_chain_key); in check_chain_key()
2234 print_usage_bug(struct task_struct *curr, struct held_lock *this, in print_usage_bug() argument
2250 curr->comm, task_pid_nr(curr), in print_usage_bug()
2251 trace_hardirq_context(curr), hardirq_count() >> HARDIRQ_SHIFT, in print_usage_bug()
2252 trace_softirq_context(curr), softirq_count() >> SOFTIRQ_SHIFT, in print_usage_bug()
2253 trace_hardirqs_enabled(curr), in print_usage_bug()
2254 trace_softirqs_enabled(curr)); in print_usage_bug()
2260 print_irqtrace_events(curr); in print_usage_bug()
2264 lockdep_print_held_locks(curr); in print_usage_bug()
2276 valid_state(struct task_struct *curr, struct held_lock *this, in valid_state() argument
2280 return print_usage_bug(curr, this, bad_bit, new_bit); in valid_state()
2284 static int mark_lock(struct task_struct *curr, struct held_lock *this,
2293 print_irq_inversion_bug(struct task_struct *curr, in print_irq_inversion_bug() argument
2311 curr->comm, task_pid_nr(curr)); in print_irq_inversion_bug()
2340 lockdep_print_held_locks(curr); in print_irq_inversion_bug()
2358 check_usage_forwards(struct task_struct *curr, struct held_lock *this, in check_usage_forwards() argument
2373 return print_irq_inversion_bug(curr, &root, target_entry, in check_usage_forwards()
2382 check_usage_backwards(struct task_struct *curr, struct held_lock *this, in check_usage_backwards() argument
2397 return print_irq_inversion_bug(curr, &root, target_entry, in check_usage_backwards()
2401 void print_irqtrace_events(struct task_struct *curr) in print_irqtrace_events() argument
2403 printk("irq event stamp: %u\n", curr->irq_events); in print_irqtrace_events()
2404 printk("hardirqs last enabled at (%u): ", curr->hardirq_enable_event); in print_irqtrace_events()
2405 print_ip_sym(curr->hardirq_enable_ip); in print_irqtrace_events()
2406 printk("hardirqs last disabled at (%u): ", curr->hardirq_disable_event); in print_irqtrace_events()
2407 print_ip_sym(curr->hardirq_disable_ip); in print_irqtrace_events()
2408 printk("softirqs last enabled at (%u): ", curr->softirq_enable_event); in print_irqtrace_events()
2409 print_ip_sym(curr->softirq_enable_ip); in print_irqtrace_events()
2410 printk("softirqs last disabled at (%u): ", curr->softirq_disable_event); in print_irqtrace_events()
2411 print_ip_sym(curr->softirq_disable_ip); in print_irqtrace_events()
2457 mark_lock_irq(struct task_struct *curr, struct held_lock *this, in mark_lock_irq() argument
2478 if (!valid_state(curr, this, new_bit, excl_bit)) in mark_lock_irq()
2486 !usage(curr, this, excl_bit, state_name(new_bit & ~1))) in mark_lock_irq()
2493 if (!valid_state(curr, this, new_bit, excl_bit + 1)) in mark_lock_irq()
2497 !usage(curr, this, excl_bit + 1, in mark_lock_irq()
2518 mark_held_locks(struct task_struct *curr, enum mark_type mark) in mark_held_locks() argument
2524 for (i = 0; i < curr->lockdep_depth; i++) { in mark_held_locks()
2525 hlock = curr->held_locks + i; in mark_held_locks()
2536 if (!mark_lock(curr, hlock, usage_bit)) in mark_held_locks()
2548 struct task_struct *curr = current; in __trace_hardirqs_on_caller() local
2551 curr->hardirqs_enabled = 1; in __trace_hardirqs_on_caller()
2557 if (!mark_held_locks(curr, HARDIRQ)) in __trace_hardirqs_on_caller()
2564 if (curr->softirqs_enabled) in __trace_hardirqs_on_caller()
2565 if (!mark_held_locks(curr, SOFTIRQ)) in __trace_hardirqs_on_caller()
2568 curr->hardirq_enable_ip = ip; in __trace_hardirqs_on_caller()
2569 curr->hardirq_enable_event = ++curr->irq_events; in __trace_hardirqs_on_caller()
2628 struct task_struct *curr = current; in trace_hardirqs_off_caller() local
2642 if (curr->hardirqs_enabled) { in trace_hardirqs_off_caller()
2646 curr->hardirqs_enabled = 0; in trace_hardirqs_off_caller()
2647 curr->hardirq_disable_ip = ip; in trace_hardirqs_off_caller()
2648 curr->hardirq_disable_event = ++curr->irq_events; in trace_hardirqs_off_caller()
2666 struct task_struct *curr = current; in trace_softirqs_on() local
2678 if (curr->softirqs_enabled) { in trace_softirqs_on()
2687 curr->softirqs_enabled = 1; in trace_softirqs_on()
2688 curr->softirq_enable_ip = ip; in trace_softirqs_on()
2689 curr->softirq_enable_event = ++curr->irq_events; in trace_softirqs_on()
2696 if (curr->hardirqs_enabled) in trace_softirqs_on()
2697 mark_held_locks(curr, SOFTIRQ); in trace_softirqs_on()
2706 struct task_struct *curr = current; in trace_softirqs_off() local
2717 if (curr->softirqs_enabled) { in trace_softirqs_off()
2721 curr->softirqs_enabled = 0; in trace_softirqs_off()
2722 curr->softirq_disable_ip = ip; in trace_softirqs_off()
2723 curr->softirq_disable_event = ++curr->irq_events; in trace_softirqs_off()
2735 struct task_struct *curr = current; in __lockdep_trace_alloc() local
2745 if ((curr->flags & PF_MEMALLOC) && !(gfp_mask & __GFP_NOMEMALLOC)) in __lockdep_trace_alloc()
2758 mark_held_locks(curr, RECLAIM_FS); in __lockdep_trace_alloc()
2778 static int mark_irqflags(struct task_struct *curr, struct held_lock *hlock) in mark_irqflags() argument
2786 if (curr->hardirq_context) in mark_irqflags()
2787 if (!mark_lock(curr, hlock, in mark_irqflags()
2790 if (curr->softirq_context) in mark_irqflags()
2791 if (!mark_lock(curr, hlock, in mark_irqflags()
2795 if (curr->hardirq_context) in mark_irqflags()
2796 if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ)) in mark_irqflags()
2798 if (curr->softirq_context) in mark_irqflags()
2799 if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ)) in mark_irqflags()
2805 if (!mark_lock(curr, hlock, in mark_irqflags()
2808 if (curr->softirqs_enabled) in mark_irqflags()
2809 if (!mark_lock(curr, hlock, in mark_irqflags()
2813 if (!mark_lock(curr, hlock, in mark_irqflags()
2816 if (curr->softirqs_enabled) in mark_irqflags()
2817 if (!mark_lock(curr, hlock, in mark_irqflags()
2829 if (!hlock->trylock && (curr->lockdep_reclaim_gfp & __GFP_FS)) { in mark_irqflags()
2831 if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS_READ)) in mark_irqflags()
2834 if (!mark_lock(curr, hlock, LOCK_USED_IN_RECLAIM_FS)) in mark_irqflags()
2842 static int separate_irq_context(struct task_struct *curr, in separate_irq_context() argument
2845 unsigned int depth = curr->lockdep_depth; in separate_irq_context()
2850 hlock->irq_context = 2*(curr->hardirq_context ? 1 : 0) + in separate_irq_context()
2851 curr->softirq_context; in separate_irq_context()
2855 prev_hlock = curr->held_locks + depth-1; in separate_irq_context()
2870 int mark_lock_irq(struct task_struct *curr, struct held_lock *this, in mark_lock_irq() argument
2877 static inline int mark_irqflags(struct task_struct *curr, in mark_irqflags() argument
2883 static inline int separate_irq_context(struct task_struct *curr, in separate_irq_context() argument
2898 static int mark_lock(struct task_struct *curr, struct held_lock *this, in mark_lock() argument
2933 ret = mark_lock_irq(curr, this, new_bit); in mark_lock()
2955 print_irqtrace_events(curr); in mark_lock()
3029 print_lock_nested_lock_not_held(struct task_struct *curr, in print_lock_nested_lock_not_held() argument
3044 printk("%s/%d is trying to lock:\n", curr->comm, task_pid_nr(curr)); in print_lock_nested_lock_not_held()
3054 lockdep_print_held_locks(curr); in print_lock_nested_lock_not_held()
3073 struct task_struct *curr = current; in __lock_acquire() local
3119 depth = curr->lockdep_depth; in __lock_acquire()
3129 hlock = curr->held_locks + depth - 1; in __lock_acquire()
3140 hlock = curr->held_locks + depth; in __lock_acquire()
3162 if (check && !mark_irqflags(curr, hlock)) in __lock_acquire()
3166 if (!mark_lock(curr, hlock, LOCK_USED)) in __lock_acquire()
3186 chain_key = curr->curr_chain_key; in __lock_acquire()
3197 if (separate_irq_context(curr, hlock)) { in __lock_acquire()
3204 return print_lock_nested_lock_not_held(curr, hlock, ip); in __lock_acquire()
3206 if (!validate_chain(curr, lock, hlock, chain_head, chain_key)) in __lock_acquire()
3209 curr->curr_chain_key = chain_key; in __lock_acquire()
3210 curr->lockdep_depth++; in __lock_acquire()
3211 check_chain_key(curr); in __lock_acquire()
3216 if (unlikely(curr->lockdep_depth >= MAX_LOCK_DEPTH)) { in __lock_acquire()
3220 curr->lockdep_depth, MAX_LOCK_DEPTH); in __lock_acquire()
3229 if (unlikely(curr->lockdep_depth > max_lockdep_depth)) in __lock_acquire()
3230 max_lockdep_depth = curr->lockdep_depth; in __lock_acquire()
3236 print_unlock_imbalance_bug(struct task_struct *curr, struct lockdep_map *lock, in print_unlock_imbalance_bug() argument
3250 curr->comm, task_pid_nr(curr)); in print_unlock_imbalance_bug()
3256 lockdep_print_held_locks(curr); in print_unlock_imbalance_bug()
3304 struct task_struct *curr = current; in __lock_set_class() local
3310 depth = curr->lockdep_depth; in __lock_set_class()
3320 hlock = curr->held_locks + i; in __lock_set_class()
3330 return print_unlock_imbalance_bug(curr, lock, ip); in __lock_set_class()
3337 curr->lockdep_depth = i; in __lock_set_class()
3338 curr->curr_chain_key = hlock->prev_chain_key; in __lock_set_class()
3341 hlock = curr->held_locks + i; in __lock_set_class()
3354 if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth)) in __lock_set_class()
3369 struct task_struct *curr = current; in __lock_release() local
3377 depth = curr->lockdep_depth; in __lock_release()
3383 return print_unlock_imbalance_bug(curr, lock, ip); in __lock_release()
3391 hlock = curr->held_locks + i; in __lock_release()
3401 return print_unlock_imbalance_bug(curr, lock, ip); in __lock_release()
3427 curr->lockdep_depth = i; in __lock_release()
3428 curr->curr_chain_key = hlock->prev_chain_key; in __lock_release()
3431 hlock = curr->held_locks + i; in __lock_release()
3444 if (DEBUG_LOCKS_WARN_ON(curr->lockdep_depth != depth - 1)) in __lock_release()
3452 struct task_struct *curr = current; in __lock_is_held() local
3455 for (i = 0; i < curr->lockdep_depth; i++) { in __lock_is_held()
3456 struct held_lock *hlock = curr->held_locks + i; in __lock_is_held()
3467 struct task_struct *curr = current; in __lock_pin_lock() local
3473 for (i = 0; i < curr->lockdep_depth; i++) { in __lock_pin_lock()
3474 struct held_lock *hlock = curr->held_locks + i; in __lock_pin_lock()
3487 struct task_struct *curr = current; in __lock_unpin_lock() local
3493 for (i = 0; i < curr->lockdep_depth; i++) { in __lock_unpin_lock()
3494 struct held_lock *hlock = curr->held_locks + i; in __lock_unpin_lock()
3677 print_lock_contention_bug(struct task_struct *curr, struct lockdep_map *lock, in print_lock_contention_bug() argument
3691 curr->comm, task_pid_nr(curr)); in print_lock_contention_bug()
3697 lockdep_print_held_locks(curr); in print_lock_contention_bug()
3708 struct task_struct *curr = current; in __lock_contended() local
3714 depth = curr->lockdep_depth; in __lock_contended()
3724 hlock = curr->held_locks + i; in __lock_contended()
3734 print_lock_contention_bug(curr, lock, ip); in __lock_contended()
3760 struct task_struct *curr = current; in __lock_acquired() local
3767 depth = curr->lockdep_depth; in __lock_acquired()
3777 hlock = curr->held_locks + i; in __lock_acquired()
3787 print_lock_contention_bug(curr, lock, _RET_IP_); in __lock_acquired()
4075 print_freed_lock_bug(struct task_struct *curr, const void *mem_from, in print_freed_lock_bug() argument
4089 curr->comm, task_pid_nr(curr), mem_from, mem_to-1); in print_freed_lock_bug()
4091 lockdep_print_held_locks(curr); in print_freed_lock_bug()
4111 struct task_struct *curr = current; in debug_check_no_locks_freed() local
4120 for (i = 0; i < curr->lockdep_depth; i++) { in debug_check_no_locks_freed()
4121 hlock = curr->held_locks + i; in debug_check_no_locks_freed()
4127 print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock); in debug_check_no_locks_freed()
4235 struct task_struct *curr = current; in lockdep_sys_exit() local
4237 if (unlikely(curr->lockdep_depth)) { in lockdep_sys_exit()
4246 curr->comm, curr->pid); in lockdep_sys_exit()
4247 lockdep_print_held_locks(curr); in lockdep_sys_exit()
4253 struct task_struct *curr = current; in lockdep_rcu_suspicious() local
4296 lockdep_print_held_locks(curr); in lockdep_rcu_suspicious()
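
To make the recurring pattern concrete, here is a small, self-contained userspace sketch. It is illustrative only, not the kernel code: struct members are pared down to the fields the cross-reference actually shows, the print formats are approximate, and the MAX_LOCK_DEPTH value is a stand-in. It mirrors the held-lock walk that appears throughout the listing (lockdep_print_held_locks, check_deadlock, __lock_is_held, mark_held_locks, and others): the task carries a fixed-size held_locks array, lockdep_depth counts its valid entries, and every validation or report path iterates over indices 0 .. lockdep_depth-1 via curr->held_locks + i.

/*
 * Simplified sketch of lockdep's per-task held-lock tracking.
 * Everything here is cut down for illustration; only the shape of
 * the iteration matches the kernel functions in the listing above.
 */
#include <stdio.h>
#include <string.h>

#define MAX_LOCK_DEPTH 48	/* stand-in for the kernel's per-task cap */

struct held_lock {
	const char *name;	/* stand-in for the lock class */
};

struct task_struct {
	char comm[16];
	int pid;
	int lockdep_depth;	/* number of valid held_locks entries */
	struct held_lock held_locks[MAX_LOCK_DEPTH];
};

/* Mirrors the shape of lockdep_print_held_locks() in the listing. */
static void print_held_locks(struct task_struct *curr)
{
	int i, depth = curr->lockdep_depth;

	if (!depth) {
		printf("no locks held by %s/%d.\n", curr->comm, curr->pid);
		return;
	}
	printf("%d lock%s held by %s/%d:\n",
	       depth, depth > 1 ? "s" : "", curr->comm, curr->pid);
	for (i = 0; i < depth; i++)
		printf(" #%d: %s\n", i, (curr->held_locks + i)->name);
}

/* Mirrors the shape of __lock_is_held(): scan the held-lock stack. */
static int lock_is_held(struct task_struct *curr, const char *name)
{
	int i;

	for (i = 0; i < curr->lockdep_depth; i++) {
		struct held_lock *hlock = curr->held_locks + i;

		if (strcmp(hlock->name, name) == 0)
			return 1;
	}
	return 0;
}

int main(void)
{
	struct task_struct t = { .comm = "demo", .pid = 1234 };

	/* Acquire order is push order, as in __lock_acquire():
	 * hlock = curr->held_locks + depth; curr->lockdep_depth++; */
	t.held_locks[t.lockdep_depth++] = (struct held_lock){ .name = "lock_A" };
	t.held_locks[t.lockdep_depth++] = (struct held_lock){ .name = "lock_B" };

	print_held_locks(&t);
	printf("holds lock_B? %d\n", lock_is_held(&t, "lock_B"));
	return 0;
}

Compiled and run, this prints a two-lock report followed by a successful membership check, the same shape as the kernel's "N locks held by comm/pid" banner produced by lockdep_print_held_locks() at the top of the listing.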