hlock: references in kernel/locking/lockdep.c

 156  static inline struct lock_class *hlock_class(struct held_lock *hlock)
 158  unsigned int class_idx = hlock->class_idx;
 277  static void lock_release_holdtime(struct held_lock *hlock)
 285  holdtime = lockstat_clock() - hlock->holdtime_stamp;
 287  stats = get_lock_stats(hlock_class(hlock));
 288  if (hlock->read)
 294  static inline void lock_release_holdtime(struct held_lock *hlock)
 671  static void print_lock(struct held_lock *hlock)
 683  struct lock_class *lock = hlock_class(hlock);
 690  printk(KERN_CONT "%px", hlock->instance);
 692  printk(KERN_CONT ", at: %pS\n", (void *)hlock->acquire_ip);
2558  struct held_lock *hlock;
2577  hlock = curr->held_locks + depth - 1;
2583  if (hlock->read != 2 && hlock->check) {
2584  int ret = check_prev_add(curr, hlock, next, distance,
2595  if (!hlock->trylock)
2641  struct held_lock *hlock)
2648  if (hlock_curr->irq_context != hlock->irq_context)
2673  struct held_lock *hlock;
2681  hlock = curr->held_locks + i;
2682  chain_key = print_chain_key_iteration(hlock->class_idx, chain_key);
2684  print_lock(hlock);
2737  struct held_lock *hlock,
2743  i = get_first_held_lock(curr, hlock);
2746  print_collision(curr, hlock, chain);
2754  print_collision(curr, hlock, chain);
2797  struct held_lock *hlock,
2800  struct lock_class *class = hlock_class(hlock);
2823  chain->irq_context = hlock->irq_context;
2824  i = get_first_held_lock(curr, hlock);
2880  struct held_lock *hlock,
2883  struct lock_class *class = hlock_class(hlock);
2888  if (!check_no_collision(curr, hlock, chain))
2918  if (!add_chain_cache(curr, hlock, chain_key))
2925  struct held_lock *hlock,
2938  if (!hlock->trylock && hlock->check &&
2939  lookup_chain_cache_add(curr, hlock, chain_key)) {
2958  int ret = check_deadlock(curr, hlock);
2968  hlock->read = 2;
2974  if (!check_prevs_add(curr, hlock))
2989  struct held_lock *hlock,
3003  struct held_lock *hlock, *prev_hlock = NULL;
3008  hlock = curr->held_locks + i;
3009  if (chain_key != hlock->prev_chain_key) {
3018  (unsigned long long)hlock->prev_chain_key);
3026  if (DEBUG_LOCKS_WARN_ON(!test_bit(hlock->class_idx, lock_classes_in_use)))
3030  hlock->irq_context))
3032  chain_key = iterate_chain_key(chain_key, hlock->class_idx);
3033  prev_hlock = hlock;
3348  struct held_lock *hlock;
3353  hlock = curr->held_locks + i;
3355  if (hlock->read)
3360  if (!hlock->check)
3363  if (!mark_lock(curr, hlock, hlock_bit))
3545  mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
3554  if (!hlock->trylock) {
3555  if (hlock->read) {
3557  if (!mark_lock(curr, hlock,
3561  if (!mark_lock(curr, hlock,
3566  if (!mark_lock(curr, hlock, LOCK_USED_IN_HARDIRQ))
3569  if (!mark_lock(curr, hlock, LOCK_USED_IN_SOFTIRQ))
3573  if (!hlock->hardirqs_off) {
3574  if (hlock->read) {
3575  if (!mark_lock(curr, hlock,
3579  if (!mark_lock(curr, hlock,
3583  if (!mark_lock(curr, hlock,
3587  if (!mark_lock(curr, hlock,
3595  if (!mark_lock(curr, hlock, LOCK_USED))
3607  struct held_lock *hlock)
3623  if (prev_hlock->irq_context != hlock->irq_context)
3692  mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
3703  struct held_lock *hlock)
3775  struct held_lock *hlock,
3790  print_lock(hlock);
3793  pr_warn("%s\n", hlock->nest_lock->name);
3822  struct held_lock *hlock;
3870  hlock = curr->held_locks + depth - 1;
3871  if (hlock->class_idx == class_idx && nest_lock) {
3875  if (!hlock->references)
3876  hlock->references++;
3878  hlock->references += references;
3881  if (DEBUG_LOCKS_WARN_ON(hlock->references < references))
3888  hlock = curr->held_locks + depth;
3895  hlock->class_idx = class_idx;
3896  hlock->acquire_ip = ip;
3897  hlock->instance = lock;
3898  hlock->nest_lock = nest_lock;
3899  hlock->irq_context = task_irq_context(curr);
3900  hlock->trylock = trylock;
3901  hlock->read = read;
3902  hlock->check = check;
3903  hlock->hardirqs_off = !!hardirqs_off;
3904  hlock->references = references;
3906  hlock->waittime_stamp = 0;
3907  hlock->holdtime_stamp = lockstat_clock();
3909  hlock->pin_count = pin_count;
3912  if (!mark_usage(curr, hlock, check))
3941  hlock->prev_chain_key = chain_key;
3942  if (separate_irq_context(curr, hlock)) {
3949  print_lock_nested_lock_not_held(curr, hlock, ip);
3954  WARN_ON_ONCE(depth && !hlock_class(hlock - 1)->key);
3955  WARN_ON_ONCE(!hlock_class(hlock)->key);
3958  if (!validate_chain(curr, hlock, chain_head, chain_key))
4014  static int match_held_lock(const struct held_lock *hlock,
4017  if (hlock->instance == lock)
4020  if (hlock->references) {
4040  if (DEBUG_LOCKS_WARN_ON(!hlock->nest_lock))
4043  if (hlock->class_idx == class - lock_classes)
4055  struct held_lock *ret, *hlock, *prev_hlock;
4059  hlock = curr->held_locks + i;
4060  ret = hlock;
4061  if (match_held_lock(hlock, lock))
4065  for (i--, prev_hlock = hlock--;
4067  i--, prev_hlock = hlock--) {
4071  if (prev_hlock->irq_context != hlock->irq_context) {
4075  if (match_held_lock(hlock, lock)) {
4076  ret = hlock;
4089  struct held_lock *hlock;
4095  for (hlock = curr->held_locks + idx; idx < depth; idx++, hlock++) {
4096  switch (__lock_acquire(hlock->instance,
4097  hlock_class(hlock)->subclass,
4098  hlock->trylock,
4099  hlock->read, hlock->check,
4100  hlock->hardirqs_off,
4101  hlock->nest_lock, hlock->acquire_ip,
4102  hlock->references, hlock->pin_count)) {
4125  struct held_lock *hlock;
4140  hlock = find_held_lock(curr, lock, depth, &i);
4141  if (!hlock) {
4148  hlock->class_idx = class - lock_classes;
4151  curr->curr_chain_key = hlock->prev_chain_key;
4169  struct held_lock *hlock;
4183  hlock = find_held_lock(curr, lock, depth, &i);
4184  if (!hlock) {
4190  curr->curr_chain_key = hlock->prev_chain_key;
4192  WARN(hlock->read, "downgrading a read lock");
4193  hlock->read = 1;
4194  hlock->acquire_ip = ip;
4225  struct held_lock *hlock;
4245  hlock = find_held_lock(curr, lock, depth, &i);
4246  if (!hlock) {
4251  if (hlock->instance == lock)
4252  lock_release_holdtime(hlock);
4254  WARN(hlock->pin_count, "releasing a pinned lock\n");
4256  if (hlock->references) {
4257  hlock->references--;
4258  if (hlock->references) {
4275  curr->curr_chain_key = hlock->prev_chain_key;
4309  struct held_lock *hlock = curr->held_locks + i;
4311  if (match_held_lock(hlock, lock)) {
4312  if (read == -1 || hlock->read == read)
4332  struct held_lock *hlock = curr->held_locks + i;
4334  if (match_held_lock(hlock, lock)) {
4341  hlock->pin_count += cookie.val;
4359  struct held_lock *hlock = curr->held_locks + i;
4361  if (match_held_lock(hlock, lock)) {
4362  hlock->pin_count += cookie.val;
4379  struct held_lock *hlock = curr->held_locks + i;
4381  if (match_held_lock(hlock, lock)) {
4382  if (WARN(!hlock->pin_count, "unpinning an unpinned lock\n"))
4385  hlock->pin_count -= cookie.val;
4387  if (WARN((int)hlock->pin_count < 0, "pin count corrupted\n"))
4388  hlock->pin_count = 0;
4623  struct held_lock *hlock;
4636  hlock = find_held_lock(curr, lock, depth, &i);
4637  if (!hlock) {
4642  if (hlock->instance != lock)
4645  hlock->waittime_stamp = lockstat_clock();
4647  contention_point = lock_point(hlock_class(hlock)->contention_point, ip);
4648  contending_point = lock_point(hlock_class(hlock)->contending_point,
4651  stats = get_lock_stats(hlock_class(hlock));
4657  stats->bounces[bounce_contended + !!hlock->read]++;
4664  struct held_lock *hlock;
4678  hlock = find_held_lock(curr, lock, depth, &i);
4679  if (!hlock) {
4684  if (hlock->instance != lock)
4688  if (hlock->waittime_stamp) {
4690  waittime = now - hlock->waittime_stamp;
4691  hlock->holdtime_stamp = now;
4696  stats = get_lock_stats(hlock_class(hlock));
4698  if (hlock->read)
4704  stats->bounces[bounce_acquired + !!hlock->read]++;
5250  const void *mem_to, struct held_lock *hlock)
5264  print_lock(hlock);
5286  struct held_lock *hlock;
5295  hlock = curr->held_locks + i;
5297  if (not_in_range(mem_from, mem_len, hlock->instance,
5298  sizeof(*hlock->instance)))
5301  print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
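The pattern that recurs throughout these references is a linear scan of the task's held-lock stack (curr->held_locks, bounded by the current depth) with a match helper, as in find_held_lock() and the pin/unpin paths around lines 4309-4388. The following is a minimal userspace sketch of that pattern only; struct task_ctx, demo_match_held_lock() and demo_pin_lock() are simplified stand-ins invented for illustration, and the struct held_lock here is trimmed to a few of the fields seen above. It is not the kernel's implementation.

    #include <stdio.h>

    #define MAX_LOCK_DEPTH 48

    struct lockdep_map { const char *name; };   /* stand-in for the real struct */

    struct held_lock {                          /* trimmed: only fields used below */
            struct lockdep_map *instance;
            unsigned int read;
            unsigned int pin_count;
    };

    struct task_ctx {                           /* stands in for task_struct's lockdep state */
            struct held_lock held_locks[MAX_LOCK_DEPTH];
            int lockdep_depth;
    };

    /* Simplified analogue of match_held_lock(): compare by instance only. */
    static int demo_match_held_lock(const struct held_lock *hlock,
                                    const struct lockdep_map *lock)
    {
            return hlock->instance == lock;
    }

    /*
     * Same shape as the pin loop in the listing: walk the held-lock stack,
     * find the entry for this lock instance, and bump its pin count
     * (mirrors "hlock->pin_count += cookie.val" at line 4341).
     */
    static int demo_pin_lock(struct task_ctx *curr, struct lockdep_map *lock,
                             unsigned int cookie_val)
    {
            int i;

            for (i = 0; i < curr->lockdep_depth; i++) {
                    struct held_lock *hlock = curr->held_locks + i;

                    if (demo_match_held_lock(hlock, lock)) {
                            hlock->pin_count += cookie_val;
                            return 0;
                    }
            }
            return -1;                          /* lock is not currently held */
    }

    int main(void)
    {
            struct lockdep_map a = { "a" }, b = { "b" };
            struct task_ctx curr = { .lockdep_depth = 2 };

            curr.held_locks[0].instance = &a;
            curr.held_locks[1].instance = &b;

            printf("pin b: %d, pin_count=%u\n",
                   demo_pin_lock(&curr, &b, 42), curr.held_locks[1].pin_count);
            return 0;
    }

In the real code the scan runs under lockdep's recursion guard and IRQ-disabled section, and the cookie value is randomized so an unpin can be paired with its pin; the sketch keeps only the stack-walk-and-match structure itself.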