/* See include/linux/lglock.h for description */
#include <linux/module.h>
#include <linux/lglock.h>
#include <linux/cpu.h>
#include <linux/string.h>

/*
 * Note there is no uninit, so lglocks cannot be defined in
 * modules (but it is fine to use them from there).
 * An uninit could be added, though; it would only have to undo
 * what lg_lock_init() does.
 */

/*
 * DEFINE_LGLOCK() statically initializes the per-CPU spinlocks to the
 * unlocked state, so the only thing left to set up here is the lockdep
 * map shared by all of them.
 */
void lg_lock_init(struct lglock *lg, char *name)
{
	LOCKDEP_INIT_MAP(&lg->lock_dep_map, name, &lg->lock_key, 0);
}
EXPORT_SYMBOL(lg_lock_init);

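/*
 * Illustrative sketch, not part of this file: a typical user defines the
 * lock at file scope and registers it with lockdep once during early init.
 * my_lglock and my_subsys_init() below are made-up names for the example.
 *
 *	DEFINE_LGLOCK(my_lglock);
 *
 *	static int __init my_subsys_init(void)
 *	{
 *		lg_lock_init(&my_lglock, "my_lglock");
 *		return 0;
 *	}
 *
 * DEFINE_STATIC_LGLOCK() is available instead when the lock is not
 * referenced outside the defining file.
 */
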
void lg_local_lock(struct lglock *lg)
{
	arch_spinlock_t *lock;

	/*
	 * arch_spin_lock() bypasses the preempt counter, so preemption
	 * has to be disabled by hand to keep the task on this CPU while
	 * it holds the local lock.
	 */
	preempt_disable();
	/* To lockdep, per-CPU lockers are readers; lg_global_lock() is the writer. */
	lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
	lock = this_cpu_ptr(lg->lock);
	arch_spin_lock(lock);
}
EXPORT_SYMBOL(lg_local_lock);

void lg_local_unlock(struct lglock *lg)
{
	arch_spinlock_t *lock;

	lock_release(&lg->lock_dep_map, 1, _RET_IP_);
	lock = this_cpu_ptr(lg->lock);
	arch_spin_unlock(lock);
	preempt_enable();
}
EXPORT_SYMBOL(lg_local_unlock);

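/*
 * Illustrative sketch, not part of this file: the fast path takes only the
 * local CPU's lock, e.g. to add an entry to a per-CPU list that writers walk
 * under lg_global_lock().  my_lglock, my_pcpu_list and item are made up.
 *
 *	lg_local_lock(&my_lglock);
 *	list_add(&item->node, this_cpu_ptr(&my_pcpu_list));
 *	lg_local_unlock(&my_lglock);
 *
 * this_cpu_ptr() is safe here because lg_local_lock() has already disabled
 * preemption.
 */
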
void lg_local_lock_cpu(struct lglock *lg, int cpu)
{
	arch_spinlock_t *lock;

	preempt_disable();
	lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
	lock = per_cpu_ptr(lg->lock, cpu);
	arch_spin_lock(lock);
}
EXPORT_SYMBOL(lg_local_lock_cpu);

void lg_local_unlock_cpu(struct lglock *lg, int cpu)
{
	arch_spinlock_t *lock;

	lock_release(&lg->lock_dep_map, 1, _RET_IP_);
	lock = per_cpu_ptr(lg->lock, cpu);
	arch_spin_unlock(lock);
	preempt_enable();
}
EXPORT_SYMBOL(lg_local_unlock_cpu);

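/*
 * Illustrative sketch, not part of this file: the _cpu variants let one CPU
 * take another CPU's lock, e.g. to drain a remote per-CPU list.  The names
 * below are hypothetical.
 *
 *	lg_local_lock_cpu(&my_lglock, cpu);
 *	list_splice_init(per_cpu_ptr(&my_pcpu_list, cpu), &drain_list);
 *	lg_local_unlock_cpu(&my_lglock, cpu);
 */
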
void lg_double_lock(struct lglock *lg, int cpu1, int cpu2)
{
	BUG_ON(cpu1 == cpu2);

	/* lock in cpu order, just like lg_global_lock */
	if (cpu2 < cpu1)
		swap(cpu1, cpu2);

	preempt_disable();
	lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
	arch_spin_lock(per_cpu_ptr(lg->lock, cpu1));
	arch_spin_lock(per_cpu_ptr(lg->lock, cpu2));
}

void lg_double_unlock(struct lglock *lg, int cpu1, int cpu2)
{
	lock_release(&lg->lock_dep_map, 1, _RET_IP_);
	arch_spin_unlock(per_cpu_ptr(lg->lock, cpu1));
	arch_spin_unlock(per_cpu_ptr(lg->lock, cpu2));
	preempt_enable();
}

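/*
 * Illustrative sketch, not part of this file: lg_double_lock() takes the
 * locks of exactly two CPUs, e.g. to move entries from one CPU's list to
 * another's.  The CPU numbers may be passed in either order (they are
 * sorted above), but never the same CPU twice.  Names are hypothetical.
 *
 *	lg_double_lock(&my_lglock, src_cpu, dst_cpu);
 *	list_splice_tail_init(per_cpu_ptr(&my_pcpu_list, src_cpu),
 *			      per_cpu_ptr(&my_pcpu_list, dst_cpu));
 *	lg_double_unlock(&my_lglock, src_cpu, dst_cpu);
 */
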
void lg_global_lock(struct lglock *lg)
{
	int i;

	preempt_disable();
	/* The global side takes the dep map exclusive: it excludes all readers. */
	lock_acquire_exclusive(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
	/* Take every CPU's lock, in for_each_possible_cpu() order. */
	for_each_possible_cpu(i) {
		arch_spinlock_t *lock;
		lock = per_cpu_ptr(lg->lock, i);
		arch_spin_lock(lock);
	}
}
EXPORT_SYMBOL(lg_global_lock);

void lg_global_unlock(struct lglock *lg)
{
	int i;

	lock_release(&lg->lock_dep_map, 1, _RET_IP_);
	for_each_possible_cpu(i) {
		arch_spinlock_t *lock;
		lock = per_cpu_ptr(lg->lock, i);
		arch_spin_unlock(lock);
	}
	preempt_enable();
}
EXPORT_SYMBOL(lg_global_unlock);

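/*
 * Illustrative sketch, not part of this file: the slow path takes every
 * CPU's lock to get a stable view of all per-CPU state, e.g. to walk the
 * per-CPU lists filled under lg_local_lock() above.  my_lglock,
 * my_pcpu_list, item and do_something() are made-up names.
 *
 *	lg_global_lock(&my_lglock);
 *	for_each_possible_cpu(cpu)
 *		list_for_each_entry(item, per_cpu_ptr(&my_pcpu_list, cpu), node)
 *			do_something(item);
 *	lg_global_unlock(&my_lglock);
 */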