This source file includes the following definitions.
- queue_initialise
- queue_free
- __queue_add
- __queue_remove
- queue_remove_exclude
- queue_remove
- queue_remove_tgtluntag
- queue_remove_all_target
- queue_probetgtlun
- queue_remove_cmd
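
Before the source itself, a brief usage sketch: a driver is expected to embed a Queue_t in its per-host state, fill it with __queue_add() as commands arrive, and drain it with queue_remove() (or one of the more selective removal helpers) when the hardware is free. The snippet below is a minimal illustration under those assumptions only; the mydrv_* names, the per-host structure, and the error codes chosen are hypothetical and not part of this file.

#include <linux/errno.h>

#include "../scsi.h"
#include "queue.h"

/* Hypothetical per-host state embedding the command queue. */
struct mydrv_host {
	Queue_t issue_queue;
};

static int mydrv_setup(struct mydrv_host *host)
{
	/* queue_initialise() returns non-zero on success. */
	if (!queue_initialise(&host->issue_queue))
		return -ENOMEM;
	return 0;
}

static int mydrv_queue_command(struct mydrv_host *host, struct scsi_cmnd *SCpnt)
{
	/* Add to the tail of the queue; 0 means no free entry was left. */
	if (!__queue_add(&host->issue_queue, SCpnt, 0))
		return -EBUSY;
	return 0;
}

static void mydrv_run_queue(struct mydrv_host *host)
{
	struct scsi_cmnd *SCpnt;

	/* Drain queued commands in FIFO order. */
	while ((SCpnt = queue_remove(&host->issue_queue)) != NULL) {
		/* ... hand SCpnt to the hardware here ... */
	}
}

static void mydrv_teardown(struct mydrv_host *host)
{
	queue_free(&host->issue_queue);
}
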
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/init.h>

#include "../scsi.h"

#define DEBUG
typedef struct queue_entry {
	struct list_head list;
	struct scsi_cmnd *SCpnt;
#ifdef DEBUG
	unsigned long magic;
#endif
} QE_t;

#ifdef DEBUG
#define QUEUE_MAGIC_FREE	0xf7e1c9a3
#define QUEUE_MAGIC_USED	0xf7e1cc33

#define SET_MAGIC(q,m)		((q)->magic = (m))
#define BAD_MAGIC(q,m)		((q)->magic != (m))
#else
#define SET_MAGIC(q,m)		do { } while (0)
#define BAD_MAGIC(q,m)		(0)
#endif

#include "queue.h"

#define NR_QE	32

/*
 * queue_initialise - initialise a queue
 * @queue: queue to initialise
 *
 * Returns non-zero on success, zero if the queue entries could not be
 * allocated.
 */
int queue_initialise (Queue_t *queue)
{
	unsigned int nqueues = NR_QE;
	QE_t *q;

	spin_lock_init(&queue->queue_lock);
	INIT_LIST_HEAD(&queue->head);
	INIT_LIST_HEAD(&queue->free);

	/*
	 * Allocate the queue entries up front and place them all on
	 * the free list.
	 */
	queue->alloc = q = kmalloc_array(nqueues, sizeof(QE_t), GFP_KERNEL);
	if (q) {
		for (; nqueues; q++, nqueues--) {
			SET_MAGIC(q, QUEUE_MAGIC_FREE);
			q->SCpnt = NULL;
			list_add(&q->list, &queue->free);
		}
	}

	return queue->alloc != NULL;
}

/*
 * queue_free - free a queue
 * @queue: queue to free
 *
 * Warns if the queue is not empty when it is freed.
 */
void queue_free (Queue_t *queue)
{
	if (!list_empty(&queue->head))
		printk(KERN_WARNING "freeing non-empty queue %p\n", queue);
	kfree(queue->alloc);
}

/*
 * __queue_add - add a command to the queue
 * @queue: queue to add the command to
 * @SCpnt: command to add
 * @head: add to the head of the queue if non-zero, else to the tail
 *
 * Returns non-zero on success, zero if no free queue entry is available.
 */
int __queue_add(Queue_t *queue, struct scsi_cmnd *SCpnt, int head)
{
	unsigned long flags;
	struct list_head *l;
	QE_t *q;
	int ret = 0;

	spin_lock_irqsave(&queue->queue_lock, flags);
	if (list_empty(&queue->free))
		goto empty;

	l = queue->free.next;
	list_del(l);

	q = list_entry(l, QE_t, list);
	BUG_ON(BAD_MAGIC(q, QUEUE_MAGIC_FREE));

	SET_MAGIC(q, QUEUE_MAGIC_USED);
	q->SCpnt = SCpnt;

	if (head)
		list_add(l, &queue->head);
	else
		list_add_tail(l, &queue->head);

	ret = 1;
empty:
	spin_unlock_irqrestore(&queue->queue_lock, flags);
	return ret;
}

static struct scsi_cmnd *__queue_remove(Queue_t *queue, struct list_head *ent)
{
	QE_t *q;

	/*
	 * Move the entry from the "used" list onto the "free" list.
	 */
	list_del(ent);
	q = list_entry(ent, QE_t, list);
	BUG_ON(BAD_MAGIC(q, QUEUE_MAGIC_USED));

	SET_MAGIC(q, QUEUE_MAGIC_FREE);
	list_add(ent, &queue->free);

	return q->SCpnt;
}

/*
 * queue_remove_exclude - remove a command from the queue, skipping
 * devices marked in the exclusion bitmap
 * @queue: queue to remove the command from
 * @exclude: bitmap of excluded devices, indexed by (id * 8 + lun)
 *
 * Returns the command, or NULL if no eligible command is queued.
 */
struct scsi_cmnd *queue_remove_exclude(Queue_t *queue, unsigned long *exclude)
{
	unsigned long flags;
	struct list_head *l;
	struct scsi_cmnd *SCpnt = NULL;

	spin_lock_irqsave(&queue->queue_lock, flags);
	list_for_each(l, &queue->head) {
		QE_t *q = list_entry(l, QE_t, list);
		if (!test_bit(q->SCpnt->device->id * 8 +
			      (u8)(q->SCpnt->device->lun & 0x7), exclude)) {
			SCpnt = __queue_remove(queue, l);
			break;
		}
	}
	spin_unlock_irqrestore(&queue->queue_lock, flags);

	return SCpnt;
}

/*
 * queue_remove - remove the first command from the queue
 * @queue: queue to remove the command from
 *
 * Returns the command, or NULL if the queue is empty.
 */
struct scsi_cmnd *queue_remove(Queue_t *queue)
{
	unsigned long flags;
	struct scsi_cmnd *SCpnt = NULL;

	spin_lock_irqsave(&queue->queue_lock, flags);
	if (!list_empty(&queue->head))
		SCpnt = __queue_remove(queue, queue->head.next);
	spin_unlock_irqrestore(&queue->queue_lock, flags);

	return SCpnt;
}

/*
 * queue_remove_tgtluntag - remove a command from the queue matching
 * a given target, lun and tag
 * @queue: queue to remove the command from
 * @target: target to match
 * @lun: lun to match
 * @tag: tag to match
 *
 * Returns the command, or NULL if no matching command is queued.
 */
struct scsi_cmnd *queue_remove_tgtluntag(Queue_t *queue, int target, int lun,
					 int tag)
{
	unsigned long flags;
	struct list_head *l;
	struct scsi_cmnd *SCpnt = NULL;

	spin_lock_irqsave(&queue->queue_lock, flags);
	list_for_each(l, &queue->head) {
		QE_t *q = list_entry(l, QE_t, list);
		if (q->SCpnt->device->id == target &&
		    q->SCpnt->device->lun == lun &&
		    q->SCpnt->tag == tag) {
			SCpnt = __queue_remove(queue, l);
			break;
		}
	}
	spin_unlock_irqrestore(&queue->queue_lock, flags);

	return SCpnt;
}

/*
 * queue_remove_all_target - remove all commands for a given target
 * @queue: queue to remove the commands from
 * @target: target device id
 */
void queue_remove_all_target(Queue_t *queue, int target)
{
	unsigned long flags;
	struct list_head *l, *n;

	spin_lock_irqsave(&queue->queue_lock, flags);
	/*
	 * Use the _safe iterator: __queue_remove() moves the entry onto
	 * the free list, which would otherwise break the walk.
	 */
	list_for_each_safe(l, n, &queue->head) {
		QE_t *q = list_entry(l, QE_t, list);
		if (q->SCpnt->device->id == target)
			__queue_remove(queue, l);
	}
	spin_unlock_irqrestore(&queue->queue_lock, flags);
}

/*
 * queue_probetgtlun - check whether there is a command in the queue
 * for the given target/lun
 * @queue: queue to check
 * @target: target to check for
 * @lun: lun to check for
 *
 * Returns non-zero if a matching command is queued.
 */
int queue_probetgtlun (Queue_t *queue, int target, int lun)
{
	unsigned long flags;
	struct list_head *l;
	int found = 0;

	spin_lock_irqsave(&queue->queue_lock, flags);
	list_for_each(l, &queue->head) {
		QE_t *q = list_entry(l, QE_t, list);
		if (q->SCpnt->device->id == target &&
		    q->SCpnt->device->lun == lun) {
			found = 1;
			break;
		}
	}
	spin_unlock_irqrestore(&queue->queue_lock, flags);

	return found;
}

/*
 * queue_remove_cmd - remove a specific command from the queue
 * @queue: queue to remove the command from
 * @SCpnt: command to remove
 *
 * Returns non-zero if the command was found and removed.
 */
int queue_remove_cmd(Queue_t *queue, struct scsi_cmnd *SCpnt)
{
	unsigned long flags;
	struct list_head *l;
	int found = 0;

	spin_lock_irqsave(&queue->queue_lock, flags);
	list_for_each(l, &queue->head) {
		QE_t *q = list_entry(l, QE_t, list);
		if (q->SCpnt == SCpnt) {
			__queue_remove(queue, l);
			found = 1;
			break;
		}
	}
	spin_unlock_irqrestore(&queue->queue_lock, flags);

	return found;
}

EXPORT_SYMBOL(queue_initialise);
EXPORT_SYMBOL(queue_free);
EXPORT_SYMBOL(__queue_add);
EXPORT_SYMBOL(queue_remove);
EXPORT_SYMBOL(queue_remove_exclude);
EXPORT_SYMBOL(queue_remove_tgtluntag);
EXPORT_SYMBOL(queue_remove_cmd);
EXPORT_SYMBOL(queue_remove_all_target);
EXPORT_SYMBOL(queue_probetgtlun);

MODULE_AUTHOR("Russell King");
MODULE_DESCRIPTION("SCSI command queueing");
MODULE_LICENSE("GPL");