This source file includes the following definitions:
- qla4xxx_space_in_req_ring
- qla4xxx_advance_req_ring_ptr
- qla4xxx_get_req_pkt
- qla4xxx_send_marker_iocb
- qla4xxx_alloc_cont_entry
- qla4xxx_calc_request_entries
- qla4xxx_build_scsi_iocbs
- qla4_83xx_queue_iocb
- qla4_83xx_complete_iocb
- qla4_82xx_queue_iocb
- qla4_82xx_complete_iocb
- qla4xxx_queue_iocb
- qla4xxx_complete_iocb
- qla4xxx_send_command_to_isp
- qla4xxx_send_passthru0
- qla4xxx_get_new_mrb
- qla4xxx_send_mbox_iocb
- qla4xxx_ping_iocb
#include "ql4_def.h"
#include "ql4_glbl.h"
#include "ql4_dbg.h"
#include "ql4_inline.h"

#include <scsi/scsi_tcq.h>

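/**
 * qla4xxx_space_in_req_ring - checks whether the request ring has room
 * @ha: Pointer to host adapter structure.
 * @req_cnt: Number of request-queue entries needed.
 *
 * Re-reads the shadow request-out index when the ring looks nearly full,
 * then returns 1 if @req_cnt entries (plus slack) fit, 0 otherwise.
 **/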
static int
qla4xxx_space_in_req_ring(struct scsi_qla_host *ha, uint16_t req_cnt)
{
	uint16_t cnt;

	/* Calculate number of free request entries. */
	if ((req_cnt + 2) >= ha->req_q_count) {
		cnt = (uint16_t) ha->isp_ops->rd_shdw_req_q_out(ha);
		if (ha->request_in < cnt)
			ha->req_q_count = cnt - ha->request_in;
		else
			ha->req_q_count = REQUEST_QUEUE_DEPTH -
					  (ha->request_in - cnt);
	}

	/* Check if room for request in request ring. */
	if ((req_cnt + 2) < ha->req_q_count)
		return 1;
	else
		return 0;
}

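/**
 * qla4xxx_advance_req_ring_ptr - advances the request-in pointer
 * @ha: Pointer to host adapter structure.
 *
 * Moves request_in/request_ptr to the next slot, wrapping back to the
 * start of the request ring when the end of the queue is reached.
 **/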
static void qla4xxx_advance_req_ring_ptr(struct scsi_qla_host *ha)
{
	/* Advance request queue pointer and check for queue wrap. */
	if (ha->request_in == (REQUEST_QUEUE_DEPTH - 1)) {
		ha->request_in = 0;
		ha->request_ptr = ha->request_ring;
	} else {
		ha->request_in++;
		ha->request_ptr++;
	}
}

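/**
 * qla4xxx_get_req_pkt - returns a valid entry in the request queue.
 * @ha: Pointer to host adapter structure.
 * @queue_entry: Pointer to pointer to queue entry structure
 *
 * This routine performs the following tasks:
 *	- returns the current request_in pointer (if queue not full)
 *	- advances the request_in pointer
 *	- checks for queue full
 **/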
static int qla4xxx_get_req_pkt(struct scsi_qla_host *ha,
			       struct queue_entry **queue_entry)
{
	uint16_t req_cnt = 1;

	if (qla4xxx_space_in_req_ring(ha, req_cnt)) {
		*queue_entry = ha->request_ptr;
		memset(*queue_entry, 0, sizeof(**queue_entry));

		qla4xxx_advance_req_ring_ptr(ha);
		ha->req_q_count -= req_cnt;
		return QLA_SUCCESS;
	}

	return QLA_ERROR;
}

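/**
 * qla4xxx_send_marker_iocb - constructs and sends a marker IOCB.
 * @ha: Pointer to host adapter structure.
 * @ddb_entry: Pointer to device database entry
 * @lun: SCSI LUN
 * @mrkr_mod: marker identifier
 **/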
int qla4xxx_send_marker_iocb(struct scsi_qla_host *ha,
			     struct ddb_entry *ddb_entry, uint64_t lun,
			     uint16_t mrkr_mod)
{
	struct qla4_marker_entry *marker_entry;
	unsigned long flags = 0;
	uint8_t status = QLA_SUCCESS;

	/* Acquire hardware specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Get pointer to the queue entry for the marker */
	if (qla4xxx_get_req_pkt(ha, (struct queue_entry **) &marker_entry) !=
	    QLA_SUCCESS) {
		status = QLA_ERROR;
		goto exit_send_marker;
	}

	/* Put the marker in the request queue */
	marker_entry->hdr.entryType = ET_MARKER;
	marker_entry->hdr.entryCount = 1;
	marker_entry->target = cpu_to_le16(ddb_entry->fw_ddb_index);
	marker_entry->modifier = cpu_to_le16(mrkr_mod);
	int_to_scsilun(lun, &marker_entry->lun);
	wmb();

	/* Tell ISP it's got a new I/O request */
	ha->isp_ops->queue_iocb(ha);

exit_send_marker:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return status;
}

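/**
 * qla4xxx_alloc_cont_entry - allocates a continuation type 1 IOCB
 * @ha: Pointer to host adapter structure.
 *
 * Claims the next slot in the request ring as a continuation entry and
 * initializes its header; the caller fills in the data segments.
 **/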
static struct continuation_t1_entry *
qla4xxx_alloc_cont_entry(struct scsi_qla_host *ha)
{
	struct continuation_t1_entry *cont_entry;

	cont_entry = (struct continuation_t1_entry *)ha->request_ptr;

	qla4xxx_advance_req_ring_ptr(ha);

	/* Load packet defaults */
	cont_entry->hdr.entryType = ET_CONTINUE;
	cont_entry->hdr.entryCount = 1;
	cont_entry->hdr.systemDefined = (uint8_t) cpu_to_le16(ha->request_in);

	return cont_entry;
}

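/**
 * qla4xxx_calc_request_entries - number of IOCBs needed for a request
 * @dsds: Number of data segment descriptors to be transferred.
 *
 * One command entry carries up to COMMAND_SEG descriptors; any remainder
 * spills into continuation entries of CONTINUE_SEG descriptors each.
 **/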
static uint16_t qla4xxx_calc_request_entries(uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > COMMAND_SEG) {
		iocbs += (dsds - COMMAND_SEG) / CONTINUE_SEG;
		if ((dsds - COMMAND_SEG) % CONTINUE_SEG)
			iocbs++;
	}
	return iocbs;
}

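/**
 * qla4xxx_build_scsi_iocbs - builds the data segments of a SCSI IOCB
 * @srb: SCSI request block
 * @cmd_entry: Pointer to the command type 3 IOCB being built.
 * @tot_dsds: Total number of data segment descriptors.
 *
 * Walks the command's scatter-gather list and fills 64-bit data segment
 * descriptors into the command entry, allocating continuation entries
 * whenever the current entry runs out of descriptor slots.
 **/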
static void qla4xxx_build_scsi_iocbs(struct srb *srb,
				     struct command_t3_entry *cmd_entry,
				     uint16_t tot_dsds)
{
	struct scsi_qla_host *ha;
	uint16_t avail_dsds;
	struct data_seg_a64 *cur_dsd;
	struct scsi_cmnd *cmd;
	struct scatterlist *sg;
	int i;

	cmd = srb->cmd;
	ha = srb->ha;

	if (!scsi_bufflen(cmd) || cmd->sc_data_direction == DMA_NONE) {
		/* No data being transferred */
		cmd_entry->ttlByteCnt = __constant_cpu_to_le32(0);
		return;
	}

	avail_dsds = COMMAND_SEG;
	cur_dsd = (struct data_seg_a64 *) &(cmd_entry->dataseg[0]);

	scsi_for_each_sg(cmd, sg, tot_dsds, i) {
		dma_addr_t sle_dma;

		/* Allocate additional continuation packets? */
		if (avail_dsds == 0) {
			struct continuation_t1_entry *cont_entry;

			cont_entry = qla4xxx_alloc_cont_entry(ha);
			cur_dsd = (struct data_seg_a64 *)
					&cont_entry->dataseg[0];
			avail_dsds = CONTINUE_SEG;
		}

		sle_dma = sg_dma_address(sg);
		cur_dsd->base.addrLow = cpu_to_le32(LSDW(sle_dma));
		cur_dsd->base.addrHigh = cpu_to_le32(MSDW(sle_dma));
		cur_dsd->count = cpu_to_le32(sg_dma_len(sg));
		avail_dsds--;

		cur_dsd++;
	}
}

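/**
 * qla4_83xx_queue_iocb - Tell ISP it's got new request(s)
 * @ha: pointer to host adapter structure.
 **/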
void qla4_83xx_queue_iocb(struct scsi_qla_host *ha)
{
	writel(ha->request_in, &ha->qla4_83xx_reg->req_q_in);
	readl(&ha->qla4_83xx_reg->req_q_in);
}

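/**
 * qla4_83xx_complete_iocb - Tell ISP we're done with response(s)
 * @ha: pointer to host adapter structure.
 **/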
void qla4_83xx_complete_iocb(struct scsi_qla_host *ha)
{
	writel(ha->response_out, &ha->qla4_83xx_reg->rsp_q_out);
	readl(&ha->qla4_83xx_reg->rsp_q_out);
}

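/**
 * qla4_82xx_queue_iocb - Tell ISP it's got new request(s)
 * @ha: pointer to host adapter structure.
 *
 * This routine notifies the ISP that one or more new request
 * queue entries have been placed on the request queue.
 **/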
void qla4_82xx_queue_iocb(struct scsi_qla_host *ha)
{
	uint32_t dbval = 0;

	dbval = 0x14 | (ha->func_num << 5);
	dbval = dbval | (0 << 8) | (ha->request_in << 16);

	qla4_82xx_wr_32(ha, ha->nx_db_wr_ptr, ha->request_in);
}

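/**
 * qla4_82xx_complete_iocb - Tell ISP we're done with response(s)
 * @ha: pointer to host adapter structure.
 *
 * This routine notifies the ISP that one or more response/completion
 * queue entries have been processed by the driver.
 **/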
void qla4_82xx_complete_iocb(struct scsi_qla_host *ha)
{
	writel(ha->response_out, &ha->qla4_82xx_reg->rsp_q_out);
	readl(&ha->qla4_82xx_reg->rsp_q_out);
}

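/**
 * qla4xxx_queue_iocb - Tell ISP it's got new request(s)
 * @ha: pointer to host adapter structure.
 *
 * This routine notifies the ISP that one or more new request
 * queue entries have been placed on the request queue.
 **/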
void qla4xxx_queue_iocb(struct scsi_qla_host *ha)
{
	writel(ha->request_in, &ha->reg->req_q_in);
	readl(&ha->reg->req_q_in);
}

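/**
 * qla4xxx_complete_iocb - Tell ISP we're done with response(s)
 * @ha: pointer to host adapter structure.
 *
 * This routine notifies the ISP that one or more response/completion
 * queue entries have been processed by the driver.
 **/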
void qla4xxx_complete_iocb(struct scsi_qla_host *ha)
{
	writel(ha->response_out, &ha->reg->rsp_q_out);
	readl(&ha->reg->rsp_q_out);
}

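/**
 * qla4xxx_send_command_to_isp - issues a SCSI command to the HBA
 * @ha: pointer to host adapter structure.
 * @srb: pointer to SCSI Request Block to be sent to the ISP
 *
 * Builds a command type 3 IOCB (plus any continuation entries) for the
 * SCSI command in @srb and passes it to the ISP for execution.
 **/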
int qla4xxx_send_command_to_isp(struct scsi_qla_host *ha, struct srb *srb)
{
	struct scsi_cmnd *cmd = srb->cmd;
	struct ddb_entry *ddb_entry;
	struct command_t3_entry *cmd_entry;
	int nseg;
	uint16_t tot_dsds;
	uint16_t req_cnt;
	unsigned long flags;
	uint32_t index;

	/* Get real lun and adapter */
	ddb_entry = srb->ddb;

	tot_dsds = 0;

	/* Acquire hardware specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	index = (uint32_t)cmd->request->tag;

	/*
	 * Check to see if adapter is online before placing request on
	 * request queue.  If a reset occurs and a request is in the queue,
	 * the firmware will still attempt to process the request, retrieving
	 * garbage for pointers.
	 */
	if (!test_bit(AF_ONLINE, &ha->flags)) {
		DEBUG2(printk("scsi%ld: %s: Adapter OFFLINE! "
			      "Do not issue command.\n",
			      ha->host_no, __func__));
		goto queuing_error;
	}

	/* Calculate the number of request entries needed. */
	nseg = scsi_dma_map(cmd);
	if (nseg < 0)
		goto queuing_error;
	tot_dsds = nseg;

	req_cnt = qla4xxx_calc_request_entries(tot_dsds);
	if (!qla4xxx_space_in_req_ring(ha, req_cnt))
		goto queuing_error;

	/* total iocbs active */
	if ((ha->iocb_cnt + req_cnt) >= ha->iocb_hiwat)
		goto queuing_error;

	/* Build command packet */
	cmd_entry = (struct command_t3_entry *) ha->request_ptr;
	memset(cmd_entry, 0, sizeof(struct command_t3_entry));
	cmd_entry->hdr.entryType = ET_COMMAND;
	cmd_entry->handle = cpu_to_le32(index);
	cmd_entry->target = cpu_to_le16(ddb_entry->fw_ddb_index);

	int_to_scsilun(cmd->device->lun, &cmd_entry->lun);
	cmd_entry->ttlByteCnt = cpu_to_le32(scsi_bufflen(cmd));
	memcpy(cmd_entry->cdb, cmd->cmnd, cmd->cmd_len);
	cmd_entry->dataSegCnt = cpu_to_le16(tot_dsds);
	cmd_entry->hdr.entryCount = req_cnt;

	/* Set data transfer direction control flags
	 * NOTE: Look at data_direction bits only when there is data to be
	 *	 transferred, as the direction bits may be set even when
	 *	 there is no data to transfer. */
	cmd_entry->control_flags = CF_NO_DATA;
	if (scsi_bufflen(cmd)) {
		if (cmd->sc_data_direction == DMA_TO_DEVICE)
			cmd_entry->control_flags = CF_WRITE;
		else if (cmd->sc_data_direction == DMA_FROM_DEVICE)
			cmd_entry->control_flags = CF_READ;

		ha->bytes_xfered += scsi_bufflen(cmd);
		if (ha->bytes_xfered & ~0xFFFFF) {
			ha->total_mbytes_xferred += ha->bytes_xfered >> 20;
			ha->bytes_xfered &= 0xFFFFF;
		}
	}

	/* Set tagged queueing control flags */
	cmd_entry->control_flags |= CF_SIMPLE_TAG;

	qla4xxx_advance_req_ring_ptr(ha);
	qla4xxx_build_scsi_iocbs(srb, cmd_entry, tot_dsds);
	wmb();

	srb->cmd->host_scribble = (unsigned char *)(unsigned long)index;

	/* update counters */
	srb->state = SRB_ACTIVE_STATE;
	srb->flags |= SRB_DMA_VALID;

	/* Track IOCBs used */
	ha->iocb_cnt += req_cnt;
	srb->iocb_cnt = req_cnt;
	ha->req_q_count -= req_cnt;

	ha->isp_ops->queue_iocb(ha);
	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_SUCCESS;

queuing_error:
	if (tot_dsds)
		scsi_dma_unmap(cmd);

	spin_unlock_irqrestore(&ha->hardware_lock, flags);

	return QLA_ERROR;
}

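/**
 * qla4xxx_send_passthru0 - sends an iSCSI PDU to the firmware
 * @task: iSCSI task whose PDU is to be sent
 *
 * Builds a PASSTHRU0 IOCB that references the task's DMA-mapped request
 * and response buffers and queues it on the request ring.
 **/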
int qla4xxx_send_passthru0(struct iscsi_task *task)
{
	struct passthru0 *passthru_iocb;
	struct iscsi_session *sess = task->conn->session;
	struct ddb_entry *ddb_entry = sess->dd_data;
	struct scsi_qla_host *ha = ddb_entry->ha;
	struct ql4_task_data *task_data = task->dd_data;
	uint16_t ctrl_flags = 0;
	unsigned long flags;
	int ret = QLA_ERROR;

	spin_lock_irqsave(&ha->hardware_lock, flags);
	task_data->iocb_req_cnt = 1;
	/* Put the IOCB on the request queue */
	if (!qla4xxx_space_in_req_ring(ha, task_data->iocb_req_cnt))
		goto queuing_error;

	passthru_iocb = (struct passthru0 *) ha->request_ptr;

	memset(passthru_iocb, 0, sizeof(struct passthru0));
	passthru_iocb->hdr.entryType = ET_PASSTHRU0;
	passthru_iocb->hdr.systemDefined = SD_ISCSI_PDU;
	passthru_iocb->hdr.entryCount = task_data->iocb_req_cnt;
	passthru_iocb->handle = task->itt;
	passthru_iocb->target = cpu_to_le16(ddb_entry->fw_ddb_index);
	passthru_iocb->timeout = cpu_to_le16(PT_DEFAULT_TIMEOUT);

	/* Setup the out & in DSDs */
	if (task_data->req_len) {
		memcpy((uint8_t *)task_data->req_buffer +
		       sizeof(struct iscsi_hdr), task->data, task->data_count);
		ctrl_flags |= PT_FLAG_SEND_BUFFER;
		passthru_iocb->out_dsd.base.addrLow =
					cpu_to_le32(LSDW(task_data->req_dma));
		passthru_iocb->out_dsd.base.addrHigh =
					cpu_to_le32(MSDW(task_data->req_dma));
		passthru_iocb->out_dsd.count =
					cpu_to_le32(task->data_count +
						    sizeof(struct iscsi_hdr));
	}
	if (task_data->resp_len) {
		passthru_iocb->in_dsd.base.addrLow =
					cpu_to_le32(LSDW(task_data->resp_dma));
		passthru_iocb->in_dsd.base.addrHigh =
					cpu_to_le32(MSDW(task_data->resp_dma));
		passthru_iocb->in_dsd.count =
					cpu_to_le32(task_data->resp_len);
	}

	ctrl_flags |= (PT_FLAG_ISCSI_PDU | PT_FLAG_WAIT_4_RESPONSE);
	passthru_iocb->control_flags = cpu_to_le16(ctrl_flags);

	/* Update the request pointer */
	qla4xxx_advance_req_ring_ptr(ha);
	wmb();

	/* Track IOCBs used */
	ha->iocb_cnt += task_data->iocb_req_cnt;
	ha->req_q_count -= task_data->iocb_req_cnt;
	ha->isp_ops->queue_iocb(ha);
	ret = QLA_SUCCESS;

queuing_error:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return ret;
}

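/**
 * qla4xxx_get_new_mrb - allocates a mailbox request block
 * @ha: Pointer to host adapter structure.
 *
 * Returns a zeroed mrb with its adapter back-pointer set, or NULL if the
 * allocation fails.
 **/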
static struct mrb *qla4xxx_get_new_mrb(struct scsi_qla_host *ha)
{
	struct mrb *mrb;

	mrb = kzalloc(sizeof(*mrb), GFP_KERNEL);
	if (!mrb)
		return mrb;

	mrb->ha = ha;
	return mrb;
}

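/**
 * qla4xxx_send_mbox_iocb - sends a mailbox IOCB to the firmware
 * @ha: Pointer to host adapter structure.
 * @mrb: mailbox request block tracking the command
 * @in_mbox: array of eight mailbox input registers
 *
 * Grabs a request-queue entry, assigns the mrb a free slot in the active
 * mrb array, copies the mailbox registers into the IOCB, and rings the
 * request-queue doorbell.
 **/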
static int qla4xxx_send_mbox_iocb(struct scsi_qla_host *ha, struct mrb *mrb,
				  uint32_t *in_mbox)
{
	int rval = QLA_SUCCESS;
	uint32_t i;
	unsigned long flags;
	uint32_t index = 0;

	/* Acquire hardware specific lock */
	spin_lock_irqsave(&ha->hardware_lock, flags);

	/* Get pointer to the queue entry for the mailbox IOCB */
	rval = qla4xxx_get_req_pkt(ha, (struct queue_entry **) &(mrb->mbox));
	if (rval != QLA_SUCCESS)
		goto exit_mbox_iocb;

	index = ha->mrb_index;
	/* get valid mrb index */
	for (i = 0; i < MAX_MRB; i++) {
		index++;
		if (index == MAX_MRB)
			index = 1;
		if (ha->active_mrb_array[index] == NULL) {
			ha->mrb_index = index;
			break;
		}
	}

	mrb->iocb_cnt = 1;
	ha->active_mrb_array[index] = mrb;
	mrb->mbox->handle = index;
	mrb->mbox->hdr.entryType = ET_MBOX_CMD;
	mrb->mbox->hdr.entryCount = mrb->iocb_cnt;
	memcpy(mrb->mbox->in_mbox, in_mbox, 32);
	mrb->mbox_cmd = in_mbox[0];
	wmb();

	ha->iocb_cnt += mrb->iocb_cnt;
	ha->isp_ops->queue_iocb(ha);
exit_mbox_iocb:
	spin_unlock_irqrestore(&ha->hardware_lock, flags);
	return rval;
}

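/**
 * qla4xxx_ping_iocb - sends an MBOX_CMD_PING request via a mailbox IOCB
 * @ha: Pointer to host adapter structure.
 * @options: ping options, passed through in mailbox 1
 * @payload_size: ping payload size, passed through in mailbox 6
 * @pid: ping identifier saved in the mrb to match the completion
 * @ipaddr: 16-byte destination IP address
 **/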
int qla4xxx_ping_iocb(struct scsi_qla_host *ha, uint32_t options,
		      uint32_t payload_size, uint32_t pid, uint8_t *ipaddr)
{
	uint32_t in_mbox[8];
	struct mrb *mrb = NULL;
	int rval = QLA_SUCCESS;

	memset(in_mbox, 0, sizeof(in_mbox));

	mrb = qla4xxx_get_new_mrb(ha);
	if (!mrb) {
		DEBUG2(ql4_printk(KERN_WARNING, ha, "%s: fail to get new mrb\n",
				  __func__));
		rval = QLA_ERROR;
		goto exit_ping;
	}

	in_mbox[0] = MBOX_CMD_PING;
	in_mbox[1] = options;
	memcpy(&in_mbox[2], &ipaddr[0], 4);
	memcpy(&in_mbox[3], &ipaddr[4], 4);
	memcpy(&in_mbox[4], &ipaddr[8], 4);
	memcpy(&in_mbox[5], &ipaddr[12], 4);
	in_mbox[6] = payload_size;

	mrb->pid = pid;
	rval = qla4xxx_send_mbox_iocb(ha, mrb, in_mbox);

	if (rval != QLA_SUCCESS)
		goto exit_ping;

	return rval;
exit_ping:
	kfree(mrb);
	return rval;
}