This source file includes the following definitions:
- mpt3sas_base_check_cmd_timeout
- _scsih_set_fwfault_debug
- _base_readl_aero
- _base_readl
- _base_clone_reply_to_sys_mem
- _base_clone_mpi_to_sys_mem
- _base_clone_to_sys_mem
- _base_get_chain
- _base_get_chain_phys
- _base_get_buffer_bar0
- _base_get_buffer_phys_bar0
- _base_get_chain_buffer_dma_to_chain_buffer
- _clone_sg_entries
- mpt3sas_remove_dead_ioc_func
- _base_fault_reset_work
- mpt3sas_base_start_watchdog
- mpt3sas_base_stop_watchdog
- mpt3sas_base_fault_info
- mpt3sas_halt_firmware
- _base_sas_ioc_info
- _base_display_event_data
- _base_sas_log_info
- _base_display_reply_info
- mpt3sas_base_done
- _base_async_event
- _get_st_from_smid
- _base_get_cb_idx
- _base_mask_interrupts
- _base_unmask_interrupts
- base_mod64
- _base_process_reply_queue
- _base_interrupt
- _base_irqpoll
- _base_init_irqpolls
- _base_is_controller_msix_enabled
- mpt3sas_base_sync_reply_irqs
- mpt3sas_base_release_callback_handler
- mpt3sas_base_register_callback_handler
- mpt3sas_base_initialize_callback_handler
- _base_build_zero_len_sge
- _base_add_sg_single_32
- _base_add_sg_single_64
- _base_get_chain_buffer_tracker
- _base_build_sg
- _base_build_nvme_prp
- base_make_prp_nvme
- base_is_prp_possible
- _base_check_pcie_native_sgl
- _base_add_sg_single_ieee
- _base_build_zero_len_sge_ieee
- _base_build_sg_scmd
- _base_build_sg_scmd_ieee
- _base_build_sg_ieee
- _base_config_dma_addressing
- _base_change_consistent_dma_mask
- _base_check_enable_msix
- _base_free_irq
- _base_request_irq
- _base_assign_reply_queues
- _base_check_and_enable_high_iops_queues
- _base_disable_msix
- _base_alloc_irq_vectors
- _base_enable_msix
- mpt3sas_base_unmap_resources
- _base_check_for_fault_and_issue_reset
- mpt3sas_base_map_resources
- mpt3sas_base_get_msg_frame
- mpt3sas_base_get_sense_buffer
- mpt3sas_base_get_sense_buffer_dma
- mpt3sas_base_get_pcie_sgl
- mpt3sas_base_get_pcie_sgl_dma
- mpt3sas_base_get_reply_virt_addr
- _base_get_msix_index
- _base_get_high_iops_msix_index
- mpt3sas_base_get_smid
- mpt3sas_base_get_smid_scsiio
- mpt3sas_base_get_smid_hpr
- _base_recovery_check
- mpt3sas_base_clear_st
- mpt3sas_base_free_smid
- _base_mpi_ep_writeq
- _base_writeq
- _base_writeq
- _base_set_and_get_msix_index
- _base_put_smid_mpi_ep_scsi_io
- _base_put_smid_scsi_io
- _base_put_smid_fast_path
- _base_put_smid_hi_priority
- mpt3sas_base_put_smid_nvme_encap
- _base_put_smid_default
- _base_put_smid_scsi_io_atomic
- _base_put_smid_fast_path_atomic
- _base_put_smid_hi_priority_atomic
- _base_put_smid_default_atomic
- _base_display_OEMs_branding
- _base_display_fwpkg_version
- _base_display_ioc_capabilities
- mpt3sas_base_update_missing_delay
- _base_update_ioc_page1_inlinewith_perf_mode
- _base_static_config_pages
- mpt3sas_free_enclosure_list
- _base_release_memory_pools
- is_MSB_are_same
- _base_allocate_memory_pools
- mpt3sas_base_get_iocstate
- _base_wait_on_iocstate
- _base_wait_for_doorbell_int
- _base_spin_on_doorbell_int
- _base_wait_for_doorbell_ack
- _base_wait_for_doorbell_not_used
- _base_send_ioc_reset
- mpt3sas_wait_for_ioc
- _base_handshake_req_reply_wait
- mpt3sas_base_sas_iounit_control
- mpt3sas_base_scsi_enclosure_processor
- _base_get_port_facts
- _base_wait_for_iocstate
- _base_get_ioc_facts
- _base_send_ioc_init
- mpt3sas_port_enable_done
- _base_send_port_enable
- mpt3sas_port_enable
- _base_determine_wait_on_discovery
- _base_unmask_events
- _base_event_notification
- mpt3sas_base_validate_event_type
- _base_diag_reset
- _base_make_ioc_ready
- _base_make_ioc_operational
- mpt3sas_base_free_resources
- mpt3sas_base_attach
- mpt3sas_base_detach
- _base_pre_reset_handler
- _base_after_reset_handler
- _base_reset_done_handler
- mpt3sas_wait_for_commands_to_complete
- _base_check_ioc_facts_changes
- mpt3sas_base_hard_reset_handler
/*
 * This is the Fusion MPT base driver providing common API layer interface
 * for access to MPT (Message Passing Technology) firmware.
 * (Copyright and GPL license notice elided from this listing.)
 */
46 #include <linux/kernel.h>
47 #include <linux/module.h>
48 #include <linux/errno.h>
49 #include <linux/init.h>
50 #include <linux/slab.h>
51 #include <linux/types.h>
52 #include <linux/pci.h>
53 #include <linux/kdev_t.h>
54 #include <linux/blkdev.h>
55 #include <linux/delay.h>
56 #include <linux/interrupt.h>
57 #include <linux/dma-mapping.h>
58 #include <linux/io.h>
59 #include <linux/time.h>
60 #include <linux/ktime.h>
61 #include <linux/kthread.h>
62 #include <asm/page.h>
63 #include <linux/aer.h>
64
65
66 #include "mpt3sas_base.h"
67
68 static MPT_CALLBACK mpt_callbacks[MPT_MAX_CALLBACKS];
69
70
71 #define FAULT_POLLING_INTERVAL 1000
72
73
74 #define MAX_HBA_QUEUE_DEPTH 30000
75 #define MAX_CHAIN_DEPTH 100000
76 static int max_queue_depth = -1;
77 module_param(max_queue_depth, int, 0444);
78 MODULE_PARM_DESC(max_queue_depth, " max controller queue depth ");
79
80 static int max_sgl_entries = -1;
81 module_param(max_sgl_entries, int, 0444);
82 MODULE_PARM_DESC(max_sgl_entries, " max sg entries ");
83
84 static int msix_disable = -1;
85 module_param(msix_disable, int, 0444);
86 MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");
87
88 static int smp_affinity_enable = 1;
89 module_param(smp_affinity_enable, int, 0444);
90 MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable Default: enable(1)");
91
92 static int max_msix_vectors = -1;
93 module_param(max_msix_vectors, int, 0444);
94 MODULE_PARM_DESC(max_msix_vectors,
95 " max msix vectors");
96
97 static int irqpoll_weight = -1;
98 module_param(irqpoll_weight, int, 0444);
99 MODULE_PARM_DESC(irqpoll_weight,
100 "irq poll weight (default= one fourth of HBA queue depth)");
101
102 static int mpt3sas_fwfault_debug;
103 MODULE_PARM_DESC(mpt3sas_fwfault_debug,
104 " enable detection of firmware fault and halt firmware - (default=0)");
105
106 static int perf_mode = -1;
107 module_param(perf_mode, int, 0444);
108 MODULE_PARM_DESC(perf_mode,
109 "Performance mode (only for Aero/Sea Generation), options:\n\t\t"
110 "0 - balanced: high iops mode is enabled &\n\t\t"
111 "interrupt coalescing is enabled only on high iops queues,\n\t\t"
112 "1 - iops: high iops mode is disabled &\n\t\t"
113 "interrupt coalescing is enabled on all queues,\n\t\t"
114 "2 - latency: high iops mode is disabled &\n\t\t"
115 "interrupt coalescing is enabled on all queues with timeout value 0xA,\n"
116 "\t\tdefault - default perf_mode is 'balanced'"
117 );
118
119 enum mpt3sas_perf_mode {
120 MPT_PERF_MODE_DEFAULT = -1,
121 MPT_PERF_MODE_BALANCED = 0,
122 MPT_PERF_MODE_IOPS = 1,
123 MPT_PERF_MODE_LATENCY = 2,
124 };
125
126 static int
127 _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc);
128
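/**
 * mpt3sas_base_check_cmd_timeout - check whether a timed-out command was
 *	terminated by a host reset or genuinely timed out
 * @ioc: per adapter object
 * @status: status of the issued command
 * @mpi_request: request message frame
 * @sz: size of the message frame
 *
 * Return: 1 if a reset should be issued, 0 otherwise.
 */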
141 u8
142 mpt3sas_base_check_cmd_timeout(struct MPT3SAS_ADAPTER *ioc,
143 u8 status, void *mpi_request, int sz)
144 {
145 u8 issue_reset = 0;
146
147 if (!(status & MPT3_CMD_RESET))
148 issue_reset = 1;
149
150 ioc_err(ioc, "Command %s\n",
151 issue_reset == 0 ? "terminated due to Host Reset" : "Timeout");
152 _debug_dump_mf(mpi_request, sz);
153
154 return issue_reset;
155 }
156
157
158
159
160
161
162
163
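/**
 * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug
 * @val: value to set
 * @kp: kernel parameter descriptor
 *
 * Propagates the mpt3sas_fwfault_debug module parameter to all
 * registered adapters.
 *
 * Return: 0 on success, otherwise the error from param_set_int().
 */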
164 static int
165 _scsih_set_fwfault_debug(const char *val, const struct kernel_param *kp)
166 {
167 int ret = param_set_int(val, kp);
168 struct MPT3SAS_ADAPTER *ioc;
169
170 if (ret)
171 return ret;
172
173
174 pr_info("setting fwfault_debug(%d)\n", mpt3sas_fwfault_debug);
175 spin_lock(&gioc_lock);
176 list_for_each_entry(ioc, &mpt3sas_ioc_list, list)
177 ioc->fwfault_debug = mpt3sas_fwfault_debug;
178 spin_unlock(&gioc_lock);
179 return 0;
180 }
181 module_param_call(mpt3sas_fwfault_debug, _scsih_set_fwfault_debug,
182 param_get_int, &mpt3sas_fwfault_debug, 0644);
183
184
185
186
187
188
189
190
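/**
 * _base_readl_aero - retry readl for a maximum of three times
 * @addr: MPT Fusion system interface register address
 *
 * On Aero/Sea controllers a register read can transiently return zero,
 * so retry the readl() up to three times while zero is returned.
 */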
191 static inline u32
192 _base_readl_aero(const volatile void __iomem *addr)
193 {
194 u32 i = 0, ret_val;
195
196 do {
197 ret_val = readl(addr);
198 i++;
199 } while (ret_val == 0 && i < 3);
200
201 return ret_val;
202 }
203
204 static inline u32
205 _base_readl(const volatile void __iomem *addr)
206 {
207 return readl(addr);
208 }
209
210
211
212
213
214
215
216
217
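/**
 * _base_clone_reply_to_sys_mem - copies reply to reply free iomem
 *				  in BAR0 space
 * @ioc: per adapter object
 * @reply: reply message frame (lower 32bit addr)
 * @index: system request message index
 */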
218 static void
219 _base_clone_reply_to_sys_mem(struct MPT3SAS_ADAPTER *ioc, u32 reply,
220 u32 index)
221 {
222
223
224
225
226
227 u16 cmd_credit = ioc->facts.RequestCredit + 1;
228 void __iomem *reply_free_iomem = (void __iomem *)ioc->chip +
229 MPI_FRAME_START_OFFSET +
230 (cmd_credit * ioc->request_sz) + (index * sizeof(u32));
231
232 writel(reply, reply_free_iomem);
233 }
234
235
236
237
238
239
240
241
242
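/**
 * _base_clone_mpi_to_sys_mem - writes/copies MPI frames
 *				to system/BAR0 region
 * @dst_iomem: pointer to the destination location in BAR0 space
 * @src: pointer to the source data
 * @size: size of data to be copied
 */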
243 static void
244 _base_clone_mpi_to_sys_mem(void *dst_iomem, void *src, u32 size)
245 {
246 int i;
247 u32 *src_virt_mem = (u32 *)src;
248
249 for (i = 0; i < size/4; i++)
250 writel((u32)src_virt_mem[i],
251 (void __iomem *)dst_iomem + (i * 4));
252 }
253
254
255
256
257
258
259
260
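/**
 * _base_clone_to_sys_mem - writes/copies data to system/BAR0 region
 * @dst_iomem: pointer to the destination location in BAR0 space
 * @src: pointer to the source data
 * @size: size of data to be copied
 */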
261 static void
262 _base_clone_to_sys_mem(void __iomem *dst_iomem, void *src, u32 size)
263 {
264 int i;
265 u32 *src_virt_mem = (u32 *)(src);
266
267 for (i = 0; i < size/4; i++)
268 writel((u32)src_virt_mem[i],
269 (void __iomem *)dst_iomem + (i * 4));
270 }
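/**
 * _base_get_chain - calculates and returns the virtual chain address
 *			for the provided smid in BAR0 space
 * @ioc: per adapter object
 * @smid: system request message index
 * @sge_chain_count: scatter gather chain count
 *
 * Return: the chain address.
 */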
282 static inline void __iomem*
283 _base_get_chain(struct MPT3SAS_ADAPTER *ioc, u16 smid,
284 u8 sge_chain_count)
285 {
286 void __iomem *base_chain, *chain_virt;
287 u16 cmd_credit = ioc->facts.RequestCredit + 1;
288
289 base_chain = (void __iomem *)ioc->chip + MPI_FRAME_START_OFFSET +
290 (cmd_credit * ioc->request_sz) +
291 REPLY_FREE_POOL_SIZE;
292 chain_virt = base_chain + (smid * ioc->facts.MaxChainDepth *
293 ioc->request_sz) + (sge_chain_count * ioc->request_sz);
294 return chain_virt;
295 }
296
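/**
 * _base_get_chain_phys - calculates and returns the physical chain
 *			address for the provided smid in BAR0 space
 * @ioc: per adapter object
 * @smid: system request message index
 * @sge_chain_count: scatter gather chain count
 *
 * Return: the physical chain address.
 */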
308 static inline phys_addr_t
309 _base_get_chain_phys(struct MPT3SAS_ADAPTER *ioc, u16 smid,
310 u8 sge_chain_count)
311 {
312 phys_addr_t base_chain_phys, chain_phys;
313 u16 cmd_credit = ioc->facts.RequestCredit + 1;
314
315 base_chain_phys = ioc->chip_phys + MPI_FRAME_START_OFFSET +
316 (cmd_credit * ioc->request_sz) +
317 REPLY_FREE_POOL_SIZE;
318 chain_phys = base_chain_phys + (smid * ioc->facts.MaxChainDepth *
319 ioc->request_sz) + (sge_chain_count * ioc->request_sz);
320 return chain_phys;
321 }
322
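/**
 * _base_get_buffer_bar0 - calculates and returns the BAR0 mapped host
 *			buffer address for the provided smid
 *			(each smid is allotted a 64K buffer beyond the
 *			chain region)
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return: the host buffer address.
 */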
334 static void __iomem *
335 _base_get_buffer_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid)
336 {
337 u16 cmd_credit = ioc->facts.RequestCredit + 1;
338
339 void __iomem *chain_end = _base_get_chain(ioc,
340 cmd_credit + 1,
341 ioc->facts.MaxChainDepth);
342 return chain_end + (smid * 64 * 1024);
343 }
344
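/**
 * _base_get_buffer_phys_bar0 - calculates and returns the physical
 *			address of the BAR0 mapped host buffer for the
 *			provided smid
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return: the host buffer physical address.
 */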
355 static phys_addr_t
356 _base_get_buffer_phys_bar0(struct MPT3SAS_ADAPTER *ioc, u16 smid)
357 {
358 u16 cmd_credit = ioc->facts.RequestCredit + 1;
359 phys_addr_t chain_end_phys = _base_get_chain_phys(ioc,
360 cmd_credit + 1,
361 ioc->facts.MaxChainDepth);
362 return chain_end_phys + (smid * 64 * 1024);
363 }
364
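/**
 * _base_get_chain_buffer_dma_to_chain_buffer - iterates the chain
 *	lookup list and provides the chain_buffer address matching the
 *	dma address of the chain buffer
 * @ioc: per adapter object
 * @chain_buffer_dma: chain buffer dma address
 *
 * Return: the virtual chain buffer, or NULL if not found.
 */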
376 static void *
377 _base_get_chain_buffer_dma_to_chain_buffer(struct MPT3SAS_ADAPTER *ioc,
378 dma_addr_t chain_buffer_dma)
379 {
380 u16 index, j;
381 struct chain_tracker *ct;
382
383 for (index = 0; index < ioc->scsiio_depth; index++) {
384 for (j = 0; j < ioc->chains_needed_per_io; j++) {
385 ct = &ioc->chain_lookup[index].chains_per_smid[j];
386 if (ct && ct->chain_buffer_dma == chain_buffer_dma)
387 return ct->chain_buffer;
388 }
389 }
390 ioc_info(ioc, "Provided chain_buffer_dma address is not in the lookup list\n");
391 return NULL;
392 }
393
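/**
 * _clone_sg_entries - MPI EP's scsiio and config requests
 *			are handled here. Base function for
 *			double buffering, before submitting
 *			the requests.
 * @ioc: per adapter object
 * @mpi_request: mf request pointer
 * @smid: system request message index
 */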
404 static void _clone_sg_entries(struct MPT3SAS_ADAPTER *ioc,
405 void *mpi_request, u16 smid)
406 {
407 Mpi2SGESimple32_t *sgel, *sgel_next;
408 u32 sgl_flags, sge_chain_count = 0;
409 bool is_write = false;
410 u16 i = 0;
411 void __iomem *buffer_iomem;
412 phys_addr_t buffer_iomem_phys;
413 void __iomem *buff_ptr;
414 phys_addr_t buff_ptr_phys;
415 void __iomem *dst_chain_addr[MCPU_MAX_CHAINS_PER_IO];
416 void *src_chain_addr[MCPU_MAX_CHAINS_PER_IO];
417 phys_addr_t dst_addr_phys;
418 MPI2RequestHeader_t *request_hdr;
419 struct scsi_cmnd *scmd;
420 struct scatterlist *sg_scmd = NULL;
421 int is_scsiio_req = 0;
422
423 request_hdr = (MPI2RequestHeader_t *) mpi_request;
424
425 if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST) {
426 Mpi25SCSIIORequest_t *scsiio_request =
427 (Mpi25SCSIIORequest_t *)mpi_request;
428 sgel = (Mpi2SGESimple32_t *) &scsiio_request->SGL;
429 is_scsiio_req = 1;
430 } else if (request_hdr->Function == MPI2_FUNCTION_CONFIG) {
431 Mpi2ConfigRequest_t *config_req =
432 (Mpi2ConfigRequest_t *)mpi_request;
433 sgel = (Mpi2SGESimple32_t *) &config_req->PageBufferSGE;
434 } else
435 return;
436
437
438
439
440
441
442 if (is_scsiio_req) {
443
444 scmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
445 if (scmd == NULL) {
446 ioc_err(ioc, "scmd is NULL\n");
447 return;
448 }
449
450
451 sg_scmd = scsi_sglist(scmd);
452 }
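
	/*
	 * For the mCPU endpoint the request frames, reply free pool,
	 * chain elements and the per-smid host data buffers all live in
	 * BAR0 system memory; fetch the host buffer (virtual and
	 * physical) reserved for this smid.
	 */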
469 buffer_iomem = _base_get_buffer_bar0(ioc, smid);
470 buffer_iomem_phys = _base_get_buffer_phys_bar0(ioc, smid);
471
472 buff_ptr = buffer_iomem;
473 buff_ptr_phys = buffer_iomem_phys;
474 WARN_ON(buff_ptr_phys > U32_MAX);
475
476 if (le32_to_cpu(sgel->FlagsLength) &
477 (MPI2_SGE_FLAGS_HOST_TO_IOC << MPI2_SGE_FLAGS_SHIFT))
478 is_write = 1;
479
480 for (i = 0; i < MPT_MIN_PHYS_SEGMENTS + ioc->facts.MaxChainDepth; i++) {
481
482 sgl_flags =
483 (le32_to_cpu(sgel->FlagsLength) >> MPI2_SGE_FLAGS_SHIFT);
484
485 switch (sgl_flags & MPI2_SGE_FLAGS_ELEMENT_MASK) {
486 case MPI2_SGE_FLAGS_CHAIN_ELEMENT:
487
488
489
490
491
492 sgel_next =
493 _base_get_chain_buffer_dma_to_chain_buffer(ioc,
494 le32_to_cpu(sgel->Address));
495 if (sgel_next == NULL)
496 return;
497
498
499
500
501 dst_chain_addr[sge_chain_count] =
502 _base_get_chain(ioc,
503 smid, sge_chain_count);
504 src_chain_addr[sge_chain_count] =
505 (void *) sgel_next;
506 dst_addr_phys = _base_get_chain_phys(ioc,
507 smid, sge_chain_count);
508 WARN_ON(dst_addr_phys > U32_MAX);
509 sgel->Address =
510 cpu_to_le32(lower_32_bits(dst_addr_phys));
511 sgel = sgel_next;
512 sge_chain_count++;
513 break;
514 case MPI2_SGE_FLAGS_SIMPLE_ELEMENT:
515 if (is_write) {
516 if (is_scsiio_req) {
517 _base_clone_to_sys_mem(buff_ptr,
518 sg_virt(sg_scmd),
519 (le32_to_cpu(sgel->FlagsLength) &
520 0x00ffffff));
521
522
523
524
525 sgel->Address =
526 cpu_to_le32((u32)buff_ptr_phys);
527 } else {
528 _base_clone_to_sys_mem(buff_ptr,
529 ioc->config_vaddr,
530 (le32_to_cpu(sgel->FlagsLength) &
531 0x00ffffff));
532 sgel->Address =
533 cpu_to_le32((u32)buff_ptr_phys);
534 }
535 }
536 buff_ptr += (le32_to_cpu(sgel->FlagsLength) &
537 0x00ffffff);
538 buff_ptr_phys += (le32_to_cpu(sgel->FlagsLength) &
539 0x00ffffff);
540 if ((le32_to_cpu(sgel->FlagsLength) &
541 (MPI2_SGE_FLAGS_END_OF_BUFFER
542 << MPI2_SGE_FLAGS_SHIFT)))
543 goto eob_clone_chain;
544 else {
545
546
547
548
549
550
551 if (is_scsiio_req) {
552 sg_scmd = sg_next(sg_scmd);
553 if (sg_scmd)
554 sgel++;
555 else
556 goto eob_clone_chain;
557 }
558 }
559 break;
560 }
561 }
562
563 eob_clone_chain:
564 for (i = 0; i < sge_chain_count; i++) {
565 if (is_scsiio_req)
566 _base_clone_to_sys_mem(dst_chain_addr[i],
567 src_chain_addr[i], ioc->request_sz);
568 }
569 }
570
571
572
573
574
575
576
577
578
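/**
 * mpt3sas_remove_dead_ioc_func - kthread context to remove dead ioc
 * @arg: input argument, used to derive ioc
 *
 * Return:
 * 0 if controller is removed from pci subsystem.
 * -1 for other case.
 */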
579 static int mpt3sas_remove_dead_ioc_func(void *arg)
580 {
581 struct MPT3SAS_ADAPTER *ioc = (struct MPT3SAS_ADAPTER *)arg;
582 struct pci_dev *pdev;
583
584 if (!ioc)
585 return -1;
586
587 pdev = ioc->pdev;
588 if (!pdev)
589 return -1;
590 pci_stop_and_remove_bus_device_locked(pdev);
591 return 0;
592 }
593
594
595
596
597
598
599
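/**
 * _base_fault_reset_work - workq handling ioc fault conditions
 * @work: input argument, used to derive ioc
 *
 * Context: sleep.
 */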
600 static void
601 _base_fault_reset_work(struct work_struct *work)
602 {
603 struct MPT3SAS_ADAPTER *ioc =
604 container_of(work, struct MPT3SAS_ADAPTER, fault_reset_work.work);
605 unsigned long flags;
606 u32 doorbell;
607 int rc;
608 struct task_struct *p;
609
610
611 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
612 if (ioc->shost_recovery || ioc->pci_error_recovery)
613 goto rearm_timer;
614 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
615
616 doorbell = mpt3sas_base_get_iocstate(ioc, 0);
617 if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_MASK) {
618 ioc_err(ioc, "SAS host is non-operational !!!!\n");
619
620
621
622
623
624
625
626
627
628 if (ioc->non_operational_loop++ < 5) {
629 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock,
630 flags);
631 goto rearm_timer;
632 }
633
634
635
636
637
638
639
640
641 ioc->schedule_dead_ioc_flush_running_cmds(ioc);
642
643
644
645
646 ioc->remove_host = 1;
647
648 p = kthread_run(mpt3sas_remove_dead_ioc_func, ioc,
649 "%s_dead_ioc_%d", ioc->driver_name, ioc->id);
650 if (IS_ERR(p))
651 ioc_err(ioc, "%s: Running mpt3sas_dead_ioc thread failed !!!!\n",
652 __func__);
653 else
654 ioc_err(ioc, "%s: Running mpt3sas_dead_ioc thread success !!!!\n",
655 __func__);
656 return;
657 }
658
659 ioc->non_operational_loop = 0;
660
661 if ((doorbell & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL) {
662 rc = mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
663 ioc_warn(ioc, "%s: hard reset: %s\n",
664 __func__, rc == 0 ? "success" : "failed");
665 doorbell = mpt3sas_base_get_iocstate(ioc, 0);
666 if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
667 mpt3sas_base_fault_info(ioc, doorbell &
668 MPI2_DOORBELL_DATA_MASK);
669 if (rc && (doorbell & MPI2_IOC_STATE_MASK) !=
670 MPI2_IOC_STATE_OPERATIONAL)
671 return;
672 }
673
674 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
675 rearm_timer:
676 if (ioc->fault_reset_work_q)
677 queue_delayed_work(ioc->fault_reset_work_q,
678 &ioc->fault_reset_work,
679 msecs_to_jiffies(FAULT_POLLING_INTERVAL));
680 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
681 }
682
683
684
685
686
687
688
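/**
 * mpt3sas_base_start_watchdog - start the fault_reset_work_q
 * @ioc: per adapter object
 *
 * Context: sleep.
 */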
689 void
690 mpt3sas_base_start_watchdog(struct MPT3SAS_ADAPTER *ioc)
691 {
692 unsigned long flags;
693
694 if (ioc->fault_reset_work_q)
695 return;
696
697
698
699 INIT_DELAYED_WORK(&ioc->fault_reset_work, _base_fault_reset_work);
700 snprintf(ioc->fault_reset_work_q_name,
701 sizeof(ioc->fault_reset_work_q_name), "poll_%s%d_status",
702 ioc->driver_name, ioc->id);
703 ioc->fault_reset_work_q =
704 create_singlethread_workqueue(ioc->fault_reset_work_q_name);
705 if (!ioc->fault_reset_work_q) {
706 ioc_err(ioc, "%s: failed (line=%d)\n", __func__, __LINE__);
707 return;
708 }
709 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
710 if (ioc->fault_reset_work_q)
711 queue_delayed_work(ioc->fault_reset_work_q,
712 &ioc->fault_reset_work,
713 msecs_to_jiffies(FAULT_POLLING_INTERVAL));
714 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
715 }
716
717
718
719
720
721
722
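/**
 * mpt3sas_base_stop_watchdog - stop the fault_reset_work_q
 * @ioc: per adapter object
 *
 * Context: sleep.
 */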
723 void
724 mpt3sas_base_stop_watchdog(struct MPT3SAS_ADAPTER *ioc)
725 {
726 unsigned long flags;
727 struct workqueue_struct *wq;
728
729 spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
730 wq = ioc->fault_reset_work_q;
731 ioc->fault_reset_work_q = NULL;
732 spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
733 if (wq) {
734 if (!cancel_delayed_work_sync(&ioc->fault_reset_work))
735 flush_workqueue(wq);
736 destroy_workqueue(wq);
737 }
738 }
739
740
741
742
743
744
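/**
 * mpt3sas_base_fault_info - verbose translation of firmware FAULT code
 * @ioc: per adapter object
 * @fault_code: fault code
 */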
745 void
746 mpt3sas_base_fault_info(struct MPT3SAS_ADAPTER *ioc, u16 fault_code)
747 {
748 ioc_err(ioc, "fault_state(0x%04x)!\n", fault_code);
749 }
750
751
752
753
754
755
756
757
758
759
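/**
 * mpt3sas_halt_firmware - halt the mpt controller firmware
 * @ioc: per adapter object
 *
 * For debugging timeout related issues. Writing 0xC0FFEE00 to the
 * doorbell register halts the controller firmware; with fwfault_debug
 * set to 2 the driver spins instead of panicking so the state can be
 * captured.
 */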
760 void
761 mpt3sas_halt_firmware(struct MPT3SAS_ADAPTER *ioc)
762 {
763 u32 doorbell;
764
765 if (!ioc->fwfault_debug)
766 return;
767
768 dump_stack();
769
770 doorbell = ioc->base_readl(&ioc->chip->Doorbell);
771 if ((doorbell & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
772 mpt3sas_base_fault_info(ioc, doorbell);
773 else {
774 writel(0xC0FFEE00, &ioc->chip->Doorbell);
775 ioc_err(ioc, "Firmware is halted due to command timeout\n");
776 }
777
778 if (ioc->fwfault_debug == 2)
779 for (;;)
780 ;
781 else
782 panic("panic in %s\n", __func__);
783 }
784
785
786
787
788
789
790
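/**
 * _base_sas_ioc_info - verbose translation of the ioc status
 * @ioc: per adapter object
 * @mpi_reply: reply mf payload returned from firmware
 * @request_hdr: request mf
 */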
791 static void
792 _base_sas_ioc_info(struct MPT3SAS_ADAPTER *ioc, MPI2DefaultReply_t *mpi_reply,
793 MPI2RequestHeader_t *request_hdr)
794 {
795 u16 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) &
796 MPI2_IOCSTATUS_MASK;
797 char *desc = NULL;
798 u16 frame_sz;
799 char *func_str = NULL;
800
801
802 if (request_hdr->Function == MPI2_FUNCTION_SCSI_IO_REQUEST ||
803 request_hdr->Function == MPI2_FUNCTION_RAID_SCSI_IO_PASSTHROUGH ||
804 request_hdr->Function == MPI2_FUNCTION_EVENT_NOTIFICATION)
805 return;
806
807 if (ioc_status == MPI2_IOCSTATUS_CONFIG_INVALID_PAGE)
808 return;
809
810 switch (ioc_status) {
811
812
813
814
815
816 case MPI2_IOCSTATUS_INVALID_FUNCTION:
817 desc = "invalid function";
818 break;
819 case MPI2_IOCSTATUS_BUSY:
820 desc = "busy";
821 break;
822 case MPI2_IOCSTATUS_INVALID_SGL:
823 desc = "invalid sgl";
824 break;
825 case MPI2_IOCSTATUS_INTERNAL_ERROR:
826 desc = "internal error";
827 break;
828 case MPI2_IOCSTATUS_INVALID_VPID:
829 desc = "invalid vpid";
830 break;
831 case MPI2_IOCSTATUS_INSUFFICIENT_RESOURCES:
832 desc = "insufficient resources";
833 break;
834 case MPI2_IOCSTATUS_INSUFFICIENT_POWER:
835 desc = "insufficient power";
836 break;
837 case MPI2_IOCSTATUS_INVALID_FIELD:
838 desc = "invalid field";
839 break;
840 case MPI2_IOCSTATUS_INVALID_STATE:
841 desc = "invalid state";
842 break;
843 case MPI2_IOCSTATUS_OP_STATE_NOT_SUPPORTED:
844 desc = "op state not supported";
845 break;
846
847
848
849
850
851 case MPI2_IOCSTATUS_CONFIG_INVALID_ACTION:
852 desc = "config invalid action";
853 break;
854 case MPI2_IOCSTATUS_CONFIG_INVALID_TYPE:
855 desc = "config invalid type";
856 break;
857 case MPI2_IOCSTATUS_CONFIG_INVALID_PAGE:
858 desc = "config invalid page";
859 break;
860 case MPI2_IOCSTATUS_CONFIG_INVALID_DATA:
861 desc = "config invalid data";
862 break;
863 case MPI2_IOCSTATUS_CONFIG_NO_DEFAULTS:
864 desc = "config no defaults";
865 break;
866 case MPI2_IOCSTATUS_CONFIG_CANT_COMMIT:
867 desc = "config cant commit";
868 break;
869
870
871
872
873
874 case MPI2_IOCSTATUS_SCSI_RECOVERED_ERROR:
875 case MPI2_IOCSTATUS_SCSI_INVALID_DEVHANDLE:
876 case MPI2_IOCSTATUS_SCSI_DEVICE_NOT_THERE:
877 case MPI2_IOCSTATUS_SCSI_DATA_OVERRUN:
878 case MPI2_IOCSTATUS_SCSI_DATA_UNDERRUN:
879 case MPI2_IOCSTATUS_SCSI_IO_DATA_ERROR:
880 case MPI2_IOCSTATUS_SCSI_PROTOCOL_ERROR:
881 case MPI2_IOCSTATUS_SCSI_TASK_TERMINATED:
882 case MPI2_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:
883 case MPI2_IOCSTATUS_SCSI_TASK_MGMT_FAILED:
884 case MPI2_IOCSTATUS_SCSI_IOC_TERMINATED:
885 case MPI2_IOCSTATUS_SCSI_EXT_TERMINATED:
886 break;
887
888
889
890
891
892 case MPI2_IOCSTATUS_EEDP_GUARD_ERROR:
893 desc = "eedp guard error";
894 break;
895 case MPI2_IOCSTATUS_EEDP_REF_TAG_ERROR:
896 desc = "eedp ref tag error";
897 break;
898 case MPI2_IOCSTATUS_EEDP_APP_TAG_ERROR:
899 desc = "eedp app tag error";
900 break;
901
902
903
904
905
906 case MPI2_IOCSTATUS_TARGET_INVALID_IO_INDEX:
907 desc = "target invalid io index";
908 break;
909 case MPI2_IOCSTATUS_TARGET_ABORTED:
910 desc = "target aborted";
911 break;
912 case MPI2_IOCSTATUS_TARGET_NO_CONN_RETRYABLE:
913 desc = "target no conn retryable";
914 break;
915 case MPI2_IOCSTATUS_TARGET_NO_CONNECTION:
916 desc = "target no connection";
917 break;
918 case MPI2_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH:
919 desc = "target xfer count mismatch";
920 break;
921 case MPI2_IOCSTATUS_TARGET_DATA_OFFSET_ERROR:
922 desc = "target data offset error";
923 break;
924 case MPI2_IOCSTATUS_TARGET_TOO_MUCH_WRITE_DATA:
925 desc = "target too much write data";
926 break;
927 case MPI2_IOCSTATUS_TARGET_IU_TOO_SHORT:
928 desc = "target iu too short";
929 break;
930 case MPI2_IOCSTATUS_TARGET_ACK_NAK_TIMEOUT:
931 desc = "target ack nak timeout";
932 break;
933 case MPI2_IOCSTATUS_TARGET_NAK_RECEIVED:
934 desc = "target nak received";
935 break;
936
937
938
939
940
941 case MPI2_IOCSTATUS_SAS_SMP_REQUEST_FAILED:
942 desc = "smp request failed";
943 break;
944 case MPI2_IOCSTATUS_SAS_SMP_DATA_OVERRUN:
945 desc = "smp data overrun";
946 break;
947
948
949
950
951
952 case MPI2_IOCSTATUS_DIAGNOSTIC_RELEASED:
953 desc = "diagnostic released";
954 break;
955 default:
956 break;
957 }
958
959 if (!desc)
960 return;
961
962 switch (request_hdr->Function) {
963 case MPI2_FUNCTION_CONFIG:
964 frame_sz = sizeof(Mpi2ConfigRequest_t) + ioc->sge_size;
965 func_str = "config_page";
966 break;
967 case MPI2_FUNCTION_SCSI_TASK_MGMT:
968 frame_sz = sizeof(Mpi2SCSITaskManagementRequest_t);
969 func_str = "task_mgmt";
970 break;
971 case MPI2_FUNCTION_SAS_IO_UNIT_CONTROL:
972 frame_sz = sizeof(Mpi2SasIoUnitControlRequest_t);
973 func_str = "sas_iounit_ctl";
974 break;
975 case MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR:
976 frame_sz = sizeof(Mpi2SepRequest_t);
977 func_str = "enclosure";
978 break;
979 case MPI2_FUNCTION_IOC_INIT:
980 frame_sz = sizeof(Mpi2IOCInitRequest_t);
981 func_str = "ioc_init";
982 break;
983 case MPI2_FUNCTION_PORT_ENABLE:
984 frame_sz = sizeof(Mpi2PortEnableRequest_t);
985 func_str = "port_enable";
986 break;
987 case MPI2_FUNCTION_SMP_PASSTHROUGH:
988 frame_sz = sizeof(Mpi2SmpPassthroughRequest_t) + ioc->sge_size;
989 func_str = "smp_passthru";
990 break;
991 case MPI2_FUNCTION_NVME_ENCAPSULATED:
992 frame_sz = sizeof(Mpi26NVMeEncapsulatedRequest_t) +
993 ioc->sge_size;
994 func_str = "nvme_encapsulated";
995 break;
996 default:
997 frame_sz = 32;
998 func_str = "unknown";
999 break;
1000 }
1001
1002 ioc_warn(ioc, "ioc_status: %s(0x%04x), request(0x%p),(%s)\n",
1003 desc, ioc_status, request_hdr, func_str);
1004
1005 _debug_dump_mf(request_hdr, frame_sz/4);
1006 }
1007
1008
1009
1010
1011
1012
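/**
 * _base_display_event_data - verbose translation of firmware async events
 * @ioc: per adapter object
 * @mpi_reply: reply mf payload returned from firmware
 */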
1013 static void
1014 _base_display_event_data(struct MPT3SAS_ADAPTER *ioc,
1015 Mpi2EventNotificationReply_t *mpi_reply)
1016 {
1017 char *desc = NULL;
1018 u16 event;
1019
1020 if (!(ioc->logging_level & MPT_DEBUG_EVENTS))
1021 return;
1022
1023 event = le16_to_cpu(mpi_reply->Event);
1024
1025 switch (event) {
1026 case MPI2_EVENT_LOG_DATA:
1027 desc = "Log Data";
1028 break;
1029 case MPI2_EVENT_STATE_CHANGE:
1030 desc = "Status Change";
1031 break;
1032 case MPI2_EVENT_HARD_RESET_RECEIVED:
1033 desc = "Hard Reset Received";
1034 break;
1035 case MPI2_EVENT_EVENT_CHANGE:
1036 desc = "Event Change";
1037 break;
1038 case MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE:
1039 desc = "Device Status Change";
1040 break;
1041 case MPI2_EVENT_IR_OPERATION_STATUS:
1042 if (!ioc->hide_ir_msg)
1043 desc = "IR Operation Status";
1044 break;
1045 case MPI2_EVENT_SAS_DISCOVERY:
1046 {
1047 Mpi2EventDataSasDiscovery_t *event_data =
1048 (Mpi2EventDataSasDiscovery_t *)mpi_reply->EventData;
1049 ioc_info(ioc, "Discovery: (%s)",
1050 event_data->ReasonCode == MPI2_EVENT_SAS_DISC_RC_STARTED ?
1051 "start" : "stop");
1052 if (event_data->DiscoveryStatus)
1053 pr_cont(" discovery_status(0x%08x)",
1054 le32_to_cpu(event_data->DiscoveryStatus));
1055 pr_cont("\n");
1056 return;
1057 }
1058 case MPI2_EVENT_SAS_BROADCAST_PRIMITIVE:
1059 desc = "SAS Broadcast Primitive";
1060 break;
1061 case MPI2_EVENT_SAS_INIT_DEVICE_STATUS_CHANGE:
1062 desc = "SAS Init Device Status Change";
1063 break;
1064 case MPI2_EVENT_SAS_INIT_TABLE_OVERFLOW:
1065 desc = "SAS Init Table Overflow";
1066 break;
1067 case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
1068 desc = "SAS Topology Change List";
1069 break;
1070 case MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE:
1071 desc = "SAS Enclosure Device Status Change";
1072 break;
1073 case MPI2_EVENT_IR_VOLUME:
1074 if (!ioc->hide_ir_msg)
1075 desc = "IR Volume";
1076 break;
1077 case MPI2_EVENT_IR_PHYSICAL_DISK:
1078 if (!ioc->hide_ir_msg)
1079 desc = "IR Physical Disk";
1080 break;
1081 case MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST:
1082 if (!ioc->hide_ir_msg)
1083 desc = "IR Configuration Change List";
1084 break;
1085 case MPI2_EVENT_LOG_ENTRY_ADDED:
1086 if (!ioc->hide_ir_msg)
1087 desc = "Log Entry Added";
1088 break;
1089 case MPI2_EVENT_TEMP_THRESHOLD:
1090 desc = "Temperature Threshold";
1091 break;
1092 case MPI2_EVENT_ACTIVE_CABLE_EXCEPTION:
1093 desc = "Cable Event";
1094 break;
1095 case MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR:
1096 desc = "SAS Device Discovery Error";
1097 break;
1098 case MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE:
1099 desc = "PCIE Device Status Change";
1100 break;
1101 case MPI2_EVENT_PCIE_ENUMERATION:
1102 {
1103 Mpi26EventDataPCIeEnumeration_t *event_data =
1104 (Mpi26EventDataPCIeEnumeration_t *)mpi_reply->EventData;
1105 ioc_info(ioc, "PCIE Enumeration: (%s)",
1106 event_data->ReasonCode == MPI26_EVENT_PCIE_ENUM_RC_STARTED ?
1107 "start" : "stop");
1108 if (event_data->EnumerationStatus)
1109 pr_cont(" enumeration_status(0x%08x)",
1110 le32_to_cpu(event_data->EnumerationStatus));
1111 pr_cont("\n");
1112 return;
1113 }
1114 case MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST:
1115 desc = "PCIE Topology Change List";
1116 break;
1117 }
1118
1119 if (!desc)
1120 return;
1121
1122 ioc_info(ioc, "%s\n", desc);
1123 }
1124
1125
1126
1127
1128
1129
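/**
 * _base_sas_log_info - verbose translation of firmware log info
 * @ioc: per adapter object
 * @log_info: log info
 */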
1130 static void
1131 _base_sas_log_info(struct MPT3SAS_ADAPTER *ioc, u32 log_info)
1132 {
1133 union loginfo_type {
1134 u32 loginfo;
1135 struct {
1136 u32 subcode:16;
1137 u32 code:8;
1138 u32 originator:4;
1139 u32 bus_type:4;
1140 } dw;
1141 };
1142 union loginfo_type sas_loginfo;
1143 char *originator_str = NULL;
1144
1145 sas_loginfo.loginfo = log_info;
1146 if (sas_loginfo.dw.bus_type != 3)
1147 return;
1148
1149
1150 if (log_info == 0x31170000)
1151 return;
1152
1153
1154 if (ioc->ignore_loginfos && (log_info == 0x30050000 || log_info ==
1155 0x31140000 || log_info == 0x31130000))
1156 return;
1157
1158 switch (sas_loginfo.dw.originator) {
1159 case 0:
1160 originator_str = "IOP";
1161 break;
1162 case 1:
1163 originator_str = "PL";
1164 break;
1165 case 2:
1166 if (!ioc->hide_ir_msg)
1167 originator_str = "IR";
1168 else
1169 originator_str = "WarpDrive";
1170 break;
1171 }
1172
1173 ioc_warn(ioc, "log_info(0x%08x): originator(%s), code(0x%02x), sub_code(0x%04x)\n",
1174 log_info,
1175 originator_str, sas_loginfo.dw.code, sas_loginfo.dw.subcode);
1176 }
1177
1178
1179
1180
1181
1182
1183
1184
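/**
 * _base_display_reply_info - handle reply descriptors depending on
 *	IOC status and log info
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame (lower 32bit addr)
 */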
1185 static void
1186 _base_display_reply_info(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
1187 u32 reply)
1188 {
1189 MPI2DefaultReply_t *mpi_reply;
1190 u16 ioc_status;
1191 u32 loginfo = 0;
1192
1193 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
1194 if (unlikely(!mpi_reply)) {
1195 ioc_err(ioc, "mpi_reply not valid at %s:%d/%s()!\n",
1196 __FILE__, __LINE__, __func__);
1197 return;
1198 }
1199 ioc_status = le16_to_cpu(mpi_reply->IOCStatus);
1200
1201 if ((ioc_status & MPI2_IOCSTATUS_MASK) &&
1202 (ioc->logging_level & MPT_DEBUG_REPLY)) {
1203 _base_sas_ioc_info(ioc, mpi_reply,
1204 mpt3sas_base_get_msg_frame(ioc, smid));
1205 }
1206
1207 if (ioc_status & MPI2_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
1208 loginfo = le32_to_cpu(mpi_reply->IOCLogInfo);
1209 _base_sas_log_info(ioc, loginfo);
1210 }
1211
1212 if (ioc_status || loginfo) {
1213 ioc_status &= MPI2_IOCSTATUS_MASK;
1214 mpt3sas_trigger_mpi(ioc, ioc_status, loginfo);
1215 }
1216 }
1217
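/**
 * mpt3sas_base_done - base internal command completion routine
 * @ioc: per adapter object
 * @smid: system request message index
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame (lower 32bit addr)
 *
 * Return: 1 meaning mf should be freed from _base_interrupt,
 *	0 means the mf is freed from this function.
 */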
1229 u8
1230 mpt3sas_base_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
1231 u32 reply)
1232 {
1233 MPI2DefaultReply_t *mpi_reply;
1234
1235 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
1236 if (mpi_reply && mpi_reply->Function == MPI2_FUNCTION_EVENT_ACK)
1237 return mpt3sas_check_for_pending_internal_cmds(ioc, smid);
1238
1239 if (ioc->base_cmds.status == MPT3_CMD_NOT_USED)
1240 return 1;
1241
1242 ioc->base_cmds.status |= MPT3_CMD_COMPLETE;
1243 if (mpi_reply) {
1244 ioc->base_cmds.status |= MPT3_CMD_REPLY_VALID;
1245 memcpy(ioc->base_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
1246 }
1247 ioc->base_cmds.status &= ~MPT3_CMD_PENDING;
1248
1249 complete(&ioc->base_cmds.done);
1250 return 1;
1251 }
1252
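/**
 * _base_async_event - main callback handler for firmware async events
 * @ioc: per adapter object
 * @msix_index: MSIX table index supplied by the OS
 * @reply: reply message frame (lower 32bit addr)
 *
 * Return: 1 meaning mf should be freed from _base_interrupt,
 *	0 means the mf is freed from this function.
 */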
1263 static u8
1264 _base_async_event(struct MPT3SAS_ADAPTER *ioc, u8 msix_index, u32 reply)
1265 {
1266 Mpi2EventNotificationReply_t *mpi_reply;
1267 Mpi2EventAckRequest_t *ack_request;
1268 u16 smid;
1269 struct _event_ack_list *delayed_event_ack;
1270
1271 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
1272 if (!mpi_reply)
1273 return 1;
1274 if (mpi_reply->Function != MPI2_FUNCTION_EVENT_NOTIFICATION)
1275 return 1;
1276
1277 _base_display_event_data(ioc, mpi_reply);
1278
1279 if (!(mpi_reply->AckRequired & MPI2_EVENT_NOTIFICATION_ACK_REQUIRED))
1280 goto out;
1281 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
1282 if (!smid) {
1283 delayed_event_ack = kzalloc(sizeof(*delayed_event_ack),
1284 GFP_ATOMIC);
1285 if (!delayed_event_ack)
1286 goto out;
1287 INIT_LIST_HEAD(&delayed_event_ack->list);
1288 delayed_event_ack->Event = mpi_reply->Event;
1289 delayed_event_ack->EventContext = mpi_reply->EventContext;
1290 list_add_tail(&delayed_event_ack->list,
1291 &ioc->delayed_event_ack_list);
1292 dewtprintk(ioc,
1293 ioc_info(ioc, "DELAYED: EVENT ACK: event (0x%04x)\n",
1294 le16_to_cpu(mpi_reply->Event)));
1295 goto out;
1296 }
1297
1298 ack_request = mpt3sas_base_get_msg_frame(ioc, smid);
1299 memset(ack_request, 0, sizeof(Mpi2EventAckRequest_t));
1300 ack_request->Function = MPI2_FUNCTION_EVENT_ACK;
1301 ack_request->Event = mpi_reply->Event;
1302 ack_request->EventContext = mpi_reply->EventContext;
1303 ack_request->VF_ID = 0;
1304 ack_request->VP_ID = 0;
1305 ioc->put_smid_default(ioc, smid);
1306
1307 out:
1308
1309
1310 mpt3sas_scsih_event_callback(ioc, msix_index, reply);
1311
1312
1313 mpt3sas_ctl_event_callback(ioc, msix_index, reply);
1314
1315 return 1;
1316 }
1317
1318 static struct scsiio_tracker *
1319 _get_st_from_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1320 {
1321 struct scsi_cmnd *cmd;
1322
1323 if (WARN_ON(!smid) ||
1324 WARN_ON(smid >= ioc->hi_priority_smid))
1325 return NULL;
1326
1327 cmd = mpt3sas_scsih_scsi_lookup_get(ioc, smid);
1328 if (cmd)
1329 return scsi_cmd_priv(cmd);
1330
1331 return NULL;
1332 }
1333
1334
1335
1336
1337
1338
1339
1340
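/**
 * _base_get_cb_idx - obtain the callback index
 * @ioc: per adapter object
 * @smid: system request message index
 *
 * Return: callback index.
 */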
1341 static u8
1342 _base_get_cb_idx(struct MPT3SAS_ADAPTER *ioc, u16 smid)
1343 {
1344 int i;
1345 u16 ctl_smid = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT + 1;
1346 u8 cb_idx = 0xFF;
1347
1348 if (smid < ioc->hi_priority_smid) {
1349 struct scsiio_tracker *st;
1350
1351 if (smid < ctl_smid) {
1352 st = _get_st_from_smid(ioc, smid);
1353 if (st)
1354 cb_idx = st->cb_idx;
1355 } else if (smid == ctl_smid)
1356 cb_idx = ioc->ctl_cb_idx;
1357 } else if (smid < ioc->internal_smid) {
1358 i = smid - ioc->hi_priority_smid;
1359 cb_idx = ioc->hpr_lookup[i].cb_idx;
1360 } else if (smid <= ioc->hba_queue_depth) {
1361 i = smid - ioc->internal_smid;
1362 cb_idx = ioc->internal_lookup[i].cb_idx;
1363 }
1364 return cb_idx;
1365 }
1366
1367
1368
1369
1370
1371
1372
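/**
 * _base_mask_interrupts - disable interrupts
 * @ioc: per adapter object
 *
 * Disables the ResetIRQ, Reply and Doorbell interrupts.
 */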
1373 static void
1374 _base_mask_interrupts(struct MPT3SAS_ADAPTER *ioc)
1375 {
1376 u32 him_register;
1377
1378 ioc->mask_interrupts = 1;
1379 him_register = ioc->base_readl(&ioc->chip->HostInterruptMask);
1380 him_register |= MPI2_HIM_DIM + MPI2_HIM_RIM + MPI2_HIM_RESET_IRQ_MASK;
1381 writel(him_register, &ioc->chip->HostInterruptMask);
1382 ioc->base_readl(&ioc->chip->HostInterruptMask);
1383 }
1384
1385
1386
1387
1388
1389
1390
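/**
 * _base_unmask_interrupts - enable interrupts
 * @ioc: per adapter object
 *
 * Enables only the Reply interrupts.
 */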
1391 static void
1392 _base_unmask_interrupts(struct MPT3SAS_ADAPTER *ioc)
1393 {
1394 u32 him_register;
1395
1396 him_register = ioc->base_readl(&ioc->chip->HostInterruptMask);
1397 him_register &= ~MPI2_HIM_RIM;
1398 writel(him_register, &ioc->chip->HostInterruptMask);
1399 ioc->mask_interrupts = 0;
1400 }
1401
1402 union reply_descriptor {
1403 u64 word;
1404 struct {
1405 u32 low;
1406 u32 high;
1407 } u;
1408 };
1409
1410 static u32 base_mod64(u64 dividend, u32 divisor)
1411 {
1412 u32 remainder;
1413
1414 if (!divisor)
1415 pr_err("mpt3sas: DIVISOR is zero, in div fn\n");
1416 remainder = do_div(dividend, divisor);
1417 return remainder;
1418 }
1419
1420
1421
1422
1423
1424
1425
1426
1427
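/**
 * _base_process_reply_queue - process reply descriptors from the reply
 *	descriptor post queue
 * @reply_q: per IRQ's reply queue object
 *
 * Return: number of reply descriptors processed from the reply
 *	descriptor queue.
 */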
1428 static int
1429 _base_process_reply_queue(struct adapter_reply_queue *reply_q)
1430 {
1431 union reply_descriptor rd;
1432 u64 completed_cmds;
1433 u8 request_descript_type;
1434 u16 smid;
1435 u8 cb_idx;
1436 u32 reply;
1437 u8 msix_index = reply_q->msix_index;
1438 struct MPT3SAS_ADAPTER *ioc = reply_q->ioc;
1439 Mpi2ReplyDescriptorsUnion_t *rpf;
1440 u8 rc;
1441
1442 completed_cmds = 0;
1443 if (!atomic_add_unless(&reply_q->busy, 1, 1))
1444 return completed_cmds;
1445
1446 rpf = &reply_q->reply_post_free[reply_q->reply_post_host_index];
1447 request_descript_type = rpf->Default.ReplyFlags
1448 & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
1449 if (request_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED) {
1450 atomic_dec(&reply_q->busy);
1451 return completed_cmds;
1452 }
1453
1454 cb_idx = 0xFF;
1455 do {
1456 rd.word = le64_to_cpu(rpf->Words);
1457 if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX)
1458 goto out;
1459 reply = 0;
1460 smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1);
1461 if (request_descript_type ==
1462 MPI25_RPY_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO_SUCCESS ||
1463 request_descript_type ==
1464 MPI2_RPY_DESCRIPT_FLAGS_SCSI_IO_SUCCESS ||
1465 request_descript_type ==
1466 MPI26_RPY_DESCRIPT_FLAGS_PCIE_ENCAPSULATED_SUCCESS) {
1467 cb_idx = _base_get_cb_idx(ioc, smid);
1468 if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
1469 (likely(mpt_callbacks[cb_idx] != NULL))) {
1470 rc = mpt_callbacks[cb_idx](ioc, smid,
1471 msix_index, 0);
1472 if (rc)
1473 mpt3sas_base_free_smid(ioc, smid);
1474 }
1475 } else if (request_descript_type ==
1476 MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
1477 reply = le32_to_cpu(
1478 rpf->AddressReply.ReplyFrameAddress);
1479 if (reply > ioc->reply_dma_max_address ||
1480 reply < ioc->reply_dma_min_address)
1481 reply = 0;
1482 if (smid) {
1483 cb_idx = _base_get_cb_idx(ioc, smid);
1484 if ((likely(cb_idx < MPT_MAX_CALLBACKS)) &&
1485 (likely(mpt_callbacks[cb_idx] != NULL))) {
1486 rc = mpt_callbacks[cb_idx](ioc, smid,
1487 msix_index, reply);
1488 if (reply)
1489 _base_display_reply_info(ioc,
1490 smid, msix_index, reply);
1491 if (rc)
1492 mpt3sas_base_free_smid(ioc,
1493 smid);
1494 }
1495 } else {
1496 _base_async_event(ioc, msix_index, reply);
1497 }
1498
1499
1500 if (reply) {
1501 ioc->reply_free_host_index =
1502 (ioc->reply_free_host_index ==
1503 (ioc->reply_free_queue_depth - 1)) ?
1504 0 : ioc->reply_free_host_index + 1;
1505 ioc->reply_free[ioc->reply_free_host_index] =
1506 cpu_to_le32(reply);
1507 if (ioc->is_mcpu_endpoint)
1508 _base_clone_reply_to_sys_mem(ioc,
1509 reply,
1510 ioc->reply_free_host_index);
1511 writel(ioc->reply_free_host_index,
1512 &ioc->chip->ReplyFreeHostIndex);
1513 }
1514 }
1515
1516 rpf->Words = cpu_to_le64(ULLONG_MAX);
1517 reply_q->reply_post_host_index =
1518 (reply_q->reply_post_host_index ==
1519 (ioc->reply_post_queue_depth - 1)) ? 0 :
1520 reply_q->reply_post_host_index + 1;
1521 request_descript_type =
1522 reply_q->reply_post_free[reply_q->reply_post_host_index].
1523 Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
1524 completed_cmds++;
1525
1526
1527
1528
1529
1530 if (!base_mod64(completed_cmds, ioc->thresh_hold)) {
1531 if (ioc->combined_reply_queue) {
1532 writel(reply_q->reply_post_host_index |
1533 ((msix_index & 7) <<
1534 MPI2_RPHI_MSIX_INDEX_SHIFT),
1535 ioc->replyPostRegisterIndex[msix_index/8]);
1536 } else {
1537 writel(reply_q->reply_post_host_index |
1538 (msix_index <<
1539 MPI2_RPHI_MSIX_INDEX_SHIFT),
1540 &ioc->chip->ReplyPostHostIndex);
1541 }
1542 if (!reply_q->irq_poll_scheduled) {
1543 reply_q->irq_poll_scheduled = true;
1544 irq_poll_sched(&reply_q->irqpoll);
1545 }
1546 atomic_dec(&reply_q->busy);
1547 return completed_cmds;
1548 }
1549 if (request_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
1550 goto out;
1551 if (!reply_q->reply_post_host_index)
1552 rpf = reply_q->reply_post_free;
1553 else
1554 rpf++;
1555 } while (1);
1556
1557 out:
1558
1559 if (!completed_cmds) {
1560 atomic_dec(&reply_q->busy);
1561 return completed_cmds;
1562 }
1563
1564 if (ioc->is_warpdrive) {
1565 writel(reply_q->reply_post_host_index,
1566 ioc->reply_post_host_index[msix_index]);
1567 atomic_dec(&reply_q->busy);
1568 return completed_cmds;
1569 }
1585
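	/* Update the Reply Post Host Index.
	 * For HBAs supporting the combined reply queue feature, write the
	 * new reply host index to the (msix_index / 8)th Supplemental Reply
	 * Post Host Index register, encoding (msix_index % 8) in the
	 * MSIxIndex field, since each supplemental register serves 8 MSI-X
	 * vectors. For other HBAs, write the new reply host index and
	 * msix_index directly to the Reply Post Host Index register.
	 */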
1586 if (ioc->combined_reply_queue)
1587 writel(reply_q->reply_post_host_index | ((msix_index & 7) <<
1588 MPI2_RPHI_MSIX_INDEX_SHIFT),
1589 ioc->replyPostRegisterIndex[msix_index/8]);
1590 else
1591 writel(reply_q->reply_post_host_index | (msix_index <<
1592 MPI2_RPHI_MSIX_INDEX_SHIFT),
1593 &ioc->chip->ReplyPostHostIndex);
1594 atomic_dec(&reply_q->busy);
1595 return completed_cmds;
1596 }
1597
1598
1599
1600
1601
1602
1603
1604
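/**
 * _base_interrupt - MPT adapter (IOC) specific interrupt handler
 * @irq: irq number (not used)
 * @bus_id: bus identifier cookie == pointer to the reply queue object
 *
 * Return: IRQ_HANDLED if processed, else IRQ_NONE.
 */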
1605 static irqreturn_t
1606 _base_interrupt(int irq, void *bus_id)
1607 {
1608 struct adapter_reply_queue *reply_q = bus_id;
1609 struct MPT3SAS_ADAPTER *ioc = reply_q->ioc;
1610
1611 if (ioc->mask_interrupts)
1612 return IRQ_NONE;
1613 if (reply_q->irq_poll_scheduled)
1614 return IRQ_HANDLED;
1615 return ((_base_process_reply_queue(reply_q) > 0) ?
1616 IRQ_HANDLED : IRQ_NONE);
1617 }
1618
1619
1620
1621
1622
1623
1624
1625
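/**
 * _base_irqpoll - IRQ poll callback handler
 * @irqpoll: irq_poll object
 * @budget: irq poll weight
 *
 * Return: number of reply descriptors processed.
 */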
1626 static int
1627 _base_irqpoll(struct irq_poll *irqpoll, int budget)
1628 {
1629 struct adapter_reply_queue *reply_q;
1630 int num_entries = 0;
1631
1632 reply_q = container_of(irqpoll, struct adapter_reply_queue,
1633 irqpoll);
1634 if (reply_q->irq_line_enable) {
1635 disable_irq(reply_q->os_irq);
1636 reply_q->irq_line_enable = false;
1637 }
1638 num_entries = _base_process_reply_queue(reply_q);
1639 if (num_entries < budget) {
1640 irq_poll_complete(irqpoll);
1641 reply_q->irq_poll_scheduled = false;
1642 reply_q->irq_line_enable = true;
1643 enable_irq(reply_q->os_irq);
1644 }
1645
1646 return num_entries;
1647 }
1648
1649
1650
1651
1652
1653
1654
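/**
 * _base_init_irqpolls - initialize IRQ polls for all reply queues
 * @ioc: per adapter object
 */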
1655 static void
1656 _base_init_irqpolls(struct MPT3SAS_ADAPTER *ioc)
1657 {
1658 struct adapter_reply_queue *reply_q, *next;
1659
1660 if (list_empty(&ioc->reply_queue_list))
1661 return;
1662
1663 list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
1664 irq_poll_init(&reply_q->irqpoll,
1665 ioc->hba_queue_depth/4, _base_irqpoll);
1666 reply_q->irq_poll_scheduled = false;
1667 reply_q->irq_line_enable = true;
1668 reply_q->os_irq = pci_irq_vector(ioc->pdev,
1669 reply_q->msix_index);
1670 }
1671 }
1672
1673
1674
1675
1676
1677
1678
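/**
 * _base_is_controller_msix_enabled - does the controller support
 *	multiple reply queues
 * @ioc: per adapter object
 *
 * Return: nonzero if the controller is operating in multi reply queue
 * mode, 0 otherwise.
 */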
1679 static inline int
1680 _base_is_controller_msix_enabled(struct MPT3SAS_ADAPTER *ioc)
1681 {
1682 return (ioc->facts.IOCCapabilities &
1683 MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX) && ioc->msix_enable;
1684 }
1685
1686
1687
1688
1689
1690
1691
1692
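/**
 * mpt3sas_base_sync_reply_irqs - flush pending MSIX interrupts
 * @ioc: per adapter object
 * Context: non-ISR context
 *
 * Called when a Task Management request has completed.
 */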
1693 void
1694 mpt3sas_base_sync_reply_irqs(struct MPT3SAS_ADAPTER *ioc)
1695 {
1696 struct adapter_reply_queue *reply_q;
1697
1698
1699
1700
1701 if (!_base_is_controller_msix_enabled(ioc))
1702 return;
1703
1704 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
1705 if (ioc->shost_recovery || ioc->remove_host ||
1706 ioc->pci_error_recovery)
1707 return;
1708
1709 if (reply_q->msix_index == 0)
1710 continue;
1711 if (reply_q->irq_poll_scheduled) {
1712
1713
1714
1715 irq_poll_disable(&reply_q->irqpoll);
1716 irq_poll_enable(&reply_q->irqpoll);
1717 reply_q->irq_poll_scheduled = false;
1718 reply_q->irq_line_enable = true;
1719 enable_irq(reply_q->os_irq);
1720 continue;
1721 }
1722 synchronize_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index));
1723 }
1724 }
1725
1726
1727
1728
1729
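/**
 * mpt3sas_base_release_callback_handler - clear interrupt callback handler
 * @cb_idx: callback index
 */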
1730 void
1731 mpt3sas_base_release_callback_handler(u8 cb_idx)
1732 {
1733 mpt_callbacks[cb_idx] = NULL;
1734 }
1735
1736
1737
1738
1739
1740
1741
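/**
 * mpt3sas_base_register_callback_handler - obtain an index for the
 *	interrupt callback handler
 * @cb_func: callback function
 *
 * Return: assigned callback index (searched from the end of the
 * mpt_callbacks table).
 */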
1742 u8
1743 mpt3sas_base_register_callback_handler(MPT_CALLBACK cb_func)
1744 {
1745 u8 cb_idx;
1746
1747 for (cb_idx = MPT_MAX_CALLBACKS-1; cb_idx; cb_idx--)
1748 if (mpt_callbacks[cb_idx] == NULL)
1749 break;
1750
1751 mpt_callbacks[cb_idx] = cb_func;
1752 return cb_idx;
1753 }
1754
1755
1756
1757
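/**
 * mpt3sas_base_initialize_callback_handler - initialize the interrupt
 *	callback handler table
 */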
1758 void
1759 mpt3sas_base_initialize_callback_handler(void)
1760 {
1761 u8 cb_idx;
1762
1763 for (cb_idx = 0; cb_idx < MPT_MAX_CALLBACKS; cb_idx++)
1764 mpt3sas_base_release_callback_handler(cb_idx);
1765 }
1766
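/**
 * _base_build_zero_len_sge - build zero length sg entry
 * @ioc: per adapter object
 * @paddr: virtual address for SGE
 *
 * Create a zero length scatter gather entry to ensure the IOC hardware
 * has something to use if the target device goes brain dead and tries
 * to send data even when none is asked for.
 */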
1777 static void
1778 _base_build_zero_len_sge(struct MPT3SAS_ADAPTER *ioc, void *paddr)
1779 {
1780 u32 flags_length = (u32)((MPI2_SGE_FLAGS_LAST_ELEMENT |
1781 MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST |
1782 MPI2_SGE_FLAGS_SIMPLE_ELEMENT) <<
1783 MPI2_SGE_FLAGS_SHIFT);
1784 ioc->base_add_sg_single(paddr, flags_length, -1);
1785 }
1786
1787
1788
1789
1790
1791
1792
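/**
 * _base_add_sg_single_32 - place a simple 32 bit SGE at the address
 *	pointed to by paddr
 * @paddr: virtual address for SGE
 * @flags_length: SGE flags and data transfer length
 * @dma_addr: physical address
 */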
1793 static void
1794 _base_add_sg_single_32(void *paddr, u32 flags_length, dma_addr_t dma_addr)
1795 {
1796 Mpi2SGESimple32_t *sgel = paddr;
1797
1798 flags_length |= (MPI2_SGE_FLAGS_32_BIT_ADDRESSING |
1799 MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
1800 sgel->FlagsLength = cpu_to_le32(flags_length);
1801 sgel->Address = cpu_to_le32(dma_addr);
1802 }
1803
1804
1805
1806
1807
1808
1809
1810
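/**
 * _base_add_sg_single_64 - place a simple 64 bit SGE at the address
 *	pointed to by paddr
 * @paddr: virtual address for SGE
 * @flags_length: SGE flags and data transfer length
 * @dma_addr: physical address
 */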
1811 static void
1812 _base_add_sg_single_64(void *paddr, u32 flags_length, dma_addr_t dma_addr)
1813 {
1814 Mpi2SGESimple64_t *sgel = paddr;
1815
1816 flags_length |= (MPI2_SGE_FLAGS_64_BIT_ADDRESSING |
1817 MPI2_SGE_FLAGS_SYSTEM_ADDRESS) << MPI2_SGE_FLAGS_SHIFT;
1818 sgel->FlagsLength = cpu_to_le32(flags_length);
1819 sgel->Address = cpu_to_le64(dma_addr);
1820 }
1821
1822
1823
1824
1825
1826
1827
1828
1829
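/**
 * _base_get_chain_buffer_tracker - obtain chain tracker
 * @ioc: per adapter object
 * @scmd: SCSI command associated with the chain lookup
 *
 * Return: chain tracker from the chain_lookup table, keyed by smid and
 * the smid's chain_offset, or NULL if all chains for this smid are in
 * use.
 */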
1830 static struct chain_tracker *
1831 _base_get_chain_buffer_tracker(struct MPT3SAS_ADAPTER *ioc,
1832 struct scsi_cmnd *scmd)
1833 {
1834 struct chain_tracker *chain_req;
1835 struct scsiio_tracker *st = scsi_cmd_priv(scmd);
1836 u16 smid = st->smid;
1837 u8 chain_offset =
1838 atomic_read(&ioc->chain_lookup[smid - 1].chain_offset);
1839
1840 if (chain_offset == ioc->chains_needed_per_io)
1841 return NULL;
1842
1843 chain_req = &ioc->chain_lookup[smid - 1].chains_per_smid[chain_offset];
1844 atomic_inc(&ioc->chain_lookup[smid - 1].chain_offset);
1845 return chain_req;
1846 }
1847
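/**
 * _base_build_sg - build generic sg
 * @ioc: per adapter object
 * @psge: virtual address for SGE
 * @data_out_dma: physical address for WRITES
 * @data_out_sz: data xfer size for WRITES
 * @data_in_dma: physical address for READS
 * @data_in_sz: data xfer size for READS
 */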
1858 static void
1859 _base_build_sg(struct MPT3SAS_ADAPTER *ioc, void *psge,
1860 dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
1861 size_t data_in_sz)
1862 {
1863 u32 sgl_flags;
1864
1865 if (!data_out_sz && !data_in_sz) {
1866 _base_build_zero_len_sge(ioc, psge);
1867 return;
1868 }
1869
1870 if (data_out_sz && data_in_sz) {
1871
1872 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
1873 MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
1874 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1875 ioc->base_add_sg_single(psge, sgl_flags |
1876 data_out_sz, data_out_dma);
1877
1878
1879 psge += ioc->sge_size;
1880
1881
1882 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
1883 MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
1884 MPI2_SGE_FLAGS_END_OF_LIST);
1885 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1886 ioc->base_add_sg_single(psge, sgl_flags |
1887 data_in_sz, data_in_dma);
1888 } else if (data_out_sz) {
1889 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
1890 MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
1891 MPI2_SGE_FLAGS_END_OF_LIST | MPI2_SGE_FLAGS_HOST_TO_IOC);
1892 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1893 ioc->base_add_sg_single(psge, sgl_flags |
1894 data_out_sz, data_out_dma);
1895 } else if (data_in_sz) {
1896 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
1897 MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
1898 MPI2_SGE_FLAGS_END_OF_LIST);
1899 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
1900 ioc->base_add_sg_single(psge, sgl_flags |
1901 data_in_sz, data_in_dma);
1902 }
1903 }
1904
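/**
 * _base_build_nvme_prp - build the native NVMe PRP list for an NVMe
 *	Encapsulated request. The PRP list starts in PRP1 of the NVMe
 *	command: if the transfer fits in one page, PRP1 alone describes
 *	it; if it fits in two pages, PRP2 holds the second page address;
 *	otherwise PRP2 points to a PRP list page holding the remaining
 *	entries. Each PRP entry describes one page of data and only the
 *	first entry may carry a byte offset into the page.
 * @ioc: per adapter object
 * @smid: system request message index for the associated PRP pages
 * @nvme_encap_request: the NVMe request msg frame pointer
 * @data_out_dma: physical address for WRITES
 * @data_out_sz: data xfer size for WRITES
 * @data_in_dma: physical address for READS
 * @data_in_sz: data xfer size for READS
 */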
1961 static void
1962 _base_build_nvme_prp(struct MPT3SAS_ADAPTER *ioc, u16 smid,
1963 Mpi26NVMeEncapsulatedRequest_t *nvme_encap_request,
1964 dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
1965 size_t data_in_sz)
1966 {
1967 int prp_size = NVME_PRP_SIZE;
1968 __le64 *prp_entry, *prp1_entry, *prp2_entry;
1969 __le64 *prp_page;
1970 dma_addr_t prp_entry_dma, prp_page_dma, dma_addr;
1971 u32 offset, entry_len;
1972 u32 page_mask_result, page_mask;
1973 size_t length;
1974 struct mpt3sas_nvme_cmd *nvme_cmd =
1975 (void *)nvme_encap_request->NVMe_Command;
1976
1977
1978
1979
1980
1981 if (!data_in_sz && !data_out_sz)
1982 return;
1983 prp1_entry = &nvme_cmd->prp1;
1984 prp2_entry = &nvme_cmd->prp2;
1985 prp_entry = prp1_entry;
1986
1987
1988
1989
1990 prp_page = (__le64 *)mpt3sas_base_get_pcie_sgl(ioc, smid);
1991 prp_page_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid);
1992
1993
1994
1995
1996
1997 page_mask = ioc->page_size - 1;
1998 page_mask_result = (uintptr_t)((u8 *)prp_page + prp_size) & page_mask;
1999 if (!page_mask_result) {
2000
2001 prp_page = (__le64 *)((u8 *)prp_page + prp_size);
2002 prp_page_dma = prp_page_dma + prp_size;
2003 }
2004
2005
2006
2007
2008
2009 prp_entry_dma = prp_page_dma;
2010
2011
2012 if (data_in_sz) {
2013 dma_addr = data_in_dma;
2014 length = data_in_sz;
2015 } else {
2016 dma_addr = data_out_dma;
2017 length = data_out_sz;
2018 }
2019
2020
2021 while (length) {
2022
2023
2024
2025
2026 page_mask_result = (prp_entry_dma + prp_size) & page_mask;
2027 if (!page_mask_result) {
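			/*
			 * This is the last entry in a PRP list, so put a
			 * PRP list pointer here: bump the PRP memory
			 * pointer to the next (contiguous) page, set this
			 * PRP entry to point at that page, and continue
			 * filling entries from the start of the new page.
			 */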
2040 prp_entry_dma++;
2041 *prp_entry = cpu_to_le64(prp_entry_dma);
2042 prp_entry++;
2043 }
2044
2045
2046 offset = dma_addr & page_mask;
2047 entry_len = ioc->page_size - offset;
2048
2049 if (prp_entry == prp1_entry) {
2050
2051
2052
2053
2054 *prp1_entry = cpu_to_le64(dma_addr);
2055
2056
2057
2058
2059
2060 prp_entry = prp2_entry;
2061 } else if (prp_entry == prp2_entry) {
2062
2063
2064
2065
2066
2067 if (length > ioc->page_size) {
2068
2069
2070
2071
2072
2073
2074 *prp2_entry = cpu_to_le64(prp_entry_dma);
2075
2076
2077
2078
2079
2080 prp_entry = prp_page;
2081 } else {
2082
2083
2084
2085
2086 *prp2_entry = cpu_to_le64(dma_addr);
2087 }
2088 } else {
2089
2090
2091
2092
2093
2094
2095
2096 *prp_entry = cpu_to_le64(dma_addr);
2097 prp_entry++;
2098 prp_entry_dma++;
2099 }
2100
2101
2102
2103
2104
2105 dma_addr += entry_len;
2106
2107
2108 if (entry_len > length)
2109 length = 0;
2110 else
2111 length -= entry_len;
2112 }
2113 }
2114
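/**
 * base_make_prp_nvme - prepare PRPs (Physical Region Pages) -
 *	SGLs specific to NVMe drives only
 * @ioc: per adapter object
 * @scmd: SCSI command from the mid-layer
 * @mpi_request: mpi request
 * @smid: msg index
 * @sge_count: scatter gather element count
 */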
2128 static void
2129 base_make_prp_nvme(struct MPT3SAS_ADAPTER *ioc,
2130 struct scsi_cmnd *scmd,
2131 Mpi25SCSIIORequest_t *mpi_request,
2132 u16 smid, int sge_count)
2133 {
2134 int sge_len, num_prp_in_chain = 0;
2135 Mpi25IeeeSgeChain64_t *main_chain_element, *ptr_first_sgl;
2136 __le64 *curr_buff;
2137 dma_addr_t msg_dma, sge_addr, offset;
2138 u32 page_mask, page_mask_result;
2139 struct scatterlist *sg_scmd;
2140 u32 first_prp_len;
2141 int data_len = scsi_bufflen(scmd);
2142 u32 nvme_pg_size;
2143
2144 nvme_pg_size = max_t(u32, ioc->page_size, NVME_PRP_PAGE_SIZE);
2145
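	/*
	 * NVMe uses one PRP for each page (or partial page) of data. The
	 * OS sg_list entries must be split if they are longer than a page
	 * or cross a page boundary, and the last entry in each physical
	 * page of the PRP list must be a pointer to the next PRP list
	 * page.
	 *
	 * Note: the first PRP "entry" is actually placed in the first SGL
	 * entry of the main message in IEEE 64 format; the second entry is
	 * the chain element, and the remaining PRP entries are built in
	 * the contiguous PCIe buffer.
	 */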
2157 page_mask = nvme_pg_size - 1;
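
	/*
	 * A native SGL is needed: put a chain element in the main message
	 * frame that points to the first chain buffer. Note that the
	 * ChainOffset field must be 0 when using a chain pointer to a
	 * native SGL.
	 */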
2169 main_chain_element = (pMpi25IeeeSgeChain64_t)&mpi_request->SGL;
2170
2171
2172
2173
2174 main_chain_element = (Mpi25IeeeSgeChain64_t *)
2175 ((u8 *)main_chain_element + sizeof(MPI25_IEEE_SGE_CHAIN64));
2176
2177
2178
2179
2180
2181
2182
2183 curr_buff = mpt3sas_base_get_pcie_sgl(ioc, smid);
2184 msg_dma = mpt3sas_base_get_pcie_sgl_dma(ioc, smid);
2185
2186 main_chain_element->Address = cpu_to_le64(msg_dma);
2187 main_chain_element->NextChainOffset = 0;
2188 main_chain_element->Flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2189 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
2190 MPI26_IEEE_SGE_FLAGS_NSF_NVME_PRP;
2191
2192
2193 ptr_first_sgl = (pMpi25IeeeSgeChain64_t)&mpi_request->SGL;
2194 sg_scmd = scsi_sglist(scmd);
2195 sge_addr = sg_dma_address(sg_scmd);
2196 sge_len = sg_dma_len(sg_scmd);
2197
2198 offset = sge_addr & page_mask;
2199 first_prp_len = nvme_pg_size - offset;
2200
2201 ptr_first_sgl->Address = cpu_to_le64(sge_addr);
2202 ptr_first_sgl->Length = cpu_to_le32(first_prp_len);
2203
2204 data_len -= first_prp_len;
2205
2206 if (sge_len > first_prp_len) {
2207 sge_addr += first_prp_len;
2208 sge_len -= first_prp_len;
2209 } else if (data_len && (sge_len == first_prp_len)) {
2210 sg_scmd = sg_next(sg_scmd);
2211 sge_addr = sg_dma_address(sg_scmd);
2212 sge_len = sg_dma_len(sg_scmd);
2213 }
2214
2215 for (;;) {
2216 offset = sge_addr & page_mask;
2217
2218
2219 page_mask_result = (uintptr_t)(curr_buff + 1) & page_mask;
2220 if (unlikely(!page_mask_result)) {
2221 scmd_printk(KERN_NOTICE,
2222 scmd, "page boundary curr_buff: 0x%p\n",
2223 curr_buff);
2224 msg_dma += 8;
2225 *curr_buff = cpu_to_le64(msg_dma);
2226 curr_buff++;
2227 num_prp_in_chain++;
2228 }
2229
2230 *curr_buff = cpu_to_le64(sge_addr);
2231 curr_buff++;
2232 msg_dma += 8;
2233 num_prp_in_chain++;
2234
2235 sge_addr += nvme_pg_size;
2236 sge_len -= nvme_pg_size;
2237 data_len -= nvme_pg_size;
2238
2239 if (data_len <= 0)
2240 break;
2241
2242 if (sge_len > 0)
2243 continue;
2244
2245 sg_scmd = sg_next(sg_scmd);
2246 sge_addr = sg_dma_address(sg_scmd);
2247 sge_len = sg_dma_len(sg_scmd);
2248 }
2249
2250 main_chain_element->Length =
2251 cpu_to_le32(num_prp_in_chain * sizeof(u64));
2252 return;
2253 }
2254
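/**
 * base_is_prp_possible - check whether native NVMe PRPs can be built
 *	for the transfer
 * @ioc: per adapter object
 * @pcie_device: points to the PCIe device's info
 * @scmd: scsi command
 * @sge_count: scatter gather element count
 *
 * Return: true if PRPs should be built, false if an IEEE SGL should be
 * used instead.
 */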
2255 static bool
2256 base_is_prp_possible(struct MPT3SAS_ADAPTER *ioc,
2257 struct _pcie_device *pcie_device, struct scsi_cmnd *scmd, int sge_count)
2258 {
2259 u32 data_length = 0;
2260 bool build_prp = true;
2261
2262 data_length = scsi_bufflen(scmd);
2263 if (pcie_device &&
2264 (mpt3sas_scsih_is_pcie_scsi_device(pcie_device->device_info))) {
2265 build_prp = false;
2266 return build_prp;
2267 }
2268
2269
2270
2271
2272 if ((data_length <= NVME_PRP_PAGE_SIZE*4) && (sge_count <= 2))
2273 build_prp = false;
2274
2275 return build_prp;
2276 }
2277
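/**
 * _base_check_pcie_native_sgl - called for PCIe end devices to
 *	determine if the driver needs to build a native SGL. If so, the
 *	native SGL is built in the contiguous buffers allocated
 *	especially for PCIe SGL creation; otherwise the caller builds a
 *	normal IEEE SGL.
 * @ioc: per adapter object
 * @mpi_request: mf request pointer
 * @smid: system request message index
 * @scmd: scsi command
 * @pcie_device: points to the PCIe device's info
 *
 * Return: 0 if a native SGL was built, 1 if no SGL was built.
 */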
2293 static int
2294 _base_check_pcie_native_sgl(struct MPT3SAS_ADAPTER *ioc,
2295 Mpi25SCSIIORequest_t *mpi_request, u16 smid, struct scsi_cmnd *scmd,
2296 struct _pcie_device *pcie_device)
2297 {
2298 int sges_left;
2299
2300
2301 sges_left = scsi_dma_map(scmd);
2302 if (sges_left < 0) {
2303 sdev_printk(KERN_ERR, scmd->device,
2304 "scsi_dma_map failed: request for %d bytes!\n",
2305 scsi_bufflen(scmd));
2306 return 1;
2307 }
2308
2309
2310 if (base_is_prp_possible(ioc, pcie_device,
2311 scmd, sges_left) == 0) {
2312
2313 goto out;
2314 }
2315
2316
2317
2318
2319 base_make_prp_nvme(ioc, scmd, mpi_request,
2320 smid, sges_left);
2321
2322 return 0;
2323 out:
2324 scsi_dma_unmap(scmd);
2325 return 1;
2326 }
2327
2328
2329
2330
2331
2332
2333
2334
2335
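/**
 * _base_add_sg_single_ieee - add sg element for IEEE format
 * @paddr: virtual address for SGE
 * @flags: SGE flags
 * @chain_offset: number of 128 byte elements from start of segment
 * @length: data transfer length
 * @dma_addr: physical address
 */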
2336 static void
2337 _base_add_sg_single_ieee(void *paddr, u8 flags, u8 chain_offset, u32 length,
2338 dma_addr_t dma_addr)
2339 {
2340 Mpi25IeeeSgeChain64_t *sgel = paddr;
2341
2342 sgel->Flags = flags;
2343 sgel->NextChainOffset = chain_offset;
2344 sgel->Length = cpu_to_le32(length);
2345 sgel->Address = cpu_to_le64(dma_addr);
2346 }
2347
2348
2349
2350
2351
2352
2353
2354
2355
2356
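/**
 * _base_build_zero_len_sge_ieee - build zero length sg entry for IEEE
 *	format
 * @ioc: per adapter object
 * @paddr: virtual address for SGE
 *
 * Create a zero length scatter gather entry to ensure the IOC hardware
 * has something to use if the target device goes brain dead and tries
 * to send data even when none is asked for.
 */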
2357 static void
2358 _base_build_zero_len_sge_ieee(struct MPT3SAS_ADAPTER *ioc, void *paddr)
2359 {
2360 u8 sgl_flags = (MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2361 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR |
2362 MPI25_IEEE_SGE_FLAGS_END_OF_LIST);
2363
2364 _base_add_sg_single_ieee(paddr, sgl_flags, 0, 0, -1);
2365 }
2366
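/**
 * _base_build_sg_scmd - main sg creation routine
 * @ioc: per adapter object
 * @scmd: scsi command
 * @smid: system request message index
 * @unused: unused pcie_device pointer
 * Context: none.
 *
 * The main routine that builds the scatter gather table from a given
 * scsi request sent via the .queuecommand main handler.
 *
 * Return: 0 on success, anything else on error.
 */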
2381 static int
2382 _base_build_sg_scmd(struct MPT3SAS_ADAPTER *ioc,
2383 struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *unused)
2384 {
2385 Mpi2SCSIIORequest_t *mpi_request;
2386 dma_addr_t chain_dma;
2387 struct scatterlist *sg_scmd;
2388 void *sg_local, *chain;
2389 u32 chain_offset;
2390 u32 chain_length;
2391 u32 chain_flags;
2392 int sges_left;
2393 u32 sges_in_segment;
2394 u32 sgl_flags;
2395 u32 sgl_flags_last_element;
2396 u32 sgl_flags_end_buffer;
2397 struct chain_tracker *chain_req;
2398
2399 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
2400
2401
2402 sgl_flags = MPI2_SGE_FLAGS_SIMPLE_ELEMENT;
2403 if (scmd->sc_data_direction == DMA_TO_DEVICE)
2404 sgl_flags |= MPI2_SGE_FLAGS_HOST_TO_IOC;
2405 sgl_flags_last_element = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT)
2406 << MPI2_SGE_FLAGS_SHIFT;
2407 sgl_flags_end_buffer = (sgl_flags | MPI2_SGE_FLAGS_LAST_ELEMENT |
2408 MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_END_OF_LIST)
2409 << MPI2_SGE_FLAGS_SHIFT;
2410 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
2411
2412 sg_scmd = scsi_sglist(scmd);
2413 sges_left = scsi_dma_map(scmd);
2414 if (sges_left < 0) {
2415 sdev_printk(KERN_ERR, scmd->device,
2416 "scsi_dma_map failed: request for %d bytes!\n",
2417 scsi_bufflen(scmd));
2418 return -ENOMEM;
2419 }
2420
2421 sg_local = &mpi_request->SGL;
2422 sges_in_segment = ioc->max_sges_in_main_message;
2423 if (sges_left <= sges_in_segment)
2424 goto fill_in_last_segment;
2425
2426 mpi_request->ChainOffset = (offsetof(Mpi2SCSIIORequest_t, SGL) +
2427 (sges_in_segment * ioc->sge_size))/4;
2428
2429
2430 while (sges_in_segment) {
2431 if (sges_in_segment == 1)
2432 ioc->base_add_sg_single(sg_local,
2433 sgl_flags_last_element | sg_dma_len(sg_scmd),
2434 sg_dma_address(sg_scmd));
2435 else
2436 ioc->base_add_sg_single(sg_local, sgl_flags |
2437 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2438 sg_scmd = sg_next(sg_scmd);
2439 sg_local += ioc->sge_size;
2440 sges_left--;
2441 sges_in_segment--;
2442 }
2443
2444
2445 chain_flags = MPI2_SGE_FLAGS_CHAIN_ELEMENT << MPI2_SGE_FLAGS_SHIFT;
2446 chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
2447 if (!chain_req)
2448 return -1;
2449 chain = chain_req->chain_buffer;
2450 chain_dma = chain_req->chain_buffer_dma;
2451 do {
2452 sges_in_segment = (sges_left <=
2453 ioc->max_sges_in_chain_message) ? sges_left :
2454 ioc->max_sges_in_chain_message;
2455 chain_offset = (sges_left == sges_in_segment) ?
2456 0 : (sges_in_segment * ioc->sge_size)/4;
2457 chain_length = sges_in_segment * ioc->sge_size;
2458 if (chain_offset) {
2459 chain_offset = chain_offset <<
2460 MPI2_SGE_CHAIN_OFFSET_SHIFT;
2461 chain_length += ioc->sge_size;
2462 }
2463 ioc->base_add_sg_single(sg_local, chain_flags | chain_offset |
2464 chain_length, chain_dma);
2465 sg_local = chain;
2466 if (!chain_offset)
2467 goto fill_in_last_segment;
2468
2469
2470 while (sges_in_segment) {
2471 if (sges_in_segment == 1)
2472 ioc->base_add_sg_single(sg_local,
2473 sgl_flags_last_element |
2474 sg_dma_len(sg_scmd),
2475 sg_dma_address(sg_scmd));
2476 else
2477 ioc->base_add_sg_single(sg_local, sgl_flags |
2478 sg_dma_len(sg_scmd),
2479 sg_dma_address(sg_scmd));
2480 sg_scmd = sg_next(sg_scmd);
2481 sg_local += ioc->sge_size;
2482 sges_left--;
2483 sges_in_segment--;
2484 }
2485
2486 chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
2487 if (!chain_req)
2488 return -1;
2489 chain = chain_req->chain_buffer;
2490 chain_dma = chain_req->chain_buffer_dma;
2491 } while (1);
2492
2493
2494 fill_in_last_segment:
2495
2496
2497 while (sges_left) {
2498 if (sges_left == 1)
2499 ioc->base_add_sg_single(sg_local, sgl_flags_end_buffer |
2500 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2501 else
2502 ioc->base_add_sg_single(sg_local, sgl_flags |
2503 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2504 sg_scmd = sg_next(sg_scmd);
2505 sg_local += ioc->sge_size;
2506 sges_left--;
2507 }
2508
2509 return 0;
2510 }
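/*
 * Editor's sketch (illustrative only): how the MPI2 "flags|length" word used
 * by the builder above is laid out. Per mpi2.h, MPI2_SGE_FLAGS_SHIFT is 24,
 * so the flags occupy the top byte and the element length the low 24 bits.
 */
static u32 example_mpi2_sge_flags_length(u8 flags, u32 length)
{
	return ((u32)flags << 24 /* MPI2_SGE_FLAGS_SHIFT */) |
	    (length & 0x00ffffff);
}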
2511 
2512 /**
2513  * _base_build_sg_scmd_ieee - main sg creation routine for IEEE format
2514  * @ioc: per adapter object
2515  * @scmd: scsi command
2516  * @smid: system request message index
2517  * @pcie_device: Pointer to pcie_device. If set, the pcie native sgl will be
2518  * constructed on need.
2519  * Context: none.
2520  *
2521  * The main routine that builds scatter gather table from a given
2522  * scsi request sent via the .queuecommand main handler.
2523  *
2524  * Return: 0 success, anything else error
2525  */
2526 static int
2527 _base_build_sg_scmd_ieee(struct MPT3SAS_ADAPTER *ioc,
2528 struct scsi_cmnd *scmd, u16 smid, struct _pcie_device *pcie_device)
2529 {
2530 Mpi25SCSIIORequest_t *mpi_request;
2531 dma_addr_t chain_dma;
2532 struct scatterlist *sg_scmd;
2533 void *sg_local, *chain;
2534 u32 chain_offset;
2535 u32 chain_length;
2536 int sges_left;
2537 u32 sges_in_segment;
2538 u8 simple_sgl_flags;
2539 u8 simple_sgl_flags_last;
2540 u8 chain_sgl_flags;
2541 struct chain_tracker *chain_req;
2542
2543 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
2544
2545
2546 simple_sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2547 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2548 simple_sgl_flags_last = simple_sgl_flags |
2549 MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
2550 chain_sgl_flags = MPI2_IEEE_SGE_FLAGS_CHAIN_ELEMENT |
2551 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2552
2553
2554 if ((pcie_device) && (_base_check_pcie_native_sgl(ioc, mpi_request,
2555 smid, scmd, pcie_device) == 0)) {
2556
2557 return 0;
2558 }
2559
2560 sg_scmd = scsi_sglist(scmd);
2561 sges_left = scsi_dma_map(scmd);
2562 if (sges_left < 0) {
2563 sdev_printk(KERN_ERR, scmd->device,
2564 "scsi_dma_map failed: request for %d bytes!\n",
2565 scsi_bufflen(scmd));
2566 return -ENOMEM;
2567 }
2568
2569 sg_local = &mpi_request->SGL;
2570 sges_in_segment = (ioc->request_sz -
2571 offsetof(Mpi25SCSIIORequest_t, SGL))/ioc->sge_size_ieee;
2572 if (sges_left <= sges_in_segment)
2573 goto fill_in_last_segment;
2574
2575 mpi_request->ChainOffset = (sges_in_segment - 1) +
2576 (offsetof(Mpi25SCSIIORequest_t, SGL)/ioc->sge_size_ieee);
2577
2578
2579 while (sges_in_segment > 1) {
2580 _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
2581 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2582 sg_scmd = sg_next(sg_scmd);
2583 sg_local += ioc->sge_size_ieee;
2584 sges_left--;
2585 sges_in_segment--;
2586 }
2587
2588
2589 chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
2590 if (!chain_req)
2591 return -1;
2592 chain = chain_req->chain_buffer;
2593 chain_dma = chain_req->chain_buffer_dma;
2594 do {
2595 sges_in_segment = (sges_left <=
2596 ioc->max_sges_in_chain_message) ? sges_left :
2597 ioc->max_sges_in_chain_message;
2598 chain_offset = (sges_left == sges_in_segment) ?
2599 0 : sges_in_segment;
2600 chain_length = sges_in_segment * ioc->sge_size_ieee;
2601 if (chain_offset)
2602 chain_length += ioc->sge_size_ieee;
2603 _base_add_sg_single_ieee(sg_local, chain_sgl_flags,
2604 chain_offset, chain_length, chain_dma);
2605
2606 sg_local = chain;
2607 if (!chain_offset)
2608 goto fill_in_last_segment;
2609
2610
2611 while (sges_in_segment) {
2612 _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
2613 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2614 sg_scmd = sg_next(sg_scmd);
2615 sg_local += ioc->sge_size_ieee;
2616 sges_left--;
2617 sges_in_segment--;
2618 }
2619
2620 chain_req = _base_get_chain_buffer_tracker(ioc, scmd);
2621 if (!chain_req)
2622 return -1;
2623 chain = chain_req->chain_buffer;
2624 chain_dma = chain_req->chain_buffer_dma;
2625 } while (1);
2626
2627
2628 fill_in_last_segment:
2629
2630
2631 while (sges_left > 0) {
2632 if (sges_left == 1)
2633 _base_add_sg_single_ieee(sg_local,
2634 simple_sgl_flags_last, 0, sg_dma_len(sg_scmd),
2635 sg_dma_address(sg_scmd));
2636 else
2637 _base_add_sg_single_ieee(sg_local, simple_sgl_flags, 0,
2638 sg_dma_len(sg_scmd), sg_dma_address(sg_scmd));
2639 sg_scmd = sg_next(sg_scmd);
2640 sg_local += ioc->sge_size_ieee;
2641 sges_left--;
2642 }
2643
2644 return 0;
2645 }
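/*
 * Editor's note: the two builders encode ChainOffset in different units. The
 * MPI2 builder above divides a byte offset by 4 (32-bit words), while the
 * IEEE builder counts whole IEEE SGEs (sge_size_ieee bytes, 16 on these
 * parts). A minimal sketch of the two conversions, assuming those sizes:
 */
static u8 example_chain_offset_mpi2(u32 byte_offset)
{
	return byte_offset / 4;   /* units: 32-bit words */
}

static u8 example_chain_offset_ieee(u32 byte_offset)
{
	return byte_offset / 16;  /* units: 16-byte IEEE SGEs */
}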
2646 
2647 /**
2648  * _base_build_sg_ieee - build generic sg for IEEE format
2649  * @ioc: per adapter object
2650  * @psge: virtual address for SGE
2651  * @data_out_dma: physical address for WRITES
2652  * @data_out_sz: data xfer size for WRITES
2653  * @data_in_dma: physical address for READS
2654  * @data_in_sz: data xfer size for READS
2655  */
2656 static void
2657 _base_build_sg_ieee(struct MPT3SAS_ADAPTER *ioc, void *psge,
2658 dma_addr_t data_out_dma, size_t data_out_sz, dma_addr_t data_in_dma,
2659 size_t data_in_sz)
2660 {
2661 u8 sgl_flags;
2662
2663 if (!data_out_sz && !data_in_sz) {
2664 _base_build_zero_len_sge_ieee(ioc, psge);
2665 return;
2666 }
2667
2668 if (data_out_sz && data_in_sz) {
2669 /* WRITE sgel first */
2670 sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2671 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2672 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
2673 data_out_dma);
2674
2675 /* incr sgel */
2676 psge += ioc->sge_size_ieee;
2677
2678 /* READ sgel last */
2679 sgl_flags |= MPI25_IEEE_SGE_FLAGS_END_OF_LIST;
2680 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
2681 data_in_dma);
2682 } else if (data_out_sz) {
2683 sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2684 MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
2685 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2686 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_out_sz,
2687 data_out_dma);
2688 } else if (data_in_sz) {
2689 sgl_flags = MPI2_IEEE_SGE_FLAGS_SIMPLE_ELEMENT |
2690 MPI25_IEEE_SGE_FLAGS_END_OF_LIST |
2691 MPI2_IEEE_SGE_FLAGS_SYSTEM_ADDR;
2692 _base_add_sg_single_ieee(psge, sgl_flags, 0, data_in_sz,
2693 data_in_dma);
2694 }
2695 }
2696
2697 #define convert_to_kb(x) ((x) << (PAGE_SHIFT - 10))
2698 
2699 /**
2700  * _base_config_dma_addressing - set dma addressing
2701  * @ioc: per adapter object
2702  * @pdev: PCI device struct
2703  *
2704  * Return: 0 for success, non-zero for failure.
2705  */
2706 static int
2707 _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
2708 {
2709 u64 required_mask, coherent_mask;
2710 struct sysinfo s;
2711 /* Set 63 bit DMA mask for all SAS3 and SAS35 controllers */
2712 int dma_mask = (ioc->hba_mpi_version_belonged > MPI2_VERSION) ? 63 : 64;
2713
2714 if (ioc->is_mcpu_endpoint)
2715 goto try_32bit;
2716
2717 required_mask = dma_get_required_mask(&pdev->dev);
2718 if (sizeof(dma_addr_t) == 4 || required_mask == 32)
2719 goto try_32bit;
2720
2721 if (ioc->dma_mask)
2722 coherent_mask = DMA_BIT_MASK(dma_mask);
2723 else
2724 coherent_mask = DMA_BIT_MASK(32);
2725
2726 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(dma_mask)) ||
2727 dma_set_coherent_mask(&pdev->dev, coherent_mask))
2728 goto try_32bit;
2729
2730 ioc->base_add_sg_single = &_base_add_sg_single_64;
2731 ioc->sge_size = sizeof(Mpi2SGESimple64_t);
2732 ioc->dma_mask = dma_mask;
2733 goto out;
2734
2735 try_32bit:
2736 if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
2737 return -ENODEV;
2738
2739 ioc->base_add_sg_single = &_base_add_sg_single_32;
2740 ioc->sge_size = sizeof(Mpi2SGESimple32_t);
2741 ioc->dma_mask = 32;
2742 out:
2743 si_meminfo(&s);
2744 ioc_info(ioc, "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
2745 ioc->dma_mask, convert_to_kb(s.totalram));
2746
2747 return 0;
2748 }
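/*
 * Editor's sketch: the DMA_BIT_MASK() arithmetic behind the dma_mask choice
 * above. SAS3 and newer parts are given a 63-bit mask, which keeps bit 63 of
 * every DMA address clear (the controller reportedly reserves the top address
 * bit); older parts get the full 64-bit mask. The kernel's DMA_BIT_MASK
 * expands to the same expression as this illustrative helper:
 */
static u64 example_dma_bit_mask(unsigned int n)
{
	return (n == 64) ? ~0ULL : ((1ULL << n) - 1);
}
/* example_dma_bit_mask(63) == 0x7fffffffffffffffULL */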
2749
2750 static int
2751 _base_change_consistent_dma_mask(struct MPT3SAS_ADAPTER *ioc,
2752 struct pci_dev *pdev)
2753 {
2754 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(ioc->dma_mask))) {
2755 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
2756 return -ENODEV;
2757 }
2758 return 0;
2759 }
2760 
2761 /**
2762  * _base_check_enable_msix - checks MSIX capability
2763  * @ioc: per adapter object
2764  *
2765  * Check to see if card is capable of MSIX, and if so,
2766  * get the msix vector count.
2767  */
2768 static int
2769 _base_check_enable_msix(struct MPT3SAS_ADAPTER *ioc)
2770 {
2771 int base;
2772 u16 message_control;
2773
2774 /* Check whether the controller is a SAS2008 B0; if so,
2775  * use IO-APIC instead of MSIX.
2776  */
2777 if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 &&
2778 ioc->pdev->revision == SAS2_PCI_DEVICE_B0_REVISION) {
2779 return -EINVAL;
2780 }
2781
2782 base = pci_find_capability(ioc->pdev, PCI_CAP_ID_MSIX);
2783 if (!base) {
2784 dfailprintk(ioc, ioc_info(ioc, "msix not supported\n"));
2785 return -EINVAL;
2786 }
2787
2788
2789
2790 if (ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2004 ||
2791 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2008 ||
2792 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_1 ||
2793 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_2 ||
2794 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2108_3 ||
2795 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_1 ||
2796 ioc->pdev->device == MPI2_MFGPAGE_DEVID_SAS2116_2)
2797 ioc->msix_vector_count = 1;
2798 else {
2799 pci_read_config_word(ioc->pdev, base + 2, &message_control);
2800 ioc->msix_vector_count = (message_control & 0x3FF) + 1;
2801 }
2802 dinitprintk(ioc, ioc_info(ioc, "msix is supported, vector_count(%d)\n",
2803 ioc->msix_vector_count));
2804 return 0;
2805 }
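/*
 * Editor's sketch: the Message Control decode above. The MSI-X capability's
 * Message Control word sits two bytes past the capability pointer, and its
 * Table Size field is encoded as N-1, hence the "+ 1". The 10-bit mask used
 * by the driver is sufficient for these HBAs' vector counts.
 */
static int example_msix_vector_count(u16 message_control)
{
	return (message_control & 0x3ff) + 1; /* Table Size is N-1 encoded */
}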
2806 
2807 /**
2808  * _base_free_irq - free irq
2809  * @ioc: per adapter object
2810  *
2811  * Freeing respective reply_queue from the list.
2812  */
2813 static void
2814 _base_free_irq(struct MPT3SAS_ADAPTER *ioc)
2815 {
2816 struct adapter_reply_queue *reply_q, *next;
2817
2818 if (list_empty(&ioc->reply_queue_list))
2819 return;
2820
2821 list_for_each_entry_safe(reply_q, next, &ioc->reply_queue_list, list) {
2822 list_del(&reply_q->list);
2823 if (ioc->smp_affinity_enable)
2824 irq_set_affinity_hint(pci_irq_vector(ioc->pdev,
2825 reply_q->msix_index), NULL);
2826 free_irq(pci_irq_vector(ioc->pdev, reply_q->msix_index),
2827 reply_q);
2828 kfree(reply_q);
2829 }
2830 }
2831 
2832 /**
2833  * _base_request_irq - request irq
2834  * @ioc: per adapter object
2835  * @index: msix index into vector table
2836  *
2837  * Inserting respective reply_queue into the list.
2838  */
2839 static int
2840 _base_request_irq(struct MPT3SAS_ADAPTER *ioc, u8 index)
2841 {
2842 struct pci_dev *pdev = ioc->pdev;
2843 struct adapter_reply_queue *reply_q;
2844 int r;
2845
2846 reply_q = kzalloc(sizeof(struct adapter_reply_queue), GFP_KERNEL);
2847 if (!reply_q) {
2848 ioc_err(ioc, "unable to allocate memory %zu!\n",
2849 sizeof(struct adapter_reply_queue));
2850 return -ENOMEM;
2851 }
2852 reply_q->ioc = ioc;
2853 reply_q->msix_index = index;
2854
2855 atomic_set(&reply_q->busy, 0);
2856 if (ioc->msix_enable)
2857 snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d-msix%d",
2858 ioc->driver_name, ioc->id, index);
2859 else
2860 snprintf(reply_q->name, MPT_NAME_LENGTH, "%s%d",
2861 ioc->driver_name, ioc->id);
2862 r = request_irq(pci_irq_vector(pdev, index), _base_interrupt,
2863 IRQF_SHARED, reply_q->name, reply_q);
2864 if (r) {
2865 pr_err("%s: unable to allocate interrupt %d!\n",
2866 reply_q->name, pci_irq_vector(pdev, index));
2867 kfree(reply_q);
2868 return -EBUSY;
2869 }
2870
2871 INIT_LIST_HEAD(&reply_q->list);
2872 list_add_tail(&reply_q->list, &ioc->reply_queue_list);
2873 return 0;
2874 }
2875 
2876 /**
2877  * _base_assign_reply_queues - assign a reply queue (msix index) to each cpu
2878  * @ioc: per adapter object
2879  *
2880  * Populates ioc->cpu_msix_table so the IO path can look up the reply
2881  * queue for the submitting CPU: PCI-layer affinity masks are used when
2882  * smp_affinity_enable is set, otherwise online CPUs are spread across
2883  * the reply queues round-robin.
2884  */
2885 static void
2886 _base_assign_reply_queues(struct MPT3SAS_ADAPTER *ioc)
2887 {
2888 unsigned int cpu, nr_cpus, nr_msix, index = 0;
2889 struct adapter_reply_queue *reply_q;
2890 int local_numa_node;
2891
2892 if (!_base_is_controller_msix_enabled(ioc))
2893 return;
2894
2895 if (ioc->msix_load_balance)
2896 return;
2897
2898 memset(ioc->cpu_msix_table, 0, ioc->cpu_msix_table_sz);
2899
2900 nr_cpus = num_online_cpus();
2901 nr_msix = ioc->reply_queue_count = min(ioc->reply_queue_count,
2902 ioc->facts.MaxMSIxVectors);
2903 if (!nr_msix)
2904 return;
2905
2906 if (ioc->smp_affinity_enable) {
2907 
2908 /*
2909  * Set IRQ affinity to the local NUMA node for the IRQs that
2910  * serve the high iops queues.
2911  */
2912 if (ioc->high_iops_queues) {
2913 local_numa_node = dev_to_node(&ioc->pdev->dev);
2914 for (index = 0; index < ioc->high_iops_queues;
2915 index++) {
2916 irq_set_affinity_hint(pci_irq_vector(ioc->pdev,
2917 index), cpumask_of_node(local_numa_node));
2918 }
2919 }
2920
2921 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
2922 const cpumask_t *mask;
2923
2924 if (reply_q->msix_index < ioc->high_iops_queues)
2925 continue;
2926
2927 mask = pci_irq_get_affinity(ioc->pdev,
2928 reply_q->msix_index);
2929 if (!mask) {
2930 ioc_warn(ioc, "no affinity for msi %x\n",
2931 reply_q->msix_index);
2932 goto fall_back;
2933 }
2934
2935 for_each_cpu_and(cpu, mask, cpu_online_mask) {
2936 if (cpu >= ioc->cpu_msix_table_sz)
2937 break;
2938 ioc->cpu_msix_table[cpu] = reply_q->msix_index;
2939 }
2940 }
2941 return;
2942 }
2943
2944 fall_back:
2945 cpu = cpumask_first(cpu_online_mask);
2946 nr_msix -= ioc->high_iops_queues;
2947 index = 0;
2948
2949 list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
2950 unsigned int i, group = nr_cpus / nr_msix;
2951
2952 if (reply_q->msix_index < ioc->high_iops_queues)
2953 continue;
2954
2955 if (cpu >= nr_cpus)
2956 break;
2957
2958 if (index < nr_cpus % nr_msix)
2959 group++;
2960
2961 for (i = 0 ; i < group ; i++) {
2962 ioc->cpu_msix_table[cpu] = reply_q->msix_index;
2963 cpu = cpumask_next(cpu, cpu_online_mask);
2964 }
2965 index++;
2966 }
2967 }
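/*
 * Editor's worked example for the fallback spread above: with nr_cpus = 14
 * and nr_msix = 4, group = 14 / 4 = 3 and 14 % 4 = 2, so the first two reply
 * queues take 4 CPUs each and the last two take 3, covering all 14 CPUs.
 * A minimal standalone sketch of the same arithmetic:
 */
static void example_spread_cpus(unsigned int nr_cpus, unsigned int nr_msix,
	u8 *cpu_to_msix /* nr_cpus entries */)
{
	unsigned int cpu = 0, index;

	for (index = 0; index < nr_msix && cpu < nr_cpus; index++) {
		unsigned int i, group = nr_cpus / nr_msix;

		if (index < nr_cpus % nr_msix)
			group++;
		for (i = 0; i < group; i++)
			cpu_to_msix[cpu++] = index;
	}
}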
2968 
2969 /**
2970  * _base_check_and_enable_high_iops_queues - enable high iops mode
2971  * @ioc: per adapter object
2972  * @hba_msix_vector_count: msix vectors supported by HBA
2973  *
2974  * Enable high iops queues only if
2975  *  - HBA is a SEA/AERO controller and
2976  *  - the HBA supports MPT3SAS_GEN35_MAX_MSIX_QUEUES MSI-X vectors and
2977  *  - at least MPT3SAS_HIGH_IOPS_REPLY_QUEUES CPUs are online and
2978  *  - the driver was loaded with the default max_msix_vectors parameter and
2979  *  - the system booted in non kdump mode.
2980  *
2981  * Return: nothing.
2982  */
2983 static void
2984 _base_check_and_enable_high_iops_queues(struct MPT3SAS_ADAPTER *ioc,
2985 int hba_msix_vector_count)
2986 {
2987 u16 lnksta, speed;
2988
2989 if (perf_mode == MPT_PERF_MODE_IOPS ||
2990 perf_mode == MPT_PERF_MODE_LATENCY) {
2991 ioc->high_iops_queues = 0;
2992 return;
2993 }
2994
2995 if (perf_mode == MPT_PERF_MODE_DEFAULT) {
2996
2997 pcie_capability_read_word(ioc->pdev, PCI_EXP_LNKSTA, &lnksta);
2998 speed = lnksta & PCI_EXP_LNKSTA_CLS;
2999
3000 if (speed < 0x4) {
3001 ioc->high_iops_queues = 0;
3002 return;
3003 }
3004 }
3005
3006 if (!reset_devices && ioc->is_aero_ioc &&
3007 hba_msix_vector_count == MPT3SAS_GEN35_MAX_MSIX_QUEUES &&
3008 num_online_cpus() >= MPT3SAS_HIGH_IOPS_REPLY_QUEUES &&
3009 max_msix_vectors == -1)
3010 ioc->high_iops_queues = MPT3SAS_HIGH_IOPS_REPLY_QUEUES;
3011 else
3012 ioc->high_iops_queues = 0;
3013 }
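/*
 * Editor's note: "speed < 0x4" above decodes PCI_EXP_LNKSTA_CLS, the Current
 * Link Speed field of the PCIe Link Status register (1 = 2.5, 2 = 5, 3 = 8,
 * 4 = 16 GT/s), so in the default perf mode high iops queues are reserved
 * for Gen4 (16 GT/s) or faster links. Sketch:
 */
static bool example_link_is_gen4_or_faster(u16 lnksta)
{
	return (lnksta & 0x000f /* PCI_EXP_LNKSTA_CLS */) >= 0x4;
}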
3014 
3015 /**
3016  * _base_disable_msix - disables msix
3017  * @ioc: per adapter object
3018  *
3019  */
3020 static void
3021 _base_disable_msix(struct MPT3SAS_ADAPTER *ioc)
3022 {
3023 if (!ioc->msix_enable)
3024 return;
3025 pci_free_irq_vectors(ioc->pdev);
3026 ioc->msix_enable = 0;
3027 }
3028 
3029 /**
3030  * _base_alloc_irq_vectors - allocate msix vectors
3031  * @ioc: per adapter object
3032  *
3033  */
3034 static int
3035 _base_alloc_irq_vectors(struct MPT3SAS_ADAPTER *ioc)
3036 {
3037 int i, irq_flags = PCI_IRQ_MSIX;
3038 struct irq_affinity desc = { .pre_vectors = ioc->high_iops_queues };
3039 struct irq_affinity *descp = &desc;
3040
3041 if (ioc->smp_affinity_enable)
3042 irq_flags |= PCI_IRQ_AFFINITY;
3043 else
3044 descp = NULL;
3045
3046 ioc_info(ioc, "high iops queues: %d, msix vector count: %d\n",
3047 ioc->high_iops_queues, ioc->msix_vector_count);
3048
3049 i = pci_alloc_irq_vectors_affinity(ioc->pdev,
3050 ioc->high_iops_queues,
3051 ioc->msix_vector_count, irq_flags, descp);
3052
3053 return i;
3054 }
3055 
3056 /**
3057  * _base_enable_msix - enables msix, failback to io_apic
3058  * @ioc: per adapter object
3059  *
3060  */
3061 static int
3062 _base_enable_msix(struct MPT3SAS_ADAPTER *ioc)
3063 {
3064 int r;
3065 int i, local_max_msix_vectors;
3066 u8 try_msix = 0;
3067
3068 ioc->msix_load_balance = false;
3069
3070 if (msix_disable == -1 || msix_disable == 0)
3071 try_msix = 1;
3072
3073 if (!try_msix)
3074 goto try_ioapic;
3075
3076 if (_base_check_enable_msix(ioc) != 0)
3077 goto try_ioapic;
3078
3079 ioc_info(ioc, "MSI-X vectors supported: %d\n", ioc->msix_vector_count);
3080 pr_info("\t no of cores: %d, max_msix_vectors: %d\n",
3081 ioc->cpu_count, max_msix_vectors);
3082 if (ioc->is_aero_ioc)
3083 _base_check_and_enable_high_iops_queues(ioc,
3084 ioc->msix_vector_count);
3085 ioc->reply_queue_count =
3086 min_t(int, ioc->cpu_count + ioc->high_iops_queues,
3087 ioc->msix_vector_count);
3088
3089 if (!ioc->rdpq_array_enable && max_msix_vectors == -1)
3090 local_max_msix_vectors = (reset_devices) ? 1 : 8;
3091 else
3092 local_max_msix_vectors = max_msix_vectors;
3093
3094 if (local_max_msix_vectors > 0)
3095 ioc->reply_queue_count = min_t(int, local_max_msix_vectors,
3096 ioc->reply_queue_count);
3097 else if (local_max_msix_vectors == 0)
3098 goto try_ioapic;
3099
3100 /*
3101  * Enable msix_load_balance only if combined reply queue mode is
3102  * disabled on SAS3 & above generation HBA devices.
3103  */
3104 if (!ioc->combined_reply_queue &&
3105 ioc->hba_mpi_version_belonged != MPI2_VERSION) {
3106 ioc->msix_load_balance = true;
3107 }
3108
3109 /*
3110  * smp affinity setting is meaningless when IO interrupts are
3111  * round-robined across all reply queues by msix load balance.
3112  */
3113 if (ioc->msix_load_balance)
3114 ioc->smp_affinity_enable = 0;
3115
3116 r = _base_alloc_irq_vectors(ioc);
3117 if (r < 0) {
3118 dfailprintk(ioc,
3119 ioc_info(ioc, "pci_alloc_irq_vectors failed (r=%d) !!!\n",
3120 r));
3121 goto try_ioapic;
3122 }
3123
3124 ioc->msix_enable = 1;
3125 ioc->reply_queue_count = r;
3126 for (i = 0; i < ioc->reply_queue_count; i++) {
3127 r = _base_request_irq(ioc, i);
3128 if (r) {
3129 _base_free_irq(ioc);
3130 _base_disable_msix(ioc);
3131 goto try_ioapic;
3132 }
3133 }
3134
3135 ioc_info(ioc, "High IOPs queues : %s\n",
3136 ioc->high_iops_queues ? "enabled" : "disabled");
3137
3138 return 0;
3139
3140 /* failback to io_apic interrupt routing */
3141 try_ioapic:
3142 ioc->high_iops_queues = 0;
3143 ioc_info(ioc, "High IOPs queues : disabled\n");
3144 ioc->reply_queue_count = 1;
3145 r = pci_alloc_irq_vectors(ioc->pdev, 1, 1, PCI_IRQ_LEGACY);
3146 if (r < 0) {
3147 dfailprintk(ioc,
3148 ioc_info(ioc, "pci_alloc_irq_vector(legacy) failed (r=%d) !!!\n",
3149 r));
3150 } else
3151 r = _base_request_irq(ioc, 0);
3152
3153 return r;
3154 }
3155 
3156 /**
3157  * mpt3sas_base_unmap_resources - free controller resources
3158  * @ioc: per adapter object
3159  */
3160 static void
3161 mpt3sas_base_unmap_resources(struct MPT3SAS_ADAPTER *ioc)
3162 {
3163 struct pci_dev *pdev = ioc->pdev;
3164
3165 dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
3166
3167 _base_free_irq(ioc);
3168 _base_disable_msix(ioc);
3169
3170 kfree(ioc->replyPostRegisterIndex);
3171 ioc->replyPostRegisterIndex = NULL;
3172
3173
3174 if (ioc->chip_phys) {
3175 iounmap(ioc->chip);
3176 ioc->chip_phys = 0;
3177 }
3178
3179 if (pci_is_enabled(pdev)) {
3180 pci_release_selected_regions(ioc->pdev, ioc->bars);
3181 pci_disable_pcie_error_reporting(pdev);
3182 pci_disable_device(pdev);
3183 }
3184 }
3185
3186 static int
3187 _base_diag_reset(struct MPT3SAS_ADAPTER *ioc);
3188 
3189 /**
3190  * _base_check_for_fault_and_issue_reset - check ioc for fault, if found
3191  *     issue diag reset.
3192  * @ioc: per adapter object
3193  *
3194  * Return: 0 for success, non-zero for failure.
3195  */
3196 static int
3197 _base_check_for_fault_and_issue_reset(struct MPT3SAS_ADAPTER *ioc)
3198 {
3199 u32 ioc_state;
3200 int rc = -EFAULT;
3201
3202 dinitprintk(ioc, pr_info("%s\n", __func__));
3203 if (ioc->pci_error_recovery)
3204 return 0;
3205 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
3206 dhsprintk(ioc, pr_info("%s: ioc_state(0x%08x)\n", __func__, ioc_state));
3207
3208 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
3209 mpt3sas_base_fault_info(ioc, ioc_state &
3210 MPI2_DOORBELL_DATA_MASK);
3211 rc = _base_diag_reset(ioc);
3212 }
3213
3214 return rc;
3215 }
3216 
3217 /**
3218  * mpt3sas_base_map_resources - map in controller resources (io/irq/memap)
3219  * @ioc: per adapter object
3220  *
3221  * Return: 0 for success, non-zero for failure.
3222  */
3223 int
3224 mpt3sas_base_map_resources(struct MPT3SAS_ADAPTER *ioc)
3225 {
3226 struct pci_dev *pdev = ioc->pdev;
3227 u32 memap_sz;
3228 u32 pio_sz;
3229 int i, r = 0, rc;
3230 u64 pio_chip = 0;
3231 phys_addr_t chip_phys = 0;
3232 struct adapter_reply_queue *reply_q;
3233
3234 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
3235
3236 ioc->bars = pci_select_bars(pdev, IORESOURCE_MEM);
3237 if (pci_enable_device_mem(pdev)) {
3238 ioc_warn(ioc, "pci_enable_device_mem: failed\n");
3239 ioc->bars = 0;
3240 return -ENODEV;
3241 }
3242
3243
3244 if (pci_request_selected_regions(pdev, ioc->bars,
3245 ioc->driver_name)) {
3246 ioc_warn(ioc, "pci_request_selected_regions: failed\n");
3247 ioc->bars = 0;
3248 r = -ENODEV;
3249 goto out_fail;
3250 }
3251
3252 /* AER (Advanced Error Reporting) hooks */
3253 pci_enable_pcie_error_reporting(pdev);
3254
3255 pci_set_master(pdev);
3256
3257
3258 if (_base_config_dma_addressing(ioc, pdev) != 0) {
3259 ioc_warn(ioc, "no suitable DMA mask for %s\n", pci_name(pdev));
3260 r = -ENODEV;
3261 goto out_fail;
3262 }
3263
3264 for (i = 0, memap_sz = 0, pio_sz = 0; (i < DEVICE_COUNT_RESOURCE) &&
3265 (!memap_sz || !pio_sz); i++) {
3266 if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
3267 if (pio_sz)
3268 continue;
3269 pio_chip = (u64)pci_resource_start(pdev, i);
3270 pio_sz = pci_resource_len(pdev, i);
3271 } else if (pci_resource_flags(pdev, i) & IORESOURCE_MEM) {
3272 if (memap_sz)
3273 continue;
3274 ioc->chip_phys = pci_resource_start(pdev, i);
3275 chip_phys = ioc->chip_phys;
3276 memap_sz = pci_resource_len(pdev, i);
3277 ioc->chip = ioremap(ioc->chip_phys, memap_sz);
3278 }
3279 }
3280
3281 if (ioc->chip == NULL) {
3282 ioc_err(ioc, "unable to map adapter memory or resource not found\n");
3283 r = -EINVAL;
3284 goto out_fail;
3285 }
3286
3287 _base_mask_interrupts(ioc);
3288
3289 r = _base_get_ioc_facts(ioc);
3290 if (r) {
3291 rc = _base_check_for_fault_and_issue_reset(ioc);
3292 if (rc || (_base_get_ioc_facts(ioc)))
3293 goto out_fail;
3294 }
3295
3296 if (!ioc->rdpq_array_enable_assigned) {
3297 ioc->rdpq_array_enable = ioc->rdpq_array_capable;
3298 ioc->rdpq_array_enable_assigned = 1;
3299 }
3300
3301 r = _base_enable_msix(ioc);
3302 if (r)
3303 goto out_fail;
3304
3305 if (!ioc->is_driver_loading)
3306 _base_init_irqpolls(ioc);
3307
3308
3309
3310 if (ioc->combined_reply_queue) {
3311
3312
3313
3314
3315
3316
3317 ioc->replyPostRegisterIndex = kcalloc(
3318 ioc->combined_reply_index_count,
3319 sizeof(resource_size_t *), GFP_KERNEL);
3320 if (!ioc->replyPostRegisterIndex) {
3321 dfailprintk(ioc,
3322 ioc_warn(ioc, "allocation for reply Post Register Index failed!!!\n"));
3323 r = -ENOMEM;
3324 goto out_fail;
3325 }
3326
3327 for (i = 0; i < ioc->combined_reply_index_count; i++) {
3328 ioc->replyPostRegisterIndex[i] = (resource_size_t *)
3329 ((u8 __force *)&ioc->chip->Doorbell +
3330 MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET +
3331 (i * MPT3_SUP_REPLY_POST_HOST_INDEX_REG_OFFSET));
3332 }
3333 }
3334
3335 if (ioc->is_warpdrive) {
3336 ioc->reply_post_host_index[0] = (resource_size_t __iomem *)
3337 &ioc->chip->ReplyPostHostIndex;
3338
3339 for (i = 1; i < ioc->cpu_msix_table_sz; i++)
3340 ioc->reply_post_host_index[i] =
3341 (resource_size_t __iomem *)
3342 ((u8 __iomem *)&ioc->chip->Doorbell + (0x4000 + ((i - 1)
3343 * 4)));
3344 }
3345
3346 list_for_each_entry(reply_q, &ioc->reply_queue_list, list)
3347 pr_info("%s: %s enabled: IRQ %d\n",
3348 reply_q->name,
3349 ioc->msix_enable ? "PCI-MSI-X" : "IO-APIC",
3350 pci_irq_vector(ioc->pdev, reply_q->msix_index));
3351
3352 ioc_info(ioc, "iomem(%pap), mapped(0x%p), size(%d)\n",
3353 &chip_phys, ioc->chip, memap_sz);
3354 ioc_info(ioc, "ioport(0x%016llx), size(%d)\n",
3355 (unsigned long long)pio_chip, pio_sz);
3356
3357
3358 pci_save_state(pdev);
3359 return 0;
3360
3361 out_fail:
3362 mpt3sas_base_unmap_resources(ioc);
3363 return r;
3364 }
3365 
3366 /**
3367  * mpt3sas_base_get_msg_frame - obtain request mf pointer
3368  * @ioc: per adapter object
3369  * @smid: system request message index (smid zero is invalid)
3370  *
3371  * Return: virt pointer to message frame.
3372  */
3373 void *
3374 mpt3sas_base_get_msg_frame(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3375 {
3376 return (void *)(ioc->request + (smid * ioc->request_sz));
3377 }
3378 
3379 /**
3380  * mpt3sas_base_get_sense_buffer - obtain a sense buffer virt addr
3381  * @ioc: per adapter object
3382  * @smid: system request message index
3383  *
3384  * Return: virt pointer to sense buffer.
3385  */
3386 void *
3387 mpt3sas_base_get_sense_buffer(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3388 {
3389 return (void *)(ioc->sense + ((smid - 1) * SCSI_SENSE_BUFFERSIZE));
3390 }
3391 
3392 /**
3393  * mpt3sas_base_get_sense_buffer_dma - obtain a sense buffer dma addr
3394  * @ioc: per adapter object
3395  * @smid: system request message index
3396  *
3397  * Return: phys pointer to the low 32bit address of the sense buffer.
3398  */
3399 __le32
3400 mpt3sas_base_get_sense_buffer_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3401 {
3402 return cpu_to_le32(ioc->sense_dma + ((smid - 1) *
3403 SCSI_SENSE_BUFFERSIZE));
3404 }
3405 
3406 /**
3407  * mpt3sas_base_get_pcie_sgl - obtain a PCIe SGL virt addr
3408  * @ioc: per adapter object
3409  * @smid: system request message index
3410  *
3411  * Return: virt pointer to a PCIe SGL.
3412  */
3413 void *
3414 mpt3sas_base_get_pcie_sgl(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3415 {
3416 return (void *)(ioc->pcie_sg_lookup[smid - 1].pcie_sgl);
3417 }
3418 
3419 /**
3420  * mpt3sas_base_get_pcie_sgl_dma - obtain a PCIe SGL dma addr
3421  * @ioc: per adapter object
3422  * @smid: system request message index
3423  *
3424  * Return: phys pointer to the address of the PCIe buffer.
3425  */
3426 dma_addr_t
3427 mpt3sas_base_get_pcie_sgl_dma(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3428 {
3429 return ioc->pcie_sg_lookup[smid - 1].pcie_sgl_dma;
3430 }
3431 
3432 /**
3433  * mpt3sas_base_get_reply_virt_addr - obtain reply frames virt address
3434  * @ioc: per adapter object
3435  * @phys_addr: lower 32 physical addr of the reply
3436  *
3437  * Converts 32bit lower physical addr into a virt address.
3438  */
3439 void *
3440 mpt3sas_base_get_reply_virt_addr(struct MPT3SAS_ADAPTER *ioc, u32 phys_addr)
3441 {
3442 if (!phys_addr)
3443 return NULL;
3444 return ioc->reply + (phys_addr - (u32)ioc->reply_dma);
3445 }
3446 
3447 /**
3448  * _base_get_msix_index - get the msix index
3449  * @ioc: per adapter object
3450  * @scmd: scsi_cmnd object
3451  *
3452  * Return: msix index of general reply queues,
3453  * i.e. reply queue on which IO request's reply
3454  * should be posted by the HBA firmware.
3455  */
3456 static inline u8
3457 _base_get_msix_index(struct MPT3SAS_ADAPTER *ioc,
3458 struct scsi_cmnd *scmd)
3459 {
3460
3461 if (ioc->msix_load_balance)
3462 return ioc->reply_queue_count ?
3463 base_mod64(atomic64_add_return(1,
3464 &ioc->total_io_cnt), ioc->reply_queue_count) : 0;
3465
3466 return ioc->cpu_msix_table[raw_smp_processor_id()];
3467 }
3468 
3469 /**
3470  * _base_get_high_iops_msix_index - get the msix index of
3471  * high iops queues
3472  * @ioc: per adapter object
3473  * @scmd: scsi_cmnd object
3474  *
3475  * Return: msix index of high iops reply queues,
3476  * i.e. high iops reply queue on which IO request's
3477  * reply should be posted by the HBA firmware.
3478  */
3479 static inline u8
3480 _base_get_high_iops_msix_index(struct MPT3SAS_ADAPTER *ioc,
3481 struct scsi_cmnd *scmd)
3482 {
3483 /*
3484  * Round robin the IO interrupts among the high iops reply queues
3485  * in batches of MPT3SAS_HIGH_IOPS_BATCH_COUNT, once the device's
3486  * outstanding IO count exceeds MPT3SAS_DEVICE_HIGH_IOPS_DEPTH.
3487  */
3488 if (atomic_read(&scmd->device->device_busy) >
3489 MPT3SAS_DEVICE_HIGH_IOPS_DEPTH)
3490 return base_mod64((
3491 atomic64_add_return(1, &ioc->high_iops_outstanding) /
3492 MPT3SAS_HIGH_IOPS_BATCH_COUNT),
3493 MPT3SAS_HIGH_IOPS_REPLY_QUEUES);
3494
3495 return _base_get_msix_index(ioc, scmd);
3496 }
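/*
 * Editor's worked example for the batching above, assuming
 * MPT3SAS_HIGH_IOPS_BATCH_COUNT = 16 and MPT3SAS_HIGH_IOPS_REPLY_QUEUES = 8:
 * dividing the global counter by 16 before the mod-8 keeps 16 consecutive
 * IOs on one reply queue before rotating, i.e. IOs 0..15 hit queue 0,
 * 16..31 hit queue 1, ..., 112..127 hit queue 7, then 128 wraps to queue 0.
 */
static u8 example_high_iops_queue(u64 io_counter)
{
	return (u8)((io_counter / 16) % 8);
}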
3497 
3498 /**
3499  * mpt3sas_base_get_smid - obtain a free smid from internal queue
3500  * @ioc: per adapter object
3501  * @cb_idx: callback index
3502  *
3503  * Return: smid (zero is invalid)
3504  */
3505 u16
3506 mpt3sas_base_get_smid(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
3507 {
3508 unsigned long flags;
3509 struct request_tracker *request;
3510 u16 smid;
3511
3512 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
3513 if (list_empty(&ioc->internal_free_list)) {
3514 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3515 ioc_err(ioc, "%s: smid not available\n", __func__);
3516 return 0;
3517 }
3518
3519 request = list_entry(ioc->internal_free_list.next,
3520 struct request_tracker, tracker_list);
3521 request->cb_idx = cb_idx;
3522 smid = request->smid;
3523 list_del(&request->tracker_list);
3524 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3525 return smid;
3526 }
3527 
3528 /**
3529  * mpt3sas_base_get_smid_scsiio - obtain a free smid from scsiio queue
3530  * @ioc: per adapter object
3531  * @cb_idx: callback index
3532  * @scmd: pointer to scsi command object
3533  *
3534  * Return: smid (zero is invalid)
3535  */
3536 u16
3537 mpt3sas_base_get_smid_scsiio(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx,
3538 struct scsi_cmnd *scmd)
3539 {
3540 struct scsiio_tracker *request = scsi_cmd_priv(scmd);
3541 unsigned int tag = scmd->request->tag;
3542 u16 smid;
3543
3544 smid = tag + 1;
3545 request->cb_idx = cb_idx;
3546 request->smid = smid;
3547 request->scmd = scmd;
3548 INIT_LIST_HEAD(&request->chain_list);
3549 return smid;
3550 }
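/*
 * Editor's note: SCSI IO smids are simply the block layer tag plus one, so
 * smid 0 stays reserved as "invalid" and the hot path needs no free-list
 * locking, unlike the internal and hi-priority pools around it. Trivial
 * sketch of the mapping (hypothetical helper, not in the driver):
 */
static u16 example_tag_to_smid(unsigned int tag)
{
	return (u16)(tag + 1); /* smid 0 is reserved as invalid */
}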
3551 
3552 /**
3553  * mpt3sas_base_get_smid_hpr - obtain a free smid from hi-priority queue
3554  * @ioc: per adapter object
3555  * @cb_idx: callback index
3556  *
3557  * Return: smid (zero is invalid)
3558  */
3559 u16
3560 mpt3sas_base_get_smid_hpr(struct MPT3SAS_ADAPTER *ioc, u8 cb_idx)
3561 {
3562 unsigned long flags;
3563 struct request_tracker *request;
3564 u16 smid;
3565
3566 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
3567 if (list_empty(&ioc->hpr_free_list)) {
3568 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3569 return 0;
3570 }
3571
3572 request = list_entry(ioc->hpr_free_list.next,
3573 struct request_tracker, tracker_list);
3574 request->cb_idx = cb_idx;
3575 smid = request->smid;
3576 list_del(&request->tracker_list);
3577 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3578 return smid;
3579 }
3580
3581 static void
3582 _base_recovery_check(struct MPT3SAS_ADAPTER *ioc)
3583 {
3584 /*
3585  * See mpt3sas_wait_for_commands_to_complete() with regard to this code.
3586  */
3587 if (ioc->shost_recovery && ioc->pending_io_count) {
3588 ioc->pending_io_count = scsi_host_busy(ioc->shost);
3589 if (ioc->pending_io_count == 0)
3590 wake_up(&ioc->reset_wq);
3591 }
3592 }
3593
3594 void mpt3sas_base_clear_st(struct MPT3SAS_ADAPTER *ioc,
3595 struct scsiio_tracker *st)
3596 {
3597 if (WARN_ON(st->smid == 0))
3598 return;
3599 st->cb_idx = 0xFF;
3600 st->direct_io = 0;
3601 st->scmd = NULL;
3602 atomic_set(&ioc->chain_lookup[st->smid - 1].chain_offset, 0);
3603 st->smid = 0;
3604 }
3605
3606 /**
3607  * mpt3sas_base_free_smid - put smid back on free_list
3608  * @ioc: per adapter object
3609  * @smid: system request message index
3610  */
3611 void
3612 mpt3sas_base_free_smid(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3613 {
3614 unsigned long flags;
3615 int i;
3616
3617 if (smid < ioc->hi_priority_smid) {
3618 struct scsiio_tracker *st;
3619 void *request;
3620
3621 st = _get_st_from_smid(ioc, smid);
3622 if (!st) {
3623 _base_recovery_check(ioc);
3624 return;
3625 }
3626
3627 /* Clear MPI request frame */
3628 request = mpt3sas_base_get_msg_frame(ioc, smid);
3629 memset(request, 0, ioc->request_sz);
3630
3631 mpt3sas_base_clear_st(ioc, st);
3632 _base_recovery_check(ioc);
3633 return;
3634 }
3635
3636 spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);
3637 if (smid < ioc->internal_smid) {
3638 /* hi-priority */
3639 i = smid - ioc->hi_priority_smid;
3640 ioc->hpr_lookup[i].cb_idx = 0xFF;
3641 list_add(&ioc->hpr_lookup[i].tracker_list, &ioc->hpr_free_list);
3642 } else if (smid <= ioc->hba_queue_depth) {
3643 /* internal queue */
3644 i = smid - ioc->internal_smid;
3645 ioc->internal_lookup[i].cb_idx = 0xFF;
3646 list_add(&ioc->internal_lookup[i].tracker_list,
3647 &ioc->internal_free_list);
3648 }
3649 spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);
3650 }
3651 
3652 /**
3653  * _base_mpi_ep_writeq - 32 bit write to MMIO
3654  * @b: data payload
3655  * @addr: address in MMIO space
3656  * @writeq_lock: spin lock
3657  *
3658  * Special handling for the MPI endpoint: on 32 bit platforms a
3659  * single 64 bit MMIO write is not guaranteed to reach the device
3660  * in one transfer, so the two halves are written under a lock.
3661  */
3662 static inline void
3663 _base_mpi_ep_writeq(__u64 b, volatile void __iomem *addr,
3664 spinlock_t *writeq_lock)
3665 {
3666 unsigned long flags;
3667
3668 spin_lock_irqsave(writeq_lock, flags);
3669 __raw_writel((u32)(b), addr);
3670 __raw_writel((u32)(b >> 32), (addr + 4));
3671 spin_unlock_irqrestore(writeq_lock, flags);
3672 }
3673 
3674 /**
3675  * _base_writeq - 64 bit write to MMIO
3676  * @b: data payload
3677  * @addr: address in MMIO space
3678  * @writeq_lock: spin lock
3679  *
3680  * Glue for handling an atomic 64 bit word to MMIO; falls back to
3681  * _base_mpi_ep_writeq() (a locked pair of 32 bit writes) on
3682  * platforms without a native 64 bit writeq().
3683  */
3684 #if defined(writeq) && defined(CONFIG_64BIT)
3685 static inline void
3686 _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
3687 {
3688 wmb();
3689 __raw_writeq(b, addr);
3690 barrier();
3691 }
3692 #else
3693 static inline void
3694 _base_writeq(__u64 b, volatile void __iomem *addr, spinlock_t *writeq_lock)
3695 {
3696 _base_mpi_ep_writeq(b, addr, writeq_lock);
3697 }
3698 #endif
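/*
 * Editor's note on the two paths above: the request descriptor must land in
 * RequestDescriptorPostLow/High as one 64-bit unit or the IOC could consume
 * a half-written descriptor. With a native writeq() the store is atomic on
 * its own; otherwise the two 32-bit halves are written low-then-high under
 * writeq_lock so another CPU cannot interleave its halves. Illustrative
 * caller (mirrors the put_smid helpers below):
 */
static inline void example_post_request_descriptor(struct MPT3SAS_ADAPTER *ioc,
	u64 descriptor)
{
	_base_writeq(descriptor, &ioc->chip->RequestDescriptorPostLow,
	    &ioc->scsi_lookup_lock);
}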
3699 
3700 /**
3701  * _base_set_and_get_msix_index - get the msix index and store it in the
3702  * scsi tracker's msix_io field
3703  * @ioc: per adapter object
3704  * @smid: system request message index
3705  *
3706  * Return: msix index.
3707  */
3708 static u8
3709 _base_set_and_get_msix_index(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3710 {
3711 struct scsiio_tracker *st = NULL;
3712
3713 if (smid < ioc->hi_priority_smid)
3714 st = _get_st_from_smid(ioc, smid);
3715
3716 if (st == NULL)
3717 return _base_get_msix_index(ioc, NULL);
3718
3719 st->msix_io = ioc->get_msix_index_for_smlio(ioc, st->scmd);
3720 return st->msix_io;
3721 }
3722 
3723 /**
3724  * _base_put_smid_mpi_ep_scsi_io - send SCSI_IO request to firmware
3725  * @ioc: per adapter object
3726  * @smid: system request message index
3727  * @handle: device handle
3728  */
3729 static void
3730 _base_put_smid_mpi_ep_scsi_io(struct MPT3SAS_ADAPTER *ioc,
3731 u16 smid, u16 handle)
3732 {
3733 Mpi2RequestDescriptorUnion_t descriptor;
3734 u64 *request = (u64 *)&descriptor;
3735 void *mpi_req_iomem;
3736 __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
3737
3738 _clone_sg_entries(ioc, (void *) mfp, smid);
3739 mpi_req_iomem = (void __force *)ioc->chip +
3740 MPI_FRAME_START_OFFSET + (smid * ioc->request_sz);
3741 _base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
3742 ioc->request_sz);
3743 descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
3744 descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
3745 descriptor.SCSIIO.SMID = cpu_to_le16(smid);
3746 descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
3747 descriptor.SCSIIO.LMID = 0;
3748 _base_mpi_ep_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
3749 &ioc->scsi_lookup_lock);
3750 }
3751 
3752 /**
3753  * _base_put_smid_scsi_io - send SCSI_IO request to firmware
3754  * @ioc: per adapter object
3755  * @smid: system request message index
3756  * @handle: device handle
3757  */
3758 static void
3759 _base_put_smid_scsi_io(struct MPT3SAS_ADAPTER *ioc, u16 smid, u16 handle)
3760 {
3761 Mpi2RequestDescriptorUnion_t descriptor;
3762 u64 *request = (u64 *)&descriptor;
3763
3764
3765 descriptor.SCSIIO.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
3766 descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
3767 descriptor.SCSIIO.SMID = cpu_to_le16(smid);
3768 descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
3769 descriptor.SCSIIO.LMID = 0;
3770 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
3771 &ioc->scsi_lookup_lock);
3772 }
3773 
3774 /**
3775  * _base_put_smid_fast_path - send fast path request to firmware
3776  * @ioc: per adapter object
3777  * @smid: system request message index
3778  * @handle: device handle
3779  */
3780 static void
3781 _base_put_smid_fast_path(struct MPT3SAS_ADAPTER *ioc, u16 smid,
3782 u16 handle)
3783 {
3784 Mpi2RequestDescriptorUnion_t descriptor;
3785 u64 *request = (u64 *)&descriptor;
3786
3787 descriptor.SCSIIO.RequestFlags =
3788 MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
3789 descriptor.SCSIIO.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
3790 descriptor.SCSIIO.SMID = cpu_to_le16(smid);
3791 descriptor.SCSIIO.DevHandle = cpu_to_le16(handle);
3792 descriptor.SCSIIO.LMID = 0;
3793 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
3794 &ioc->scsi_lookup_lock);
3795 }
3796 
3797 /**
3798  * _base_put_smid_hi_priority - send Task Management request to firmware
3799  * @ioc: per adapter object
3800  * @smid: system request message index
3801  * @msix_task: msix_task will be same as msix of IO in case of task abort else 0
3802  */
3803 static void
3804 _base_put_smid_hi_priority(struct MPT3SAS_ADAPTER *ioc, u16 smid,
3805 u16 msix_task)
3806 {
3807 Mpi2RequestDescriptorUnion_t descriptor;
3808 void *mpi_req_iomem;
3809 u64 *request;
3810
3811 if (ioc->is_mcpu_endpoint) {
3812 __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
3813
3814
3815 mpi_req_iomem = (void __force *)ioc->chip
3816 + MPI_FRAME_START_OFFSET
3817 + (smid * ioc->request_sz);
3818 _base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
3819 ioc->request_sz);
3820 }
3821
3822 request = (u64 *)&descriptor;
3823
3824 descriptor.HighPriority.RequestFlags =
3825 MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
3826 descriptor.HighPriority.MSIxIndex = msix_task;
3827 descriptor.HighPriority.SMID = cpu_to_le16(smid);
3828 descriptor.HighPriority.LMID = 0;
3829 descriptor.HighPriority.Reserved1 = 0;
3830 if (ioc->is_mcpu_endpoint)
3831 _base_mpi_ep_writeq(*request,
3832 &ioc->chip->RequestDescriptorPostLow,
3833 &ioc->scsi_lookup_lock);
3834 else
3835 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
3836 &ioc->scsi_lookup_lock);
3837 }
3838 
3839 /**
3840  * mpt3sas_base_put_smid_nvme_encap - send NVMe encapsulated request to
3841  *  firmware
3842  * @ioc: per adapter object
3843  * @smid: system request message index
3844  */
3845 void
3846 mpt3sas_base_put_smid_nvme_encap(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3847 {
3848 Mpi2RequestDescriptorUnion_t descriptor;
3849 u64 *request = (u64 *)&descriptor;
3850
3851 descriptor.Default.RequestFlags =
3852 MPI26_REQ_DESCRIPT_FLAGS_PCIE_ENCAPSULATED;
3853 descriptor.Default.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
3854 descriptor.Default.SMID = cpu_to_le16(smid);
3855 descriptor.Default.LMID = 0;
3856 descriptor.Default.DescriptorTypeDependent = 0;
3857 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
3858 &ioc->scsi_lookup_lock);
3859 }
3860 
3861 /**
3862  * _base_put_smid_default - Default, primarily used for config pages
3863  * @ioc: per adapter object
3864  * @smid: system request message index
3865  */
3866 static void
3867 _base_put_smid_default(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3868 {
3869 Mpi2RequestDescriptorUnion_t descriptor;
3870 void *mpi_req_iomem;
3871 u64 *request;
3872
3873 if (ioc->is_mcpu_endpoint) {
3874 __le32 *mfp = (__le32 *)mpt3sas_base_get_msg_frame(ioc, smid);
3875
3876 _clone_sg_entries(ioc, (void *) mfp, smid);
3877
3878 mpi_req_iomem = (void __force *)ioc->chip +
3879 MPI_FRAME_START_OFFSET + (smid * ioc->request_sz);
3880 _base_clone_mpi_to_sys_mem(mpi_req_iomem, (void *)mfp,
3881 ioc->request_sz);
3882 }
3883 request = (u64 *)&descriptor;
3884 descriptor.Default.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3885 descriptor.Default.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
3886 descriptor.Default.SMID = cpu_to_le16(smid);
3887 descriptor.Default.LMID = 0;
3888 descriptor.Default.DescriptorTypeDependent = 0;
3889 if (ioc->is_mcpu_endpoint)
3890 _base_mpi_ep_writeq(*request,
3891 &ioc->chip->RequestDescriptorPostLow,
3892 &ioc->scsi_lookup_lock);
3893 else
3894 _base_writeq(*request, &ioc->chip->RequestDescriptorPostLow,
3895 &ioc->scsi_lookup_lock);
3896 }
3897 
3898 /**
3899  * _base_put_smid_scsi_io_atomic - send SCSI_IO request to firmware using
3900  *   Atomic Request Descriptor
3901  * @ioc: per adapter object
3902  * @smid: system request message index
3903  * @handle: device handle, unused in this function, for function type match
3904  *
3905  * Return: nothing.
3906  */
3907 static void
3908 _base_put_smid_scsi_io_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
3909 u16 handle)
3910 {
3911 Mpi26AtomicRequestDescriptor_t descriptor;
3912 u32 *request = (u32 *)&descriptor;
3913
3914 descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_SCSI_IO;
3915 descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
3916 descriptor.SMID = cpu_to_le16(smid);
3917
3918 writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
3919 }
3920 
3921 /**
3922  * _base_put_smid_fast_path_atomic - send fast path request to firmware
3923  * using Atomic Request Descriptor
3924  * @ioc: per adapter object
3925  * @smid: system request message index
3926  * @handle: device handle, unused in this function, for function type match
3927  * Return: nothing
3928  */
3929 static void
3930 _base_put_smid_fast_path_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
3931 u16 handle)
3932 {
3933 Mpi26AtomicRequestDescriptor_t descriptor;
3934 u32 *request = (u32 *)&descriptor;
3935
3936 descriptor.RequestFlags = MPI25_REQ_DESCRIPT_FLAGS_FAST_PATH_SCSI_IO;
3937 descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
3938 descriptor.SMID = cpu_to_le16(smid);
3939
3940 writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
3941 }
3942 
3943 /**
3944  * _base_put_smid_hi_priority_atomic - send Task Management request to
3945  * firmware using Atomic Request Descriptor
3946  * @ioc: per adapter object
3947  * @smid: system request message index
3948  * @msix_task: msix_task will be same as msix of IO in case of task abort else 0
3949  *
3950  * Return: nothing.
3951  */
3952 static void
3953 _base_put_smid_hi_priority_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid,
3954 u16 msix_task)
3955 {
3956 Mpi26AtomicRequestDescriptor_t descriptor;
3957 u32 *request = (u32 *)&descriptor;
3958
3959 descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_HIGH_PRIORITY;
3960 descriptor.MSIxIndex = msix_task;
3961 descriptor.SMID = cpu_to_le16(smid);
3962
3963 writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
3964 }
3965 
3966 /**
3967  * _base_put_smid_default_atomic - Default, primarily used for config pages
3968  * use Atomic Request Descriptor
3969  * @ioc: per adapter object
3970  * @smid: system request message index
3971  *
3972  * Return: nothing.
3973  */
3974 static void
3975 _base_put_smid_default_atomic(struct MPT3SAS_ADAPTER *ioc, u16 smid)
3976 {
3977 Mpi26AtomicRequestDescriptor_t descriptor;
3978 u32 *request = (u32 *)&descriptor;
3979
3980 descriptor.RequestFlags = MPI2_REQ_DESCRIPT_FLAGS_DEFAULT_TYPE;
3981 descriptor.MSIxIndex = _base_set_and_get_msix_index(ioc, smid);
3982 descriptor.SMID = cpu_to_le16(smid);
3983
3984 writel(cpu_to_le32(*request), &ioc->chip->AtomicRequestDescriptorPost);
3985 }
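/*
 * Editor's note contrasting the two descriptor families: the full
 * Mpi2RequestDescriptorUnion_t is 64 bits wide and needs the _base_writeq()
 * machinery above, while Mpi26AtomicRequestDescriptor_t packs RequestFlags,
 * MSIxIndex and SMID into a single 32-bit word, so one writel() is already
 * atomic and the *_atomic variants need no lock. Sketch of that 32-bit
 * layout (little-endian, per the MPI 2.6 structure ordering):
 */
static u32 example_atomic_descriptor(u8 request_flags, u8 msix_index, u16 smid)
{
	return (u32)request_flags | ((u32)msix_index << 8) | ((u32)smid << 16);
}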
3986 
3987 /**
3988  * _base_display_OEMs_branding - Display branding string
3989  * @ioc: per adapter object
3990  */
3991 static void
3992 _base_display_OEMs_branding(struct MPT3SAS_ADAPTER *ioc)
3993 {
3994 /* Branding is matched on subsystem vendor and device IDs; vendors
3995  * with no match simply fall out of the switch below. */
3996
3997 switch (ioc->pdev->subsystem_vendor) {
3998 case PCI_VENDOR_ID_INTEL:
3999 switch (ioc->pdev->device) {
4000 case MPI2_MFGPAGE_DEVID_SAS2008:
4001 switch (ioc->pdev->subsystem_device) {
4002 case MPT2SAS_INTEL_RMS2LL080_SSDID:
4003 ioc_info(ioc, "%s\n",
4004 MPT2SAS_INTEL_RMS2LL080_BRANDING);
4005 break;
4006 case MPT2SAS_INTEL_RMS2LL040_SSDID:
4007 ioc_info(ioc, "%s\n",
4008 MPT2SAS_INTEL_RMS2LL040_BRANDING);
4009 break;
4010 case MPT2SAS_INTEL_SSD910_SSDID:
4011 ioc_info(ioc, "%s\n",
4012 MPT2SAS_INTEL_SSD910_BRANDING);
4013 break;
4014 default:
4015 ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
4016 ioc->pdev->subsystem_device);
4017 break;
4018 }
4019 break;
4020 case MPI2_MFGPAGE_DEVID_SAS2308_2:
4021 switch (ioc->pdev->subsystem_device) {
4022 case MPT2SAS_INTEL_RS25GB008_SSDID:
4023 ioc_info(ioc, "%s\n",
4024 MPT2SAS_INTEL_RS25GB008_BRANDING);
4025 break;
4026 case MPT2SAS_INTEL_RMS25JB080_SSDID:
4027 ioc_info(ioc, "%s\n",
4028 MPT2SAS_INTEL_RMS25JB080_BRANDING);
4029 break;
4030 case MPT2SAS_INTEL_RMS25JB040_SSDID:
4031 ioc_info(ioc, "%s\n",
4032 MPT2SAS_INTEL_RMS25JB040_BRANDING);
4033 break;
4034 case MPT2SAS_INTEL_RMS25KB080_SSDID:
4035 ioc_info(ioc, "%s\n",
4036 MPT2SAS_INTEL_RMS25KB080_BRANDING);
4037 break;
4038 case MPT2SAS_INTEL_RMS25KB040_SSDID:
4039 ioc_info(ioc, "%s\n",
4040 MPT2SAS_INTEL_RMS25KB040_BRANDING);
4041 break;
4042 case MPT2SAS_INTEL_RMS25LB040_SSDID:
4043 ioc_info(ioc, "%s\n",
4044 MPT2SAS_INTEL_RMS25LB040_BRANDING);
4045 break;
4046 case MPT2SAS_INTEL_RMS25LB080_SSDID:
4047 ioc_info(ioc, "%s\n",
4048 MPT2SAS_INTEL_RMS25LB080_BRANDING);
4049 break;
4050 default:
4051 ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
4052 ioc->pdev->subsystem_device);
4053 break;
4054 }
4055 break;
4056 case MPI25_MFGPAGE_DEVID_SAS3008:
4057 switch (ioc->pdev->subsystem_device) {
4058 case MPT3SAS_INTEL_RMS3JC080_SSDID:
4059 ioc_info(ioc, "%s\n",
4060 MPT3SAS_INTEL_RMS3JC080_BRANDING);
4061 break;
4062
4063 case MPT3SAS_INTEL_RS3GC008_SSDID:
4064 ioc_info(ioc, "%s\n",
4065 MPT3SAS_INTEL_RS3GC008_BRANDING);
4066 break;
4067 case MPT3SAS_INTEL_RS3FC044_SSDID:
4068 ioc_info(ioc, "%s\n",
4069 MPT3SAS_INTEL_RS3FC044_BRANDING);
4070 break;
4071 case MPT3SAS_INTEL_RS3UC080_SSDID:
4072 ioc_info(ioc, "%s\n",
4073 MPT3SAS_INTEL_RS3UC080_BRANDING);
4074 break;
4075 default:
4076 ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
4077 ioc->pdev->subsystem_device);
4078 break;
4079 }
4080 break;
4081 default:
4082 ioc_info(ioc, "Intel(R) Controller: Subsystem ID: 0x%X\n",
4083 ioc->pdev->subsystem_device);
4084 break;
4085 }
4086 break;
4087 case PCI_VENDOR_ID_DELL:
4088 switch (ioc->pdev->device) {
4089 case MPI2_MFGPAGE_DEVID_SAS2008:
4090 switch (ioc->pdev->subsystem_device) {
4091 case MPT2SAS_DELL_6GBPS_SAS_HBA_SSDID:
4092 ioc_info(ioc, "%s\n",
4093 MPT2SAS_DELL_6GBPS_SAS_HBA_BRANDING);
4094 break;
4095 case MPT2SAS_DELL_PERC_H200_ADAPTER_SSDID:
4096 ioc_info(ioc, "%s\n",
4097 MPT2SAS_DELL_PERC_H200_ADAPTER_BRANDING);
4098 break;
4099 case MPT2SAS_DELL_PERC_H200_INTEGRATED_SSDID:
4100 ioc_info(ioc, "%s\n",
4101 MPT2SAS_DELL_PERC_H200_INTEGRATED_BRANDING);
4102 break;
4103 case MPT2SAS_DELL_PERC_H200_MODULAR_SSDID:
4104 ioc_info(ioc, "%s\n",
4105 MPT2SAS_DELL_PERC_H200_MODULAR_BRANDING);
4106 break;
4107 case MPT2SAS_DELL_PERC_H200_EMBEDDED_SSDID:
4108 ioc_info(ioc, "%s\n",
4109 MPT2SAS_DELL_PERC_H200_EMBEDDED_BRANDING);
4110 break;
4111 case MPT2SAS_DELL_PERC_H200_SSDID:
4112 ioc_info(ioc, "%s\n",
4113 MPT2SAS_DELL_PERC_H200_BRANDING);
4114 break;
4115 case MPT2SAS_DELL_6GBPS_SAS_SSDID:
4116 ioc_info(ioc, "%s\n",
4117 MPT2SAS_DELL_6GBPS_SAS_BRANDING);
4118 break;
4119 default:
4120 ioc_info(ioc, "Dell 6Gbps HBA: Subsystem ID: 0x%X\n",
4121 ioc->pdev->subsystem_device);
4122 break;
4123 }
4124 break;
4125 case MPI25_MFGPAGE_DEVID_SAS3008:
4126 switch (ioc->pdev->subsystem_device) {
4127 case MPT3SAS_DELL_12G_HBA_SSDID:
4128 ioc_info(ioc, "%s\n",
4129 MPT3SAS_DELL_12G_HBA_BRANDING);
4130 break;
4131 default:
4132 ioc_info(ioc, "Dell 12Gbps HBA: Subsystem ID: 0x%X\n",
4133 ioc->pdev->subsystem_device);
4134 break;
4135 }
4136 break;
4137 default:
4138 ioc_info(ioc, "Dell HBA: Subsystem ID: 0x%X\n",
4139 ioc->pdev->subsystem_device);
4140 break;
4141 }
4142 break;
4143 case PCI_VENDOR_ID_CISCO:
4144 switch (ioc->pdev->device) {
4145 case MPI25_MFGPAGE_DEVID_SAS3008:
4146 switch (ioc->pdev->subsystem_device) {
4147 case MPT3SAS_CISCO_12G_8E_HBA_SSDID:
4148 ioc_info(ioc, "%s\n",
4149 MPT3SAS_CISCO_12G_8E_HBA_BRANDING);
4150 break;
4151 case MPT3SAS_CISCO_12G_8I_HBA_SSDID:
4152 ioc_info(ioc, "%s\n",
4153 MPT3SAS_CISCO_12G_8I_HBA_BRANDING);
4154 break;
4155 case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
4156 ioc_info(ioc, "%s\n",
4157 MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
4158 break;
4159 default:
4160 ioc_info(ioc, "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
4161 ioc->pdev->subsystem_device);
4162 break;
4163 }
4164 break;
4165 case MPI25_MFGPAGE_DEVID_SAS3108_1:
4166 switch (ioc->pdev->subsystem_device) {
4167 case MPT3SAS_CISCO_12G_AVILA_HBA_SSDID:
4168 ioc_info(ioc, "%s\n",
4169 MPT3SAS_CISCO_12G_AVILA_HBA_BRANDING);
4170 break;
4171 case MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_SSDID:
4172 ioc_info(ioc, "%s\n",
4173 MPT3SAS_CISCO_12G_COLUSA_MEZZANINE_HBA_BRANDING);
4174 break;
4175 default:
4176 ioc_info(ioc, "Cisco 12Gbps SAS HBA: Subsystem ID: 0x%X\n",
4177 ioc->pdev->subsystem_device);
4178 break;
4179 }
4180 break;
4181 default:
4182 ioc_info(ioc, "Cisco SAS HBA: Subsystem ID: 0x%X\n",
4183 ioc->pdev->subsystem_device);
4184 break;
4185 }
4186 break;
4187 case MPT2SAS_HP_3PAR_SSVID:
4188 switch (ioc->pdev->device) {
4189 case MPI2_MFGPAGE_DEVID_SAS2004:
4190 switch (ioc->pdev->subsystem_device) {
4191 case MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_SSDID:
4192 ioc_info(ioc, "%s\n",
4193 MPT2SAS_HP_DAUGHTER_2_4_INTERNAL_BRANDING);
4194 break;
4195 default:
4196 ioc_info(ioc, "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
4197 ioc->pdev->subsystem_device);
4198 break;
4199 }
4200 break;
4201 case MPI2_MFGPAGE_DEVID_SAS2308_2:
4202 switch (ioc->pdev->subsystem_device) {
4203 case MPT2SAS_HP_2_4_INTERNAL_SSDID:
4204 ioc_info(ioc, "%s\n",
4205 MPT2SAS_HP_2_4_INTERNAL_BRANDING);
4206 break;
4207 case MPT2SAS_HP_2_4_EXTERNAL_SSDID:
4208 ioc_info(ioc, "%s\n",
4209 MPT2SAS_HP_2_4_EXTERNAL_BRANDING);
4210 break;
4211 case MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_SSDID:
4212 ioc_info(ioc, "%s\n",
4213 MPT2SAS_HP_1_4_INTERNAL_1_4_EXTERNAL_BRANDING);
4214 break;
4215 case MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_SSDID:
4216 ioc_info(ioc, "%s\n",
4217 MPT2SAS_HP_EMBEDDED_2_4_INTERNAL_BRANDING);
4218 break;
4219 default:
4220 ioc_info(ioc, "HP 6Gbps SAS HBA: Subsystem ID: 0x%X\n",
4221 ioc->pdev->subsystem_device);
4222 break;
4223 }
4224 break;
4225 default:
4226 ioc_info(ioc, "HP SAS HBA: Subsystem ID: 0x%X\n",
4227 ioc->pdev->subsystem_device);
4228 break;
4229 }
4230 default:
4231 break;
4232 }
4233 }
4234 
4235 /**
4236  * _base_display_fwpkg_version - sends FWUpload request to pull FWPkg
4237  *	version from FW Image Header.
4238  * @ioc: per adapter object
4239  *
4240  * Return: 0 for success, non-zero for failure.
4241  */
4242 static int
4243 _base_display_fwpkg_version(struct MPT3SAS_ADAPTER *ioc)
4244 {
4245 Mpi2FWImageHeader_t *FWImgHdr;
4246 Mpi25FWUploadRequest_t *mpi_request;
4247 Mpi2FWUploadReply_t mpi_reply;
4248 int r = 0;
4249 void *fwpkg_data = NULL;
4250 dma_addr_t fwpkg_data_dma;
4251 u16 smid, ioc_status;
4252 size_t data_length;
4253
4254 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
4255
4256 if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
4257 ioc_err(ioc, "%s: internal command already in use\n", __func__);
4258 return -EAGAIN;
4259 }
4260
4261 data_length = sizeof(Mpi2FWImageHeader_t);
4262 fwpkg_data = dma_alloc_coherent(&ioc->pdev->dev, data_length,
4263 &fwpkg_data_dma, GFP_KERNEL);
4264 if (!fwpkg_data) {
4265 ioc_err(ioc, "failure at %s:%d/%s()!\n",
4266 __FILE__, __LINE__, __func__);
4267 return -ENOMEM;
4268 }
4269
4270 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
4271 if (!smid) {
4272 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
4273 r = -EAGAIN;
4274 goto out;
4275 }
4276
4277 ioc->base_cmds.status = MPT3_CMD_PENDING;
4278 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
4279 ioc->base_cmds.smid = smid;
4280 memset(mpi_request, 0, sizeof(Mpi25FWUploadRequest_t));
4281 mpi_request->Function = MPI2_FUNCTION_FW_UPLOAD;
4282 mpi_request->ImageType = MPI2_FW_UPLOAD_ITYPE_FW_FLASH;
4283 mpi_request->ImageSize = cpu_to_le32(data_length);
4284 ioc->build_sg(ioc, &mpi_request->SGL, 0, 0, fwpkg_data_dma,
4285 data_length);
4286 init_completion(&ioc->base_cmds.done);
4287 ioc->put_smid_default(ioc, smid);
4288 /* Wait up to FW_IMG_HDR_READ_TIMEOUT seconds for the upload to complete */
4289 wait_for_completion_timeout(&ioc->base_cmds.done,
4290 FW_IMG_HDR_READ_TIMEOUT*HZ);
4291 ioc_info(ioc, "%s: complete\n", __func__);
4292 if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
4293 ioc_err(ioc, "%s: timeout\n", __func__);
4294 _debug_dump_mf(mpi_request,
4295 sizeof(Mpi25FWUploadRequest_t)/4);
4296 r = -ETIME;
4297 } else {
4298 memset(&mpi_reply, 0, sizeof(Mpi2FWUploadReply_t));
4299 if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID) {
4300 memcpy(&mpi_reply, ioc->base_cmds.reply,
4301 sizeof(Mpi2FWUploadReply_t));
4302 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
4303 MPI2_IOCSTATUS_MASK;
4304 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
4305 FWImgHdr = (Mpi2FWImageHeader_t *)fwpkg_data;
4306 if (FWImgHdr->PackageVersion.Word) {
4307 ioc_info(ioc, "FW Package Version (%02d.%02d.%02d.%02d)\n",
4308 FWImgHdr->PackageVersion.Struct.Major,
4309 FWImgHdr->PackageVersion.Struct.Minor,
4310 FWImgHdr->PackageVersion.Struct.Unit,
4311 FWImgHdr->PackageVersion.Struct.Dev);
4312 }
4313 } else {
4314 _debug_dump_mf(&mpi_reply,
4315 sizeof(Mpi2FWUploadReply_t)/4);
4316 }
4317 }
4318 }
4319 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
4320 out:
4321 if (fwpkg_data)
4322 dma_free_coherent(&ioc->pdev->dev, data_length, fwpkg_data,
4323 fwpkg_data_dma);
4324 return r;
4325 }
4326 
4327 /**
4328  * _base_display_ioc_capabilities - Display IOC's capabilities.
4329  * @ioc: per adapter object
4330  */
4331 static void
4332 _base_display_ioc_capabilities(struct MPT3SAS_ADAPTER *ioc)
4333 {
4334 int i = 0;
4335 char desc[17] = {0}; /* one byte longer than ChipName so the copy stays NUL-terminated */
4336 u32 iounit_pg1_flags;
4337 u32 bios_version;
4338
4339 bios_version = le32_to_cpu(ioc->bios_pg3.BiosVersion);
4340 strncpy(desc, ioc->manu_pg0.ChipName, 16);
4341 ioc_info(ioc, "%s: FWVersion(%02d.%02d.%02d.%02d), ChipRevision(0x%02x), BiosVersion(%02d.%02d.%02d.%02d)\n",
4342 desc,
4343 (ioc->facts.FWVersion.Word & 0xFF000000) >> 24,
4344 (ioc->facts.FWVersion.Word & 0x00FF0000) >> 16,
4345 (ioc->facts.FWVersion.Word & 0x0000FF00) >> 8,
4346 ioc->facts.FWVersion.Word & 0x000000FF,
4347 ioc->pdev->revision,
4348 (bios_version & 0xFF000000) >> 24,
4349 (bios_version & 0x00FF0000) >> 16,
4350 (bios_version & 0x0000FF00) >> 8,
4351 bios_version & 0x000000FF);
4352
4353 _base_display_OEMs_branding(ioc);
4354
4355 ioc_info(ioc, "Protocol=(");
4356 
4357 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
4358 pr_cont("Initiator");
4359 i++;
4360 }
4361 
4362 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_TARGET) {
4363 pr_cont("%sTarget", i ? "," : "");
4364 i++;
4365 }
4366 
4367 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
4368 pr_cont("%sNVMe", i ? "," : "");
4369 i++;
4370 }
4371
4372 i = 0;
4373 pr_cont("), Capabilities=(");
4374
4375 if (!ioc->hide_ir_msg) {
4376 if (ioc->facts.IOCCapabilities &
4377 MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID) {
4378 pr_cont("Raid");
4379 i++;
4380 }
4381 }
4382
4383 if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_TLR) {
4384 pr_cont("%sTLR", i ? "," : "");
4385 i++;
4386 }
4387
4388 if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_MULTICAST) {
4389 pr_cont("%sMulticast", i ? "," : "");
4390 i++;
4391 }
4392
4393 if (ioc->facts.IOCCapabilities &
4394 MPI2_IOCFACTS_CAPABILITY_BIDIRECTIONAL_TARGET) {
4395 pr_cont("%sBIDI Target", i ? "," : "");
4396 i++;
4397 }
4398
4399 if (ioc->facts.IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_EEDP) {
4400 pr_cont("%sEEDP", i ? "," : "");
4401 i++;
4402 }
4403
4404 if (ioc->facts.IOCCapabilities &
4405 MPI2_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER) {
4406 pr_cont("%sSnapshot Buffer", i ? "," : "");
4407 i++;
4408 }
4409
4410 if (ioc->facts.IOCCapabilities &
4411 MPI2_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER) {
4412 pr_cont("%sDiag Trace Buffer", i ? "," : "");
4413 i++;
4414 }
4415
4416 if (ioc->facts.IOCCapabilities &
4417 MPI2_IOCFACTS_CAPABILITY_EXTENDED_BUFFER) {
4418 pr_cont("%sDiag Extended Buffer", i ? "," : "");
4419 i++;
4420 }
4421
4422 if (ioc->facts.IOCCapabilities &
4423 MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING) {
4424 pr_cont("%sTask Set Full", i ? "," : "");
4425 i++;
4426 }
4427
4428 iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
4429 if (!(iounit_pg1_flags & MPI2_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE)) {
4430 pr_cont("%sNCQ", i ? "," : "");
4431 i++;
4432 }
4433
4434 pr_cont(")\n");
4435 }
4436 
4437 /**
4438  * mpt3sas_base_update_missing_delay - change the missing delay timers
4439  * @ioc: per adapter object
4440  * @device_missing_delay: amount of time till device is reported missing
4441  * @io_missing_delay: interval IO is returned when there is a missing device
4442  *
4443  * Passed on the command line, this function will modify the device missing
4444  * delay, as well as the io missing delay. This should be called at driver
4445  * load time.
4446  */
4447 void
4448 mpt3sas_base_update_missing_delay(struct MPT3SAS_ADAPTER *ioc,
4449 u16 device_missing_delay, u8 io_missing_delay)
4450 {
4451 u16 dmd, dmd_new, dmd_orignal;
4452 u8 io_missing_delay_original;
4453 u16 sz;
4454 Mpi2SasIOUnitPage1_t *sas_iounit_pg1 = NULL;
4455 Mpi2ConfigReply_t mpi_reply;
4456 u8 num_phys = 0;
4457 u16 ioc_status;
4458
4459 mpt3sas_config_get_number_hba_phys(ioc, &num_phys);
4460 if (!num_phys)
4461 return;
4462
4463 sz = offsetof(Mpi2SasIOUnitPage1_t, PhyData) + (num_phys *
4464 sizeof(Mpi2SasIOUnit1PhyData_t));
4465 sas_iounit_pg1 = kzalloc(sz, GFP_KERNEL);
4466 if (!sas_iounit_pg1) {
4467 ioc_err(ioc, "failure at %s:%d/%s()!\n",
4468 __FILE__, __LINE__, __func__);
4469 goto out;
4470 }
4471 if ((mpt3sas_config_get_sas_iounit_pg1(ioc, &mpi_reply,
4472 sas_iounit_pg1, sz))) {
4473 ioc_err(ioc, "failure at %s:%d/%s()!\n",
4474 __FILE__, __LINE__, __func__);
4475 goto out;
4476 }
4477 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
4478 MPI2_IOCSTATUS_MASK;
4479 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
4480 ioc_err(ioc, "failure at %s:%d/%s()!\n",
4481 __FILE__, __LINE__, __func__);
4482 goto out;
4483 }
4484
4485 /* device missing delay */
4486 dmd = sas_iounit_pg1->ReportDeviceMissingDelay;
4487 if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
4488 dmd = (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
4489 else
4490 dmd = dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
4491 dmd_orignal = dmd;
4492 if (device_missing_delay > 0x7F) {
4493 dmd = (device_missing_delay > 0x7F0) ? 0x7F0 :
4494 device_missing_delay;
4495 dmd = dmd / 16;
4496 dmd |= MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16;
4497 } else
4498 dmd = device_missing_delay;
4499 sas_iounit_pg1->ReportDeviceMissingDelay = dmd;
4500
4501 /* io missing delay */
4502 io_missing_delay_original = sas_iounit_pg1->IODeviceMissingDelay;
4503 sas_iounit_pg1->IODeviceMissingDelay = io_missing_delay;
4504
4505 if (!mpt3sas_config_set_sas_iounit_pg1(ioc, &mpi_reply, sas_iounit_pg1,
4506 sz)) {
4507 if (dmd & MPI2_SASIOUNIT1_REPORT_MISSING_UNIT_16)
4508 dmd_new = (dmd &
4509 MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK) * 16;
4510 else
4511 dmd_new =
4512 dmd & MPI2_SASIOUNIT1_REPORT_MISSING_TIMEOUT_MASK;
4513 ioc_info(ioc, "device_missing_delay: old(%d), new(%d)\n",
4514 dmd_orignal, dmd_new);
4515 ioc_info(ioc, "ioc_missing_delay: old(%d), new(%d)\n",
4516 io_missing_delay_original,
4517 io_missing_delay);
4518 ioc->device_missing_delay = dmd_new;
4519 ioc->io_missing_delay = io_missing_delay;
4520 }
4521
4522 out:
4523 kfree(sas_iounit_pg1);
4524 }
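/*
 * Editor's worked example of the delay encoding above: the timeout field is
 * 7 bits and holds seconds directly up to 127; beyond that the UNIT_16 flag
 * (bit 7) switches the units to 16 seconds. A requested 300s therefore
 * becomes 300 / 16 = 18 units (288s effective), and the 0x7F0 cap (127 * 16)
 * keeps the scaled value inside the 7-bit field. Illustrative encoder:
 */
static u8 example_encode_missing_delay(u16 seconds)
{
	if (seconds <= 0x7f)
		return (u8)seconds;
	if (seconds > 0x7f0)
		seconds = 0x7f0;
	return (u8)((seconds / 16) | 0x80); /* UNIT_16 flag set */
}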
4525 
4526 /**
4527  * _base_update_ioc_page1_inlinewith_perf_mode - Update IOC Page1 fields
4528  *    according to performance mode.
4529  * @ioc: per adapter object
4530  *
4531  * Return: nothing.
4532  */
4533 static void
4534 _base_update_ioc_page1_inlinewith_perf_mode(struct MPT3SAS_ADAPTER *ioc)
4535 {
4536 Mpi2IOCPage1_t ioc_pg1;
4537 Mpi2ConfigReply_t mpi_reply;
4538
4539 mpt3sas_config_get_ioc_pg1(ioc, &mpi_reply, &ioc->ioc_pg1_copy);
4540 memcpy(&ioc_pg1, &ioc->ioc_pg1_copy, sizeof(Mpi2IOCPage1_t));
4541
4542 switch (perf_mode) {
4543 case MPT_PERF_MODE_DEFAULT:
4544 case MPT_PERF_MODE_BALANCED:
4545 if (ioc->high_iops_queues) {
4546 ioc_info(ioc,
4547 "Enable interrupt coalescing only for first "
4548 "%d reply queues\n",
4549 MPT3SAS_HIGH_IOPS_REPLY_QUEUES);
4550 /*
4551  * Bit 31 of ProductSpecific selects the coalescing granularity:
4552  * when clear, interrupt coalescing applies to all reply
4553  * descriptor post queues; when set, coalescing is controlled
4554  * per group of 8 reply queues via the low bits. Setting bit 31
4555  * together with bit 0 therefore enables coalescing only on the
4556  * first reply queue group, i.e. the high iops queues configured
4557  * above.
4558  */
4559 ioc_pg1.ProductSpecific = cpu_to_le32(0x80000000 |
4560 ((1 << MPT3SAS_HIGH_IOPS_REPLY_QUEUES/8) - 1));
4561 mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
4562 ioc_info(ioc, "performance mode: balanced\n");
4563 return;
4564 }
4565 /* fall through: with no high iops queues, balanced mode behaves as latency mode */
4566 case MPT_PERF_MODE_LATENCY:
4567 /*
4568  * Enable interrupt coalescing on all reply queues
4569  * with timeout value 0xa.
4570  */
4571 ioc_pg1.CoalescingTimeout = cpu_to_le32(0xa);
4572 ioc_pg1.Flags |= cpu_to_le32(MPI2_IOCPAGE1_REPLY_COALESCING);
4573 ioc_pg1.ProductSpecific = 0;
4574 mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
4575 ioc_info(ioc, "performance mode: latency\n");
4576 break;
4577 case MPT_PERF_MODE_IOPS:
4578 /*
4579  * Enable interrupt coalescing on all reply queues.
4580  */
4581 ioc_info(ioc,
4582 "performance mode: iops with coalescing timeout: 0x%x\n",
4583 le32_to_cpu(ioc_pg1.CoalescingTimeout));
4584 ioc_pg1.Flags |= cpu_to_le32(MPI2_IOCPAGE1_REPLY_COALESCING);
4585 ioc_pg1.ProductSpecific = 0;
4586 mpt3sas_config_set_ioc_pg1(ioc, &mpi_reply, &ioc_pg1);
4587 break;
4588 }
4589 }
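/*
 * Editor's worked arithmetic for the balanced-mode ProductSpecific value
 * above: with MPT3SAS_HIGH_IOPS_REPLY_QUEUES = 8, (1 << 8/8) - 1 evaluates
 * to 1, so the word is 0x80000000 | 0x1 = 0x80000001. Bit 31 selects
 * per-queue-group (groups of 8) coalescing control and bit 0 enables
 * coalescing for the first group only, i.e. exactly the high iops queues.
 */
static u32 example_balanced_product_specific(void)
{
	return 0x80000000 | ((1 << 8 / 8) - 1); /* == 0x80000001 */
}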
4590 
4591 /**
4592  * _base_static_config_pages - static start of day config pages
4593  * @ioc: per adapter object
4594  */
4595 static void
4596 _base_static_config_pages(struct MPT3SAS_ADAPTER *ioc)
4597 {
4598 Mpi2ConfigReply_t mpi_reply;
4599 u32 iounit_pg1_flags;
4600
4601 ioc->nvme_abort_timeout = 30;
4602 mpt3sas_config_get_manufacturing_pg0(ioc, &mpi_reply, &ioc->manu_pg0);
4603 if (ioc->ir_firmware)
4604 mpt3sas_config_get_manufacturing_pg10(ioc, &mpi_reply,
4605 &ioc->manu_pg10);
4606 
4607 /*
4608  * Ensure correct T10 PI operation if vendor left EEDPTagMode
4609  * flag unset in NVDATA.
4610  */
4611 mpt3sas_config_get_manufacturing_pg11(ioc, &mpi_reply, &ioc->manu_pg11);
4612 if (!ioc->is_gen35_ioc && ioc->manu_pg11.EEDPTagMode == 0) {
4613 pr_err("%s: overriding NVDATA EEDPTagMode setting\n",
4614 ioc->name);
4615 ioc->manu_pg11.EEDPTagMode &= ~0x3;
4616 ioc->manu_pg11.EEDPTagMode |= 0x1;
4617 mpt3sas_config_set_manufacturing_pg11(ioc, &mpi_reply,
4618 &ioc->manu_pg11);
4619 }
4620 if (ioc->manu_pg11.AddlFlags2 & NVME_TASK_MNGT_CUSTOM_MASK)
4621 ioc->tm_custom_handling = 1;
4622 else {
4623 ioc->tm_custom_handling = 0;
4624 if (ioc->manu_pg11.NVMeAbortTO < NVME_TASK_ABORT_MIN_TIMEOUT)
4625 ioc->nvme_abort_timeout = NVME_TASK_ABORT_MIN_TIMEOUT;
4626 else if (ioc->manu_pg11.NVMeAbortTO >
4627 NVME_TASK_ABORT_MAX_TIMEOUT)
4628 ioc->nvme_abort_timeout = NVME_TASK_ABORT_MAX_TIMEOUT;
4629 else
4630 ioc->nvme_abort_timeout = ioc->manu_pg11.NVMeAbortTO;
4631 }
4632
4633 mpt3sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
4634 mpt3sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
4635 mpt3sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
4636 mpt3sas_config_get_iounit_pg0(ioc, &mpi_reply, &ioc->iounit_pg0);
4637 mpt3sas_config_get_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
4638 mpt3sas_config_get_iounit_pg8(ioc, &mpi_reply, &ioc->iounit_pg8);
4639 _base_display_ioc_capabilities(ioc);
4640
4641 /*
4642  * Enable task_set_full handling in iounit_pg1 when the
4643  * facts capabilities indicate that it is supported.
4644  */
4645 iounit_pg1_flags = le32_to_cpu(ioc->iounit_pg1.Flags);
4646 if ((ioc->facts.IOCCapabilities &
4647 MPI2_IOCFACTS_CAPABILITY_TASK_SET_FULL_HANDLING))
4648 iounit_pg1_flags &=
4649 ~MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
4650 else
4651 iounit_pg1_flags |=
4652 MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
4653 ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags);
4654 mpt3sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
4655
4656 if (ioc->iounit_pg8.NumSensors)
4657 ioc->temp_sensors_count = ioc->iounit_pg8.NumSensors;
4658 if (ioc->is_aero_ioc)
4659 _base_update_ioc_page1_inlinewith_perf_mode(ioc);
4660 }
4661
4662 /**
4663  * mpt3sas_free_enclosure_list - release memory
4664  * @ioc: per adapter object
4665  *
4666  * Free memory allocated during enclosure add.
4667  */
4668 void
4669 mpt3sas_free_enclosure_list(struct MPT3SAS_ADAPTER *ioc)
4670 {
4671 struct _enclosure_node *enclosure_dev, *enclosure_dev_next;
4672
4673 /* Free enclosure list */
4674 list_for_each_entry_safe(enclosure_dev,
4675 enclosure_dev_next, &ioc->enclosure_list, list) {
4676 list_del(&enclosure_dev->list);
4677 kfree(enclosure_dev);
4678 }
4679 }
4680
4681 /**
4682  * _base_release_memory_pools - release memory
4683  * @ioc: per adapter object
4684  *
4685  * Free memory allocated from _base_allocate_memory_pools.
4686  */
4687 static void
4688 _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
4689 {
4690 int i = 0;
4691 int j = 0;
4692 struct chain_tracker *ct;
4693 struct reply_post_struct *rps;
4694
4695 dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
4696
4697 if (ioc->request) {
4698 dma_free_coherent(&ioc->pdev->dev, ioc->request_dma_sz,
4699 ioc->request, ioc->request_dma);
4700 dexitprintk(ioc,
4701 ioc_info(ioc, "request_pool(0x%p): free\n",
4702 ioc->request));
4703 ioc->request = NULL;
4704 }
4705
4706 if (ioc->sense) {
4707 dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
4708 dma_pool_destroy(ioc->sense_dma_pool);
4709 dexitprintk(ioc,
4710 ioc_info(ioc, "sense_pool(0x%p): free\n",
4711 ioc->sense));
4712 ioc->sense = NULL;
4713 }
4714
4715 if (ioc->reply) {
4716 dma_pool_free(ioc->reply_dma_pool, ioc->reply, ioc->reply_dma);
4717 dma_pool_destroy(ioc->reply_dma_pool);
4718 dexitprintk(ioc,
4719 ioc_info(ioc, "reply_pool(0x%p): free\n",
4720 ioc->reply));
4721 ioc->reply = NULL;
4722 }
4723
4724 if (ioc->reply_free) {
4725 dma_pool_free(ioc->reply_free_dma_pool, ioc->reply_free,
4726 ioc->reply_free_dma);
4727 dma_pool_destroy(ioc->reply_free_dma_pool);
4728 dexitprintk(ioc,
4729 ioc_info(ioc, "reply_free_pool(0x%p): free\n",
4730 ioc->reply_free));
4731 ioc->reply_free = NULL;
4732 }
4733
4734 if (ioc->reply_post) {
4735 do {
4736 rps = &ioc->reply_post[i];
4737 if (rps->reply_post_free) {
4738 dma_pool_free(
4739 ioc->reply_post_free_dma_pool,
4740 rps->reply_post_free,
4741 rps->reply_post_free_dma);
4742 dexitprintk(ioc,
4743 ioc_info(ioc, "reply_post_free_pool(0x%p): free\n",
4744 rps->reply_post_free));
4745 rps->reply_post_free = NULL;
4746 }
4747 } while (ioc->rdpq_array_enable &&
4748 (++i < ioc->reply_queue_count));
4749 if (ioc->reply_post_free_array &&
4750 ioc->rdpq_array_enable) {
4751 dma_pool_free(ioc->reply_post_free_array_dma_pool,
4752 ioc->reply_post_free_array,
4753 ioc->reply_post_free_array_dma);
4754 ioc->reply_post_free_array = NULL;
4755 }
4756 dma_pool_destroy(ioc->reply_post_free_array_dma_pool);
4757 dma_pool_destroy(ioc->reply_post_free_dma_pool);
4758 kfree(ioc->reply_post);
4759 }
4760
4761 if (ioc->pcie_sgl_dma_pool) {
4762 for (i = 0; i < ioc->scsiio_depth; i++) {
4763 dma_pool_free(ioc->pcie_sgl_dma_pool,
4764 ioc->pcie_sg_lookup[i].pcie_sgl,
4765 ioc->pcie_sg_lookup[i].pcie_sgl_dma);
4766 }
4767 dma_pool_destroy(ioc->pcie_sgl_dma_pool);
4769 }
4770
4771 if (ioc->config_page) {
4772 dexitprintk(ioc,
4773 ioc_info(ioc, "config_page(0x%p): free\n",
4774 ioc->config_page));
4775 dma_free_coherent(&ioc->pdev->dev, ioc->config_page_sz,
4776 ioc->config_page, ioc->config_page_dma);
4777 }
4778
4779 kfree(ioc->hpr_lookup);
4780 kfree(ioc->internal_lookup);
4781 if (ioc->chain_lookup) {
4782 for (i = 0; i < ioc->scsiio_depth; i++) {
4783 for (j = ioc->chains_per_prp_buffer;
4784 j < ioc->chains_needed_per_io; j++) {
4785 ct = &ioc->chain_lookup[i].chains_per_smid[j];
4786 if (ct && ct->chain_buffer)
4787 dma_pool_free(ioc->chain_dma_pool,
4788 ct->chain_buffer,
4789 ct->chain_buffer_dma);
4790 }
4791 kfree(ioc->chain_lookup[i].chains_per_smid);
4792 }
4793 dma_pool_destroy(ioc->chain_dma_pool);
4794 kfree(ioc->chain_lookup);
4795 ioc->chain_lookup = NULL;
4796 }
4797 }
4798
4799
4800 /**
4801  * is_MSB_are_same - checks whether the start and end addresses of a
4802  *	pool have the same upper 32 bits in their memory address.
4803  * @reply_pool_start_address: Base address of a reply queue set
4804  * @pool_sz: Size of a single Reply Descriptor Post Queues pool
4805  *
4806  * Return: 1 if the start and end of the pool have the same upper 32
4807  * bits in their base memory address, else 0.
4808  */
4809 static int
4810 is_MSB_are_same(long reply_pool_start_address, u32 pool_sz)
4811 {
4812 long reply_pool_end_address;
4813
4814 reply_pool_end_address = reply_pool_start_address + pool_sz;
4815
4816 if (upper_32_bits(reply_pool_start_address) ==
4817 upper_32_bits(reply_pool_end_address))
4818 return 1;
4819 else
4820 return 0;
4821 }
4822
4823 /**
4824  * _base_allocate_memory_pools - allocate start of day memory pools
4825  * @ioc: per adapter object
4826  *
4827  * Return: 0 success, anything else error.
4828  */
4829 static int
4830 _base_allocate_memory_pools(struct MPT3SAS_ADAPTER *ioc)
4831 {
4832 struct mpt3sas_facts *facts;
4833 u16 max_sge_elements;
4834 u16 chains_needed_per_io;
4835 u32 sz, total_sz, reply_post_free_sz, reply_post_free_array_sz;
4836 u32 retry_sz;
4837 u16 max_request_credit, nvme_blocks_needed;
4838 unsigned short sg_tablesize;
4839 u16 sge_size;
4840 int i, j;
4841 struct chain_tracker *ct;
4842
4843 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
4844
4845
4846 retry_sz = 0;
4847 facts = &ioc->facts;
4848
4849 /* command line tunables for max sgl entries */
4850 if (max_sgl_entries != -1)
4851 sg_tablesize = max_sgl_entries;
4852 else {
4853 if (ioc->hba_mpi_version_belonged == MPI2_VERSION)
4854 sg_tablesize = MPT2SAS_SG_DEPTH;
4855 else
4856 sg_tablesize = MPT3SAS_SG_DEPTH;
4857 }
4858
4859 /* limit sgl entries in KDUMP (reset_devices) mode to conserve memory */
4860 if (reset_devices)
4861 sg_tablesize = min_t(unsigned short, sg_tablesize,
4862 MPT_KDUMP_MIN_PHYS_SEGMENTS);
4863
4864 if (ioc->is_mcpu_endpoint)
4865 ioc->shost->sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
4866 else {
4867 if (sg_tablesize < MPT_MIN_PHYS_SEGMENTS)
4868 sg_tablesize = MPT_MIN_PHYS_SEGMENTS;
4869 else if (sg_tablesize > MPT_MAX_PHYS_SEGMENTS) {
4870 sg_tablesize = min_t(unsigned short, sg_tablesize,
4871 SG_MAX_SEGMENTS);
4872 ioc_warn(ioc, "sg_tablesize(%u) is bigger than kernel defined SG_CHUNK_SIZE(%u)\n",
4873 sg_tablesize, MPT_MAX_PHYS_SEGMENTS);
4874 }
4875 ioc->shost->sg_tablesize = sg_tablesize;
4876 }
4877
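/* internal commands: reserve HighPriorityCredit + 5 frames, capped
 * at a quarter of the IOC's RequestCredit */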
4878 ioc->internal_depth = min_t(int, facts->HighPriorityCredit + 5,
4879 facts->RequestCredit / 4);
4880 if (ioc->internal_depth < INTERNAL_CMDS_COUNT) {
4881 if (facts->RequestCredit <= (INTERNAL_CMDS_COUNT +
4882 INTERNAL_SCSIIO_CMDS_COUNT)) {
4883 ioc_err(ioc, "IOC doesn't have enough Request Credits, it has just %d number of credits\n",
4884 facts->RequestCredit);
4885 return -ENOMEM;
4886 }
4887 ioc->internal_depth = 10;
4888 }
4889
4890 ioc->hi_priority_depth = ioc->internal_depth - 5;
4891
4892 if (max_queue_depth != -1 && max_queue_depth != 0) {
4893 max_request_credit = min_t(u16, max_queue_depth +
4894 ioc->internal_depth, facts->RequestCredit);
4895 if (max_request_credit > MAX_HBA_QUEUE_DEPTH)
4896 max_request_credit = MAX_HBA_QUEUE_DEPTH;
4897 } else if (reset_devices)
4898 max_request_credit = min_t(u16, facts->RequestCredit,
4899 (MPT3SAS_KDUMP_SCSI_IO_DEPTH + ioc->internal_depth));
4900 else
4901 max_request_credit = min_t(u16, facts->RequestCredit,
4902 MAX_HBA_QUEUE_DEPTH);
4903
4904 /* Firmware maintains an additional facts->HighPriorityCredit number of
4905  * credits for HiPriority Request messages, so hba queue depth will be
4906  * the sum of max_request_credit and the high priority queue depth.
4907  */
4908 ioc->hba_queue_depth = max_request_credit + ioc->hi_priority_depth;
4909
4910 /* request frame size */
4911 ioc->request_sz = facts->IOCRequestFrameSize * 4;
4912
4913 /* reply frame size */
4914 ioc->reply_sz = facts->ReplyFrameSize * 4;
4915
4916 /* chain segment size */
4917 if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
4918 if (facts->IOCMaxChainSegmentSize)
4919 ioc->chain_segment_sz =
4920 facts->IOCMaxChainSegmentSize *
4921 MAX_CHAIN_ELEMT_SZ;
4922 else
4923 /* default chain segment size when firmware reports none */
4924 ioc->chain_segment_sz = DEFAULT_NUM_FWCHAIN_ELEMTS *
4925 MAX_CHAIN_ELEMT_SZ;
4926 } else
4927 ioc->chain_segment_sz = ioc->request_sz;
4928
4929 /* calculate the max scatter element size */
4930 sge_size = max_t(u16, ioc->sge_size, ioc->sge_size_ieee);
4931
4932 retry_allocation:
4933 total_sz = 0;
4934 /* calculate number of sg elements left over in the 1st frame */
4935 max_sge_elements = ioc->request_sz - ((sizeof(Mpi2SCSIIORequest_t) -
4936 sizeof(Mpi2SGEIOUnion_t)) + sge_size);
4937 ioc->max_sges_in_main_message = max_sge_elements/sge_size;
4938
4939 /* now do the same for a chain buffer */
4940 max_sge_elements = ioc->chain_segment_sz - sge_size;
4941 ioc->max_sges_in_chain_message = max_sge_elements/sge_size;
4942
4943 /* number of chain buffers needed per I/O, beyond the SGEs that
4944  * fit in the main message; capped below at MaxChainDepth
4945  */
4946 chains_needed_per_io = ((ioc->shost->sg_tablesize -
4947 ioc->max_sges_in_main_message)/ioc->max_sges_in_chain_message)
4948 + 1;
4949 if (chains_needed_per_io > facts->MaxChainDepth) {
4950 chains_needed_per_io = facts->MaxChainDepth;
4951 ioc->shost->sg_tablesize = min_t(u16,
4952 ioc->max_sges_in_main_message + (ioc->max_sges_in_chain_message
4953 * chains_needed_per_io), ioc->shost->sg_tablesize);
4954 }
4955 ioc->chains_needed_per_io = chains_needed_per_io;
4956
4957 /* reply free queue sizing - taking into account for 64 FW events */
4958 ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
4959
4960 /* mCPU manage single counters for simplicity */
4961 if (ioc->is_mcpu_endpoint)
4962 ioc->reply_post_queue_depth = ioc->reply_free_queue_depth;
4963 else {
4964 /* calculate reply descriptor post queue depth */
4965 ioc->reply_post_queue_depth = ioc->hba_queue_depth +
4966 ioc->reply_free_queue_depth + 1;
4967 /* Round up to 16 byte boundary */
4968 if (ioc->reply_post_queue_depth % 16)
4969 ioc->reply_post_queue_depth += 16 -
4970 (ioc->reply_post_queue_depth % 16);
4971 }
4972
4973 if (ioc->reply_post_queue_depth >
4974 facts->MaxReplyDescriptorPostQueueDepth) {
4975 ioc->reply_post_queue_depth =
4976 facts->MaxReplyDescriptorPostQueueDepth -
4977 (facts->MaxReplyDescriptorPostQueueDepth % 16);
4978 ioc->hba_queue_depth =
4979 ((ioc->reply_post_queue_depth - 64) / 2) - 1;
4980 ioc->reply_free_queue_depth = ioc->hba_queue_depth + 64;
4981 }
4982
4983 dinitprintk(ioc,
4984 ioc_info(ioc, "scatter gather: sge_in_main_msg(%d), sge_per_chain(%d), sge_per_io(%d), chains_per_io(%d)\n",
4985 ioc->max_sges_in_main_message,
4986 ioc->max_sges_in_chain_message,
4987 ioc->shost->sg_tablesize,
4988 ioc->chains_needed_per_io));
4989
4990 /* reply post queue, 16 byte align */
4991 reply_post_free_sz = ioc->reply_post_queue_depth *
4992 sizeof(Mpi2DefaultReplyDescriptor_t);
4993
4994 sz = reply_post_free_sz;
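/* unless RDPQ mode is enabled, one contiguous allocation backs the
 * reply post queues of every reply queue */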
4995 if (_base_is_controller_msix_enabled(ioc) && !ioc->rdpq_array_enable)
4996 sz *= ioc->reply_queue_count;
4997
4998 ioc->reply_post = kcalloc((ioc->rdpq_array_enable) ?
4999 (ioc->reply_queue_count):1,
5000 sizeof(struct reply_post_struct), GFP_KERNEL);
5001
5002 if (!ioc->reply_post) {
5003 ioc_err(ioc, "reply_post_free pool: kcalloc failed\n");
5004 goto out;
5005 }
5006 ioc->reply_post_free_dma_pool = dma_pool_create("reply_post_free pool",
5007 &ioc->pdev->dev, sz, 16, 0);
5008 if (!ioc->reply_post_free_dma_pool) {
5009 ioc_err(ioc, "reply_post_free pool: dma_pool_create failed\n");
5010 goto out;
5011 }
5012 i = 0;
5013 do {
5014 ioc->reply_post[i].reply_post_free =
5015 dma_pool_zalloc(ioc->reply_post_free_dma_pool,
5016 GFP_KERNEL,
5017 &ioc->reply_post[i].reply_post_free_dma);
5018 if (!ioc->reply_post[i].reply_post_free) {
5019 ioc_err(ioc, "reply_post_free pool: dma_pool_alloc failed\n");
5020 goto out;
5021 }
5022 dinitprintk(ioc,
5023 ioc_info(ioc, "reply post free pool (0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
5024 ioc->reply_post[i].reply_post_free,
5025 ioc->reply_post_queue_depth,
5026 8, sz / 1024));
5027 dinitprintk(ioc,
5028 ioc_info(ioc, "reply_post_free_dma = (0x%llx)\n",
5029 (u64)ioc->reply_post[i].reply_post_free_dma));
5030 total_sz += sz;
5031 } while (ioc->rdpq_array_enable && (++i < ioc->reply_queue_count));
5032
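/* with the pools that must sit below 4 GB allocated, widen the
 * coherent DMA mask again via _base_change_consistent_dma_mask() */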
5033 if (ioc->dma_mask > 32) {
5034 if (_base_change_consistent_dma_mask(ioc, ioc->pdev) != 0) {
5035 ioc_warn(ioc, "no suitable consistent DMA mask for %s\n",
5036 pci_name(ioc->pdev));
5037 goto out;
5038 }
5039 }
5040
5041 ioc->scsiio_depth = ioc->hba_queue_depth -
5042 ioc->hi_priority_depth - ioc->internal_depth;
5043
5044 /* set the scsi host can_queue depth
5045  * with some internal commands that could be outstanding
5046  */
5047 ioc->shost->can_queue = ioc->scsiio_depth - INTERNAL_SCSIIO_CMDS_COUNT;
5048 dinitprintk(ioc,
5049 ioc_info(ioc, "scsi host: can_queue depth (%d)\n",
5050 ioc->shost->can_queue));
5051
5052 /* contiguous pool for request and chains, 16 byte align, one extra
5053  * frame for smid=0
5054  */
5056 ioc->chain_depth = ioc->chains_needed_per_io * ioc->scsiio_depth;
5057 sz = ((ioc->scsiio_depth + 1) * ioc->request_sz);
5058
5059 /* hi-priority queue */
5060 sz += (ioc->hi_priority_depth * ioc->request_sz);
5061
5062 /* internal queue */
5063 sz += (ioc->internal_depth * ioc->request_sz);
5064
5065 ioc->request_dma_sz = sz;
5066 ioc->request = dma_alloc_coherent(&ioc->pdev->dev, sz,
5067 &ioc->request_dma, GFP_KERNEL);
5068 if (!ioc->request) {
5069 ioc_err(ioc, "request pool: dma_alloc_coherent failed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kB)\n",
5070 ioc->hba_queue_depth, ioc->chains_needed_per_io,
5071 ioc->request_sz, sz / 1024);
5072 if (ioc->scsiio_depth < MPT3SAS_SAS_QUEUE_DEPTH)
5073 goto out;
5074 retry_sz = 64;
5075 ioc->hba_queue_depth -= retry_sz;
5076 _base_release_memory_pools(ioc);
5077 goto retry_allocation;
5078 }
5079 memset(ioc->request, 0, sz);
5080
5081 if (retry_sz)
5082 ioc_err(ioc, "request pool: dma_alloc_coherent succeed: hba_depth(%d), chains_per_io(%d), frame_sz(%d), total(%d kb)\n",
5083 ioc->hba_queue_depth, ioc->chains_needed_per_io,
5084 ioc->request_sz, sz / 1024);
5085
5086 /* hi-priority queue */
5087 ioc->hi_priority = ioc->request + ((ioc->scsiio_depth + 1) *
5088 ioc->request_sz);
5089 ioc->hi_priority_dma = ioc->request_dma + ((ioc->scsiio_depth + 1) *
5090 ioc->request_sz);
5091
5092 /* internal queue */
5093 ioc->internal = ioc->hi_priority + (ioc->hi_priority_depth *
5094 ioc->request_sz);
5095 ioc->internal_dma = ioc->hi_priority_dma + (ioc->hi_priority_depth *
5096 ioc->request_sz);
5097
5098 dinitprintk(ioc,
5099 ioc_info(ioc, "request pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
5100 ioc->request, ioc->hba_queue_depth,
5101 ioc->request_sz,
5102 (ioc->hba_queue_depth * ioc->request_sz) / 1024));
5103
5104 dinitprintk(ioc,
5105 ioc_info(ioc, "request pool: dma(0x%llx)\n",
5106 (unsigned long long)ioc->request_dma));
5107 total_sz += sz;
5108
5109 dinitprintk(ioc,
5110 ioc_info(ioc, "scsiio(0x%p): depth(%d)\n",
5111 ioc->request, ioc->scsiio_depth));
5112
5113 ioc->chain_depth = min_t(u32, ioc->chain_depth, MAX_CHAIN_DEPTH);
5114 sz = ioc->scsiio_depth * sizeof(struct chain_lookup);
5115 ioc->chain_lookup = kzalloc(sz, GFP_KERNEL);
5116 if (!ioc->chain_lookup) {
5117 ioc_err(ioc, "chain_lookup: __get_free_pages failed\n");
5118 goto out;
5119 }
5120
5121 sz = ioc->chains_needed_per_io * sizeof(struct chain_tracker);
5122 for (i = 0; i < ioc->scsiio_depth; i++) {
5123 ioc->chain_lookup[i].chains_per_smid = kzalloc(sz, GFP_KERNEL);
5124 if (!ioc->chain_lookup[i].chains_per_smid) {
5125 ioc_err(ioc, "chain_lookup: kzalloc failed\n");
5126 goto out;
5127 }
5128 }
5129
5130 /* initialize hi-priority queue smid's */
5131 ioc->hpr_lookup = kcalloc(ioc->hi_priority_depth,
5132 sizeof(struct request_tracker), GFP_KERNEL);
5133 if (!ioc->hpr_lookup) {
5134 ioc_err(ioc, "hpr_lookup: kcalloc failed\n");
5135 goto out;
5136 }
5137 ioc->hi_priority_smid = ioc->scsiio_depth + 1;
5138 dinitprintk(ioc,
5139 ioc_info(ioc, "hi_priority(0x%p): depth(%d), start smid(%d)\n",
5140 ioc->hi_priority,
5141 ioc->hi_priority_depth, ioc->hi_priority_smid));
5142
5143 /* initialize internal queue smid's */
5144 ioc->internal_lookup = kcalloc(ioc->internal_depth,
5145 sizeof(struct request_tracker), GFP_KERNEL);
5146 if (!ioc->internal_lookup) {
5147 ioc_err(ioc, "internal_lookup: kcalloc failed\n");
5148 goto out;
5149 }
5150 ioc->internal_smid = ioc->hi_priority_smid + ioc->hi_priority_depth;
5151 dinitprintk(ioc,
5152 ioc_info(ioc, "internal(0x%p): depth(%d), start smid(%d)\n",
5153 ioc->internal,
5154 ioc->internal_depth, ioc->internal_smid));
5155
5156 /*
5157  * The number of NVMe page sized blocks needed is:
5158  *     (((sg_tablesize * 8) - 1) / (page_size - 8)) + 1
5159  * ((sg_tablesize * 8) - 1) is the max PRPs minus the first PRP entry
5160  * that is placed in the main message frame.  8 is the size of each PRP
5161  * entry or PRP list pointer entry.  8 is subtracted from page_size
5162  * because of the PRP list pointer entry at the end of a page, so this
5163  * is not counted as a PRP entry.  The 1 added page is a round up.
5164  *
5165  * To avoid allocation failures, only each set of NVMe blocks will be
5166  * contiguous, so a new set is allocated for each possible I/O.
5167  */
5168 ioc->chains_per_prp_buffer = 0;
5169 if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_NVME_DEVICES) {
5170 nvme_blocks_needed =
5171 (ioc->shost->sg_tablesize * NVME_PRP_SIZE) - 1;
5172 nvme_blocks_needed /= (ioc->page_size - NVME_PRP_SIZE);
5173 nvme_blocks_needed++;
5174
5175 sz = sizeof(struct pcie_sg_list) * ioc->scsiio_depth;
5176 ioc->pcie_sg_lookup = kzalloc(sz, GFP_KERNEL);
5177 if (!ioc->pcie_sg_lookup) {
5178 ioc_info(ioc, "PCIe SGL lookup: kzalloc failed\n");
5179 goto out;
5180 }
5181 sz = nvme_blocks_needed * ioc->page_size;
5182 ioc->pcie_sgl_dma_pool =
5183 dma_pool_create("PCIe SGL pool", &ioc->pdev->dev, sz, 16, 0);
5184 if (!ioc->pcie_sgl_dma_pool) {
5185 ioc_info(ioc, "PCIe SGL pool: dma_pool_create failed\n");
5186 goto out;
5187 }
5188
5189 ioc->chains_per_prp_buffer = sz/ioc->chain_segment_sz;
5190 ioc->chains_per_prp_buffer = min(ioc->chains_per_prp_buffer,
5191 ioc->chains_needed_per_io);
5192
5193 for (i = 0; i < ioc->scsiio_depth; i++) {
5194 ioc->pcie_sg_lookup[i].pcie_sgl = dma_pool_alloc(
5195 ioc->pcie_sgl_dma_pool, GFP_KERNEL,
5196 &ioc->pcie_sg_lookup[i].pcie_sgl_dma);
5197 if (!ioc->pcie_sg_lookup[i].pcie_sgl) {
5198 ioc_info(ioc, "PCIe SGL pool: dma_pool_alloc failed\n");
5199 goto out;
5200 }
5201 for (j = 0; j < ioc->chains_per_prp_buffer; j++) {
5202 ct = &ioc->chain_lookup[i].chains_per_smid[j];
5203 ct->chain_buffer =
5204 ioc->pcie_sg_lookup[i].pcie_sgl +
5205 (j * ioc->chain_segment_sz);
5206 ct->chain_buffer_dma =
5207 ioc->pcie_sg_lookup[i].pcie_sgl_dma +
5208 (j * ioc->chain_segment_sz);
5209 }
5210 }
5211
5212 dinitprintk(ioc,
5213 ioc_info(ioc, "PCIe sgl pool depth(%d), element_size(%d), pool_size(%d kB)\n",
5214 ioc->scsiio_depth, sz,
5215 (sz * ioc->scsiio_depth) / 1024));
5216 dinitprintk(ioc,
5217 ioc_info(ioc, "Number of chains can fit in a PRP page(%d)\n",
5218 ioc->chains_per_prp_buffer));
5219 total_sz += sz * ioc->scsiio_depth;
5220 }
5221
5222 ioc->chain_dma_pool = dma_pool_create("chain pool", &ioc->pdev->dev,
5223 ioc->chain_segment_sz, 16, 0);
5224 if (!ioc->chain_dma_pool) {
5225 ioc_err(ioc, "chain_dma_pool: dma_pool_create failed\n");
5226 goto out;
5227 }
5228 for (i = 0; i < ioc->scsiio_depth; i++) {
5229 for (j = ioc->chains_per_prp_buffer;
5230 j < ioc->chains_needed_per_io; j++) {
5231 ct = &ioc->chain_lookup[i].chains_per_smid[j];
5232 ct->chain_buffer = dma_pool_alloc(
5233 ioc->chain_dma_pool, GFP_KERNEL,
5234 &ct->chain_buffer_dma);
5235 if (!ct->chain_buffer) {
5236 ioc_err(ioc, "chain_lookup: pci_pool_alloc failed\n");
5237 goto out;
5238 }
5239 }
5240 total_sz += ioc->chain_segment_sz;
5241 }
5242
5243 dinitprintk(ioc,
5244 ioc_info(ioc, "chain pool depth(%d), frame_size(%d), pool_size(%d kB)\n",
5245 ioc->chain_depth, ioc->chain_segment_sz,
5246 (ioc->chain_depth * ioc->chain_segment_sz) / 1024));
5247
5248 /* sense buffers, 4 byte align */
5249 sz = ioc->scsiio_depth * SCSI_SENSE_BUFFERSIZE;
5250 ioc->sense_dma_pool = dma_pool_create("sense pool", &ioc->pdev->dev, sz,
5251 4, 0);
5252 if (!ioc->sense_dma_pool) {
5253 ioc_err(ioc, "sense pool: dma_pool_create failed\n");
5254 goto out;
5255 }
5256 ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, GFP_KERNEL,
5257 &ioc->sense_dma);
5258 if (!ioc->sense) {
5259 ioc_err(ioc, "sense pool: dma_pool_alloc failed\n");
5260 goto out;
5261 }
5262
5263 /* The sense buffer must reside within a single 4 GB region: the
5264  * start and end DMA addresses must share the same upper 32 bits,
5265  * which is what is_MSB_are_same() checks.
5266  * On failure, the pool is recreated with an alignment of
5267  * roundup_pow_of_two(sz) and the old allocation is freed; that
5268  * alignment guarantees the next allocation, if successful, cannot
5269  * straddle a 4 GB boundary.
5270  */
5271 if (!is_MSB_are_same((long)ioc->sense, sz)) {
5272 /* Release sense pool & reallocate */
5273 dma_pool_free(ioc->sense_dma_pool, ioc->sense, ioc->sense_dma);
5274 dma_pool_destroy(ioc->sense_dma_pool);
5275 ioc->sense = NULL;
5276
5277 ioc->sense_dma_pool =
5278 dma_pool_create("sense pool", &ioc->pdev->dev, sz,
5279 roundup_pow_of_two(sz), 0);
5280 if (!ioc->sense_dma_pool) {
5281 ioc_err(ioc, "sense pool: pci_pool_create failed\n");
5282 goto out;
5283 }
5284 ioc->sense = dma_pool_alloc(ioc->sense_dma_pool, GFP_KERNEL,
5285 &ioc->sense_dma);
5286 if (!ioc->sense) {
5287 ioc_err(ioc, "sense pool: pci_pool_alloc failed\n");
5288 goto out;
5289 }
5290 }
5291 dinitprintk(ioc,
5292 ioc_info(ioc, "sense pool(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
5293 ioc->sense, ioc->scsiio_depth,
5294 SCSI_SENSE_BUFFERSIZE, sz / 1024));
5295 dinitprintk(ioc,
5296 ioc_info(ioc, "sense_dma(0x%llx)\n",
5297 (unsigned long long)ioc->sense_dma));
5298 total_sz += sz;
5299
5300 /* reply pool, 4 byte align */
5301 sz = ioc->reply_free_queue_depth * ioc->reply_sz;
5302 ioc->reply_dma_pool = dma_pool_create("reply pool", &ioc->pdev->dev, sz,
5303 4, 0);
5304 if (!ioc->reply_dma_pool) {
5305 ioc_err(ioc, "reply pool: dma_pool_create failed\n");
5306 goto out;
5307 }
5308 ioc->reply = dma_pool_alloc(ioc->reply_dma_pool, GFP_KERNEL,
5309 &ioc->reply_dma);
5310 if (!ioc->reply) {
5311 ioc_err(ioc, "reply pool: dma_pool_alloc failed\n");
5312 goto out;
5313 }
5314 ioc->reply_dma_min_address = (u32)(ioc->reply_dma);
5315 ioc->reply_dma_max_address = (u32)(ioc->reply_dma) + sz;
5316 dinitprintk(ioc,
5317 ioc_info(ioc, "reply pool(0x%p): depth(%d), frame_size(%d), pool_size(%d kB)\n",
5318 ioc->reply, ioc->reply_free_queue_depth,
5319 ioc->reply_sz, sz / 1024));
5320 dinitprintk(ioc,
5321 ioc_info(ioc, "reply_dma(0x%llx)\n",
5322 (unsigned long long)ioc->reply_dma));
5323 total_sz += sz;
5324
5325 /* reply free queue, 16 byte align */
5326 sz = ioc->reply_free_queue_depth * 4;
5327 ioc->reply_free_dma_pool = dma_pool_create("reply_free pool",
5328 &ioc->pdev->dev, sz, 16, 0);
5329 if (!ioc->reply_free_dma_pool) {
5330 ioc_err(ioc, "reply_free pool: dma_pool_create failed\n");
5331 goto out;
5332 }
5333 ioc->reply_free = dma_pool_zalloc(ioc->reply_free_dma_pool, GFP_KERNEL,
5334 &ioc->reply_free_dma);
5335 if (!ioc->reply_free) {
5336 ioc_err(ioc, "reply_free pool: dma_pool_alloc failed\n");
5337 goto out;
5338 }
5339 dinitprintk(ioc,
5340 ioc_info(ioc, "reply_free pool(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
5341 ioc->reply_free, ioc->reply_free_queue_depth,
5342 4, sz / 1024));
5343 dinitprintk(ioc,
5344 ioc_info(ioc, "reply_free_dma (0x%llx)\n",
5345 (unsigned long long)ioc->reply_free_dma));
5346 total_sz += sz;
5347
5348 if (ioc->rdpq_array_enable) {
5349 reply_post_free_array_sz = ioc->reply_queue_count *
5350 sizeof(Mpi2IOCInitRDPQArrayEntry);
5351 ioc->reply_post_free_array_dma_pool =
5352 dma_pool_create("reply_post_free_array pool",
5353 &ioc->pdev->dev, reply_post_free_array_sz, 16, 0);
5354 if (!ioc->reply_post_free_array_dma_pool) {
5355 dinitprintk(ioc,
5356 ioc_info(ioc, "reply_post_free_array pool: dma_pool_create failed\n"));
5357 goto out;
5358 }
5359 ioc->reply_post_free_array =
5360 dma_pool_alloc(ioc->reply_post_free_array_dma_pool,
5361 GFP_KERNEL, &ioc->reply_post_free_array_dma);
5362 if (!ioc->reply_post_free_array) {
5363 dinitprintk(ioc,
5364 ioc_info(ioc, "reply_post_free_array pool: dma_pool_alloc failed\n"));
5365 goto out;
5366 }
5367 }
5368 ioc->config_page_sz = 512;
5369 ioc->config_page = dma_alloc_coherent(&ioc->pdev->dev,
5370 ioc->config_page_sz, &ioc->config_page_dma, GFP_KERNEL);
5371 if (!ioc->config_page) {
5372 ioc_err(ioc, "config page: dma_pool_alloc failed\n");
5373 goto out;
5374 }
5375 dinitprintk(ioc,
5376 ioc_info(ioc, "config page(0x%p): size(%d)\n",
5377 ioc->config_page, ioc->config_page_sz));
5378 dinitprintk(ioc,
5379 ioc_info(ioc, "config_page_dma(0x%llx)\n",
5380 (unsigned long long)ioc->config_page_dma));
5381 total_sz += ioc->config_page_sz;
5382
5383 ioc_info(ioc, "Allocated physical memory: size(%d kB)\n",
5384 total_sz / 1024);
5385 ioc_info(ioc, "Current Controller Queue Depth(%d),Max Controller Queue Depth(%d)\n",
5386 ioc->shost->can_queue, facts->RequestCredit);
5387 ioc_info(ioc, "Scatter Gather Elements per IO(%d)\n",
5388 ioc->shost->sg_tablesize);
5389 return 0;
5390
5391 out:
5392 return -ENOMEM;
5393 }
5394
5395 /**
5396  * mpt3sas_base_get_iocstate - Get the current state of a MPT adapter.
5397  * @ioc: Pointer to MPT_ADAPTER structure
5398  * @cooked: Request raw or cooked IOC state
5399  *
5400  * Return: all IOC Doorbell register bits if cooked==0, else just the
5401  * Doorbell bits in MPI2_IOC_STATE_MASK.
5402  */
5403 u32
5404 mpt3sas_base_get_iocstate(struct MPT3SAS_ADAPTER *ioc, int cooked)
5405 {
5406 u32 s, sc;
5407
5408 s = ioc->base_readl(&ioc->chip->Doorbell);
5409 sc = s & MPI2_IOC_STATE_MASK;
5410 return cooked ? sc : s;
5411 }
5412
5413 /**
5414  * _base_wait_on_iocstate - waiting on a particular ioc state
5415  * @ioc: per adapter object
5416  * @ioc_state: controller state { READY, OPERATIONAL, or RESET }
5417  * @timeout: timeout in seconds
5418  *
5419  * Return: 0 for success; otherwise the last observed (cooked) ioc state.
5420  */
5421 static int
5422 _base_wait_on_iocstate(struct MPT3SAS_ADAPTER *ioc, u32 ioc_state, int timeout)
5423 {
5424 u32 count, cntdn;
5425 u32 current_state;
5426
5427 count = 0;
5428 cntdn = 1000 * timeout;
5429 do {
5430 current_state = mpt3sas_base_get_iocstate(ioc, 1);
5431 if (current_state == ioc_state)
5432 return 0;
5433 if (count && current_state == MPI2_IOC_STATE_FAULT)
5434 break;
5435
5436 usleep_range(1000, 1500);
5437 count++;
5438 } while (--cntdn);
5439
5440 return current_state;
5441 }
5442
5443 /**
5444  * _base_wait_for_doorbell_int - waiting for controller interrupt(generated by
5445  * a write to the doorbell)
5446  * @ioc: per adapter object
5447  * @timeout: timeout in seconds
5448  *
5449  * Return: 0 for success, non-zero for failure.
5450  *
5451  * Notes: MPI2_HIS_IOC2SYS_DB_STATUS - set to one when IOC writes to doorbell.
5452  */
5453 static int
5454 _base_wait_for_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
5455 {
5456 u32 cntdn, count;
5457 u32 int_status;
5458
5459 count = 0;
5460 cntdn = 1000 * timeout;
5461 do {
5462 int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
5463 if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
5464 dhsprintk(ioc,
5465 ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
5466 __func__, count, timeout));
5467 return 0;
5468 }
5469
5470 usleep_range(1000, 1500);
5471 count++;
5472 } while (--cntdn);
5473
5474 ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
5475 __func__, count, int_status);
5476 return -EFAULT;
5477 }
5478
5479 static int
5480 _base_spin_on_doorbell_int(struct MPT3SAS_ADAPTER *ioc, int timeout)
5481 {
5482 u32 cntdn, count;
5483 u32 int_status;
5484
5485 count = 0;
5486 cntdn = 2000 * timeout;
5487 do {
5488 int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
5489 if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
5490 dhsprintk(ioc,
5491 ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
5492 __func__, count, timeout));
5493 return 0;
5494 }
5495
5496 udelay(500);
5497 count++;
5498 } while (--cntdn);
5499
5500 ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
5501 __func__, count, int_status);
5502 return -EFAULT;
5503
5504 }
5505
5506 /**
5507  * _base_wait_for_doorbell_ack - waiting for controller to read the doorbell.
5508  * @ioc: per adapter object
5509  * @timeout: timeout in seconds
5510  *
5511  * Return: 0 for success, non-zero for failure.
5512  *
5513  * Notes: MPI2_HIS_SYS2IOC_DB_STATUS - set to one when host writes to
5514  * doorbell.
5515  */
5516 static int
5517 _base_wait_for_doorbell_ack(struct MPT3SAS_ADAPTER *ioc, int timeout)
5518 {
5519 u32 cntdn, count;
5520 u32 int_status;
5521 u32 doorbell;
5522
5523 count = 0;
5524 cntdn = 1000 * timeout;
5525 do {
5526 int_status = ioc->base_readl(&ioc->chip->HostInterruptStatus);
5527 if (!(int_status & MPI2_HIS_SYS2IOC_DB_STATUS)) {
5528 dhsprintk(ioc,
5529 ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
5530 __func__, count, timeout));
5531 return 0;
5532 } else if (int_status & MPI2_HIS_IOC2SYS_DB_STATUS) {
5533 doorbell = ioc->base_readl(&ioc->chip->Doorbell);
5534 if ((doorbell & MPI2_IOC_STATE_MASK) ==
5535 MPI2_IOC_STATE_FAULT) {
5536 mpt3sas_base_fault_info(ioc, doorbell);
5537 return -EFAULT;
5538 }
5539 } else if (int_status == 0xFFFFFFFF)
5540 goto out;
5541
5542 usleep_range(1000, 1500);
5543 count++;
5544 } while (--cntdn);
5545
5546 out:
5547 ioc_err(ioc, "%s: failed due to timeout count(%d), int_status(%x)!\n",
5548 __func__, count, int_status);
5549 return -EFAULT;
5550 }
5551
5552 /**
5553  * _base_wait_for_doorbell_not_used - waiting for doorbell to not be in use
5554  * @ioc: per adapter object
5555  * @timeout: timeout in seconds
5556  *
5557  * Return: 0 for success, non-zero for failure.
5558  */
5559 static int
5560 _base_wait_for_doorbell_not_used(struct MPT3SAS_ADAPTER *ioc, int timeout)
5561 {
5562 u32 cntdn, count;
5563 u32 doorbell_reg;
5564
5565 count = 0;
5566 cntdn = 1000 * timeout;
5567 do {
5568 doorbell_reg = ioc->base_readl(&ioc->chip->Doorbell);
5569 if (!(doorbell_reg & MPI2_DOORBELL_USED)) {
5570 dhsprintk(ioc,
5571 ioc_info(ioc, "%s: successful count(%d), timeout(%d)\n",
5572 __func__, count, timeout));
5573 return 0;
5574 }
5575
5576 usleep_range(1000, 1500);
5577 count++;
5578 } while (--cntdn);
5579
5580 ioc_err(ioc, "%s: failed due to timeout count(%d), doorbell_reg(%x)!\n",
5581 __func__, count, doorbell_reg);
5582 return -EFAULT;
5583 }
5584
5585
5586 /**
5587  * _base_send_ioc_reset - send doorbell reset
5588  * @ioc: per adapter object
5589  * @reset_type: currently only supports: MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET
5590  * @timeout: timeout in seconds
5591  * Return: 0 for success, non-zero for failure.
5592  */
5593 static int
5594 _base_send_ioc_reset(struct MPT3SAS_ADAPTER *ioc, u8 reset_type, int timeout)
5595 {
5596 u32 ioc_state;
5597 int r = 0;
5598
5599 if (reset_type != MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET) {
5600 ioc_err(ioc, "%s: unknown reset_type\n", __func__);
5601 return -EFAULT;
5602 }
5603
5604 if (!(ioc->facts.IOCCapabilities &
5605 MPI2_IOCFACTS_CAPABILITY_EVENT_REPLAY))
5606 return -EFAULT;
5607
5608 ioc_info(ioc, "sending message unit reset !!\n");
5609
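/* request the reset by writing the function code into the doorbell */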
5610 writel(reset_type << MPI2_DOORBELL_FUNCTION_SHIFT,
5611 &ioc->chip->Doorbell);
5612 if ((_base_wait_for_doorbell_ack(ioc, 15))) {
5613 r = -EFAULT;
5614 goto out;
5615 }
5616 ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
5617 if (ioc_state) {
5618 ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
5619 __func__, ioc_state);
5620 r = -EFAULT;
5621 goto out;
5622 }
5623 out:
5624 ioc_info(ioc, "message unit reset: %s\n",
5625 r == 0 ? "SUCCESS" : "FAILED");
5626 return r;
5627 }
5628
5629 /**
5630  * mpt3sas_wait_for_ioc - IOC's operational state is checked here.
5631  * @ioc: per adapter object
5632  * @timeout: timeout in seconds
5633  *
5634  * Waits up to timeout seconds for the IOC to become operational,
5635  * sleeping one second between polls of the doorbell state.
5636  *
5637  * Return: 0 if the IOC is present and operational; otherwise -EFAULT.
5638  */
5639 int
5640 mpt3sas_wait_for_ioc(struct MPT3SAS_ADAPTER *ioc, int timeout)
5641 {
5642 int wait_state_count = 0;
5643 u32 ioc_state;
5644
5645 do {
5646 ioc_state = mpt3sas_base_get_iocstate(ioc, 1);
5647 if (ioc_state == MPI2_IOC_STATE_OPERATIONAL)
5648 break;
5649 ssleep(1);
5650 ioc_info(ioc, "%s: waiting for operational state(count=%d)\n",
5651 __func__, ++wait_state_count);
5652 } while (--timeout);
5653 if (!timeout) {
5654 ioc_err(ioc, "%s: failed due to ioc not operational\n", __func__);
5655 return -EFAULT;
5656 }
5657 if (wait_state_count)
5658 ioc_info(ioc, "ioc is operational\n");
5659 return 0;
5660 }
5661
5662 /**
5663  * _base_handshake_req_reply_wait - send request thru doorbell interface
5664  * @ioc: per adapter object
5665  * @request_bytes: request length
5666  * @request: pointer having request payload
5667  * @reply_bytes: reply length
5668  * @reply: pointer to reply payload
5669  * @timeout: timeout in seconds
5670  *
5671  * Return: 0 for success, non-zero for failure.
5672  */
5673 static int
5674 _base_handshake_req_reply_wait(struct MPT3SAS_ADAPTER *ioc, int request_bytes,
5675 u32 *request, int reply_bytes, u16 *reply, int timeout)
5676 {
5677 MPI2DefaultReply_t *default_reply = (MPI2DefaultReply_t *)reply;
5678 int i;
5679 u8 failed;
5680 __le32 *mfp;
5681
5682 /* make sure doorbell is not in use */
5683 if ((ioc->base_readl(&ioc->chip->Doorbell) & MPI2_DOORBELL_USED)) {
5684 ioc_err(ioc, "doorbell is in use (line=%d)\n", __LINE__);
5685 return -EFAULT;
5686 }
5687
5688 /* clear pending doorbell interrupts from previous state changes */
5689 if (ioc->base_readl(&ioc->chip->HostInterruptStatus) &
5690 MPI2_HIS_IOC2SYS_DB_STATUS)
5691 writel(0, &ioc->chip->HostInterruptStatus);
5692
5693 /* send message to ioc */
5694 writel(((MPI2_FUNCTION_HANDSHAKE<<MPI2_DOORBELL_FUNCTION_SHIFT) |
5695 ((request_bytes/4)<<MPI2_DOORBELL_ADD_DWORDS_SHIFT)),
5696 &ioc->chip->Doorbell);
5697
5698 if ((_base_spin_on_doorbell_int(ioc, 5))) {
5699 ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
5700 __LINE__);
5701 return -EFAULT;
5702 }
5703 writel(0, &ioc->chip->HostInterruptStatus);
5704
5705 if ((_base_wait_for_doorbell_ack(ioc, 5))) {
5706 ioc_err(ioc, "doorbell handshake ack failed (line=%d)\n",
5707 __LINE__);
5708 return -EFAULT;
5709 }
5710
5711 /* send message 32-bits at a time */
5712 for (i = 0, failed = 0; i < request_bytes/4 && !failed; i++) {
5713 writel(cpu_to_le32(request[i]), &ioc->chip->Doorbell);
5714 if ((_base_wait_for_doorbell_ack(ioc, 5)))
5715 failed = 1;
5716 }
5717
5718 if (failed) {
5719 ioc_err(ioc, "doorbell handshake sending request failed (line=%d)\n",
5720 __LINE__);
5721 return -EFAULT;
5722 }
5723
5724 /* now wait for the reply */
5725 if ((_base_wait_for_doorbell_int(ioc, timeout))) {
5726 ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
5727 __LINE__);
5728 return -EFAULT;
5729 }
5730
5731 /* read the first two 16-bits, it gives the total length of the reply */
5732 reply[0] = le16_to_cpu(ioc->base_readl(&ioc->chip->Doorbell)
5733 & MPI2_DOORBELL_DATA_MASK);
5734 writel(0, &ioc->chip->HostInterruptStatus);
5735 if ((_base_wait_for_doorbell_int(ioc, 5))) {
5736 ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
5737 __LINE__);
5738 return -EFAULT;
5739 }
5740 reply[1] = le16_to_cpu(ioc->base_readl(&ioc->chip->Doorbell)
5741 & MPI2_DOORBELL_DATA_MASK);
5742 writel(0, &ioc->chip->HostInterruptStatus);
5743
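/* MsgLength counts 32-bit dwords; the doorbell hands back 16 bits
 * per read, hence the *2 */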
5744 for (i = 2; i < default_reply->MsgLength * 2; i++) {
5745 if ((_base_wait_for_doorbell_int(ioc, 5))) {
5746 ioc_err(ioc, "doorbell handshake int failed (line=%d)\n",
5747 __LINE__);
5748 return -EFAULT;
5749 }
5750 if (i >= reply_bytes/2)
5751 ioc->base_readl(&ioc->chip->Doorbell);
5752 else
5753 reply[i] = le16_to_cpu(
5754 ioc->base_readl(&ioc->chip->Doorbell)
5755 & MPI2_DOORBELL_DATA_MASK);
5756 writel(0, &ioc->chip->HostInterruptStatus);
5757 }
5758
5759 _base_wait_for_doorbell_int(ioc, 5);
5760 if (_base_wait_for_doorbell_not_used(ioc, 5) != 0) {
5761 dhsprintk(ioc,
5762 ioc_info(ioc, "doorbell is in use (line=%d)\n",
5763 __LINE__));
5764 }
5765 writel(0, &ioc->chip->HostInterruptStatus);
5766
5767 if (ioc->logging_level & MPT_DEBUG_INIT) {
5768 mfp = (__le32 *)reply;
5769 pr_info("\toffset:data\n");
5770 for (i = 0; i < reply_bytes/4; i++)
5771 pr_info("\t[0x%02x]:%08x\n", i*4,
5772 le32_to_cpu(mfp[i]));
5773 }
5774 return 0;
5775 }
5776
5777 /**
5778  * mpt3sas_base_sas_iounit_control - send sas iounit control to FW
5779  * @ioc: per adapter object
5780  * @mpi_reply: the reply payload from FW
5781  * @mpi_request: the request payload sent to FW
5782  *
5783  * The SAS IO Unit Control Request message allows the host to perform
5784  * low-level operations, such as resets on the PHYs of the IO Unit.
5785  * It also allows the host to obtain the IOC assigned device handles
5786  * for a device if it has other identifying information about the
5787  * device, and to remove IOC resources associated with the device.
5788  *
5789  * Return: 0 for success, non-zero for failure.
5790  */
5791 int
5792 mpt3sas_base_sas_iounit_control(struct MPT3SAS_ADAPTER *ioc,
5793 Mpi2SasIoUnitControlReply_t *mpi_reply,
5794 Mpi2SasIoUnitControlRequest_t *mpi_request)
5795 {
5796 u16 smid;
5797 u8 issue_reset = 0;
5798 int rc;
5799 void *request;
5800
5801 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
5802
5803 mutex_lock(&ioc->base_cmds.mutex);
5804
5805 if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
5806 ioc_err(ioc, "%s: base_cmd in use\n", __func__);
5807 rc = -EAGAIN;
5808 goto out;
5809 }
5810
5811 rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
5812 if (rc)
5813 goto out;
5814
5815 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
5816 if (!smid) {
5817 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
5818 rc = -EAGAIN;
5819 goto out;
5820 }
5821
5822 rc = 0;
5823 ioc->base_cmds.status = MPT3_CMD_PENDING;
5824 request = mpt3sas_base_get_msg_frame(ioc, smid);
5825 ioc->base_cmds.smid = smid;
5826 memcpy(request, mpi_request, sizeof(Mpi2SasIoUnitControlRequest_t));
5827 if (mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
5828 mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET)
5829 ioc->ioc_link_reset_in_progress = 1;
5830 init_completion(&ioc->base_cmds.done);
5831 ioc->put_smid_default(ioc, smid);
5832 wait_for_completion_timeout(&ioc->base_cmds.done,
5833 msecs_to_jiffies(10000));
5834 if ((mpi_request->Operation == MPI2_SAS_OP_PHY_HARD_RESET ||
5835 mpi_request->Operation == MPI2_SAS_OP_PHY_LINK_RESET) &&
5836 ioc->ioc_link_reset_in_progress)
5837 ioc->ioc_link_reset_in_progress = 0;
5838 if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
5839 issue_reset =
5840 mpt3sas_base_check_cmd_timeout(ioc,
5841 ioc->base_cmds.status, mpi_request,
5842 sizeof(Mpi2SasIoUnitControlRequest_t)/4);
5843 goto issue_host_reset;
5844 }
5845 if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
5846 memcpy(mpi_reply, ioc->base_cmds.reply,
5847 sizeof(Mpi2SasIoUnitControlReply_t));
5848 else
5849 memset(mpi_reply, 0, sizeof(Mpi2SasIoUnitControlReply_t));
5850 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
5851 goto out;
5852
5853 issue_host_reset:
5854 if (issue_reset)
5855 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
5856 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
5857 rc = -EFAULT;
5858 out:
5859 mutex_unlock(&ioc->base_cmds.mutex);
5860 return rc;
5861 }
5862
5863
5864 /**
5865  * mpt3sas_base_scsi_enclosure_processor - sending request to sep device
5866  * @ioc: per adapter object
5867  * @mpi_reply: the reply payload from FW
5868  * @mpi_request: the request payload sent to FW
5869  *
5870  * The SCSI Enclosure Processor request message causes the IOC to
5871  * communicate with SES devices to control LED status signals.
5872  *
5873  * Return: 0 for success, non-zero for failure.
5874  */
5874 int
5875 mpt3sas_base_scsi_enclosure_processor(struct MPT3SAS_ADAPTER *ioc,
5876 Mpi2SepReply_t *mpi_reply, Mpi2SepRequest_t *mpi_request)
5877 {
5878 u16 smid;
5879 u8 issue_reset = 0;
5880 int rc;
5881 void *request;
5882
5883 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
5884
5885 mutex_lock(&ioc->base_cmds.mutex);
5886
5887 if (ioc->base_cmds.status != MPT3_CMD_NOT_USED) {
5888 ioc_err(ioc, "%s: base_cmd in use\n", __func__);
5889 rc = -EAGAIN;
5890 goto out;
5891 }
5892
5893 rc = mpt3sas_wait_for_ioc(ioc, IOC_OPERATIONAL_WAIT_COUNT);
5894 if (rc)
5895 goto out;
5896
5897 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
5898 if (!smid) {
5899 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
5900 rc = -EAGAIN;
5901 goto out;
5902 }
5903
5904 rc = 0;
5905 ioc->base_cmds.status = MPT3_CMD_PENDING;
5906 request = mpt3sas_base_get_msg_frame(ioc, smid);
5907 ioc->base_cmds.smid = smid;
5908 memset(request, 0, ioc->request_sz);
5909 memcpy(request, mpi_request, sizeof(Mpi2SepReply_t));
5910 init_completion(&ioc->base_cmds.done);
5911 ioc->put_smid_default(ioc, smid);
5912 wait_for_completion_timeout(&ioc->base_cmds.done,
5913 msecs_to_jiffies(10000));
5914 if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
5915 issue_reset =
5916 mpt3sas_base_check_cmd_timeout(ioc,
5917 ioc->base_cmds.status, mpi_request,
5918 sizeof(Mpi2SepRequest_t)/4);
5919 goto issue_host_reset;
5920 }
5921 if (ioc->base_cmds.status & MPT3_CMD_REPLY_VALID)
5922 memcpy(mpi_reply, ioc->base_cmds.reply,
5923 sizeof(Mpi2SepReply_t));
5924 else
5925 memset(mpi_reply, 0, sizeof(Mpi2SepReply_t));
5926 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
5927 goto out;
5928
5929 issue_host_reset:
5930 if (issue_reset)
5931 mpt3sas_base_hard_reset_handler(ioc, FORCE_BIG_HAMMER);
5932 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
5933 rc = -EFAULT;
5934 out:
5935 mutex_unlock(&ioc->base_cmds.mutex);
5936 return rc;
5937 }
5938
5939
5940 /**
5941  * _base_get_port_facts - obtain port facts reply and save in ioc
5942  * @ioc: per adapter object
5943  * @port: port number
5944  * Return: 0 for success, non-zero for failure.
5945  */
5946 static int
5947 _base_get_port_facts(struct MPT3SAS_ADAPTER *ioc, int port)
5948 {
5949 Mpi2PortFactsRequest_t mpi_request;
5950 Mpi2PortFactsReply_t mpi_reply;
5951 struct mpt3sas_port_facts *pfacts;
5952 int mpi_reply_sz, mpi_request_sz, r;
5953
5954 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
5955
5956 mpi_reply_sz = sizeof(Mpi2PortFactsReply_t);
5957 mpi_request_sz = sizeof(Mpi2PortFactsRequest_t);
5958 memset(&mpi_request, 0, mpi_request_sz);
5959 mpi_request.Function = MPI2_FUNCTION_PORT_FACTS;
5960 mpi_request.PortNumber = port;
5961 r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
5962 (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);
5963
5964 if (r != 0) {
5965 ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
5966 return r;
5967 }
5968
5969 pfacts = &ioc->pfacts[port];
5970 memset(pfacts, 0, sizeof(struct mpt3sas_port_facts));
5971 pfacts->PortNumber = mpi_reply.PortNumber;
5972 pfacts->VP_ID = mpi_reply.VP_ID;
5973 pfacts->VF_ID = mpi_reply.VF_ID;
5974 pfacts->MaxPostedCmdBuffers =
5975 le16_to_cpu(mpi_reply.MaxPostedCmdBuffers);
5976
5977 return 0;
5978 }
5979
5980
5981 /**
5982  * _base_wait_for_iocstate - Wait until the card is in READY or OPERATIONAL
5983  * @ioc: per adapter object
5984  * @timeout: timeout in seconds
5985  * Return: 0 for success, non-zero for failure.
5986  */
5987 static int
5988 _base_wait_for_iocstate(struct MPT3SAS_ADAPTER *ioc, int timeout)
5989 {
5990 u32 ioc_state;
5991 int rc;
5992
5993 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
5994
5995 if (ioc->pci_error_recovery) {
5996 dfailprintk(ioc,
5997 ioc_info(ioc, "%s: host in pci error recovery\n",
5998 __func__));
5999 return -EFAULT;
6000 }
6001
6002 ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
6003 dhsprintk(ioc,
6004 ioc_info(ioc, "%s: ioc_state(0x%08x)\n",
6005 __func__, ioc_state));
6006
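/* nothing to do when already READY or OPERATIONAL */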
6007 if (((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY) ||
6008 (ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
6009 return 0;
6010
6011 if (ioc_state & MPI2_DOORBELL_USED) {
6012 dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
6013 goto issue_diag_reset;
6014 }
6015
6016 if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
6017 mpt3sas_base_fault_info(ioc, ioc_state &
6018 MPI2_DOORBELL_DATA_MASK);
6019 goto issue_diag_reset;
6020 }
6021
6022 ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, timeout);
6023 if (ioc_state) {
6024 dfailprintk(ioc,
6025 ioc_info(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
6026 __func__, ioc_state));
6027 return -EFAULT;
6028 }
6029 return 0; /* READY reached; the diag reset below is only for the goto paths */
6030 issue_diag_reset:
6031 rc = _base_diag_reset(ioc);
6032 return rc;
6033 }
6034
6035
6036 /**
6037  * _base_get_ioc_facts - obtain ioc facts reply and save in ioc
6038  * @ioc: per adapter object
6039  * Return: 0 for success, non-zero for failure.
6040  */
6041 static int
6042 _base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc)
6043 {
6044 Mpi2IOCFactsRequest_t mpi_request;
6045 Mpi2IOCFactsReply_t mpi_reply;
6046 struct mpt3sas_facts *facts;
6047 int mpi_reply_sz, mpi_request_sz, r;
6048
6049 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
6050
6051 r = _base_wait_for_iocstate(ioc, 10);
6052 if (r) {
6053 dfailprintk(ioc,
6054 ioc_info(ioc, "%s: failed getting to correct state\n",
6055 __func__));
6056 return r;
6057 }
6058 mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t);
6059 mpi_request_sz = sizeof(Mpi2IOCFactsRequest_t);
6060 memset(&mpi_request, 0, mpi_request_sz);
6061 mpi_request.Function = MPI2_FUNCTION_IOC_FACTS;
6062 r = _base_handshake_req_reply_wait(ioc, mpi_request_sz,
6063 (u32 *)&mpi_request, mpi_reply_sz, (u16 *)&mpi_reply, 5);
6064
6065 if (r != 0) {
6066 ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
6067 return r;
6068 }
6069
6070 facts = &ioc->facts;
6071 memset(facts, 0, sizeof(struct mpt3sas_facts));
6072 facts->MsgVersion = le16_to_cpu(mpi_reply.MsgVersion);
6073 facts->HeaderVersion = le16_to_cpu(mpi_reply.HeaderVersion);
6074 facts->VP_ID = mpi_reply.VP_ID;
6075 facts->VF_ID = mpi_reply.VF_ID;
6076 facts->IOCExceptions = le16_to_cpu(mpi_reply.IOCExceptions);
6077 facts->MaxChainDepth = mpi_reply.MaxChainDepth;
6078 facts->WhoInit = mpi_reply.WhoInit;
6079 facts->NumberOfPorts = mpi_reply.NumberOfPorts;
6080 facts->MaxMSIxVectors = mpi_reply.MaxMSIxVectors;
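/* too few MSI-X vectors: fall back from combined reply queue mode */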
6081 if (ioc->msix_enable && (facts->MaxMSIxVectors <=
6082 MAX_COMBINED_MSIX_VECTORS(ioc->is_gen35_ioc)))
6083 ioc->combined_reply_queue = 0;
6084 facts->RequestCredit = le16_to_cpu(mpi_reply.RequestCredit);
6085 facts->MaxReplyDescriptorPostQueueDepth =
6086 le16_to_cpu(mpi_reply.MaxReplyDescriptorPostQueueDepth);
6087 facts->ProductID = le16_to_cpu(mpi_reply.ProductID);
6088 facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities);
6089 if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
6090 ioc->ir_firmware = 1;
6091 if ((facts->IOCCapabilities &
6092 MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE) && (!reset_devices))
6093 ioc->rdpq_array_capable = 1;
6094 if ((facts->IOCCapabilities & MPI26_IOCFACTS_CAPABILITY_ATOMIC_REQ)
6095 && ioc->is_aero_ioc)
6096 ioc->atomic_desc_capable = 1;
6097 facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word);
6098 facts->IOCRequestFrameSize =
6099 le16_to_cpu(mpi_reply.IOCRequestFrameSize);
6100 if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
6101 facts->IOCMaxChainSegmentSize =
6102 le16_to_cpu(mpi_reply.IOCMaxChainSegmentSize);
6103 }
6104 facts->MaxInitiators = le16_to_cpu(mpi_reply.MaxInitiators);
6105 facts->MaxTargets = le16_to_cpu(mpi_reply.MaxTargets);
6106 ioc->shost->max_id = -1;
6107 facts->MaxSasExpanders = le16_to_cpu(mpi_reply.MaxSasExpanders);
6108 facts->MaxEnclosures = le16_to_cpu(mpi_reply.MaxEnclosures);
6109 facts->ProtocolFlags = le16_to_cpu(mpi_reply.ProtocolFlags);
6110 facts->HighPriorityCredit =
6111 le16_to_cpu(mpi_reply.HighPriorityCredit);
6112 facts->ReplyFrameSize = mpi_reply.ReplyFrameSize;
6113 facts->MaxDevHandle = le16_to_cpu(mpi_reply.MaxDevHandle);
6114 facts->CurrentHostPageSize = mpi_reply.CurrentHostPageSize;
6115
6116 /*
6117  * Get the Page Size from IOC Facts. If it's 0, default to 4k.
6118  */
6119 ioc->page_size = 1 << facts->CurrentHostPageSize;
6120 if (ioc->page_size == 1) {
6121 ioc_info(ioc, "CurrentHostPageSize is 0: Setting default host page size to 4k\n");
6122 ioc->page_size = 1 << MPT3SAS_HOST_PAGE_SIZE_4K;
6123 }
6124 dinitprintk(ioc,
6125 ioc_info(ioc, "CurrentHostPageSize(%d)\n",
6126 facts->CurrentHostPageSize));
6127
6128 dinitprintk(ioc,
6129 ioc_info(ioc, "hba queue depth(%d), max chains per io(%d)\n",
6130 facts->RequestCredit, facts->MaxChainDepth));
6131 dinitprintk(ioc,
6132 ioc_info(ioc, "request frame size(%d), reply frame size(%d)\n",
6133 facts->IOCRequestFrameSize * 4,
6134 facts->ReplyFrameSize * 4));
6135 return 0;
6136 }
6137
6138
6139 /**
6140  * _base_send_ioc_init - send ioc_init to firmware
6141  * @ioc: per adapter object
6142  * Return: 0 for success, non-zero for failure.
6143  */
6144 static int
6145 _base_send_ioc_init(struct MPT3SAS_ADAPTER *ioc)
6146 {
6147 Mpi2IOCInitRequest_t mpi_request;
6148 Mpi2IOCInitReply_t mpi_reply;
6149 int i, r = 0;
6150 ktime_t current_time;
6151 u16 ioc_status;
6152 u32 reply_post_free_array_sz = 0;
6153
6154 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
6155
6156 memset(&mpi_request, 0, sizeof(Mpi2IOCInitRequest_t));
6157 mpi_request.Function = MPI2_FUNCTION_IOC_INIT;
6158 mpi_request.WhoInit = MPI2_WHOINIT_HOST_DRIVER;
6159 mpi_request.VF_ID = 0;
6160 mpi_request.VP_ID = 0;
6161 mpi_request.MsgVersion = cpu_to_le16(ioc->hba_mpi_version_belonged);
6162 mpi_request.HeaderVersion = cpu_to_le16(MPI2_HEADER_VERSION);
6163 mpi_request.HostPageSize = MPT3SAS_HOST_PAGE_SIZE_4K;
6164
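/* report the number of reply queues when MSI-X is enabled */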
6165 if (_base_is_controller_msix_enabled(ioc))
6166 mpi_request.HostMSIxVectors = ioc->reply_queue_count;
6167 mpi_request.SystemRequestFrameSize = cpu_to_le16(ioc->request_sz/4);
6168 mpi_request.ReplyDescriptorPostQueueDepth =
6169 cpu_to_le16(ioc->reply_post_queue_depth);
6170 mpi_request.ReplyFreeQueueDepth =
6171 cpu_to_le16(ioc->reply_free_queue_depth);
6172
6173 mpi_request.SenseBufferAddressHigh =
6174 cpu_to_le32((u64)ioc->sense_dma >> 32);
6175 mpi_request.SystemReplyAddressHigh =
6176 cpu_to_le32((u64)ioc->reply_dma >> 32);
6177 mpi_request.SystemRequestFrameBaseAddress =
6178 cpu_to_le64((u64)ioc->request_dma);
6179 mpi_request.ReplyFreeQueueAddress =
6180 cpu_to_le64((u64)ioc->reply_free_dma);
6181
6182 if (ioc->rdpq_array_enable) {
6183 reply_post_free_array_sz = ioc->reply_queue_count *
6184 sizeof(Mpi2IOCInitRDPQArrayEntry);
6185 memset(ioc->reply_post_free_array, 0, reply_post_free_array_sz);
6186 for (i = 0; i < ioc->reply_queue_count; i++)
6187 ioc->reply_post_free_array[i].RDPQBaseAddress =
6188 cpu_to_le64(
6189 (u64)ioc->reply_post[i].reply_post_free_dma);
6190 mpi_request.MsgFlags = MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE;
6191 mpi_request.ReplyDescriptorPostQueueAddress =
6192 cpu_to_le64((u64)ioc->reply_post_free_array_dma);
6193 } else {
6194 mpi_request.ReplyDescriptorPostQueueAddress =
6195 cpu_to_le64((u64)ioc->reply_post[0].reply_post_free_dma);
6196 }
6197
6198
6199 /* This timestamp is the number of milliseconds
6200  * since epoch ~ midnight January 1, 1970. */
6201 current_time = ktime_get_real();
6202 mpi_request.TimeStamp = cpu_to_le64(ktime_to_ms(current_time));
6203
6204 if (ioc->logging_level & MPT_DEBUG_INIT) {
6205 __le32 *mfp;
6206 int i;
6207
6208 mfp = (__le32 *)&mpi_request;
6209 pr_info("\toffset:data\n");
6210 for (i = 0; i < sizeof(Mpi2IOCInitRequest_t)/4; i++)
6211 pr_info("\t[0x%02x]:%08x\n", i*4,
6212 le32_to_cpu(mfp[i]));
6213 }
6214
6215 r = _base_handshake_req_reply_wait(ioc,
6216 sizeof(Mpi2IOCInitRequest_t), (u32 *)&mpi_request,
6217 sizeof(Mpi2IOCInitReply_t), (u16 *)&mpi_reply, 10);
6218
6219 if (r != 0) {
6220 ioc_err(ioc, "%s: handshake failed (r=%d)\n", __func__, r);
6221 return r;
6222 }
6223
6224 ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
6225 if (ioc_status != MPI2_IOCSTATUS_SUCCESS ||
6226 mpi_reply.IOCLogInfo) {
6227 ioc_err(ioc, "%s: failed\n", __func__);
6228 r = -EIO;
6229 }
6230
6231 return r;
6232 }
6233
6234
6235 /**
6236  * mpt3sas_port_enable_done - command completion routine for port enable
6237  * @ioc: per adapter object
6238  * @smid: system request message index
6239  * @msix_index: MSIX table index supplied by the OS
6240  * @reply: reply message frame(lower 32bit addr)
6241  * Return: 1 meaning mf should be freed from _base_interrupt,
6242  * 0 means the mf is freed from this function.
6243  */
6244 u8
6245 mpt3sas_port_enable_done(struct MPT3SAS_ADAPTER *ioc, u16 smid, u8 msix_index,
6246 u32 reply)
6247 {
6248 MPI2DefaultReply_t *mpi_reply;
6249 u16 ioc_status;
6250
6251 if (ioc->port_enable_cmds.status == MPT3_CMD_NOT_USED)
6252 return 1;
6253
6254 mpi_reply = mpt3sas_base_get_reply_virt_addr(ioc, reply);
6255 if (!mpi_reply)
6256 return 1;
6257
6258 if (mpi_reply->Function != MPI2_FUNCTION_PORT_ENABLE)
6259 return 1;
6260
6261 ioc->port_enable_cmds.status &= ~MPT3_CMD_PENDING;
6262 ioc->port_enable_cmds.status |= MPT3_CMD_COMPLETE;
6263 ioc->port_enable_cmds.status |= MPT3_CMD_REPLY_VALID;
6264 memcpy(ioc->port_enable_cmds.reply, mpi_reply, mpi_reply->MsgLength*4);
6265 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
6266 if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
6267 ioc->port_enable_failed = 1;
6268
6269 if (ioc->is_driver_loading) {
6270 if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
6271 mpt3sas_port_enable_complete(ioc);
6272 return 1;
6273 } else {
6274 ioc->start_scan_failed = ioc_status;
6275 ioc->start_scan = 0;
6276 return 1;
6277 }
6278 }
6279 complete(&ioc->port_enable_cmds.done);
6280 return 1;
6281 }
6282
6283
6284 /**
6285  * _base_send_port_enable - send port_enable(discovery stuff) to firmware
6286  * @ioc: per adapter object
6287  * Return: 0 for success, non-zero for failure.
6288  */
6289 static int
6290 _base_send_port_enable(struct MPT3SAS_ADAPTER *ioc)
6291 {
6292 Mpi2PortEnableRequest_t *mpi_request;
6293 Mpi2PortEnableReply_t *mpi_reply;
6294 int r = 0;
6295 u16 smid;
6296 u16 ioc_status;
6297
6298 ioc_info(ioc, "sending port enable !!\n");
6299
6300 if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
6301 ioc_err(ioc, "%s: internal command already in use\n", __func__);
6302 return -EAGAIN;
6303 }
6304
6305 smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
6306 if (!smid) {
6307 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
6308 return -EAGAIN;
6309 }
6310
6311 ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
6312 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
6313 ioc->port_enable_cmds.smid = smid;
6314 memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
6315 mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
6316
6317 init_completion(&ioc->port_enable_cmds.done);
6318 ioc->put_smid_default(ioc, smid);
6319 wait_for_completion_timeout(&ioc->port_enable_cmds.done, 300*HZ);
6320 if (!(ioc->port_enable_cmds.status & MPT3_CMD_COMPLETE)) {
6321 ioc_err(ioc, "%s: timeout\n", __func__);
6322 _debug_dump_mf(mpi_request,
6323 sizeof(Mpi2PortEnableRequest_t)/4);
6324 if (ioc->port_enable_cmds.status & MPT3_CMD_RESET)
6325 r = -EFAULT;
6326 else
6327 r = -ETIME;
6328 goto out;
6329 }
6330
6331 mpi_reply = ioc->port_enable_cmds.reply;
6332 ioc_status = le16_to_cpu(mpi_reply->IOCStatus) & MPI2_IOCSTATUS_MASK;
6333 if (ioc_status != MPI2_IOCSTATUS_SUCCESS) {
6334 ioc_err(ioc, "%s: failed with (ioc_status=0x%08x)\n",
6335 __func__, ioc_status);
6336 r = -EFAULT;
6337 goto out;
6338 }
6339
6340 out:
6341 ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;
6342 ioc_info(ioc, "port enable: %s\n", r == 0 ? "SUCCESS" : "FAILED");
6343 return r;
6344 }
6345
6346
6347 /**
6348  * mpt3sas_port_enable - initiate firmware discovery (don't wait for reply)
6349  * @ioc: per adapter object
6350  * Return: 0 for success, non-zero for failure.
6351  */
6352 int
6353 mpt3sas_port_enable(struct MPT3SAS_ADAPTER *ioc)
6354 {
6355 Mpi2PortEnableRequest_t *mpi_request;
6356 u16 smid;
6357
6358 ioc_info(ioc, "sending port enable !!\n");
6359
6360 if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
6361 ioc_err(ioc, "%s: internal command already in use\n", __func__);
6362 return -EAGAIN;
6363 }
6364
6365 smid = mpt3sas_base_get_smid(ioc, ioc->port_enable_cb_idx);
6366 if (!smid) {
6367 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
6368 return -EAGAIN;
6369 }
6370
6371 ioc->port_enable_cmds.status = MPT3_CMD_PENDING;
6372 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
6373 ioc->port_enable_cmds.smid = smid;
6374 memset(mpi_request, 0, sizeof(Mpi2PortEnableRequest_t));
6375 mpi_request->Function = MPI2_FUNCTION_PORT_ENABLE;
6376
6377 ioc->put_smid_default(ioc, smid);
6378 return 0;
6379 }
6380
6381
6382 /**
6383  * _base_determine_wait_on_discovery - disposition
6384  * @ioc: per adapter object
6385  *
6386  * Decide whether to wait on discovery to complete. Used to either
6387  * locate boot device, or report volumes ahead of physical devices.
6388  * Return: 1 for wait, 0 for don't wait.
6389  */
6390 static int
6391 _base_determine_wait_on_discovery(struct MPT3SAS_ADAPTER *ioc)
6392 {
6393
6394 /* Always wait for discovery when IR (integrated RAID) firmware is
6395  * loaded, so volumes can be reported ahead of physical disks.
6396  */
6399 if (ioc->ir_firmware)
6400 return 1;
6401
6402 /* if no Bios, then we don't need to wait */
6403 if (!ioc->bios_pg3.BiosVersion)
6404 return 0;
6405
6406 /* Bios is present; if no boot device is specified in any of the
6407  * three Bios Page 2 boot device forms, there is nothing to wait
6408  * for, otherwise wait for discovery to complete.
6409  */
6413 if ((ioc->bios_pg2.CurrentBootDeviceForm &
6414 MPI2_BIOSPAGE2_FORM_MASK) ==
6415 MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
6416
6417 (ioc->bios_pg2.ReqBootDeviceForm &
6418 MPI2_BIOSPAGE2_FORM_MASK) ==
6419 MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED &&
6420
6421 (ioc->bios_pg2.ReqAltBootDeviceForm &
6422 MPI2_BIOSPAGE2_FORM_MASK) ==
6423 MPI2_BIOSPAGE2_FORM_NO_DEVICE_SPECIFIED)
6424 return 0;
6425
6426 return 1;
6427 }
6428
6429
6430 /**
6431  * _base_unmask_events - turn on notification for this event
6432  * @ioc: per adapter object
6433  * @event: firmware event
6434  * The mask is stored in ioc->event_masks.
6435  */
6436 static void
6437 _base_unmask_events(struct MPT3SAS_ADAPTER *ioc, u16 event)
6438 {
6439 u32 desired_event;
6440
6441 if (event >= 128)
6442 return;
6443
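/* each event_masks[] word covers 32 event codes */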
6444 desired_event = (1 << (event % 32));
6445
6446 if (event < 32)
6447 ioc->event_masks[0] &= ~desired_event;
6448 else if (event < 64)
6449 ioc->event_masks[1] &= ~desired_event;
6450 else if (event < 96)
6451 ioc->event_masks[2] &= ~desired_event;
6452 else if (event < 128)
6453 ioc->event_masks[3] &= ~desired_event;
6454 }
6455
6456
6457 /**
6458  * _base_event_notification - send event notification
6459  * @ioc: per adapter object
6460  * Return: 0 for success, non-zero for failure.
6461  */
6462 static int
6463 _base_event_notification(struct MPT3SAS_ADAPTER *ioc)
6464 {
6465 Mpi2EventNotificationRequest_t *mpi_request;
6466 u16 smid;
6467 int r = 0;
6468 int i;
6469
6470 dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));
6471
6472 if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
6473 ioc_err(ioc, "%s: internal command already in use\n", __func__);
6474 return -EAGAIN;
6475 }
6476
6477 smid = mpt3sas_base_get_smid(ioc, ioc->base_cb_idx);
6478 if (!smid) {
6479 ioc_err(ioc, "%s: failed obtaining a smid\n", __func__);
6480 return -EAGAIN;
6481 }
6482 ioc->base_cmds.status = MPT3_CMD_PENDING;
6483 mpi_request = mpt3sas_base_get_msg_frame(ioc, smid);
6484 ioc->base_cmds.smid = smid;
6485 memset(mpi_request, 0, sizeof(Mpi2EventNotificationRequest_t));
6486 mpi_request->Function = MPI2_FUNCTION_EVENT_NOTIFICATION;
6487 mpi_request->VF_ID = 0;
6488 mpi_request->VP_ID = 0;
6489 for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
6490 mpi_request->EventMasks[i] =
6491 cpu_to_le32(ioc->event_masks[i]);
6492 init_completion(&ioc->base_cmds.done);
6493 ioc->put_smid_default(ioc, smid);
6494 wait_for_completion_timeout(&ioc->base_cmds.done, 30*HZ);
6495 if (!(ioc->base_cmds.status & MPT3_CMD_COMPLETE)) {
6496 ioc_err(ioc, "%s: timeout\n", __func__);
6497 _debug_dump_mf(mpi_request,
6498 sizeof(Mpi2EventNotificationRequest_t)/4);
6499 if (ioc->base_cmds.status & MPT3_CMD_RESET)
6500 r = -EFAULT;
6501 else
6502 r = -ETIME;
6503 } else
6504 dinitprintk(ioc, ioc_info(ioc, "%s: complete\n", __func__));
6505 ioc->base_cmds.status = MPT3_CMD_NOT_USED;
6506 return r;
6507 }
6508
6509
6510 /**
6511  * mpt3sas_base_validate_event_type - validating event types
6512  * @ioc: per adapter object
6513  * @event_type: firmware event
6514  * This will turn on firmware event notification when the application
6515  * asks for that event. We don't mask events that are already enabled.
6516  */
6517 void
6518 mpt3sas_base_validate_event_type(struct MPT3SAS_ADAPTER *ioc, u32 *event_type)
6519 {
6520 int i, j;
6521 u32 event_mask, desired_event;
6522 u8 send_update_to_fw;
6523
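/* clear the mask bit for any event the caller asks for that is
 * currently masked */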
6524 for (i = 0, send_update_to_fw = 0; i <
6525 MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++) {
6526 event_mask = ~event_type[i];
6527 desired_event = 1;
6528 for (j = 0; j < 32; j++) {
6529 if (!(event_mask & desired_event) &&
6530 (ioc->event_masks[i] & desired_event)) {
6531 ioc->event_masks[i] &= ~desired_event;
6532 send_update_to_fw = 1;
6533 }
6534 desired_event = (desired_event << 1);
6535 }
6536 }
6537
6538 if (!send_update_to_fw)
6539 return;
6540
6541 mutex_lock(&ioc->base_cmds.mutex);
6542 _base_event_notification(ioc);
6543 mutex_unlock(&ioc->base_cmds.mutex);
6544 }
6545
6546
6547 /**
6548  * _base_diag_reset - the "big hammer" start of day reset
6549  * @ioc: per adapter object
6550  * Return: 0 for success, non-zero for failure.
6551  */
6552 static int
6553 _base_diag_reset(struct MPT3SAS_ADAPTER *ioc)
6554 {
6555 u32 host_diagnostic;
6556 u32 ioc_state;
6557 u32 count;
6558 u32 hcb_size;
6559
6560 ioc_info(ioc, "sending diag reset !!\n");
6561
6562 drsprintk(ioc, ioc_info(ioc, "clear interrupts\n"));
6563
6564 count = 0;
6565 do {
6566 /* Write magic sequence to WriteSequence register
6567  * Loop until in diagnostic mode
6568  */
6569 drsprintk(ioc, ioc_info(ioc, "write magic sequence\n"));
6570 writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
6571 writel(MPI2_WRSEQ_1ST_KEY_VALUE, &ioc->chip->WriteSequence);
6572 writel(MPI2_WRSEQ_2ND_KEY_VALUE, &ioc->chip->WriteSequence);
6573 writel(MPI2_WRSEQ_3RD_KEY_VALUE, &ioc->chip->WriteSequence);
6574 writel(MPI2_WRSEQ_4TH_KEY_VALUE, &ioc->chip->WriteSequence);
6575 writel(MPI2_WRSEQ_5TH_KEY_VALUE, &ioc->chip->WriteSequence);
6576 writel(MPI2_WRSEQ_6TH_KEY_VALUE, &ioc->chip->WriteSequence);
6577
6578 /* wait 100 msec */
6579 msleep(100);
6580
6581 if (count++ > 20)
6582 goto out;
6583
6584 host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);
6585 drsprintk(ioc,
6586 ioc_info(ioc, "wrote magic sequence: count(%d), host_diagnostic(0x%08x)\n",
6587 count, host_diagnostic));
6588
6589 } while ((host_diagnostic & MPI2_DIAG_DIAG_WRITE_ENABLE) == 0);
6590
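/* capture HCBSize so HCDW mode can be re-enabled after the reset */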
6591 hcb_size = ioc->base_readl(&ioc->chip->HCBSize);
6592
6593 drsprintk(ioc, ioc_info(ioc, "diag reset: issued\n"));
6594 writel(host_diagnostic | MPI2_DIAG_RESET_ADAPTER,
6595 &ioc->chip->HostDiagnostic);
6596
6597 /* This delay allows the chip PCIe hardware time to finish reset tasks */
6598 msleep(MPI2_HARD_RESET_PCIE_FIRST_READ_DELAY_MICRO_SEC/1000);
6599
6600 /* Approximately 300 second max wait */
6601 for (count = 0; count < (300000000 /
6602 MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC); count++) {
6603
6604 host_diagnostic = ioc->base_readl(&ioc->chip->HostDiagnostic);
6605
6606 if (host_diagnostic == 0xFFFFFFFF)
6607 goto out;
6608 if (!(host_diagnostic & MPI2_DIAG_RESET_ADAPTER))
6609 break;
6610 /* Wait to pass the second read delay window */
6611 msleep(MPI2_HARD_RESET_PCIE_SECOND_READ_DELAY_MICRO_SEC / 1000);
6612 }
6613
6614 if (host_diagnostic & MPI2_DIAG_HCB_MODE) {
6615
6616 drsprintk(ioc,
6617 ioc_info(ioc, "restart the adapter assuming the HCB Address points to good F/W\n"));
6618 host_diagnostic &= ~MPI2_DIAG_BOOT_DEVICE_SELECT_MASK;
6619 host_diagnostic |= MPI2_DIAG_BOOT_DEVICE_SELECT_HCDW;
6620 writel(host_diagnostic, &ioc->chip->HostDiagnostic);
6621
6622 drsprintk(ioc, ioc_info(ioc, "re-enable the HCDW\n"));
6623 writel(hcb_size | MPI2_HCB_SIZE_HCB_ENABLE,
6624 &ioc->chip->HCBSize);
6625 }
6626
6627 drsprintk(ioc, ioc_info(ioc, "restart the adapter\n"));
6628 writel(host_diagnostic & ~MPI2_DIAG_HOLD_IOC_RESET,
6629 &ioc->chip->HostDiagnostic);
6630
6631 drsprintk(ioc,
6632 ioc_info(ioc, "disable writes to the diagnostic register\n"));
6633 writel(MPI2_WRSEQ_FLUSH_KEY_VALUE, &ioc->chip->WriteSequence);
6634
6635 drsprintk(ioc, ioc_info(ioc, "Wait for FW to go to the READY state\n"));
6636 ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY, 20);
6637 if (ioc_state) {
6638 ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
6639 __func__, ioc_state);
6640 goto out;
6641 }
6642
6643 ioc_info(ioc, "diag reset: SUCCESS\n");
6644 return 0;
6645
6646 out:
6647 ioc_err(ioc, "diag reset: FAILED\n");
6648 return -EFAULT;
6649 }
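
/**
 * _base_make_ioc_ready - put controller in READY state
 * @ioc: per adapter object
 * @type: FORCE_BIG_HAMMER or SOFT_RESET
 *
 * Return: 0 for success, non-zero for failure.
 */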
static int
_base_make_ioc_ready(struct MPT3SAS_ADAPTER *ioc, enum reset_type type)
{
	u32 ioc_state;
	int rc;
	int count;

	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));

	if (ioc->pci_error_recovery)
		return 0;

	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
	dhsprintk(ioc,
		  ioc_info(ioc, "%s: ioc_state(0x%08x)\n",
			   __func__, ioc_state));

	/* if in RESET state, it should move to READY state shortly */
	count = 0;
	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_RESET) {
		while ((ioc_state & MPI2_IOC_STATE_MASK) !=
		    MPI2_IOC_STATE_READY) {
			if (count++ == 10) {
				ioc_err(ioc, "%s: failed going to ready state (ioc_state=0x%x)\n",
					__func__, ioc_state);
				return -EFAULT;
			}
			ssleep(1);
			ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
		}
	}

	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_READY)
		return 0;

	if (ioc_state & MPI2_DOORBELL_USED) {
		dhsprintk(ioc, ioc_info(ioc, "unexpected doorbell active!\n"));
		goto issue_diag_reset;
	}

	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT) {
		mpt3sas_base_fault_info(ioc, ioc_state &
		    MPI2_DOORBELL_DATA_MASK);
		goto issue_diag_reset;
	}

	if (type == FORCE_BIG_HAMMER)
		goto issue_diag_reset;

	if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_OPERATIONAL)
		if (!(_base_send_ioc_reset(ioc,
		    MPI2_FUNCTION_IOC_MESSAGE_UNIT_RESET, 15))) {
			return 0;
	}

 issue_diag_reset:
	rc = _base_diag_reset(ioc);
	return rc;
}
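
/**
 * _base_make_ioc_operational - put controller in OPERATIONAL state
 * @ioc: per adapter object
 *
 * Return: 0 for success, non-zero for failure.
 */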
static int
_base_make_ioc_operational(struct MPT3SAS_ADAPTER *ioc)
{
	int r, i, index, rc;
	unsigned long flags;
	u32 reply_address;
	u16 smid;
	struct _tr_list *delayed_tr, *delayed_tr_next;
	struct _sc_list *delayed_sc, *delayed_sc_next;
	struct _event_ack_list *delayed_event_ack, *delayed_event_ack_next;
	u8 hide_flag;
	struct adapter_reply_queue *reply_q;
	Mpi2ReplyDescriptorsUnion_t *reply_post_free_contig;

	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));

	/* clean the delayed target reset list */
	list_for_each_entry_safe(delayed_tr, delayed_tr_next,
	    &ioc->delayed_tr_list, list) {
		list_del(&delayed_tr->list);
		kfree(delayed_tr);
	}

	list_for_each_entry_safe(delayed_tr, delayed_tr_next,
	    &ioc->delayed_tr_volume_list, list) {
		list_del(&delayed_tr->list);
		kfree(delayed_tr);
	}

	list_for_each_entry_safe(delayed_sc, delayed_sc_next,
	    &ioc->delayed_sc_list, list) {
		list_del(&delayed_sc->list);
		kfree(delayed_sc);
	}

	list_for_each_entry_safe(delayed_event_ack, delayed_event_ack_next,
	    &ioc->delayed_event_ack_list, list) {
		list_del(&delayed_event_ack->list);
		kfree(delayed_event_ack);
	}

	spin_lock_irqsave(&ioc->scsi_lookup_lock, flags);

	/* hi-priority queue */
	INIT_LIST_HEAD(&ioc->hpr_free_list);
	smid = ioc->hi_priority_smid;
	for (i = 0; i < ioc->hi_priority_depth; i++, smid++) {
		ioc->hpr_lookup[i].cb_idx = 0xFF;
		ioc->hpr_lookup[i].smid = smid;
		list_add_tail(&ioc->hpr_lookup[i].tracker_list,
		    &ioc->hpr_free_list);
	}

	/* internal queue */
	INIT_LIST_HEAD(&ioc->internal_free_list);
	smid = ioc->internal_smid;
	for (i = 0; i < ioc->internal_depth; i++, smid++) {
		ioc->internal_lookup[i].cb_idx = 0xFF;
		ioc->internal_lookup[i].smid = smid;
		list_add_tail(&ioc->internal_lookup[i].tracker_list,
		    &ioc->internal_free_list);
	}

	spin_unlock_irqrestore(&ioc->scsi_lookup_lock, flags);

	/* initialize Reply Free Queue */
	for (i = 0, reply_address = (u32)ioc->reply_dma ;
	    i < ioc->reply_free_queue_depth ; i++, reply_address +=
	    ioc->reply_sz) {
		ioc->reply_free[i] = cpu_to_le32(reply_address);
		if (ioc->is_mcpu_endpoint)
			_base_clone_reply_to_sys_mem(ioc,
					reply_address, i);
	}

	/* initialize reply queues */
	if (ioc->is_driver_loading)
		_base_assign_reply_queues(ioc);

	/* initialize Reply Post Free Queue */
	index = 0;
	reply_post_free_contig = ioc->reply_post[0].reply_post_free;
	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
		/*
		 * If RDPQ is enabled, switch to the next allocation.
		 * Otherwise advance within the contiguous region.
		 */
		if (ioc->rdpq_array_enable) {
			reply_q->reply_post_free =
				ioc->reply_post[index++].reply_post_free;
		} else {
			reply_q->reply_post_free = reply_post_free_contig;
			reply_post_free_contig += ioc->reply_post_queue_depth;
		}

		reply_q->reply_post_host_index = 0;
		for (i = 0; i < ioc->reply_post_queue_depth; i++)
			reply_q->reply_post_free[i].Words =
			    cpu_to_le64(ULLONG_MAX);
		if (!_base_is_controller_msix_enabled(ioc))
			goto skip_init_reply_post_free_queue;
	}
 skip_init_reply_post_free_queue:

	r = _base_send_ioc_init(ioc);
	if (r) {
		/*
		 * No need to check IOC state for fault state & issue
		 * diag reset during host reset. This check is needed
		 * only during driver load time.
		 */
		if (!ioc->is_driver_loading)
			return r;

		rc = _base_check_for_fault_and_issue_reset(ioc);
		if (rc || (_base_send_ioc_init(ioc)))
			return r;
	}

	/* initialize reply free host index */
	ioc->reply_free_host_index = ioc->reply_free_queue_depth - 1;
	writel(ioc->reply_free_host_index, &ioc->chip->ReplyFreeHostIndex);

	/* initialize reply post host index */
	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
		if (ioc->combined_reply_queue)
			writel((reply_q->msix_index & 7)<<
			   MPI2_RPHI_MSIX_INDEX_SHIFT,
			   ioc->replyPostRegisterIndex[reply_q->msix_index/8]);
		else
			writel(reply_q->msix_index <<
				MPI2_RPHI_MSIX_INDEX_SHIFT,
				&ioc->chip->ReplyPostHostIndex);

		if (!_base_is_controller_msix_enabled(ioc))
			goto skip_init_reply_post_host_index;
	}

 skip_init_reply_post_host_index:

	_base_unmask_interrupts(ioc);

	if (ioc->hba_mpi_version_belonged != MPI2_VERSION) {
		r = _base_display_fwpkg_version(ioc);
		if (r)
			return r;
	}

	_base_static_config_pages(ioc);
	r = _base_event_notification(ioc);
	if (r)
		return r;

	if (ioc->is_driver_loading) {

		if (ioc->is_warpdrive && ioc->manu_pg10.OEMIdentifier
		    == 0x80) {
			hide_flag = (u8) (
			    le32_to_cpu(ioc->manu_pg10.OEMSpecificFlags0) &
			    MFG_PAGE10_HIDE_SSDS_MASK);
			if (hide_flag != MFG_PAGE10_HIDE_SSDS_MASK)
				ioc->mfg_pg10_hide_flag = hide_flag;
		}

		ioc->wait_for_discovery_to_complete =
		    _base_determine_wait_on_discovery(ioc);

		return r;
	}

	r = _base_send_port_enable(ioc);
	if (r)
		return r;

	return r;
}
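
/**
 * mpt3sas_base_free_resources - free controller resources
 * @ioc: per adapter object
 */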
void
mpt3sas_base_free_resources(struct MPT3SAS_ADAPTER *ioc)
{
	dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));

	/* synchronizing freeing resource with pci_access_mutex lock */
	mutex_lock(&ioc->pci_access_mutex);
	if (ioc->chip_phys && ioc->chip) {
		_base_mask_interrupts(ioc);
		ioc->shost_recovery = 1;
		_base_make_ioc_ready(ioc, SOFT_RESET);
		ioc->shost_recovery = 0;
	}

	mpt3sas_base_unmap_resources(ioc);
	mutex_unlock(&ioc->pci_access_mutex);
	return;
}
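
/**
 * mpt3sas_base_attach - attach controller instance
 * @ioc: per adapter object
 *
 * Return: 0 for success, non-zero for failure.
 */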
int
mpt3sas_base_attach(struct MPT3SAS_ADAPTER *ioc)
{
	int r, i, rc;
	int cpu_id, last_cpu_id = 0;

	dinitprintk(ioc, ioc_info(ioc, "%s\n", __func__));

	/* setup cpu_msix_table */
	ioc->cpu_count = num_online_cpus();
	for_each_online_cpu(cpu_id)
		last_cpu_id = cpu_id;
	ioc->cpu_msix_table_sz = last_cpu_id + 1;
	ioc->cpu_msix_table = kzalloc(ioc->cpu_msix_table_sz, GFP_KERNEL);
	ioc->reply_queue_count = 1;
	if (!ioc->cpu_msix_table) {
		dfailprintk(ioc,
			    ioc_info(ioc, "allocation for cpu_msix_table failed!!!\n"));
		r = -ENOMEM;
		goto out_free_resources;
	}

	if (ioc->is_warpdrive) {
		ioc->reply_post_host_index = kcalloc(ioc->cpu_msix_table_sz,
		    sizeof(resource_size_t *), GFP_KERNEL);
		if (!ioc->reply_post_host_index) {
			dfailprintk(ioc,
				    ioc_info(ioc, "allocation for reply_post_host_index failed!!!\n"));
			r = -ENOMEM;
			goto out_free_resources;
		}
	}

	ioc->smp_affinity_enable = smp_affinity_enable;

	ioc->rdpq_array_enable_assigned = 0;
	ioc->dma_mask = 0;
	if (ioc->is_aero_ioc)
		ioc->base_readl = &_base_readl_aero;
	else
		ioc->base_readl = &_base_readl;
	r = mpt3sas_base_map_resources(ioc);
	if (r)
		goto out_free_resources;

	pci_set_drvdata(ioc->pdev, ioc->shost);
	r = _base_get_ioc_facts(ioc);
	if (r) {
		rc = _base_check_for_fault_and_issue_reset(ioc);
		if (rc || (_base_get_ioc_facts(ioc)))
			goto out_free_resources;
	}

	switch (ioc->hba_mpi_version_belonged) {
	case MPI2_VERSION:
		ioc->build_sg_scmd = &_base_build_sg_scmd;
		ioc->build_sg = &_base_build_sg;
		ioc->build_zero_len_sge = &_base_build_zero_len_sge;
		ioc->get_msix_index_for_smlio = &_base_get_msix_index;
		break;
	case MPI25_VERSION:
	case MPI26_VERSION:
		/*
		 * In SAS3.0,
		 * SCSI_IO, SMP_PASSTHRU, SATA_PASSTHRU, Target Assist, and
		 * Target Status - all require the IEEE formatted scatter
		 * gather elements.
		 */
		ioc->build_sg_scmd = &_base_build_sg_scmd_ieee;
		ioc->build_sg = &_base_build_sg_ieee;
		ioc->build_nvme_prp = &_base_build_nvme_prp;
		ioc->build_zero_len_sge = &_base_build_zero_len_sge_ieee;
		ioc->sge_size_ieee = sizeof(Mpi2IeeeSgeSimple64_t);
		if (ioc->high_iops_queues)
			ioc->get_msix_index_for_smlio =
					&_base_get_high_iops_msix_index;
		else
			ioc->get_msix_index_for_smlio = &_base_get_msix_index;
		break;
	}
	if (ioc->atomic_desc_capable) {
		ioc->put_smid_default = &_base_put_smid_default_atomic;
		ioc->put_smid_scsi_io = &_base_put_smid_scsi_io_atomic;
		ioc->put_smid_fast_path =
				&_base_put_smid_fast_path_atomic;
		ioc->put_smid_hi_priority =
				&_base_put_smid_hi_priority_atomic;
	} else {
		ioc->put_smid_default = &_base_put_smid_default;
		ioc->put_smid_fast_path = &_base_put_smid_fast_path;
		ioc->put_smid_hi_priority = &_base_put_smid_hi_priority;
		if (ioc->is_mcpu_endpoint)
			ioc->put_smid_scsi_io =
				&_base_put_smid_mpi_ep_scsi_io;
		else
			ioc->put_smid_scsi_io = &_base_put_smid_scsi_io;
	}
	/*
	 * These function pointers are for the other requests that don't
	 * require IEEE scatter gather elements.
	 *
	 * For example Configuration Pages and SAS IOUNIT Control don't.
	 */
	ioc->build_sg_mpi = &_base_build_sg;
	ioc->build_zero_len_sge_mpi = &_base_build_zero_len_sge;

	r = _base_make_ioc_ready(ioc, SOFT_RESET);
	if (r)
		goto out_free_resources;

	ioc->pfacts = kcalloc(ioc->facts.NumberOfPorts,
	    sizeof(struct mpt3sas_port_facts), GFP_KERNEL);
	if (!ioc->pfacts) {
		r = -ENOMEM;
		goto out_free_resources;
	}

	for (i = 0 ; i < ioc->facts.NumberOfPorts; i++) {
		r = _base_get_port_facts(ioc, i);
		if (r) {
			rc = _base_check_for_fault_and_issue_reset(ioc);
			if (rc || (_base_get_port_facts(ioc, i)))
				goto out_free_resources;
		}
	}

	r = _base_allocate_memory_pools(ioc);
	if (r)
		goto out_free_resources;

	if (irqpoll_weight > 0)
		ioc->thresh_hold = irqpoll_weight;
	else
		ioc->thresh_hold = ioc->hba_queue_depth/4;

	_base_init_irqpolls(ioc);
	init_waitqueue_head(&ioc->reset_wq);

	/* allocate memory pd handle bitmask list */
	ioc->pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
	if (ioc->facts.MaxDevHandle % 8)
		ioc->pd_handles_sz++;
	ioc->pd_handles = kzalloc(ioc->pd_handles_sz,
	    GFP_KERNEL);
	if (!ioc->pd_handles) {
		r = -ENOMEM;
		goto out_free_resources;
	}
	ioc->blocking_handles = kzalloc(ioc->pd_handles_sz,
	    GFP_KERNEL);
	if (!ioc->blocking_handles) {
		r = -ENOMEM;
		goto out_free_resources;
	}

	/* allocate memory for pending OS device add list */
	ioc->pend_os_device_add_sz = (ioc->facts.MaxDevHandle / 8);
	if (ioc->facts.MaxDevHandle % 8)
		ioc->pend_os_device_add_sz++;
	ioc->pend_os_device_add = kzalloc(ioc->pend_os_device_add_sz,
	    GFP_KERNEL);
	if (!ioc->pend_os_device_add) {
		r = -ENOMEM;
		goto out_free_resources;
	}

	ioc->device_remove_in_progress_sz = ioc->pend_os_device_add_sz;
	ioc->device_remove_in_progress =
		kzalloc(ioc->device_remove_in_progress_sz, GFP_KERNEL);
	if (!ioc->device_remove_in_progress) {
		r = -ENOMEM;
		goto out_free_resources;
	}

	ioc->fwfault_debug = mpt3sas_fwfault_debug;

	/* base internal command bits */
	mutex_init(&ioc->base_cmds.mutex);
	ioc->base_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->base_cmds.status = MPT3_CMD_NOT_USED;

	/* port_enable command bits */
	ioc->port_enable_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->port_enable_cmds.status = MPT3_CMD_NOT_USED;

	/* transport internal command bits */
	ioc->transport_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->transport_cmds.status = MPT3_CMD_NOT_USED;
	mutex_init(&ioc->transport_cmds.mutex);

	/* scsih internal command bits */
	ioc->scsih_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->scsih_cmds.status = MPT3_CMD_NOT_USED;
	mutex_init(&ioc->scsih_cmds.mutex);

	/* task management internal command bits */
	ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->tm_cmds.status = MPT3_CMD_NOT_USED;
	mutex_init(&ioc->tm_cmds.mutex);

	/* config page internal command bits */
	ioc->config_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->config_cmds.status = MPT3_CMD_NOT_USED;
	mutex_init(&ioc->config_cmds.mutex);

	/* ctl module internal command bits */
	ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
	ioc->ctl_cmds.sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
	ioc->ctl_cmds.status = MPT3_CMD_NOT_USED;
	mutex_init(&ioc->ctl_cmds.mutex);

	if (!ioc->base_cmds.reply || !ioc->port_enable_cmds.reply ||
	    !ioc->transport_cmds.reply || !ioc->scsih_cmds.reply ||
	    !ioc->tm_cmds.reply || !ioc->config_cmds.reply ||
	    !ioc->ctl_cmds.reply || !ioc->ctl_cmds.sense) {
		r = -ENOMEM;
		goto out_free_resources;
	}

	for (i = 0; i < MPI2_EVENT_NOTIFY_EVENTMASK_WORDS; i++)
		ioc->event_masks[i] = -1;

	/* here we enable the events we care about */
	_base_unmask_events(ioc, MPI2_EVENT_SAS_DISCOVERY);
	_base_unmask_events(ioc, MPI2_EVENT_SAS_BROADCAST_PRIMITIVE);
	_base_unmask_events(ioc, MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST);
	_base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_STATUS_CHANGE);
	_base_unmask_events(ioc, MPI2_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE);
	_base_unmask_events(ioc, MPI2_EVENT_IR_CONFIGURATION_CHANGE_LIST);
	_base_unmask_events(ioc, MPI2_EVENT_IR_VOLUME);
	_base_unmask_events(ioc, MPI2_EVENT_IR_PHYSICAL_DISK);
	_base_unmask_events(ioc, MPI2_EVENT_IR_OPERATION_STATUS);
	_base_unmask_events(ioc, MPI2_EVENT_LOG_ENTRY_ADDED);
	_base_unmask_events(ioc, MPI2_EVENT_TEMP_THRESHOLD);
	_base_unmask_events(ioc, MPI2_EVENT_ACTIVE_CABLE_EXCEPTION);
	_base_unmask_events(ioc, MPI2_EVENT_SAS_DEVICE_DISCOVERY_ERROR);
	if (ioc->hba_mpi_version_belonged == MPI26_VERSION) {
		if (ioc->is_gen35_ioc) {
			_base_unmask_events(ioc,
				MPI2_EVENT_PCIE_DEVICE_STATUS_CHANGE);
			_base_unmask_events(ioc, MPI2_EVENT_PCIE_ENUMERATION);
			_base_unmask_events(ioc,
				MPI2_EVENT_PCIE_TOPOLOGY_CHANGE_LIST);
		}
	}
	r = _base_make_ioc_operational(ioc);
	if (r)
		goto out_free_resources;

	/*
	 * Copy the current copy of IOCFacts into prev_fw_facts; it will
	 * be used for comparison during an online firmware upgrade.
	 */
	memcpy(&ioc->prev_fw_facts, &ioc->facts,
	    sizeof(struct mpt3sas_facts));

	ioc->non_operational_loop = 0;
	ioc->got_task_abort_from_ioctl = 0;
	return 0;

 out_free_resources:

	ioc->remove_host = 1;

	mpt3sas_base_free_resources(ioc);
	_base_release_memory_pools(ioc);
	pci_set_drvdata(ioc->pdev, NULL);
	kfree(ioc->cpu_msix_table);
	if (ioc->is_warpdrive)
		kfree(ioc->reply_post_host_index);
	kfree(ioc->pd_handles);
	kfree(ioc->blocking_handles);
	kfree(ioc->device_remove_in_progress);
	kfree(ioc->pend_os_device_add);
	kfree(ioc->tm_cmds.reply);
	kfree(ioc->transport_cmds.reply);
	kfree(ioc->scsih_cmds.reply);
	kfree(ioc->config_cmds.reply);
	kfree(ioc->base_cmds.reply);
	kfree(ioc->port_enable_cmds.reply);
	kfree(ioc->ctl_cmds.reply);
	kfree(ioc->ctl_cmds.sense);
	kfree(ioc->pfacts);
	ioc->ctl_cmds.reply = NULL;
	ioc->base_cmds.reply = NULL;
	ioc->tm_cmds.reply = NULL;
	ioc->scsih_cmds.reply = NULL;
	ioc->transport_cmds.reply = NULL;
	ioc->config_cmds.reply = NULL;
	ioc->pfacts = NULL;
	return r;
}
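
/**
 * mpt3sas_base_detach - remove controller instance
 * @ioc: per adapter object
 */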
void
mpt3sas_base_detach(struct MPT3SAS_ADAPTER *ioc)
{
	dexitprintk(ioc, ioc_info(ioc, "%s\n", __func__));

	mpt3sas_base_stop_watchdog(ioc);
	mpt3sas_base_free_resources(ioc);
	_base_release_memory_pools(ioc);
	mpt3sas_free_enclosure_list(ioc);
	pci_set_drvdata(ioc->pdev, NULL);
	kfree(ioc->cpu_msix_table);
	if (ioc->is_warpdrive)
		kfree(ioc->reply_post_host_index);
	kfree(ioc->pd_handles);
	kfree(ioc->blocking_handles);
	kfree(ioc->device_remove_in_progress);
	kfree(ioc->pend_os_device_add);
	kfree(ioc->pfacts);
	kfree(ioc->ctl_cmds.reply);
	kfree(ioc->ctl_cmds.sense);
	kfree(ioc->base_cmds.reply);
	kfree(ioc->port_enable_cmds.reply);
	kfree(ioc->tm_cmds.reply);
	kfree(ioc->transport_cmds.reply);
	kfree(ioc->scsih_cmds.reply);
	kfree(ioc->config_cmds.reply);
}
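
/**
 * _base_pre_reset_handler - pre reset handler
 * @ioc: per adapter object
 */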
static void _base_pre_reset_handler(struct MPT3SAS_ADAPTER *ioc)
{
	mpt3sas_scsih_pre_reset_handler(ioc);
	mpt3sas_ctl_pre_reset_handler(ioc);
	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_PRE_RESET\n", __func__));
}
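
/**
 * _base_after_reset_handler - after reset handler; terminates any
 *	internal commands that were pending when the reset occurred
 * @ioc: per adapter object
 */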
static void _base_after_reset_handler(struct MPT3SAS_ADAPTER *ioc)
{
	mpt3sas_scsih_after_reset_handler(ioc);
	mpt3sas_ctl_after_reset_handler(ioc);
	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_AFTER_RESET\n", __func__));
	if (ioc->transport_cmds.status & MPT3_CMD_PENDING) {
		ioc->transport_cmds.status |= MPT3_CMD_RESET;
		mpt3sas_base_free_smid(ioc, ioc->transport_cmds.smid);
		complete(&ioc->transport_cmds.done);
	}
	if (ioc->base_cmds.status & MPT3_CMD_PENDING) {
		ioc->base_cmds.status |= MPT3_CMD_RESET;
		mpt3sas_base_free_smid(ioc, ioc->base_cmds.smid);
		complete(&ioc->base_cmds.done);
	}
	if (ioc->port_enable_cmds.status & MPT3_CMD_PENDING) {
		ioc->port_enable_failed = 1;
		ioc->port_enable_cmds.status |= MPT3_CMD_RESET;
		mpt3sas_base_free_smid(ioc, ioc->port_enable_cmds.smid);
		if (ioc->is_driver_loading) {
			ioc->start_scan_failed =
				MPI2_IOCSTATUS_INTERNAL_ERROR;
			ioc->start_scan = 0;
			ioc->port_enable_cmds.status =
				MPT3_CMD_NOT_USED;
		} else {
			complete(&ioc->port_enable_cmds.done);
		}
	}
	if (ioc->config_cmds.status & MPT3_CMD_PENDING) {
		ioc->config_cmds.status |= MPT3_CMD_RESET;
		mpt3sas_base_free_smid(ioc, ioc->config_cmds.smid);
		ioc->config_cmds.smid = USHRT_MAX;
		complete(&ioc->config_cmds.done);
	}
}
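
/**
 * _base_reset_done_handler - reset done handler
 * @ioc: per adapter object
 */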
static void _base_reset_done_handler(struct MPT3SAS_ADAPTER *ioc)
{
	mpt3sas_scsih_reset_done_handler(ioc);
	mpt3sas_ctl_reset_done_handler(ioc);
	dtmprintk(ioc, ioc_info(ioc, "%s: MPT3_IOC_DONE_RESET\n", __func__));
}
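
/**
 * mpt3sas_wait_for_commands_to_complete - wait for pending commands
 * @ioc: per adapter object
 *
 * Waits (up to 10 seconds) for all pending SCSI commands to complete
 * prior to putting the controller into reset.
 */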
void
mpt3sas_wait_for_commands_to_complete(struct MPT3SAS_ADAPTER *ioc)
{
	u32 ioc_state;

	ioc->pending_io_count = 0;

	ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
	if ((ioc_state & MPI2_IOC_STATE_MASK) != MPI2_IOC_STATE_OPERATIONAL)
		return;

	/* pending command count */
	ioc->pending_io_count = scsi_host_busy(ioc->shost);

	if (!ioc->pending_io_count)
		return;

	/* wait for pending commands to complete */
	wait_event_timeout(ioc->reset_wq, ioc->pending_io_count == 0, 10 * HZ);
}
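
/**
 * _base_check_ioc_facts_changes - Look for increase/decrease of IOCFacts
 *	attributes during online firmware upgrade and update the
 *	corresponding IOC variables accordingly.
 * @ioc: per adapter object
 *
 * Return: 0 for success, non-zero for failure.
 */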
static int
_base_check_ioc_facts_changes(struct MPT3SAS_ADAPTER *ioc)
{
	u16 pd_handles_sz;
	void *pd_handles = NULL, *blocking_handles = NULL;
	void *pend_os_device_add = NULL, *device_remove_in_progress = NULL;
	struct mpt3sas_facts *old_facts = &ioc->prev_fw_facts;

	if (ioc->facts.MaxDevHandle > old_facts->MaxDevHandle) {
		pd_handles_sz = (ioc->facts.MaxDevHandle / 8);
		if (ioc->facts.MaxDevHandle % 8)
			pd_handles_sz++;

		pd_handles = krealloc(ioc->pd_handles, pd_handles_sz,
		    GFP_KERNEL);
		if (!pd_handles) {
			ioc_info(ioc,
			    "Unable to allocate the memory for pd_handles of sz: %d\n",
			    pd_handles_sz);
			return -ENOMEM;
		}
		memset(pd_handles + ioc->pd_handles_sz, 0,
		    (pd_handles_sz - ioc->pd_handles_sz));
		ioc->pd_handles = pd_handles;

		blocking_handles = krealloc(ioc->blocking_handles,
		    pd_handles_sz, GFP_KERNEL);
		if (!blocking_handles) {
			ioc_info(ioc,
			    "Unable to allocate the memory for blocking_handles of sz: %d\n",
			    pd_handles_sz);
			return -ENOMEM;
		}
		memset(blocking_handles + ioc->pd_handles_sz, 0,
		    (pd_handles_sz - ioc->pd_handles_sz));
		ioc->blocking_handles = blocking_handles;
		ioc->pd_handles_sz = pd_handles_sz;

		pend_os_device_add = krealloc(ioc->pend_os_device_add,
		    pd_handles_sz, GFP_KERNEL);
		if (!pend_os_device_add) {
			ioc_info(ioc,
			    "Unable to allocate the memory for pend_os_device_add of sz: %d\n",
			    pd_handles_sz);
			return -ENOMEM;
		}
		memset(pend_os_device_add + ioc->pend_os_device_add_sz, 0,
		    (pd_handles_sz - ioc->pend_os_device_add_sz));
		ioc->pend_os_device_add = pend_os_device_add;
		ioc->pend_os_device_add_sz = pd_handles_sz;

		device_remove_in_progress = krealloc(
		    ioc->device_remove_in_progress, pd_handles_sz, GFP_KERNEL);
		if (!device_remove_in_progress) {
			ioc_info(ioc,
			    "Unable to allocate the memory for device_remove_in_progress of sz: %d\n",
			    pd_handles_sz);
			return -ENOMEM;
		}
		memset(device_remove_in_progress +
		    ioc->device_remove_in_progress_sz, 0,
		    (pd_handles_sz - ioc->device_remove_in_progress_sz));
		ioc->device_remove_in_progress = device_remove_in_progress;
		ioc->device_remove_in_progress_sz = pd_handles_sz;
	}

	memcpy(&ioc->prev_fw_facts, &ioc->facts, sizeof(struct mpt3sas_facts));
	return 0;
}
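
/**
 * mpt3sas_base_hard_reset_handler - reset controller
 * @ioc: per adapter object
 * @type: FORCE_BIG_HAMMER or SOFT_RESET
 *
 * Return: 0 for success, non-zero for failure.
 */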
int
mpt3sas_base_hard_reset_handler(struct MPT3SAS_ADAPTER *ioc,
	enum reset_type type)
{
	int r;
	unsigned long flags;
	u32 ioc_state;
	u8 is_fault = 0, is_trigger = 0;

	dtmprintk(ioc, ioc_info(ioc, "%s: enter\n", __func__));

	if (ioc->pci_error_recovery) {
		ioc_err(ioc, "%s: pci error recovery reset\n", __func__);
		r = 0;
		goto out_unlocked;
	}

	if (mpt3sas_fwfault_debug)
		mpt3sas_halt_firmware(ioc);

	/* wait for an active reset in progress to complete */
	mutex_lock(&ioc->reset_in_progress_mutex);

	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	ioc->shost_recovery = 1;
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);

	if ((ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
	    MPT3_DIAG_BUFFER_IS_REGISTERED) &&
	    (!(ioc->diag_buffer_status[MPI2_DIAG_BUF_TYPE_TRACE] &
	    MPT3_DIAG_BUFFER_IS_RELEASED))) {
		is_trigger = 1;
		ioc_state = mpt3sas_base_get_iocstate(ioc, 0);
		if ((ioc_state & MPI2_IOC_STATE_MASK) == MPI2_IOC_STATE_FAULT)
			is_fault = 1;
	}
	_base_pre_reset_handler(ioc);
	mpt3sas_wait_for_commands_to_complete(ioc);
	_base_mask_interrupts(ioc);
	r = _base_make_ioc_ready(ioc, type);
	if (r)
		goto out;
	_base_after_reset_handler(ioc);

	/* If this hard reset is called while port enable is active, then
	 * there is no reason to call make_ioc_operational
	 */
	if (ioc->is_driver_loading && ioc->port_enable_failed) {
		ioc->remove_host = 1;
		r = -EFAULT;
		goto out;
	}
	r = _base_get_ioc_facts(ioc);
	if (r)
		goto out;

	r = _base_check_ioc_facts_changes(ioc);
	if (r) {
		ioc_info(ioc,
		    "Some of the parameters got changed in this new firmware image and it requires system reboot\n");
		goto out;
	}
	if (ioc->rdpq_array_enable && !ioc->rdpq_array_capable)
		panic("%s: Issue occurred with flashing controller firmware. Please reboot the system and ensure that the correct firmware version is running\n",
		    ioc->name);

	r = _base_make_ioc_operational(ioc);
	if (!r)
		_base_reset_done_handler(ioc);

 out:
	dtmprintk(ioc,
		  ioc_info(ioc, "%s: %s\n",
			   __func__, r == 0 ? "SUCCESS" : "FAILED"));

	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
	ioc->shost_recovery = 0;
	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
	ioc->ioc_reset_count++;
	mutex_unlock(&ioc->reset_in_progress_mutex);

 out_unlocked:
	if ((r == 0) && is_trigger) {
		if (is_fault)
			mpt3sas_trigger_master(ioc, MASTER_TRIGGER_FW_FAULT);
		else
			mpt3sas_trigger_master(ioc,
			    MASTER_TRIGGER_ADAPTER_RESET);
	}
	dtmprintk(ioc, ioc_info(ioc, "%s: exit\n", __func__));
	return r;
}