This source file includes the following definitions:
- ufshcd_is_clkgating_allowed
- ufshcd_can_hibern8_during_gating
- ufshcd_is_clkscaling_supported
- ufshcd_can_autobkops_during_suspend
- ufshcd_is_intr_aggr_allowed
- ufshcd_is_auto_hibern8_supported
- ufshcd_is_auto_hibern8_enabled
- ufshcd_rmwl
- check_upiu_size
- ufshcd_set_variant
- ufshcd_get_variant
- ufshcd_keep_autobkops_enabled_except_suspend
- ufshcd_dme_set
- ufshcd_dme_st_set
- ufshcd_dme_peer_set
- ufshcd_dme_peer_st_set
- ufshcd_dme_get
- ufshcd_dme_peer_get
- ufshcd_is_hs_mode
- ufshcd_get_var_name
- ufshcd_vops_init
- ufshcd_vops_exit
- ufshcd_vops_get_ufs_hci_version
- ufshcd_vops_clk_scale_notify
- ufshcd_vops_setup_clocks
- ufshcd_vops_setup_regulators
- ufshcd_vops_hce_enable_notify
- ufshcd_vops_link_startup_notify
- ufshcd_vops_pwr_change_notify
- ufshcd_vops_setup_xfer_req
- ufshcd_vops_setup_task_mgmt
- ufshcd_vops_hibern8_notify
- ufshcd_vops_apply_dev_quirks
- ufshcd_vops_suspend
- ufshcd_vops_resume
- ufshcd_vops_dbg_register_dump
- ufshcd_vops_device_reset
- ufshcd_scsi_to_upiu_lun
/*
 * Universal Flash Storage Host controller driver
 */
#ifndef _UFSHCD_H
#define _UFSHCD_H

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/rwsem.h>
#include <linux/workqueue.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/pm_runtime.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/regulator/consumer.h>
#include <linux/bitfield.h>
#include "unipro.h"

#include <asm/irq.h>
#include <asm/byteorder.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_eh.h>

#include "ufs.h"
#include "ufshci.h"

#define UFSHCD "ufshcd"
#define UFSHCD_DRIVER_VERSION "0.2"

struct ufs_hba;

enum dev_cmd_type {
	DEV_CMD_TYPE_NOP	= 0x0,
	DEV_CMD_TYPE_QUERY	= 0x1,
};
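
/**
 * struct uic_command - UIC command structure
 * @command: UIC command
 * @argument1: UIC command argument 1
 * @argument2: UIC command argument 2
 * @argument3: UIC command argument 3
 * @cmd_active: Indicate if UIC command is outstanding
 * @result: UIC command result
 * @done: UIC command completion
 */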
struct uic_command {
	u32 command;
	u32 argument1;
	u32 argument2;
	u32 argument3;
	int cmd_active;
	int result;
	struct completion done;
};

/* Used to differentiate the power management options */
enum ufs_pm_op {
	UFS_RUNTIME_PM,
	UFS_SYSTEM_PM,
	UFS_SHUTDOWN_PM,
};

#define ufshcd_is_runtime_pm(op) ((op) == UFS_RUNTIME_PM)
#define ufshcd_is_system_pm(op) ((op) == UFS_SYSTEM_PM)
#define ufshcd_is_shutdown_pm(op) ((op) == UFS_SHUTDOWN_PM)

/* Host <-> Device UniPro Link state */
enum uic_link_state {
	UIC_LINK_OFF_STATE	= 0, /* Link powered down or disabled */
	UIC_LINK_ACTIVE_STATE	= 1, /* Link is in Fast/Slow/Sleep state */
	UIC_LINK_HIBERN8_STATE	= 2, /* Link is in Hibernate state */
};

#define ufshcd_is_link_off(hba) ((hba)->uic_link_state == UIC_LINK_OFF_STATE)
#define ufshcd_is_link_active(hba) ((hba)->uic_link_state == \
				    UIC_LINK_ACTIVE_STATE)
#define ufshcd_is_link_hibern8(hba) ((hba)->uic_link_state == \
				    UIC_LINK_HIBERN8_STATE)
#define ufshcd_set_link_off(hba) ((hba)->uic_link_state = UIC_LINK_OFF_STATE)
#define ufshcd_set_link_active(hba) ((hba)->uic_link_state = \
				    UIC_LINK_ACTIVE_STATE)
#define ufshcd_set_link_hibern8(hba) ((hba)->uic_link_state = \
				    UIC_LINK_HIBERN8_STATE)

/*
 * UFS Power management levels.
 * Each level is in increasing order of power savings.
 */
enum ufs_pm_level {
	UFS_PM_LVL_0,
	UFS_PM_LVL_1,
	UFS_PM_LVL_2,
	UFS_PM_LVL_3,
	UFS_PM_LVL_4,
	UFS_PM_LVL_5,
	UFS_PM_LVL_MAX
};

struct ufs_pm_lvl_states {
	enum ufs_dev_pwr_mode dev_state;
	enum uic_link_state link_state;
};
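
/**
 * struct ufshcd_lrb - local reference block
 * @utr_descriptor_ptr: UTRD address of the command
 * @ucd_req_ptr: UCD address of the command
 * @ucd_rsp_ptr: Response UPIU address for this command
 * @ucd_prdt_ptr: PRDT address of the command
 * @utrd_dma_addr: UTRD DMA address for debug
 * @ucd_req_dma_addr: UPIU request dma address for debug
 * @ucd_rsp_dma_addr: UPIU response dma address for debug
 * @ucd_prdt_dma_addr: PRDT dma address for debug
 * @cmd: pointer to SCSI command
 * @sense_buffer: pointer to sense buffer address of the SCSI command
 * @sense_bufflen: Length of the sense buffer
 * @scsi_status: SCSI status of the command
 * @command_type: SCSI, UFS, Query.
 * @task_tag: Task tag of the command
 * @lun: LUN of the command
 * @intr_cmd: Interrupt command (doesn't participate in interrupt aggregation)
 * @issue_time_stamp: time stamp for debug purposes
 * @compl_time_stamp: time stamp for statistics
 * @req_abort_skip: skip request abort task flag
 */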
struct ufshcd_lrb {
	struct utp_transfer_req_desc *utr_descriptor_ptr;
	struct utp_upiu_req *ucd_req_ptr;
	struct utp_upiu_rsp *ucd_rsp_ptr;
	struct ufshcd_sg_entry *ucd_prdt_ptr;

	dma_addr_t utrd_dma_addr;
	dma_addr_t ucd_req_dma_addr;
	dma_addr_t ucd_rsp_dma_addr;
	dma_addr_t ucd_prdt_dma_addr;

	struct scsi_cmnd *cmd;
	u8 *sense_buffer;
	unsigned int sense_bufflen;
	int scsi_status;

	int command_type;
	int task_tag;
	u8 lun;
	bool intr_cmd;
	ktime_t issue_time_stamp;
	ktime_t compl_time_stamp;

	bool req_abort_skip;
};
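
/**
 * struct ufs_query - holds relevant data structures for query request
 * @request: request upiu and function
 * @descriptor: buffer for sending/receiving descriptor
 * @response: response upiu and response
 */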
struct ufs_query {
	struct ufs_query_req request;
	u8 *descriptor;
	struct ufs_query_res response;
};
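
/**
 * struct ufs_dev_cmd - all associated fields with device management commands
 * @type: device management command type - Query, NOP OUT
 * @lock: lock to allow one command at a time
 * @complete: internal commands completion
 * @tag_wq: wait queue until free command slot is available
 * @query: Device management query data
 */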
struct ufs_dev_cmd {
	enum dev_cmd_type type;
	struct mutex lock;
	struct completion *complete;
	wait_queue_head_t tag_wq;
	struct ufs_query query;
};

struct ufs_desc_size {
	int dev_desc;
	int pwr_desc;
	int geom_desc;
	int interc_desc;
	int unit_desc;
	int conf_desc;
	int hlth_desc;
};
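
/**
 * struct ufs_clk_info - UFS clock related info
 * @list: list headed by hba->clk_list_head
 * @clk: clock node
 * @name: clock name
 * @max_freq: maximum frequency supported by the clock
 * @min_freq: min frequency that can be used for clock scaling
 * @curr_freq: indicates the current frequency that it is set to
 * @enabled: variable to check against multiple enable/disable
 */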
struct ufs_clk_info {
	struct list_head list;
	struct clk *clk;
	const char *name;
	u32 max_freq;
	u32 min_freq;
	u32 curr_freq;
	bool enabled;
};

enum ufs_notify_change_status {
	PRE_CHANGE,
	POST_CHANGE,
};

struct ufs_pa_layer_attr {
	u32 gear_rx;
	u32 gear_tx;
	u32 lane_rx;
	u32 lane_tx;
	u32 pwr_rx;
	u32 pwr_tx;
	u32 hs_rate;
};

struct ufs_pwr_mode_info {
	bool is_valid;
	struct ufs_pa_layer_attr info;
};
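
/**
 * struct ufs_hba_variant_ops - variant specific callbacks
 * @name: variant name
 * @init: called when the driver is initialized
 * @exit: called to cleanup everything done in init
 * @get_ufs_hci_version: called to get UFS HCI version
 * @clk_scale_notify: notifies that clks are scaled up/down
 * @setup_clocks: called before touching any of the controller registers
 * @setup_regulators: called before accessing the host controller
 * @hce_enable_notify: called before and after HCE enable bit is set
 * @link_startup_notify: called before and after Link startup is carried out
 * @pwr_change_notify: called before and after a power mode change
 * @setup_xfer_req: called when a transfer request is issued
 * @setup_task_mgmt: called when a task management request is issued
 * @hibern8_notify: called around hibern8 enter/exit
 * @apply_dev_quirks: called to apply device specific quirks
 * @suspend: called during host controller PM callback
 * @resume: called during host controller PM callback
 * @dbg_register_dump: used to dump controller debug information
 * @phy_initialization: used to initialize phys
 * @device_reset: called to issue a reset pulse on the UFS device
 */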
struct ufs_hba_variant_ops {
	const char *name;
	int	(*init)(struct ufs_hba *);
	void	(*exit)(struct ufs_hba *);
	u32	(*get_ufs_hci_version)(struct ufs_hba *);
	int	(*clk_scale_notify)(struct ufs_hba *, bool,
				    enum ufs_notify_change_status);
	int	(*setup_clocks)(struct ufs_hba *, bool,
				enum ufs_notify_change_status);
	int	(*setup_regulators)(struct ufs_hba *, bool);
	int	(*hce_enable_notify)(struct ufs_hba *,
				     enum ufs_notify_change_status);
	int	(*link_startup_notify)(struct ufs_hba *,
				       enum ufs_notify_change_status);
	int	(*pwr_change_notify)(struct ufs_hba *,
				     enum ufs_notify_change_status status,
				     struct ufs_pa_layer_attr *,
				     struct ufs_pa_layer_attr *);
	void	(*setup_xfer_req)(struct ufs_hba *, int, bool);
	void	(*setup_task_mgmt)(struct ufs_hba *, int, u8);
	void	(*hibern8_notify)(struct ufs_hba *, enum uic_cmd_dme,
				  enum ufs_notify_change_status);
	int	(*apply_dev_quirks)(struct ufs_hba *, struct ufs_dev_desc *);
	int	(*suspend)(struct ufs_hba *, enum ufs_pm_op);
	int	(*resume)(struct ufs_hba *, enum ufs_pm_op);
	void	(*dbg_register_dump)(struct ufs_hba *hba);
	int	(*phy_initialization)(struct ufs_hba *);
	void	(*device_reset)(struct ufs_hba *hba);
};

/* clock gating state */
enum clk_gating_state {
	CLKS_OFF,
	CLKS_ON,
	REQ_CLKS_OFF,
	REQ_CLKS_ON,
};
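
/**
 * struct ufs_clk_gating - UFS clock gating related info
 * @gate_work: worker to turn off clocks after some delay as specified in
 * delay_ms
 * @ungate_work: worker to turn on clocks that will be used in case of
 * interrupt context
 * @state: the current clocks state
 * @delay_ms: gating delay in ms
 * @is_suspended: clk gating is suspended when set to 1 which can be used
 * during suspend/resume
 * @delay_attr: sysfs attribute to control delay_ms
 * @enable_attr: sysfs attribute to enable/disable clock gating
 * @is_enabled: Indicates the current status of clock gating
 * @active_reqs: number of requests that are pending and should be waited for
 * completion before gating clocks
 * @clk_gating_workq: workqueue for clock gating work
 */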
struct ufs_clk_gating {
	struct delayed_work gate_work;
	struct work_struct ungate_work;
	enum clk_gating_state state;
	unsigned long delay_ms;
	bool is_suspended;
	struct device_attribute delay_attr;
	struct device_attribute enable_attr;
	bool is_enabled;
	int active_reqs;
	struct workqueue_struct *clk_gating_workq;
};

struct ufs_saved_pwr_info {
	struct ufs_pa_layer_attr info;
	bool is_valid;
};
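
/**
 * struct ufs_clk_scaling - UFS clock scaling related data
 * @active_reqs: number of requests that are pending. If this is zero when
 * devfreq ->target() function is called then schedule "suspend_work" to
 * suspend devfreq.
 * @tot_busy_t: Total busy time in current polling window
 * @window_start_t: Start time (in jiffies) of the current polling window
 * @busy_start_t: Start time of current busy period
 * @enable_attr: sysfs attribute to enable/disable clock scaling
 * @saved_pwr_info: UFS power mode may also be changed during scaling and this
 * one keeps track of previous power mode.
 * @workq: workqueue to schedule devfreq suspend/resume work
 * @suspend_work: worker to suspend devfreq
 * @resume_work: worker to resume devfreq
 * @is_allowed: tracks if scaling is currently allowed or not
 * @is_busy_started: tracks if busy period has started or not
 * @is_suspended: tracks if devfreq is suspended or not
 */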
struct ufs_clk_scaling {
	int active_reqs;
	unsigned long tot_busy_t;
	unsigned long window_start_t;
	ktime_t busy_start_t;
	struct device_attribute enable_attr;
	struct ufs_saved_pwr_info saved_pwr_info;
	struct workqueue_struct *workq;
	struct work_struct suspend_work;
	struct work_struct resume_work;
	bool is_allowed;
	bool is_busy_started;
	bool is_suspended;
};
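
/**
 * struct ufs_init_prefetch - contains data that is pre-fetched once during
 * initialization
 * @icc_level: icc level which was read during initialization
 */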
struct ufs_init_prefetch {
	u32 icc_level;
};

#define UFS_ERR_REG_HIST_LENGTH 8
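
/**
 * struct ufs_err_reg_hist - keeps history of errors
 * @pos: index to indicate cyclic buffer position
 * @reg: cyclic buffer for registers value
 * @tstamp: cyclic buffer for time stamp
 */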
struct ufs_err_reg_hist {
	int pos;
	u32 reg[UFS_ERR_REG_HIST_LENGTH];
	ktime_t tstamp[UFS_ERR_REG_HIST_LENGTH];
};
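
/**
 * struct ufs_stats - keeps usage/err statistics
 * @hibern8_exit_cnt: Counter to keep track of number of exits,
 *		reset this after link-startup.
 * @last_hibern8_exit_tstamp: Set time after the hibern8 exit.
 *		Clear after the first successful command completion.
 * @pa_err: tracks pa-uic errors
 * @dl_err: tracks dl-uic errors
 * @nl_err: tracks nl-uic errors
 * @tl_err: tracks tl-uic errors
 * @dme_err: tracks dme errors
 * @auto_hibern8_err: tracks auto-hibernate errors
 * @fatal_err: tracks fatal errors
 * @link_startup_err: tracks link-startup errors
 * @resume_err: tracks resume errors
 * @suspend_err: tracks suspend errors
 * @dev_reset: tracks device reset events
 * @host_reset: tracks host reset events
 * @task_abort: tracks task abort events
 */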
struct ufs_stats {
	u32 hibern8_exit_cnt;
	ktime_t last_hibern8_exit_tstamp;

	/* uic specific errors */
	struct ufs_err_reg_hist pa_err;
	struct ufs_err_reg_hist dl_err;
	struct ufs_err_reg_hist nl_err;
	struct ufs_err_reg_hist tl_err;
	struct ufs_err_reg_hist dme_err;

	/* fatal errors */
	struct ufs_err_reg_hist auto_hibern8_err;
	struct ufs_err_reg_hist fatal_err;
	struct ufs_err_reg_hist link_startup_err;
	struct ufs_err_reg_hist resume_err;
	struct ufs_err_reg_hist suspend_err;

	/* abnormal events */
	struct ufs_err_reg_hist dev_reset;
	struct ufs_err_reg_hist host_reset;
	struct ufs_err_reg_hist task_abort;
};
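
/**
 * struct ufs_hba - per adapter private structure
 * @mmio_base: UFSHCI base register address
 * @ucdl_base_addr: UFS Command Descriptor base address
 * @utrdl_base_addr: UTP Transfer Request Descriptor base address
 * @utmrdl_base_addr: UTP Task Management Descriptor base address
 * @ucdl_dma_addr: UFS Command Descriptor DMA address
 * @utrdl_dma_addr: UTRDL DMA address
 * @utmrdl_dma_addr: UTMRDL DMA address
 * @host: Scsi_Host instance of the driver
 * @dev: device handle
 * @lrb: local reference block
 * @lrb_in_use: lrb in use
 * @outstanding_tasks: Bits representing outstanding task requests
 * @outstanding_reqs: Bits representing outstanding transfer requests
 * @capabilities: UFS Controller Capabilities
 * @nutrs: Transfer Request Queue depth supported by controller
 * @nutmrs: Task Management Queue depth supported by controller
 * @ufs_version: UFS Version to which controller complies
 * @vops: pointer to variant specific operations
 * @priv: pointer to variant specific private data
 * @irq: Irq number of the controller
 * @active_uic_cmd: handle of active UIC command
 * @uic_cmd_mutex: mutex for uic command
 * @tm_wq: wait queue for task management
 * @tm_tag_wq: wait queue for free task management slots
 * @tm_slots_in_use: bit map of task management request slots in use
 * @ufshcd_state: UFSHCD states
 * @eh_flags: Error handling flags
 * @intr_mask: Interrupt Mask Bits
 * @ee_ctrl_mask: Exception event control mask
 * @is_powered: flag to check if HBA is powered
 * @is_init_prefetch: flag to check if data was pre-fetched in initialization
 * @init_prefetch_data: data pre-fetched during initialization
 * @eh_work: Worker to handle UFS errors that require s/w attention
 * @eeh_work: Worker to handle exception events
 * @errors: HBA errors
 * @uic_error: UFS interconnect layer error status
 * @saved_err: sticky error mask
 * @saved_uic_err: sticky UIC error mask
 * @silence_err_logs: flag to silence error logs
 * @dev_cmd: ufs device management command information
 * @last_dme_cmd_tstamp: time stamp of the last completed DME command
 * @auto_bkops_enabled: to track whether bkops is enabled in device
 * @vreg_info: UFS device voltage regulator information
 * @clk_list_head: UFS host controller clocks list node head
 * @pwr_info: holds current power mode
 * @max_pwr_info: keeps the device max valid pwm
 * @urgent_bkops_lvl: keeps track of urgent bkops level for device
 * @is_urgent_bkops_lvl_checked: keeps track if the urgent bkops level for
 *  device is known or not.
 * @scsi_block_reqs_cnt: reference counting for scsi block requests
 */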
struct ufs_hba {
	void __iomem *mmio_base;

	/* Virtual memory reference */
	struct utp_transfer_cmd_desc *ucdl_base_addr;
	struct utp_transfer_req_desc *utrdl_base_addr;
	struct utp_task_req_desc *utmrdl_base_addr;

	/* DMA memory reference */
	dma_addr_t ucdl_dma_addr;
	dma_addr_t utrdl_dma_addr;
	dma_addr_t utmrdl_dma_addr;

	struct Scsi_Host *host;
	struct device *dev;
	/*
	 * This field is to keep a reference to "scsi_device" corresponding to
	 * "UFS device" W-LU.
	 */
	struct scsi_device *sdev_ufs_device;

	enum ufs_dev_pwr_mode curr_dev_pwr_mode;
	enum uic_link_state uic_link_state;
	/* Desired UFS power management level during runtime PM */
	enum ufs_pm_level rpm_lvl;
	/* Desired UFS power management level during system PM */
	enum ufs_pm_level spm_lvl;
	struct device_attribute rpm_lvl_attr;
	struct device_attribute spm_lvl_attr;
	int pm_op_in_progress;

	/* Auto-Hibernate Idle Timer register value */
	u32 ahit;

	struct ufshcd_lrb *lrb;
	unsigned long lrb_in_use;

	unsigned long outstanding_tasks;
	unsigned long outstanding_reqs;

	u32 capabilities;
	int nutrs;
	int nutmrs;
	u32 ufs_version;
	const struct ufs_hba_variant_ops *vops;
	void *priv;
	unsigned int irq;
	bool is_irq_enabled;
	enum ufs_ref_clk_freq dev_ref_clk_freq;

	/* Interrupt aggregation support is broken */
#define UFSHCD_QUIRK_BROKEN_INTR_AGGR			0x1

	/*
	 * delay before each dme command is required as the unipro
	 * layer has shown instabilities
	 */
#define UFSHCD_QUIRK_DELAY_BEFORE_DME_CMDS		0x2

	/*
	 * If UFS host controller is having issue in processing LCC (Line
	 * Control Command) coming from device then enable this quirk.
	 * When this quirk is enabled, host controller driver should disable
	 * the LCC transmission on UFS device (by clearing TX_LCC_ENABLE
	 * attribute of device to 0).
	 */
#define UFSHCD_QUIRK_BROKEN_LCC				0x4

	/*
	 * The attribute PA_RXHSUNTERMCAP specifies whether or not the
	 * inbound Link supports unterminated line in HS mode. Setting this
	 * attribute to 1 fixes moving to HS gear.
	 */
#define UFSHCD_QUIRK_BROKEN_PA_RXHSUNTERMCAP		0x8

	/*
	 * This quirk needs to be enabled if the host controller only allows
	 * accessing the peer dme attributes in AUTO mode (FAST AUTO or
	 * SLOW AUTO).
	 */
#define UFSHCD_QUIRK_DME_PEER_ACCESS_AUTO_MODE		0x10

	/*
	 * This quirk needs to be enabled if the host controller doesn't
	 * advertise the correct version in UFS_VER register. If this quirk
	 * is enabled, standard UFS host driver will call the vendor specific
	 * ops (get_ufs_hci_version) to get the correct version.
	 */
#define UFSHCD_QUIRK_BROKEN_UFS_HCI_VERSION		0x20

	/*
	 * This quirk needs to be enabled if the host controller regards
	 * resolution of the values of PRDTO and PRDTL in UTRD as byte.
	 */
#define UFSHCD_QUIRK_PRDT_BYTE_GRAN			0x80

	/*
	 * Clear handling for transfer/task request list is just opposite.
	 */
#define UFSHCI_QUIRK_BROKEN_REQ_LIST_CLR		0x100

	/*
	 * This quirk needs to be enabled if host controller doesn't allow
	 * that the interrupt aggregation timer and counter are reset by s/w.
	 */
#define UFSHCI_QUIRK_SKIP_RESET_INTR_AGGR		0x200

	/*
	 * This quirk needs to be enabled if host controller cannot be
	 * enabled via HCE register.
	 */
#define UFSHCI_QUIRK_BROKEN_HCE				0x400
	unsigned int quirks;	/* Deviations from standard UFSHCI spec. */

	/* Device deviations from standard UFS device spec. */
	unsigned int dev_quirks;

	wait_queue_head_t tm_wq;
	wait_queue_head_t tm_tag_wq;
	unsigned long tm_condition;
	unsigned long tm_slots_in_use;

	struct uic_command *active_uic_cmd;
	struct mutex uic_cmd_mutex;
	struct completion *uic_async_done;

	u32 ufshcd_state;
	u32 eh_flags;
	u32 intr_mask;
	u16 ee_ctrl_mask;
	bool is_powered;
	bool is_init_prefetch;
	struct ufs_init_prefetch init_prefetch_data;

	/* Work Queues */
	struct work_struct eh_work;
	struct work_struct eeh_work;

	/* HBA Errors */
	u32 errors;
	u32 uic_error;
	u32 saved_err;
	u32 saved_uic_err;
	struct ufs_stats ufs_stats;
	bool silence_err_logs;

	/* Device management request data */
	struct ufs_dev_cmd dev_cmd;
	ktime_t last_dme_cmd_tstamp;

	/* Keeps information of the UFS device connected to this host */
	struct ufs_dev_info dev_info;
	bool auto_bkops_enabled;
	struct ufs_vreg_info vreg_info;
	struct list_head clk_list_head;

	bool wlun_dev_clr_ua;

	/* Number of requests aborts */
	int req_abort_count;

	/* Number of lanes available (1 or 2) for Rx/Tx */
	u32 lanes_per_direction;
	struct ufs_pa_layer_attr pwr_info;
	struct ufs_pwr_mode_info max_pwr_info;

	struct ufs_clk_gating clk_gating;
	/* Control to enable/disable host capabilities */
	u32 caps;
	/* Allow dynamic clk gating */
#define UFSHCD_CAP_CLK_GATING	(1 << 0)
	/* Allow hibern8 with clk gating */
#define UFSHCD_CAP_HIBERN8_WITH_CLK_GATING (1 << 1)
	/* Allow dynamic clk scaling */
#define UFSHCD_CAP_CLK_SCALING	(1 << 2)
	/* Allow auto bkops to be enabled during runtime suspend */
#define UFSHCD_CAP_AUTO_BKOPS_SUSPEND (1 << 3)
	/*
	 * This capability allows host controller driver to use the UFS HCI's
	 * interrupt aggregation capability.
	 * CAUTION: Enabling this might reduce overall UFS throughput.
	 */
#define UFSHCD_CAP_INTR_AGGR (1 << 4)
	/*
	 * This capability allows the device auto-bkops to be always enabled
	 * except during suspend (both runtime and suspend).
	 * Enabling this capability means that device will always be allowed
	 * to do background operation when it's active but it might degrade
	 * the performance of ongoing read/write operations.
	 */
#define UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND (1 << 5)

	struct devfreq *devfreq;
	struct ufs_clk_scaling clk_scaling;
	bool is_sys_suspended;

	enum bkops_status urgent_bkops_lvl;
	bool is_urgent_bkops_lvl_checked;

	struct rw_semaphore clk_scaling_lock;
	struct ufs_desc_size desc_size;
	atomic_t scsi_block_reqs_cnt;

	struct device		bsg_dev;
	struct request_queue	*bsg_queue;
};

/* Returns true if clocks can be gated. Otherwise false */
static inline bool ufshcd_is_clkgating_allowed(struct ufs_hba *hba)
{
	return hba->caps & UFSHCD_CAP_CLK_GATING;
}
static inline bool ufshcd_can_hibern8_during_gating(struct ufs_hba *hba)
{
	return hba->caps & UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
}
static inline int ufshcd_is_clkscaling_supported(struct ufs_hba *hba)
{
	return hba->caps & UFSHCD_CAP_CLK_SCALING;
}
static inline bool ufshcd_can_autobkops_during_suspend(struct ufs_hba *hba)
{
	return hba->caps & UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
}

static inline bool ufshcd_is_intr_aggr_allowed(struct ufs_hba *hba)
{
/* DWC UFS Core has the Interrupt aggregation feature but is not detectable */
#ifndef CONFIG_SCSI_UFS_DWC
	if ((hba->caps & UFSHCD_CAP_INTR_AGGR) &&
	    !(hba->quirks & UFSHCD_QUIRK_BROKEN_INTR_AGGR))
		return true;
	else
		return false;
#else
	return true;
#endif
}

static inline bool ufshcd_is_auto_hibern8_supported(struct ufs_hba *hba)
{
	return (hba->capabilities & MASK_AUTO_HIBERN8_SUPPORT);
}

static inline bool ufshcd_is_auto_hibern8_enabled(struct ufs_hba *hba)
{
	return FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK, hba->ahit) ? true : false;
}

#define ufshcd_writel(hba, val, reg)	\
	writel((val), (hba)->mmio_base + (reg))
#define ufshcd_readl(hba, reg)	\
	readl((hba)->mmio_base + (reg))
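
/**
 * ufshcd_rmwl - read modify write into a register
 * @hba: per adapter instance
 * @mask: mask to apply on read value
 * @val: actual value to write
 * @reg: register address
 */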
static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val, u32 reg)
{
	u32 tmp;

	tmp = ufshcd_readl(hba, reg);
	tmp &= ~mask;
	tmp |= (val & mask);
	ufshcd_writel(hba, tmp, reg);
}

int ufshcd_alloc_host(struct device *, struct ufs_hba **);
void ufshcd_dealloc_host(struct ufs_hba *);
int ufshcd_init(struct ufs_hba *, void __iomem *, unsigned int);
void ufshcd_remove(struct ufs_hba *);
int ufshcd_wait_for_register(struct ufs_hba *hba, u32 reg, u32 mask,
				u32 val, unsigned long interval_us,
				unsigned long timeout_ms, bool can_sleep);
void ufshcd_parse_dev_ref_clk_freq(struct ufs_hba *hba, struct clk *refclk);

static inline void check_upiu_size(void)
{
	BUILD_BUG_ON(ALIGNED_UPIU_SIZE <
		GENERAL_UPIU_REQUEST_SIZE + QUERY_DESC_MAX_SIZE);
}
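
/**
 * ufshcd_set_variant - set variant specific data to the hba
 * @hba: per adapter instance
 * @variant: pointer to variant specific data
 */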
static inline void ufshcd_set_variant(struct ufs_hba *hba, void *variant)
{
	BUG_ON(!hba);
	hba->priv = variant;
}
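
/**
 * ufshcd_get_variant - get variant specific data from the hba
 * @hba: per adapter instance
 */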
static inline void *ufshcd_get_variant(struct ufs_hba *hba)
{
	BUG_ON(!hba);
	return hba->priv;
}
static inline bool ufshcd_keep_autobkops_enabled_except_suspend(
							struct ufs_hba *hba)
{
	return hba->caps & UFSHCD_CAP_KEEP_AUTO_BKOPS_ENABLED_EXCEPT_SUSPEND;
}

extern int ufshcd_runtime_suspend(struct ufs_hba *hba);
extern int ufshcd_runtime_resume(struct ufs_hba *hba);
extern int ufshcd_runtime_idle(struct ufs_hba *hba);
extern int ufshcd_system_suspend(struct ufs_hba *hba);
extern int ufshcd_system_resume(struct ufs_hba *hba);
extern int ufshcd_shutdown(struct ufs_hba *hba);
extern int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
			       u8 attr_set, u32 mib_val, u8 peer);
extern int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
			       u32 *mib_val, u8 peer);
extern int ufshcd_config_pwr_mode(struct ufs_hba *hba,
			struct ufs_pa_layer_attr *desired_pwr_mode);

/* UIC command interfaces for DME primitives */
#define DME_LOCAL	0
#define DME_PEER	1
#define ATTR_SET_NOR	0	/* NORMAL */
#define ATTR_SET_ST	1	/* STATIC */

static inline int ufshcd_dme_set(struct ufs_hba *hba, u32 attr_sel,
				 u32 mib_val)
{
	return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR,
				   mib_val, DME_LOCAL);
}

static inline int ufshcd_dme_st_set(struct ufs_hba *hba, u32 attr_sel,
				    u32 mib_val)
{
	return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST,
				   mib_val, DME_LOCAL);
}

static inline int ufshcd_dme_peer_set(struct ufs_hba *hba, u32 attr_sel,
				      u32 mib_val)
{
	return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_NOR,
				   mib_val, DME_PEER);
}

static inline int ufshcd_dme_peer_st_set(struct ufs_hba *hba, u32 attr_sel,
					 u32 mib_val)
{
	return ufshcd_dme_set_attr(hba, attr_sel, ATTR_SET_ST,
				   mib_val, DME_PEER);
}

static inline int ufshcd_dme_get(struct ufs_hba *hba,
				 u32 attr_sel, u32 *mib_val)
{
	return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_LOCAL);
}

static inline int ufshcd_dme_peer_get(struct ufs_hba *hba,
				      u32 attr_sel, u32 *mib_val)
{
	return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER);
}

static inline bool ufshcd_is_hs_mode(struct ufs_pa_layer_attr *pwr_info)
{
	return (pwr_info->pwr_rx == FAST_MODE ||
		pwr_info->pwr_rx == FASTAUTO_MODE) &&
	       (pwr_info->pwr_tx == FAST_MODE ||
		pwr_info->pwr_tx == FASTAUTO_MODE);
}

/* Expose Query-Request API */
int ufshcd_query_descriptor_retry(struct ufs_hba *hba,
				  enum query_opcode opcode,
				  enum desc_idn idn, u8 index,
				  u8 selector,
				  u8 *desc_buf, int *buf_len);
int ufshcd_read_desc_param(struct ufs_hba *hba,
			   enum desc_idn desc_id,
			   int desc_index,
			   u8 param_offset,
			   u8 *param_read_buf,
			   u8 param_size);
int ufshcd_query_attr(struct ufs_hba *hba, enum query_opcode opcode,
		      enum attr_idn idn, u8 index, u8 selector, u32 *attr_val);
int ufshcd_query_flag(struct ufs_hba *hba, enum query_opcode opcode,
		      enum flag_idn idn, bool *flag_res);

void ufshcd_auto_hibern8_enable(struct ufs_hba *hba);

#define SD_ASCII_STD true
#define SD_RAW false
int ufshcd_read_string_desc(struct ufs_hba *hba, u8 desc_index,
			    u8 **buf, bool ascii);

int ufshcd_hold(struct ufs_hba *hba, bool async);
void ufshcd_release(struct ufs_hba *hba);

int ufshcd_map_desc_id_to_length(struct ufs_hba *hba, enum desc_idn desc_id,
				 int *desc_length);

u32 ufshcd_get_local_unipro_ver(struct ufs_hba *hba);

int ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd);

int ufshcd_exec_raw_upiu_cmd(struct ufs_hba *hba,
			     struct utp_upiu_req *req_upiu,
			     struct utp_upiu_req *rsp_upiu,
			     int msgcode,
			     u8 *desc_buff, int *buff_len,
			     enum query_opcode desc_op);

/* Wrapper functions for safely calling variant operations */
static inline const char *ufshcd_get_var_name(struct ufs_hba *hba)
{
	if (hba->vops)
		return hba->vops->name;
	return "";
}

static inline int ufshcd_vops_init(struct ufs_hba *hba)
{
	if (hba->vops && hba->vops->init)
		return hba->vops->init(hba);

	return 0;
}

static inline void ufshcd_vops_exit(struct ufs_hba *hba)
{
	if (hba->vops && hba->vops->exit)
		return hba->vops->exit(hba);
}

static inline u32 ufshcd_vops_get_ufs_hci_version(struct ufs_hba *hba)
{
	if (hba->vops && hba->vops->get_ufs_hci_version)
		return hba->vops->get_ufs_hci_version(hba);

	return ufshcd_readl(hba, REG_UFS_VERSION);
}

static inline int ufshcd_vops_clk_scale_notify(struct ufs_hba *hba,
			bool up, enum ufs_notify_change_status status)
{
	if (hba->vops && hba->vops->clk_scale_notify)
		return hba->vops->clk_scale_notify(hba, up, status);
	return 0;
}

static inline int ufshcd_vops_setup_clocks(struct ufs_hba *hba, bool on,
					enum ufs_notify_change_status status)
{
	if (hba->vops && hba->vops->setup_clocks)
		return hba->vops->setup_clocks(hba, on, status);
	return 0;
}

static inline int ufshcd_vops_setup_regulators(struct ufs_hba *hba, bool status)
{
	if (hba->vops && hba->vops->setup_regulators)
		return hba->vops->setup_regulators(hba, status);

	return 0;
}

static inline int ufshcd_vops_hce_enable_notify(struct ufs_hba *hba,
						bool status)
{
	if (hba->vops && hba->vops->hce_enable_notify)
		return hba->vops->hce_enable_notify(hba, status);

	return 0;
}
static inline int ufshcd_vops_link_startup_notify(struct ufs_hba *hba,
						bool status)
{
	if (hba->vops && hba->vops->link_startup_notify)
		return hba->vops->link_startup_notify(hba, status);

	return 0;
}

static inline int ufshcd_vops_pwr_change_notify(struct ufs_hba *hba,
				  bool status,
				  struct ufs_pa_layer_attr *dev_max_params,
				  struct ufs_pa_layer_attr *dev_req_params)
{
	if (hba->vops && hba->vops->pwr_change_notify)
		return hba->vops->pwr_change_notify(hba, status,
					dev_max_params, dev_req_params);

	return -ENOTSUPP;
}

static inline void ufshcd_vops_setup_xfer_req(struct ufs_hba *hba, int tag,
					bool is_scsi_cmd)
{
	if (hba->vops && hba->vops->setup_xfer_req)
		return hba->vops->setup_xfer_req(hba, tag, is_scsi_cmd);
}

static inline void ufshcd_vops_setup_task_mgmt(struct ufs_hba *hba,
					int tag, u8 tm_function)
{
	if (hba->vops && hba->vops->setup_task_mgmt)
		return hba->vops->setup_task_mgmt(hba, tag, tm_function);
}

static inline void ufshcd_vops_hibern8_notify(struct ufs_hba *hba,
					enum uic_cmd_dme cmd,
					enum ufs_notify_change_status status)
{
	if (hba->vops && hba->vops->hibern8_notify)
		return hba->vops->hibern8_notify(hba, cmd, status);
}

static inline int ufshcd_vops_apply_dev_quirks(struct ufs_hba *hba,
					       struct ufs_dev_desc *card)
{
	if (hba->vops && hba->vops->apply_dev_quirks)
		return hba->vops->apply_dev_quirks(hba, card);
	return 0;
}

static inline int ufshcd_vops_suspend(struct ufs_hba *hba, enum ufs_pm_op op)
{
	if (hba->vops && hba->vops->suspend)
		return hba->vops->suspend(hba, op);

	return 0;
}

static inline int ufshcd_vops_resume(struct ufs_hba *hba, enum ufs_pm_op op)
{
	if (hba->vops && hba->vops->resume)
		return hba->vops->resume(hba, op);

	return 0;
}

static inline void ufshcd_vops_dbg_register_dump(struct ufs_hba *hba)
{
	if (hba->vops && hba->vops->dbg_register_dump)
		hba->vops->dbg_register_dump(hba);
}

static inline void ufshcd_vops_device_reset(struct ufs_hba *hba)
{
	if (hba->vops && hba->vops->device_reset)
		hba->vops->device_reset(hba);
}

extern struct ufs_pm_lvl_states ufs_pm_lvl_states[];
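
/**
 * ufshcd_scsi_to_upiu_lun - maps scsi LUN to UPIU LUN
 * @scsi_lun: scsi LUN id
 *
 * Returns UPIU LUN id
 */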
static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun)
{
	if (scsi_is_wlun(scsi_lun))
		return (scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID)
			| UFS_UPIU_WLUN_ID;
	else
		return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID;
}

int ufshcd_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
		     const char *prefix);

#endif /* _UFSHCD_H */