/*
 * NVM Express device driver
 * Copyright (c) 2011-2014, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 */

/*
 * Refer to the SCSI-NVMe Translation spec for details on how
 * each command is translated.
 */

#include <linux/nvme.h>
#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/genhd.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kdev_t.h>
#include <linux/kthread.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/pci.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <scsi/sg.h>
#include <scsi/scsi.h>


static int sg_version_num = 30534;	/* 2 digits for each component */

#define SNTI_TRANSLATION_SUCCESS			0
#define SNTI_INTERNAL_ERROR				1

/* VPD Page Codes */
#define VPD_SUPPORTED_PAGES				0x00
#define VPD_SERIAL_NUMBER				0x80
#define VPD_DEVICE_IDENTIFIERS				0x83
#define VPD_EXTENDED_INQUIRY				0x86
#define VPD_BLOCK_LIMITS				0xB0
#define VPD_BLOCK_DEV_CHARACTERISTICS			0xB1

/* CDB offsets */
#define REPORT_LUNS_CDB_ALLOC_LENGTH_OFFSET		6
#define REPORT_LUNS_SR_OFFSET				2
#define READ_CAP_16_CDB_ALLOC_LENGTH_OFFSET		10
#define REQUEST_SENSE_CDB_ALLOC_LENGTH_OFFSET		4
#define REQUEST_SENSE_DESC_OFFSET			1
#define REQUEST_SENSE_DESC_MASK				0x01
#define DESCRIPTOR_FORMAT_SENSE_DATA_TYPE		1
#define INQUIRY_EVPD_BYTE_OFFSET			1
#define INQUIRY_PAGE_CODE_BYTE_OFFSET			2
#define INQUIRY_EVPD_BIT_MASK				1
#define INQUIRY_CDB_ALLOCATION_LENGTH_OFFSET		3
#define START_STOP_UNIT_CDB_IMMED_OFFSET		1
#define START_STOP_UNIT_CDB_IMMED_MASK			0x1
#define START_STOP_UNIT_CDB_POWER_COND_MOD_OFFSET	3
#define START_STOP_UNIT_CDB_POWER_COND_MOD_MASK		0xF
#define START_STOP_UNIT_CDB_POWER_COND_OFFSET		4
#define START_STOP_UNIT_CDB_POWER_COND_MASK		0xF0
#define START_STOP_UNIT_CDB_NO_FLUSH_OFFSET		4
#define START_STOP_UNIT_CDB_NO_FLUSH_MASK		0x4
#define START_STOP_UNIT_CDB_START_OFFSET		4
#define START_STOP_UNIT_CDB_START_MASK			0x1
#define WRITE_BUFFER_CDB_MODE_OFFSET			1
#define WRITE_BUFFER_CDB_MODE_MASK			0x1F
#define WRITE_BUFFER_CDB_BUFFER_ID_OFFSET		2
#define WRITE_BUFFER_CDB_BUFFER_OFFSET_OFFSET		3
#define WRITE_BUFFER_CDB_PARM_LIST_LENGTH_OFFSET	6
#define FORMAT_UNIT_CDB_FORMAT_PROT_INFO_OFFSET		1
#define FORMAT_UNIT_CDB_FORMAT_PROT_INFO_MASK		0xC0
#define FORMAT_UNIT_CDB_FORMAT_PROT_INFO_SHIFT		6
#define FORMAT_UNIT_CDB_LONG_LIST_OFFSET		1
#define FORMAT_UNIT_CDB_LONG_LIST_MASK			0x20
#define FORMAT_UNIT_CDB_FORMAT_DATA_OFFSET		1
#define FORMAT_UNIT_CDB_FORMAT_DATA_MASK		0x10
#define FORMAT_UNIT_SHORT_PARM_LIST_LEN			4
#define FORMAT_UNIT_LONG_PARM_LIST_LEN			8
#define FORMAT_UNIT_PROT_INT_OFFSET			3
#define FORMAT_UNIT_PROT_FIELD_USAGE_OFFSET		0
#define FORMAT_UNIT_PROT_FIELD_USAGE_MASK		0x07
#define UNMAP_CDB_PARAM_LIST_LENGTH_OFFSET		7

/* Misc. defines */
#define NIBBLE_SHIFT					4
#define FIXED_SENSE_DATA				0x70
#define DESC_FORMAT_SENSE_DATA				0x72
#define FIXED_SENSE_DATA_ADD_LENGTH			10
#define LUN_ENTRY_SIZE					8
#define LUN_DATA_HEADER_SIZE				8
#define ALL_LUNS_RETURNED				0x02
#define ALL_WELL_KNOWN_LUNS_RETURNED			0x01
#define RESTRICTED_LUNS_RETURNED			0x00
#define NVME_POWER_STATE_START_VALID			0x00
#define NVME_POWER_STATE_ACTIVE				0x01
#define NVME_POWER_STATE_IDLE				0x02
#define NVME_POWER_STATE_STANDBY			0x03
#define NVME_POWER_STATE_LU_CONTROL			0x07
#define POWER_STATE_0					0
#define POWER_STATE_1					1
#define POWER_STATE_2					2
#define POWER_STATE_3					3
#define DOWNLOAD_SAVE_ACTIVATE				0x05
#define DOWNLOAD_SAVE_DEFER_ACTIVATE			0x0E
#define ACTIVATE_DEFERRED_MICROCODE			0x0F
#define FORMAT_UNIT_IMMED_MASK				0x2
#define FORMAT_UNIT_IMMED_OFFSET			1
#define KELVIN_TEMP_FACTOR				273
#define FIXED_FMT_SENSE_DATA_SIZE			18
#define DESC_FMT_SENSE_DATA_SIZE			8

/* SCSI/NVMe defines and bit masks */
#define INQ_STANDARD_INQUIRY_PAGE			0x00
#define INQ_SUPPORTED_VPD_PAGES_PAGE			0x00
#define INQ_UNIT_SERIAL_NUMBER_PAGE			0x80
#define INQ_DEVICE_IDENTIFICATION_PAGE			0x83
#define INQ_EXTENDED_INQUIRY_DATA_PAGE			0x86
#define INQ_BDEV_LIMITS_PAGE				0xB0
#define INQ_BDEV_CHARACTERISTICS_PAGE			0xB1
#define INQ_SERIAL_NUMBER_LENGTH			0x14
#define INQ_NUM_SUPPORTED_VPD_PAGES			6
#define VERSION_SPC_4					0x06
#define ACA_UNSUPPORTED					0
#define STANDARD_INQUIRY_LENGTH				36
#define ADDITIONAL_STD_INQ_LENGTH			31
#define EXTENDED_INQUIRY_DATA_PAGE_LENGTH		0x3C
#define RESERVED_FIELD					0

/* SCSI READ/WRITE Defines */
#define IO_CDB_WP_MASK					0xE0
#define IO_CDB_WP_SHIFT					5
#define IO_CDB_FUA_MASK					0x8
#define IO_6_CDB_LBA_OFFSET				0
#define IO_6_CDB_LBA_MASK				0x001FFFFF
#define IO_6_CDB_TX_LEN_OFFSET				4
#define IO_6_DEFAULT_TX_LEN				256
#define IO_10_CDB_LBA_OFFSET				2
#define IO_10_CDB_TX_LEN_OFFSET				7
#define IO_10_CDB_WP_OFFSET				1
#define IO_10_CDB_FUA_OFFSET				1
#define IO_12_CDB_LBA_OFFSET				2
#define IO_12_CDB_TX_LEN_OFFSET				6
#define IO_12_CDB_WP_OFFSET				1
#define IO_12_CDB_FUA_OFFSET				1
#define IO_16_CDB_FUA_OFFSET				1
#define IO_16_CDB_WP_OFFSET				1
#define IO_16_CDB_LBA_OFFSET				2
#define IO_16_CDB_TX_LEN_OFFSET				10

/* Mode Sense/Select defines */
#define MODE_PAGE_INFO_EXCEP				0x1C
#define MODE_PAGE_CACHING				0x08
#define MODE_PAGE_CONTROL				0x0A
#define MODE_PAGE_POWER_CONDITION			0x1A
#define MODE_PAGE_RETURN_ALL				0x3F
#define MODE_PAGE_BLK_DES_LEN				0x08
#define MODE_PAGE_LLBAA_BLK_DES_LEN			0x10
#define MODE_PAGE_CACHING_LEN				0x14
#define MODE_PAGE_CONTROL_LEN				0x0C
#define MODE_PAGE_POW_CND_LEN				0x28
#define MODE_PAGE_INF_EXC_LEN				0x0C
#define MODE_PAGE_ALL_LEN				0x54
#define MODE_SENSE6_MPH_SIZE				4
#define MODE_SENSE6_ALLOC_LEN_OFFSET			4
#define MODE_SENSE_PAGE_CONTROL_OFFSET			2
#define MODE_SENSE_PAGE_CONTROL_MASK			0xC0
#define MODE_SENSE_PAGE_CODE_OFFSET			2
#define MODE_SENSE_PAGE_CODE_MASK			0x3F
#define MODE_SENSE_LLBAA_OFFSET				1
#define MODE_SENSE_LLBAA_MASK				0x10
#define MODE_SENSE_LLBAA_SHIFT				4
#define MODE_SENSE_DBD_OFFSET				1
#define MODE_SENSE_DBD_MASK				8
#define MODE_SENSE_DBD_SHIFT				3
#define MODE_SENSE10_MPH_SIZE				8
#define MODE_SENSE10_ALLOC_LEN_OFFSET			7
#define MODE_SELECT_CDB_PAGE_FORMAT_OFFSET		1
#define MODE_SELECT_CDB_SAVE_PAGES_OFFSET		1
#define MODE_SELECT_6_CDB_PARAM_LIST_LENGTH_OFFSET	4
#define MODE_SELECT_10_CDB_PARAM_LIST_LENGTH_OFFSET	7
#define MODE_SELECT_CDB_PAGE_FORMAT_MASK		0x10
#define MODE_SELECT_CDB_SAVE_PAGES_MASK			0x1
#define MODE_SELECT_6_BD_OFFSET				3
#define MODE_SELECT_10_BD_OFFSET			6
#define MODE_SELECT_10_LLBAA_OFFSET			4
#define MODE_SELECT_10_LLBAA_MASK			1
#define MODE_SELECT_6_MPH_SIZE				4
#define MODE_SELECT_10_MPH_SIZE				8
#define CACHING_MODE_PAGE_WCE_MASK			0x04
#define MODE_SENSE_BLK_DESC_ENABLED			0
#define MODE_SENSE_BLK_DESC_COUNT			1
#define MODE_SELECT_PAGE_CODE_MASK			0x3F
#define SHORT_DESC_BLOCK				8
#define LONG_DESC_BLOCK					16
#define MODE_PAGE_POW_CND_LEN_FIELD			0x26
#define MODE_PAGE_INF_EXC_LEN_FIELD			0x0A
#define MODE_PAGE_CACHING_LEN_FIELD			0x12
#define MODE_PAGE_CONTROL_LEN_FIELD			0x0A
#define MODE_SENSE_PC_CURRENT_VALUES			0

/* Log Sense defines */
#define LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE		0x00
#define LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH		0x07
#define LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE		0x2F
#define LOG_PAGE_TEMPERATURE_PAGE			0x0D
#define LOG_SENSE_CDB_SP_OFFSET				1
#define LOG_SENSE_CDB_SP_NOT_ENABLED			0
#define LOG_SENSE_CDB_PC_OFFSET				2
#define LOG_SENSE_CDB_PC_MASK				0xC0
#define LOG_SENSE_CDB_PC_SHIFT				6
#define LOG_SENSE_CDB_PC_CUMULATIVE_VALUES		1
#define LOG_SENSE_CDB_PAGE_CODE_MASK			0x3F
#define LOG_SENSE_CDB_ALLOC_LENGTH_OFFSET		7
#define REMAINING_INFO_EXCP_PAGE_LENGTH			0x8
#define LOG_INFO_EXCP_PAGE_LENGTH			0xC
#define REMAINING_TEMP_PAGE_LENGTH			0xC
#define LOG_TEMP_PAGE_LENGTH				0x10
#define LOG_TEMP_UNKNOWN				0xFF
#define SUPPORTED_LOG_PAGES_PAGE_LENGTH			0x3

/* Read Capacity defines */
#define READ_CAP_10_RESP_SIZE				8
#define READ_CAP_16_RESP_SIZE				32

/* NVMe Namespace and Command Defines */
#define BYTES_TO_DWORDS					4
#define NVME_MAX_FIRMWARE_SLOT				7

/* Report LUNs defines */
#define REPORT_LUNS_FIRST_LUN_OFFSET			8

/* SCSI ADDITIONAL SENSE Codes */

#define SCSI_ASC_NO_SENSE				0x00
#define SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT		0x03
#define SCSI_ASC_LUN_NOT_READY				0x04
#define SCSI_ASC_WARNING				0x0B
#define SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED		0x10
#define SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED		0x10
#define SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED		0x10
#define SCSI_ASC_UNRECOVERED_READ_ERROR			0x11
#define SCSI_ASC_MISCOMPARE_DURING_VERIFY		0x1D
#define SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID		0x20
#define SCSI_ASC_ILLEGAL_COMMAND			0x20
#define SCSI_ASC_ILLEGAL_BLOCK				0x21
#define SCSI_ASC_INVALID_CDB				0x24
#define SCSI_ASC_INVALID_LUN				0x25
#define SCSI_ASC_INVALID_PARAMETER			0x26
#define SCSI_ASC_FORMAT_COMMAND_FAILED			0x31
#define SCSI_ASC_INTERNAL_TARGET_FAILURE		0x44

/* SCSI ADDITIONAL SENSE Code Qualifiers */

#define SCSI_ASCQ_CAUSE_NOT_REPORTABLE			0x00
#define SCSI_ASCQ_FORMAT_COMMAND_FAILED			0x01
#define SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED		0x01
#define SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED		0x02
#define SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED		0x03
#define SCSI_ASCQ_FORMAT_IN_PROGRESS			0x04
#define SCSI_ASCQ_POWER_LOSS_EXPECTED			0x08
#define SCSI_ASCQ_INVALID_LUN_ID			0x09

/*
 * DEVICE_SPECIFIC_PARAMETER in the mode parameter header (see sbc2r16);
 * setting bit 4 (value 0x10) would advertise DPOFUA support. It is left
 * at zero here.
 */
#define DEVICE_SPECIFIC_PARAMETER			0
#define VPD_ID_DESCRIPTOR_LENGTH sizeof(VPD_IDENTIFICATION_DESCRIPTOR)

/* MACROs to extract information from CDBs */

#define GET_OPCODE(cdb)		cdb[0]

#define GET_U8_FROM_CDB(cdb, index) (cdb[index] << 0)

#define GET_U16_FROM_CDB(cdb, index) ((cdb[index] << 8) | (cdb[index + 1] << 0))

#define GET_U24_FROM_CDB(cdb, index) ((cdb[index] << 16) | \
(cdb[index + 1] <<  8) | \
(cdb[index + 2] <<  0))

#define GET_U32_FROM_CDB(cdb, index) ((cdb[index] << 24) | \
(cdb[index + 1] << 16) | \
(cdb[index + 2] <<  8) | \
(cdb[index + 3] <<  0))

#define GET_U64_FROM_CDB(cdb, index) ((((u64)cdb[index]) << 56) | \
(((u64)cdb[index + 1]) << 48) | \
(((u64)cdb[index + 2]) << 40) | \
(((u64)cdb[index + 3]) << 32) | \
(((u64)cdb[index + 4]) << 24) | \
(((u64)cdb[index + 5]) << 16) | \
(((u64)cdb[index + 6]) <<  8) | \
(((u64)cdb[index + 7]) <<  0))
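
/*
 * Example (illustrative): a READ(10) CDB carries its 32-bit LBA
 * big-endian in bytes 2-5, so GET_U32_FROM_CDB(cdb, 2) expands to
 *     (cdb[2] << 24) | (cdb[3] << 16) | (cdb[4] << 8) | (cdb[5] << 0);
 * cdb[2..5] = {0x00, 0x01, 0x02, 0x03} yields LBA 0x00010203.
 */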

/* Inquiry Helper Macros */
#define GET_INQ_EVPD_BIT(cdb) \
((GET_U8_FROM_CDB(cdb, INQUIRY_EVPD_BYTE_OFFSET) &		\
INQUIRY_EVPD_BIT_MASK) ? 1 : 0)

#define GET_INQ_PAGE_CODE(cdb)					\
(GET_U8_FROM_CDB(cdb, INQUIRY_PAGE_CODE_BYTE_OFFSET))

#define GET_INQ_ALLOC_LENGTH(cdb)				\
(GET_U16_FROM_CDB(cdb, INQUIRY_CDB_ALLOCATION_LENGTH_OFFSET))

/* Report LUNs Helper Macros */
#define GET_REPORT_LUNS_ALLOC_LENGTH(cdb)			\
(GET_U32_FROM_CDB(cdb, REPORT_LUNS_CDB_ALLOC_LENGTH_OFFSET))

/* Read Capacity Helper Macros */
#define GET_READ_CAP_16_ALLOC_LENGTH(cdb)			\
(GET_U32_FROM_CDB(cdb, READ_CAP_16_CDB_ALLOC_LENGTH_OFFSET))

#define IS_READ_CAP_16(cdb)					\
((cdb[0] == SERVICE_ACTION_IN_16 && cdb[1] == SAI_READ_CAPACITY_16) ? 1 : 0)

/* Request Sense Helper Macros */
#define GET_REQUEST_SENSE_ALLOC_LENGTH(cdb)			\
(GET_U8_FROM_CDB(cdb, REQUEST_SENSE_CDB_ALLOC_LENGTH_OFFSET))

/* Mode Sense Helper Macros */
#define GET_MODE_SENSE_DBD(cdb)					\
((GET_U8_FROM_CDB(cdb, MODE_SENSE_DBD_OFFSET) & MODE_SENSE_DBD_MASK) >>	\
MODE_SENSE_DBD_SHIFT)

#define GET_MODE_SENSE_LLBAA(cdb)				\
((GET_U8_FROM_CDB(cdb, MODE_SENSE_LLBAA_OFFSET) &		\
MODE_SENSE_LLBAA_MASK) >> MODE_SENSE_LLBAA_SHIFT)

#define GET_MODE_SENSE_MPH_SIZE(cdb10)				\
(cdb10 ? MODE_SENSE10_MPH_SIZE : MODE_SENSE6_MPH_SIZE)


/* Struct to gather data that needs to be extracted from a SCSI CDB.
   Not conforming to any particular CDB variant, but compatible with all. */

struct nvme_trans_io_cdb {
	u8 fua;
	u8 prot_info;
	u64 lba;
	u32 xfer_len;
};


/* Internal Helper Functions */


/* Copy data to userspace memory */

static int nvme_trans_copy_to_user(struct sg_io_hdr *hdr, void *from,
								unsigned long n)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	unsigned long not_copied;
	int i;
	void *index = from;
	size_t remaining = n;
	size_t xfer_len;

	if (hdr->iovec_count > 0) {
		struct sg_iovec sgl;

		for (i = 0; i < hdr->iovec_count; i++) {
			not_copied = copy_from_user(&sgl, hdr->dxferp +
						i * sizeof(struct sg_iovec),
						sizeof(struct sg_iovec));
			if (not_copied)
				return -EFAULT;
			xfer_len = min(remaining, sgl.iov_len);
			not_copied = copy_to_user(sgl.iov_base, index,
								xfer_len);
			if (not_copied) {
				res = -EFAULT;
				break;
			}
			index += xfer_len;
			remaining -= xfer_len;
			if (remaining == 0)
				break;
		}
		return res;
	}
	not_copied = copy_to_user(hdr->dxferp, from, n);
	if (not_copied)
		res = -EFAULT;
	return res;
}

/* Copy data from userspace memory */

static int nvme_trans_copy_from_user(struct sg_io_hdr *hdr, void *to,
								unsigned long n)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	unsigned long not_copied;
	int i;
	void *index = to;
	size_t remaining = n;
	size_t xfer_len;

	if (hdr->iovec_count > 0) {
		struct sg_iovec sgl;

		for (i = 0; i < hdr->iovec_count; i++) {
			not_copied = copy_from_user(&sgl, hdr->dxferp +
						i * sizeof(struct sg_iovec),
						sizeof(struct sg_iovec));
			if (not_copied)
				return -EFAULT;
			xfer_len = min(remaining, sgl.iov_len);
			not_copied = copy_from_user(index, sgl.iov_base,
								xfer_len);
			if (not_copied) {
				res = -EFAULT;
				break;
			}
			index += xfer_len;
			remaining -= xfer_len;
			if (remaining == 0)
				break;
		}
		return res;
	}

	not_copied = copy_from_user(to, hdr->dxferp, n);
	if (not_copied)
		res = -EFAULT;
	return res;
}

/* Status/Sense Buffer Writeback */

static int nvme_trans_completion(struct sg_io_hdr *hdr, u8 status, u8 sense_key,
				 u8 asc, u8 ascq)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	u8 xfer_len;
	u8 resp[DESC_FMT_SENSE_DATA_SIZE];

	if (scsi_status_is_good(status)) {
		hdr->status = SAM_STAT_GOOD;
		hdr->masked_status = GOOD;
		hdr->host_status = DID_OK;
		hdr->driver_status = DRIVER_OK;
		hdr->sb_len_wr = 0;
	} else {
		hdr->status = status;
		hdr->masked_status = status >> 1;
		hdr->host_status = DID_OK;
		hdr->driver_status = DRIVER_OK;

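		/* Descriptor-format sense data (SPC-4): byte 0 = response
		 * code (0x72), byte 1 = sense key, byte 2 = ASC,
		 * byte 3 = ASCQ */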
		memset(resp, 0, DESC_FMT_SENSE_DATA_SIZE);
		resp[0] = DESC_FORMAT_SENSE_DATA;
		resp[1] = sense_key;
		resp[2] = asc;
		resp[3] = ascq;

		xfer_len = min_t(u8, hdr->mx_sb_len, DESC_FMT_SENSE_DATA_SIZE);
		hdr->sb_len_wr = xfer_len;
		if (copy_to_user(hdr->sbp, resp, xfer_len) > 0)
			res = -EFAULT;
	}

	return res;
}

static int nvme_trans_status_code(struct sg_io_hdr *hdr, int nvme_sc)
{
	u8 status, sense_key, asc, ascq;
	int res = SNTI_TRANSLATION_SUCCESS;

	/* For non-nvme (Linux) errors, simply return the error code */
	if (nvme_sc < 0)
		return nvme_sc;

	/* Mask DNR, More, and reserved fields */
	nvme_sc &= 0x7FF;

	switch (nvme_sc) {
	/* Generic Command Status */
	case NVME_SC_SUCCESS:
		status = SAM_STAT_GOOD;
		sense_key = NO_SENSE;
		asc = SCSI_ASC_NO_SENSE;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_INVALID_OPCODE:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = ILLEGAL_REQUEST;
		asc = SCSI_ASC_ILLEGAL_COMMAND;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_INVALID_FIELD:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = ILLEGAL_REQUEST;
		asc = SCSI_ASC_INVALID_CDB;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_DATA_XFER_ERROR:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = MEDIUM_ERROR;
		asc = SCSI_ASC_NO_SENSE;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_POWER_LOSS:
		status = SAM_STAT_TASK_ABORTED;
		sense_key = ABORTED_COMMAND;
		asc = SCSI_ASC_WARNING;
		ascq = SCSI_ASCQ_POWER_LOSS_EXPECTED;
		break;
	case NVME_SC_INTERNAL:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = HARDWARE_ERROR;
		asc = SCSI_ASC_INTERNAL_TARGET_FAILURE;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_ABORT_REQ:
		status = SAM_STAT_TASK_ABORTED;
		sense_key = ABORTED_COMMAND;
		asc = SCSI_ASC_NO_SENSE;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_ABORT_QUEUE:
		status = SAM_STAT_TASK_ABORTED;
		sense_key = ABORTED_COMMAND;
		asc = SCSI_ASC_NO_SENSE;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_FUSED_FAIL:
		status = SAM_STAT_TASK_ABORTED;
		sense_key = ABORTED_COMMAND;
		asc = SCSI_ASC_NO_SENSE;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_FUSED_MISSING:
		status = SAM_STAT_TASK_ABORTED;
		sense_key = ABORTED_COMMAND;
		asc = SCSI_ASC_NO_SENSE;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_INVALID_NS:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = ILLEGAL_REQUEST;
		asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
		ascq = SCSI_ASCQ_INVALID_LUN_ID;
		break;
	case NVME_SC_LBA_RANGE:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = ILLEGAL_REQUEST;
		asc = SCSI_ASC_ILLEGAL_BLOCK;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_CAP_EXCEEDED:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = MEDIUM_ERROR;
		asc = SCSI_ASC_NO_SENSE;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_NS_NOT_READY:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = NOT_READY;
		asc = SCSI_ASC_LUN_NOT_READY;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;

	/* Command Specific Status */
	case NVME_SC_INVALID_FORMAT:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = ILLEGAL_REQUEST;
		asc = SCSI_ASC_FORMAT_COMMAND_FAILED;
		ascq = SCSI_ASCQ_FORMAT_COMMAND_FAILED;
		break;
	case NVME_SC_BAD_ATTRIBUTES:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = ILLEGAL_REQUEST;
		asc = SCSI_ASC_INVALID_CDB;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;

	/* Media Errors */
	case NVME_SC_WRITE_FAULT:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = MEDIUM_ERROR;
		asc = SCSI_ASC_PERIPHERAL_DEV_WRITE_FAULT;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_READ_ERROR:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = MEDIUM_ERROR;
		asc = SCSI_ASC_UNRECOVERED_READ_ERROR;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_GUARD_CHECK:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = MEDIUM_ERROR;
		asc = SCSI_ASC_LOG_BLOCK_GUARD_CHECK_FAILED;
		ascq = SCSI_ASCQ_LOG_BLOCK_GUARD_CHECK_FAILED;
		break;
	case NVME_SC_APPTAG_CHECK:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = MEDIUM_ERROR;
		asc = SCSI_ASC_LOG_BLOCK_APPTAG_CHECK_FAILED;
		ascq = SCSI_ASCQ_LOG_BLOCK_APPTAG_CHECK_FAILED;
		break;
	case NVME_SC_REFTAG_CHECK:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = MEDIUM_ERROR;
		asc = SCSI_ASC_LOG_BLOCK_REFTAG_CHECK_FAILED;
		ascq = SCSI_ASCQ_LOG_BLOCK_REFTAG_CHECK_FAILED;
		break;
	case NVME_SC_COMPARE_FAILED:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = MISCOMPARE;
		asc = SCSI_ASC_MISCOMPARE_DURING_VERIFY;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	case NVME_SC_ACCESS_DENIED:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = ILLEGAL_REQUEST;
		asc = SCSI_ASC_ACCESS_DENIED_INVALID_LUN_ID;
		ascq = SCSI_ASCQ_INVALID_LUN_ID;
		break;

	/* Unspecified/Default */
	case NVME_SC_CMDID_CONFLICT:
	case NVME_SC_CMD_SEQ_ERROR:
	case NVME_SC_CQ_INVALID:
	case NVME_SC_QID_INVALID:
	case NVME_SC_QUEUE_SIZE:
	case NVME_SC_ABORT_LIMIT:
	case NVME_SC_ABORT_MISSING:
	case NVME_SC_ASYNC_LIMIT:
	case NVME_SC_FIRMWARE_SLOT:
	case NVME_SC_FIRMWARE_IMAGE:
	case NVME_SC_INVALID_VECTOR:
	case NVME_SC_INVALID_LOG_PAGE:
	default:
		status = SAM_STAT_CHECK_CONDITION;
		sense_key = ILLEGAL_REQUEST;
		asc = SCSI_ASC_NO_SENSE;
		ascq = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		break;
	}

	res = nvme_trans_completion(hdr, status, sense_key, asc, ascq);

	return res;
}

/* INQUIRY Helper Functions */

static int nvme_trans_standard_inquiry_page(struct nvme_ns *ns,
					struct sg_io_hdr *hdr, u8 *inq_response,
					int alloc_len)
{
	struct nvme_dev *dev = ns->dev;
	dma_addr_t dma_addr;
	void *mem;
	struct nvme_id_ns *id_ns;
	int res = SNTI_TRANSLATION_SUCCESS;
	int nvme_sc;
	int xfer_len;
	u8 resp_data_format = 0x02;
	u8 protect;
	u8 cmdque = 0x01 << 1;
	u8 fw_offset = sizeof(dev->firmware_rev);

	mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
				&dma_addr, GFP_KERNEL);
	if (mem == NULL) {
		res = -ENOMEM;
		goto out_dma;
	}

	/* nvme ns identify - use DPS value for PROTECT field */
	nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
	res = nvme_trans_status_code(hdr, nvme_sc);
	/*
	 * If nvme_sc was -ve, res will be -ve here.
	 * If nvme_sc was +ve, the status will have been translated, and res
	 *  can only be 0 or -ve.
	 *    - If 0 && nvme_sc > 0, then go into next if where res gets nvme_sc
	 *    - If -ve, return because it is a Linux error.
	 */
	if (res)
		goto out_free;
	if (nvme_sc) {
		res = nvme_sc;
		goto out_free;
	}
	id_ns = mem;
	(id_ns->dps) ? (protect = 0x01) : (protect = 0);

	memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
	inq_response[2] = VERSION_SPC_4;
	inq_response[3] = resp_data_format;	/* normaca=0 | hisup=0 */
	inq_response[4] = ADDITIONAL_STD_INQ_LENGTH;
	inq_response[5] = protect;	/* sccs=0 | acc=0 | tpgs=0 | pc3=0 */
	inq_response[7] = cmdque;	/* wbus16=0 | sync=0 | vs=0 */
	strncpy(&inq_response[8], "NVMe    ", 8);
	strncpy(&inq_response[16], dev->model, 16);

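	/* The standard INQUIRY PRODUCT REVISION field is only 4 bytes, so
	 * report the last four characters of the 8-byte NVMe firmware
	 * revision, skipping trailing spaces. */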
	while (dev->firmware_rev[fw_offset - 1] == ' ' && fw_offset > 4)
		fw_offset--;
	fw_offset -= 4;
	strncpy(&inq_response[32], dev->firmware_rev + fw_offset, 4);

	xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
	res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);

 out_free:
	dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
			  dma_addr);
 out_dma:
	return res;
}

static int nvme_trans_supported_vpd_pages(struct nvme_ns *ns,
					struct sg_io_hdr *hdr, u8 *inq_response,
					int alloc_len)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	int xfer_len;

	memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
	inq_response[1] = INQ_SUPPORTED_VPD_PAGES_PAGE;   /* Page Code */
	inq_response[3] = INQ_NUM_SUPPORTED_VPD_PAGES;    /* Page Length */
	inq_response[4] = INQ_SUPPORTED_VPD_PAGES_PAGE;
	inq_response[5] = INQ_UNIT_SERIAL_NUMBER_PAGE;
	inq_response[6] = INQ_DEVICE_IDENTIFICATION_PAGE;
	inq_response[7] = INQ_EXTENDED_INQUIRY_DATA_PAGE;
	inq_response[8] = INQ_BDEV_CHARACTERISTICS_PAGE;
	inq_response[9] = INQ_BDEV_LIMITS_PAGE;

	xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
	res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);

	return res;
}

static int nvme_trans_unit_serial_page(struct nvme_ns *ns,
					struct sg_io_hdr *hdr, u8 *inq_response,
					int alloc_len)
{
	struct nvme_dev *dev = ns->dev;
	int res = SNTI_TRANSLATION_SUCCESS;
	int xfer_len;

	memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
	inq_response[1] = INQ_UNIT_SERIAL_NUMBER_PAGE; /* Page Code */
	inq_response[3] = INQ_SERIAL_NUMBER_LENGTH;    /* Page Length */
	strncpy(&inq_response[4], dev->serial, INQ_SERIAL_NUMBER_LENGTH);

	xfer_len = min(alloc_len, STANDARD_INQUIRY_LENGTH);
	res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);

	return res;
}

static int nvme_trans_device_id_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
					u8 *inq_response, int alloc_len)
{
	struct nvme_dev *dev = ns->dev;
	dma_addr_t dma_addr;
	void *mem;
	int res = SNTI_TRANSLATION_SUCCESS;
	int nvme_sc;
	int xfer_len;
	__be32 tmp_id = cpu_to_be32(ns->ns_id);

	mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
					&dma_addr, GFP_KERNEL);
	if (mem == NULL) {
		res = -ENOMEM;
		goto out_dma;
	}

	memset(inq_response, 0, alloc_len);
	inq_response[1] = INQ_DEVICE_IDENTIFICATION_PAGE;    /* Page Code */
	if (readl(&dev->bar->vs) >= NVME_VS(1, 1)) {
		struct nvme_id_ns *id_ns = mem;
		void *eui = id_ns->eui64;
		int len = sizeof(id_ns->eui64);

		nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
		res = nvme_trans_status_code(hdr, nvme_sc);
		if (res)
			goto out_free;
		if (nvme_sc) {
			res = nvme_sc;
			goto out_free;
		}

		if (readl(&dev->bar->vs) >= NVME_VS(1, 2)) {
			if (bitmap_empty(eui, len * 8)) {
				eui = id_ns->nguid;
				len = sizeof(id_ns->nguid);
			}
		}
		if (bitmap_empty(eui, len * 8))
			goto scsi_string;

		inq_response[3] = 4 + len; /* Page Length */
		/* Designation Descriptor start */
		inq_response[4] = 0x01;    /* Proto ID=0h | Code set=1h */
		inq_response[5] = 0x02;    /* PIV=0b | Asso=00b | Designator Type=2h */
		inq_response[6] = 0x00;    /* Rsvd */
		inq_response[7] = len;     /* Designator Length */
		memcpy(&inq_response[8], eui, len);
	} else {
 scsi_string:
		if (alloc_len < 72) {
			res = nvme_trans_completion(hdr,
					SAM_STAT_CHECK_CONDITION,
					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
			goto out_free;
		}
		inq_response[3] = 0x48;    /* Page Length */
		/* Designation Descriptor start */
		inq_response[4] = 0x03;    /* Proto ID=0h | Code set=3h */
		inq_response[5] = 0x08;    /* PIV=0b | Asso=00b | Designator Type=8h */
		inq_response[6] = 0x00;    /* Rsvd */
		inq_response[7] = 0x44;    /* Designator Length */

		sprintf(&inq_response[8], "%04x", dev->pci_dev->vendor);
		memcpy(&inq_response[12], dev->model, sizeof(dev->model));
		sprintf(&inq_response[52], "%04x", tmp_id);
		memcpy(&inq_response[56], dev->serial, sizeof(dev->serial));
	}
	xfer_len = alloc_len;
	res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);

 out_free:
	dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
			  dma_addr);
 out_dma:
	return res;
}

static int nvme_trans_ext_inq_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
					int alloc_len)
{
	u8 *inq_response;
	int res = SNTI_TRANSLATION_SUCCESS;
	int nvme_sc;
	struct nvme_dev *dev = ns->dev;
	dma_addr_t dma_addr;
	void *mem;
	struct nvme_id_ctrl *id_ctrl;
	struct nvme_id_ns *id_ns;
	int xfer_len;
	u8 microcode = 0x80;
	u8 spt;
	u8 spt_lut[8] = {0, 0, 2, 1, 4, 6, 5, 7};
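	/* spt_lut translates the namespace DPC bits (protection types
	 * supported) into the SPT field encoding of the Extended INQUIRY
	 * data page */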
	u8 grd_chk, app_chk, ref_chk, protect;
	u8 uask_sup = 0x20;
	u8 v_sup;
	u8 luiclr = 0x01;

	inq_response = kmalloc(EXTENDED_INQUIRY_DATA_PAGE_LENGTH, GFP_KERNEL);
	if (inq_response == NULL) {
		res = -ENOMEM;
		goto out_mem;
	}

	mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
							&dma_addr, GFP_KERNEL);
	if (mem == NULL) {
		res = -ENOMEM;
		goto out_dma;
	}

	/* nvme ns identify */
	nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
	res = nvme_trans_status_code(hdr, nvme_sc);
	if (res)
		goto out_free;
	if (nvme_sc) {
		res = nvme_sc;
		goto out_free;
	}
	id_ns = mem;
	spt = spt_lut[(id_ns->dpc) & 0x07] << 3;
	(id_ns->dps) ? (protect = 0x01) : (protect = 0);
	grd_chk = protect << 2;
	app_chk = protect << 1;
	ref_chk = protect;

	/* nvme controller identify */
	nvme_sc = nvme_identify(dev, 0, 1, dma_addr);
	res = nvme_trans_status_code(hdr, nvme_sc);
	if (res)
		goto out_free;
	if (nvme_sc) {
		res = nvme_sc;
		goto out_free;
	}
	id_ctrl = mem;
	v_sup = id_ctrl->vwc;

	memset(inq_response, 0, EXTENDED_INQUIRY_DATA_PAGE_LENGTH);
	inq_response[1] = INQ_EXTENDED_INQUIRY_DATA_PAGE;    /* Page Code */
	inq_response[2] = 0x00;    /* Page Length MSB */
	inq_response[3] = 0x3C;    /* Page Length LSB */
	inq_response[4] = microcode | spt | grd_chk | app_chk | ref_chk;
	inq_response[5] = uask_sup;
	inq_response[6] = v_sup;
	inq_response[7] = luiclr;
	inq_response[8] = 0;
	inq_response[9] = 0;

	xfer_len = min(alloc_len, EXTENDED_INQUIRY_DATA_PAGE_LENGTH);
	res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);

 out_free:
	dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
			  dma_addr);
 out_dma:
	kfree(inq_response);
 out_mem:
	return res;
}

static int nvme_trans_bdev_limits_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
					u8 *inq_response, int alloc_len)
{
	__be32 max_sectors = cpu_to_be32(
		nvme_block_nr(ns, queue_max_hw_sectors(ns->queue)));
	__be32 max_discard = cpu_to_be32(ns->queue->limits.max_discard_sectors);
	__be32 discard_desc_count = cpu_to_be32(0x100);

	memset(inq_response, 0, STANDARD_INQUIRY_LENGTH);
	inq_response[1] = VPD_BLOCK_LIMITS;
	inq_response[3] = 0x3c; /* Page Length */
	memcpy(&inq_response[8], &max_sectors, sizeof(u32));
	memcpy(&inq_response[20], &max_discard, sizeof(u32));

	if (max_discard)
		memcpy(&inq_response[24], &discard_desc_count, sizeof(u32));

	return nvme_trans_copy_to_user(hdr, inq_response, 0x3c);
}

static int nvme_trans_bdev_char_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
					int alloc_len)
{
	u8 *inq_response;
	int res = SNTI_TRANSLATION_SUCCESS;
	int xfer_len;

	inq_response = kzalloc(EXTENDED_INQUIRY_DATA_PAGE_LENGTH, GFP_KERNEL);
	if (inq_response == NULL) {
		res = -ENOMEM;
		goto out_mem;
	}

	inq_response[1] = INQ_BDEV_CHARACTERISTICS_PAGE;    /* Page Code */
	inq_response[2] = 0x00;    /* Page Length MSB */
	inq_response[3] = 0x3C;    /* Page Length LSB */
	inq_response[4] = 0x00;    /* Medium Rotation Rate MSB */
	inq_response[5] = 0x01;    /* Medium Rotation Rate LSB */
	inq_response[6] = 0x00;    /* Form Factor */

	xfer_len = min(alloc_len, EXTENDED_INQUIRY_DATA_PAGE_LENGTH);
	res = nvme_trans_copy_to_user(hdr, inq_response, xfer_len);

	kfree(inq_response);
 out_mem:
	return res;
}

/* LOG SENSE Helper Functions */

static int nvme_trans_log_supp_pages(struct nvme_ns *ns, struct sg_io_hdr *hdr,
					int alloc_len)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	int xfer_len;
	u8 *log_response;

	log_response = kzalloc(LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH, GFP_KERNEL);
	if (log_response == NULL) {
		res = -ENOMEM;
		goto out_mem;
	}

	log_response[0] = LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE;
	/* Subpage=0x00, Page Length MSB=0 */
	log_response[3] = SUPPORTED_LOG_PAGES_PAGE_LENGTH;
	log_response[4] = LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE;
	log_response[5] = LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE;
	log_response[6] = LOG_PAGE_TEMPERATURE_PAGE;

	xfer_len = min(alloc_len, LOG_PAGE_SUPPORTED_LOG_PAGES_LENGTH);
	res = nvme_trans_copy_to_user(hdr, log_response, xfer_len);

	kfree(log_response);
 out_mem:
	return res;
}

static int nvme_trans_log_info_exceptions(struct nvme_ns *ns,
					struct sg_io_hdr *hdr, int alloc_len)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	int xfer_len;
	u8 *log_response;
	struct nvme_command c;
	struct nvme_dev *dev = ns->dev;
	struct nvme_smart_log *smart_log;
	dma_addr_t dma_addr;
	void *mem;
	u8 temp_c;
	u16 temp_k;

	log_response = kzalloc(LOG_INFO_EXCP_PAGE_LENGTH, GFP_KERNEL);
	if (log_response == NULL) {
		res = -ENOMEM;
		goto out_mem;
	}

	mem = dma_alloc_coherent(&dev->pci_dev->dev,
					sizeof(struct nvme_smart_log),
					&dma_addr, GFP_KERNEL);
	if (mem == NULL) {
		res = -ENOMEM;
		goto out_dma;
	}

	/* Get SMART Log Page */
	memset(&c, 0, sizeof(c));
	c.common.opcode = nvme_admin_get_log_page;
	c.common.nsid = cpu_to_le32(0xFFFFFFFF);
	c.common.prp1 = cpu_to_le64(dma_addr);
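	/*
	 * cdw10: bits 31:16 = NUMD, the zero-based dword count of the log
	 * page (512-byte SMART log -> 512 / 4 - 1 = 127); bits 7:0 = log
	 * page identifier.
	 */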
	c.common.cdw10[0] = cpu_to_le32((((sizeof(struct nvme_smart_log) /
			BYTES_TO_DWORDS) - 1) << 16) | NVME_LOG_SMART);
	res = nvme_submit_admin_cmd(dev, &c, NULL);
	if (res != NVME_SC_SUCCESS) {
		temp_c = LOG_TEMP_UNKNOWN;
	} else {
		smart_log = mem;
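		/* temperature[] is little-endian, in kelvins; e.g. 0x012C
		 * (300 K) gives 27 degrees C */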
		temp_k = (smart_log->temperature[1] << 8) +
				(smart_log->temperature[0]);
		temp_c = temp_k - KELVIN_TEMP_FACTOR;
	}

	log_response[0] = LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE;
	/* Subpage=0x00, Page Length MSB=0 */
	log_response[3] = REMAINING_INFO_EXCP_PAGE_LENGTH;
	/* Informational Exceptions Log Parameter 1 Start */
	/* Parameter Code=0x0000 bytes 4,5 */
	log_response[6] = 0x23; /* DU=0, TSD=1, ETC=0, TMC=0, FMT_AND_LNK=11b */
	log_response[7] = 0x04; /* PARAMETER LENGTH */
	/* Add sense Code and qualifier = 0x00 each */
	/* Use Temperature from NVMe Get Log Page, convert to C from K */
	log_response[10] = temp_c;

	xfer_len = min(alloc_len, LOG_INFO_EXCP_PAGE_LENGTH);
	res = nvme_trans_copy_to_user(hdr, log_response, xfer_len);

	dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_smart_log),
			  mem, dma_addr);
 out_dma:
	kfree(log_response);
 out_mem:
	return res;
}

static int nvme_trans_log_temperature(struct nvme_ns *ns, struct sg_io_hdr *hdr,
					int alloc_len)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	int xfer_len;
	u8 *log_response;
	struct nvme_command c;
	struct nvme_dev *dev = ns->dev;
	struct nvme_smart_log *smart_log;
	dma_addr_t dma_addr;
	void *mem;
	u32 feature_resp;
	u8 temp_c_cur, temp_c_thresh;
	u16 temp_k;

	log_response = kzalloc(LOG_TEMP_PAGE_LENGTH, GFP_KERNEL);
	if (log_response == NULL) {
		res = -ENOMEM;
		goto out_mem;
	}

	mem = dma_alloc_coherent(&dev->pci_dev->dev,
					sizeof(struct nvme_smart_log),
					&dma_addr, GFP_KERNEL);
	if (mem == NULL) {
		res = -ENOMEM;
		goto out_dma;
	}

	/* Get SMART Log Page */
	memset(&c, 0, sizeof(c));
	c.common.opcode = nvme_admin_get_log_page;
	c.common.nsid = cpu_to_le32(0xFFFFFFFF);
	c.common.prp1 = cpu_to_le64(dma_addr);
	c.common.cdw10[0] = cpu_to_le32((((sizeof(struct nvme_smart_log) /
			BYTES_TO_DWORDS) - 1) << 16) | NVME_LOG_SMART);
	res = nvme_submit_admin_cmd(dev, &c, NULL);
	if (res != NVME_SC_SUCCESS) {
		temp_c_cur = LOG_TEMP_UNKNOWN;
	} else {
		smart_log = mem;
		temp_k = (smart_log->temperature[1] << 8) +
				(smart_log->temperature[0]);
		temp_c_cur = temp_k - KELVIN_TEMP_FACTOR;
	}

	/* Get Features for Temp Threshold */
	res = nvme_get_features(dev, NVME_FEAT_TEMP_THRESH, 0, 0,
								&feature_resp);
	if (res != NVME_SC_SUCCESS)
		temp_c_thresh = LOG_TEMP_UNKNOWN;
	else
		temp_c_thresh = (feature_resp & 0xFFFF) - KELVIN_TEMP_FACTOR;

	log_response[0] = LOG_PAGE_TEMPERATURE_PAGE;
	/* Subpage=0x00, Page Length MSB=0 */
	log_response[3] = REMAINING_TEMP_PAGE_LENGTH;
	/* Temperature Log Parameter 1 (Temperature) Start */
	/* Parameter Code = 0x0000 */
	log_response[6] = 0x01;		/* Format and Linking = 01b */
	log_response[7] = 0x02;		/* Parameter Length */
	/* Use Temperature from NVMe Get Log Page, convert to C from K */
	log_response[9] = temp_c_cur;
	/* Temperature Log Parameter 2 (Reference Temperature) Start */
	log_response[11] = 0x01;	/* Parameter Code = 0x0001 */
	log_response[12] = 0x01;	/* Format and Linking = 01b */
	log_response[13] = 0x02;	/* Parameter Length */
	/* Use Temperature Thresh from NVMe Get Log Page, convert to C from K */
	log_response[15] = temp_c_thresh;

	xfer_len = min(alloc_len, LOG_TEMP_PAGE_LENGTH);
	res = nvme_trans_copy_to_user(hdr, log_response, xfer_len);

	dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_smart_log),
			  mem, dma_addr);
 out_dma:
	kfree(log_response);
 out_mem:
	return res;
}

/* MODE SENSE Helper Functions */

static int nvme_trans_fill_mode_parm_hdr(u8 *resp, int len, u8 cdb10, u8 llbaa,
					u16 mode_data_length, u16 blk_desc_len)
{
	/* Quick check to make sure I don't stomp on my own memory... */
	if ((cdb10 && len < 8) || (!cdb10 && len < 4))
		return SNTI_INTERNAL_ERROR;

	if (cdb10) {
		resp[0] = (mode_data_length & 0xFF00) >> 8;
		resp[1] = (mode_data_length & 0x00FF);
		/* resp[2] and [3] are zero */
		resp[4] = llbaa;
		resp[5] = RESERVED_FIELD;
		resp[6] = (blk_desc_len & 0xFF00) >> 8;
		resp[7] = (blk_desc_len & 0x00FF);
	} else {
		resp[0] = (mode_data_length & 0x00FF);
		/* resp[1] and [2] are zero */
		resp[3] = (blk_desc_len & 0x00FF);
	}

	return SNTI_TRANSLATION_SUCCESS;
}

static int nvme_trans_fill_blk_desc(struct nvme_ns *ns, struct sg_io_hdr *hdr,
				    u8 *resp, int len, u8 llbaa)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	int nvme_sc;
	struct nvme_dev *dev = ns->dev;
	dma_addr_t dma_addr;
	void *mem;
	struct nvme_id_ns *id_ns;
	u8 flbas;
	u32 lba_length;

	if (llbaa == 0 && len < MODE_PAGE_BLK_DES_LEN)
		return SNTI_INTERNAL_ERROR;
	else if (llbaa > 0 && len < MODE_PAGE_LLBAA_BLK_DES_LEN)
		return SNTI_INTERNAL_ERROR;

	mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
							&dma_addr, GFP_KERNEL);
	if (mem == NULL) {
		res = -ENOMEM;
		goto out;
	}

	/* nvme ns identify */
	nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
	res = nvme_trans_status_code(hdr, nvme_sc);
	if (res)
		goto out_dma;
	if (nvme_sc) {
		res = nvme_sc;
		goto out_dma;
	}
	id_ns = mem;
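	/* flbas selects the active LBA format; lbaf.ds is a power-of-two
	 * exponent, e.g. ds = 9 means 512-byte logical blocks */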
	flbas = (id_ns->flbas) & 0x0F;
	lba_length = (1 << (id_ns->lbaf[flbas].ds));

	if (llbaa == 0) {
		__be32 tmp_cap = cpu_to_be32(le64_to_cpu(id_ns->ncap));
		/* Byte 4 is reserved */
		__be32 tmp_len = cpu_to_be32(lba_length & 0x00FFFFFF);

		memcpy(resp, &tmp_cap, sizeof(u32));
		memcpy(&resp[4], &tmp_len, sizeof(u32));
	} else {
		__be64 tmp_cap = cpu_to_be64(le64_to_cpu(id_ns->ncap));
		__be32 tmp_len = cpu_to_be32(lba_length);

		memcpy(resp, &tmp_cap, sizeof(u64));
		/* Bytes 8, 9, 10, 11 are reserved */
		memcpy(&resp[12], &tmp_len, sizeof(u32));
	}

 out_dma:
	dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
			  dma_addr);
 out:
	return res;
}

static int nvme_trans_fill_control_page(struct nvme_ns *ns,
					struct sg_io_hdr *hdr, u8 *resp,
					int len)
{
	if (len < MODE_PAGE_CONTROL_LEN)
		return SNTI_INTERNAL_ERROR;

	resp[0] = MODE_PAGE_CONTROL;
	resp[1] = MODE_PAGE_CONTROL_LEN_FIELD;
	resp[2] = 0x0E;		/* TST=000b, TMF_ONLY=0, DPICZ=1,
				 * D_SENSE=1, GLTSD=1, RLEC=0 */
	resp[3] = 0x12;		/* Q_ALGO_MODIFIER=1h, NUAR=0, QERR=01b */
	/* Byte 4:  VS=0, RAC=0, UA_INT=0, SWP=0 */
	resp[5] = 0x40;		/* ATO=0, TAS=1, ATMPE=0, RWWP=0, AUTOLOAD=0 */
	/* resp[6] and [7] are obsolete, thus zero */
	resp[8] = 0xFF;		/* Busy timeout period = 0xffff */
	resp[9] = 0xFF;
	/* Bytes 10,11: Extended selftest completion time = 0x0000 */

	return SNTI_TRANSLATION_SUCCESS;
}

static int nvme_trans_fill_caching_page(struct nvme_ns *ns,
					struct sg_io_hdr *hdr,
					u8 *resp, int len)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	int nvme_sc;
	struct nvme_dev *dev = ns->dev;
	u32 feature_resp;
	u8 vwc;

	if (len < MODE_PAGE_CACHING_LEN)
		return SNTI_INTERNAL_ERROR;

	nvme_sc = nvme_get_features(dev, NVME_FEAT_VOLATILE_WC, 0, 0,
								&feature_resp);
	res = nvme_trans_status_code(hdr, nvme_sc);
	if (res)
		goto out;
	if (nvme_sc) {
		res = nvme_sc;
		goto out;
	}
	vwc = feature_resp & 0x00000001;

	resp[0] = MODE_PAGE_CACHING;
	resp[1] = MODE_PAGE_CACHING_LEN_FIELD;
	resp[2] = vwc << 2;

 out:
	return res;
}

static int nvme_trans_fill_pow_cnd_page(struct nvme_ns *ns,
					struct sg_io_hdr *hdr, u8 *resp,
					int len)
{
	int res = SNTI_TRANSLATION_SUCCESS;

	if (len < MODE_PAGE_POW_CND_LEN)
		return SNTI_INTERNAL_ERROR;

	resp[0] = MODE_PAGE_POWER_CONDITION;
	resp[1] = MODE_PAGE_POW_CND_LEN_FIELD;
	/* All other bytes are zero */

	return res;
}

static int nvme_trans_fill_inf_exc_page(struct nvme_ns *ns,
					struct sg_io_hdr *hdr, u8 *resp,
					int len)
{
	int res = SNTI_TRANSLATION_SUCCESS;

	if (len < MODE_PAGE_INF_EXC_LEN)
		return SNTI_INTERNAL_ERROR;

	resp[0] = MODE_PAGE_INFO_EXCEP;
	resp[1] = MODE_PAGE_INF_EXC_LEN_FIELD;
	resp[2] = 0x88;
	/* All other bytes are zero */

	return res;
}

static int nvme_trans_fill_all_pages(struct nvme_ns *ns, struct sg_io_hdr *hdr,
				     u8 *resp, int len)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	u16 mode_pages_offset_1 = 0;
	u16 mode_pages_offset_2, mode_pages_offset_3, mode_pages_offset_4;

	mode_pages_offset_2 = mode_pages_offset_1 + MODE_PAGE_CACHING_LEN;
	mode_pages_offset_3 = mode_pages_offset_2 + MODE_PAGE_CONTROL_LEN;
	mode_pages_offset_4 = mode_pages_offset_3 + MODE_PAGE_POW_CND_LEN;

	res = nvme_trans_fill_caching_page(ns, hdr, &resp[mode_pages_offset_1],
					MODE_PAGE_CACHING_LEN);
	if (res != SNTI_TRANSLATION_SUCCESS)
		goto out;
	res = nvme_trans_fill_control_page(ns, hdr, &resp[mode_pages_offset_2],
					MODE_PAGE_CONTROL_LEN);
	if (res != SNTI_TRANSLATION_SUCCESS)
		goto out;
	res = nvme_trans_fill_pow_cnd_page(ns, hdr, &resp[mode_pages_offset_3],
					MODE_PAGE_POW_CND_LEN);
	if (res != SNTI_TRANSLATION_SUCCESS)
		goto out;
	res = nvme_trans_fill_inf_exc_page(ns, hdr, &resp[mode_pages_offset_4],
					MODE_PAGE_INF_EXC_LEN);
	if (res != SNTI_TRANSLATION_SUCCESS)
		goto out;

 out:
	return res;
}

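/*
 * Per SPC-4, a block descriptor is 8 bytes in short form and 16 bytes when
 * LLBAA = 1; with the single descriptor reported here, DBD = 0 and
 * LLBAA = 1 gives 8 * (1 + 1) * 1 = 16 bytes.
 */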
static inline int nvme_trans_get_blk_desc_len(u8 dbd, u8 llbaa)
{
	if (dbd == MODE_SENSE_BLK_DESC_ENABLED) {
		/* SPC-4: len = 8 x Num_of_descriptors if llbaa = 0, 16x if 1 */
		return 8 * (llbaa + 1) * MODE_SENSE_BLK_DESC_COUNT;
	} else {
		return 0;
	}
}

static int nvme_trans_mode_page_create(struct nvme_ns *ns,
					struct sg_io_hdr *hdr, u8 *cmd,
					u16 alloc_len, u8 cdb10,
					int (*mode_page_fill_func)
					(struct nvme_ns *,
					struct sg_io_hdr *hdr, u8 *, int),
					u16 mode_pages_tot_len)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	int xfer_len;
	u8 *response;
	u8 dbd, llbaa;
	u16 resp_size;
	int mph_size;
	u16 mode_pages_offset_1;
	u16 blk_desc_len, blk_desc_offset, mode_data_length;

	dbd = GET_MODE_SENSE_DBD(cmd);
	llbaa = GET_MODE_SENSE_LLBAA(cmd);
	mph_size = GET_MODE_SENSE_MPH_SIZE(cdb10);
	blk_desc_len = nvme_trans_get_blk_desc_len(dbd, llbaa);

	resp_size = mph_size + blk_desc_len + mode_pages_tot_len;
	/* Refer spc4r34 Table 440 for calculation of Mode data Length field */
	mode_data_length = 3 + (3 * cdb10) + blk_desc_len + mode_pages_tot_len;
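	/* Example: MODE SENSE(10) (cdb10 = 1) with one short block descriptor
	 * and the 0x14-byte caching page gives 3 + 3 + 8 + 0x14 = 0x22; the
	 * field excludes its own size (two bytes in the 10-byte header, one
	 * byte in the 6-byte header). */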

	blk_desc_offset = mph_size;
	mode_pages_offset_1 = blk_desc_offset + blk_desc_len;

	response = kzalloc(resp_size, GFP_KERNEL);
	if (response == NULL) {
		res = -ENOMEM;
		goto out_mem;
	}

	res = nvme_trans_fill_mode_parm_hdr(&response[0], mph_size, cdb10,
					llbaa, mode_data_length, blk_desc_len);
	if (res != SNTI_TRANSLATION_SUCCESS)
		goto out_free;
	if (blk_desc_len > 0) {
		res = nvme_trans_fill_blk_desc(ns, hdr,
					       &response[blk_desc_offset],
					       blk_desc_len, llbaa);
		if (res != SNTI_TRANSLATION_SUCCESS)
			goto out_free;
	}
	res = mode_page_fill_func(ns, hdr, &response[mode_pages_offset_1],
					mode_pages_tot_len);
	if (res != SNTI_TRANSLATION_SUCCESS)
		goto out_free;

	xfer_len = min(alloc_len, resp_size);
	res = nvme_trans_copy_to_user(hdr, response, xfer_len);

 out_free:
	kfree(response);
 out_mem:
	return res;
}

/* Read Capacity Helper Functions */

static void nvme_trans_fill_read_cap(u8 *response, struct nvme_id_ns *id_ns,
								u8 cdb16)
{
	u8 flbas;
	u32 lba_length;
	u64 rlba;
	u8 prot_en;
	u8 p_type_lut[4] = {0, 0, 1, 2};
	__be64 tmp_rlba;
	__be32 tmp_rlba_32;
	__be32 tmp_len;

	flbas = (id_ns->flbas) & 0x0F;
	lba_length = (1 << (id_ns->lbaf[flbas].ds));
	rlba = le64_to_cpup(&id_ns->nsze) - 1;
	(id_ns->dps) ? (prot_en = 0x01) : (prot_en = 0);

	if (!cdb16) {
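		/* READ CAPACITY (10) can only report a 32-bit LBA; clamping
		 * to 0xFFFFFFFF tells the initiator to retry with
		 * READ CAPACITY (16) */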
		if (rlba > 0xFFFFFFFF)
			rlba = 0xFFFFFFFF;
		tmp_rlba_32 = cpu_to_be32(rlba);
		tmp_len = cpu_to_be32(lba_length);
		memcpy(response, &tmp_rlba_32, sizeof(u32));
		memcpy(&response[4], &tmp_len, sizeof(u32));
	} else {
		tmp_rlba = cpu_to_be64(rlba);
		tmp_len = cpu_to_be32(lba_length);
		memcpy(response, &tmp_rlba, sizeof(u64));
		memcpy(&response[8], &tmp_len, sizeof(u32));
		response[12] = (p_type_lut[id_ns->dps & 0x3] << 1) | prot_en;
		/* P_I_Exponent = 0x0 | LBPPBE = 0x0 */
		/* LBPME = 0 | LBPRZ = 0 | LALBA = 0x00 */
		/* Bytes 16-31 - Reserved */
	}
}

/* Start Stop Unit Helper Functions */

static int nvme_trans_power_state(struct nvme_ns *ns, struct sg_io_hdr *hdr,
						u8 pc, u8 pcmod, u8 start)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	int nvme_sc;
	struct nvme_dev *dev = ns->dev;
	dma_addr_t dma_addr;
	void *mem;
	struct nvme_id_ctrl *id_ctrl;
	int lowest_pow_st;	/* max npss = lowest power consumption */
	unsigned ps_desired = 0;

	/* NVMe Controller Identify */
	mem = dma_alloc_coherent(&dev->pci_dev->dev,
				sizeof(struct nvme_id_ctrl),
				&dma_addr, GFP_KERNEL);
	if (mem == NULL) {
		res = -ENOMEM;
		goto out;
	}
	nvme_sc = nvme_identify(dev, 0, 1, dma_addr);
	res = nvme_trans_status_code(hdr, nvme_sc);
	if (res)
		goto out_dma;
	if (nvme_sc) {
		res = nvme_sc;
		goto out_dma;
	}
	id_ctrl = mem;
	lowest_pow_st = max(POWER_STATE_0, (int)(id_ctrl->npss - 1));

	switch (pc) {
	case NVME_POWER_STATE_START_VALID:
		/* Action unspecified if POWER CONDITION MODIFIER != 0 */
		if (pcmod == 0 && start == 0x1)
			ps_desired = POWER_STATE_0;
		if (pcmod == 0 && start == 0x0)
			ps_desired = lowest_pow_st;
		break;
	case NVME_POWER_STATE_ACTIVE:
		/* Action unspecified if POWER CONDITION MODIFIER != 0 */
		if (pcmod == 0)
			ps_desired = POWER_STATE_0;
		break;
	case NVME_POWER_STATE_IDLE:
		/* Action unspecified if POWER CONDITION MODIFIER != [0,1,2] */
		if (pcmod == 0x0)
			ps_desired = POWER_STATE_1;
		else if (pcmod == 0x1)
			ps_desired = POWER_STATE_2;
		else if (pcmod == 0x2)
			ps_desired = POWER_STATE_3;
		break;
	case NVME_POWER_STATE_STANDBY:
		/* Action unspecified if POWER CONDITION MODIFIER != [0,1] */
		if (pcmod == 0x0)
			ps_desired = max(POWER_STATE_0, (lowest_pow_st - 2));
		else if (pcmod == 0x1)
			ps_desired = max(POWER_STATE_0, (lowest_pow_st - 1));
		break;
	case NVME_POWER_STATE_LU_CONTROL:
	default:
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
				ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
				SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		break;
	}
	nvme_sc = nvme_set_features(dev, NVME_FEAT_POWER_MGMT, ps_desired, 0,
				    NULL);
	res = nvme_trans_status_code(hdr, nvme_sc);
	if (res)
		goto out_dma;
	if (nvme_sc)
		res = nvme_sc;
 out_dma:
	dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ctrl), mem,
			  dma_addr);
 out:
	return res;
}

/* Write Buffer Helper Functions */
/* Also used for Format Unit, with hdr passed as NULL and buffer_id = 0 */

static int nvme_trans_send_fw_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
					u8 opcode, u32 tot_len, u32 offset,
					u8 buffer_id)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	int nvme_sc;
	struct nvme_dev *dev = ns->dev;
	struct nvme_command c;
	struct nvme_iod *iod = NULL;
	unsigned length;

	memset(&c, 0, sizeof(c));
	c.common.opcode = opcode;
	if (opcode == nvme_admin_download_fw) {
		if (hdr->iovec_count > 0) {
			/* Assuming SGL is not allowed for this command */
			res = nvme_trans_completion(hdr,
						SAM_STAT_CHECK_CONDITION,
						ILLEGAL_REQUEST,
						SCSI_ASC_INVALID_CDB,
						SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
			goto out;
		}
		iod = nvme_map_user_pages(dev, DMA_TO_DEVICE,
				(unsigned long)hdr->dxferp, tot_len);
		if (IS_ERR(iod)) {
			res = PTR_ERR(iod);
			goto out;
		}
		length = nvme_setup_prps(dev, iod, tot_len, GFP_KERNEL);
		if (length != tot_len) {
			res = -ENOMEM;
			goto out_unmap;
		}

		c.dlfw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
		c.dlfw.prp2 = cpu_to_le64(iod->first_dma);
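		/* NUMD and OFST are dword-granular, and NUMD is zero-based:
		 * e.g. a 4096-byte image gives numd = 4096 / 4 - 1 = 1023 */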
		c.dlfw.numd = cpu_to_le32((tot_len/BYTES_TO_DWORDS) - 1);
		c.dlfw.offset = cpu_to_le32(offset/BYTES_TO_DWORDS);
	} else if (opcode == nvme_admin_activate_fw) {
		u32 cdw10 = buffer_id | NVME_FWACT_REPL_ACTV;
		c.common.cdw10[0] = cpu_to_le32(cdw10);
	}

	nvme_sc = nvme_submit_admin_cmd(dev, &c, NULL);
	res = nvme_trans_status_code(hdr, nvme_sc);
	if (res)
		goto out_unmap;
	if (nvme_sc)
		res = nvme_sc;

 out_unmap:
	if (opcode == nvme_admin_download_fw) {
		nvme_unmap_user_pages(dev, DMA_TO_DEVICE, iod);
		nvme_free_iod(dev, iod);
	}
 out:
	return res;
}

/* Mode Select Helper Functions */

static inline void nvme_trans_modesel_get_bd_len(u8 *parm_list, u8 cdb10,
						u16 *bd_len, u8 *llbaa)
{
	if (cdb10) {
		/* 10 Byte CDB */
		*bd_len = (parm_list[MODE_SELECT_10_BD_OFFSET] << 8) +
			parm_list[MODE_SELECT_10_BD_OFFSET + 1];
		*llbaa = parm_list[MODE_SELECT_10_LLBAA_OFFSET] &
				MODE_SELECT_10_LLBAA_MASK;
	} else {
		/* 6 Byte CDB */
		*bd_len = parm_list[MODE_SELECT_6_BD_OFFSET];
	}
}
1645 
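/*
 * Block descriptor layouts consumed below (spc4r34 7.5.5.1, sbc3r27
 * 6.4.2.3): the short (LLBAA == 0) descriptor is 8 bytes, carrying a
 * 3-byte NUMBER OF LOGICAL BLOCKS in bytes 1-3 and a 3-byte LOGICAL
 * BLOCK LENGTH in bytes 5-7; the long (LLBAA == 1) descriptor is 16
 * bytes, carrying an 8-byte block count in bytes 0-7 and a 4-byte
 * block length in bytes 12-15.  Both are big-endian on the wire.
 */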
static void nvme_trans_modesel_save_bd(struct nvme_ns *ns, u8 *parm_list,
					u16 idx, u16 bd_len, u8 llbaa)
{
	u16 bd_num;

	bd_num = bd_len / ((llbaa == 0) ?
			SHORT_DESC_BLOCK : LONG_DESC_BLOCK);
	/* Store block descriptor info if a FORMAT UNIT comes later */
	/* TODO Saving 1st BD info; what to do if multiple BD received? */
	if (llbaa == 0) {
		/* Standard Block Descriptor - spc4r34 7.5.5.1 */
		ns->mode_select_num_blocks =
				(parm_list[idx + 1] << 16) +
				(parm_list[idx + 2] << 8) +
				(parm_list[idx + 3]);

		ns->mode_select_block_len =
				(parm_list[idx + 5] << 16) +
				(parm_list[idx + 6] << 8) +
				(parm_list[idx + 7]);
	} else {
		/* Long LBA Block Descriptor - sbc3r27 6.4.2.3 */
		ns->mode_select_num_blocks =
				(((u64)parm_list[idx + 0]) << 56) +
				(((u64)parm_list[idx + 1]) << 48) +
				(((u64)parm_list[idx + 2]) << 40) +
				(((u64)parm_list[idx + 3]) << 32) +
				(((u64)parm_list[idx + 4]) << 24) +
				(((u64)parm_list[idx + 5]) << 16) +
				(((u64)parm_list[idx + 6]) << 8) +
				((u64)parm_list[idx + 7]);

		ns->mode_select_block_len =
				(parm_list[idx + 12] << 24) +
				(parm_list[idx + 13] << 16) +
				(parm_list[idx + 14] << 8) +
				(parm_list[idx + 15]);
	}
}

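/*
 * MODE SELECT page handling: only the Caching mode page maps onto an
 * NVMe control - its WCE bit is forwarded as the Volatile Write Cache
 * feature (Set Features, FID 06h).  The Control and Power Condition
 * pages are accepted but only validated, as they have no direct NVMe
 * equivalent here.
 */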
static int nvme_trans_modesel_get_mp(struct nvme_ns *ns, struct sg_io_hdr *hdr,
					u8 *mode_page, u8 page_code)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	int nvme_sc;
	struct nvme_dev *dev = ns->dev;
	unsigned dword11;

	switch (page_code) {
	case MODE_PAGE_CACHING:
		dword11 = ((mode_page[2] & CACHING_MODE_PAGE_WCE_MASK) ? 1 : 0);
		nvme_sc = nvme_set_features(dev, NVME_FEAT_VOLATILE_WC, dword11,
					    0, NULL);
		res = nvme_trans_status_code(hdr, nvme_sc);
		if (res)
			break;
		if (nvme_sc) {
			res = nvme_sc;
			break;
		}
		break;
	case MODE_PAGE_CONTROL:
		break;
	case MODE_PAGE_POWER_CONDITION:
		/* Verify the OS is not trying to set timers */
		if ((mode_page[2] & 0x01) != 0 || (mode_page[3] & 0x0F) != 0) {
			res = nvme_trans_completion(hdr,
						SAM_STAT_CHECK_CONDITION,
						ILLEGAL_REQUEST,
						SCSI_ASC_INVALID_PARAMETER,
						SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
			if (!res)
				res = SNTI_INTERNAL_ERROR;
			break;
		}
		break;
	default:
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		if (!res)
			res = SNTI_INTERNAL_ERROR;
		break;
	}

	return res;
}

static int nvme_trans_modesel_data(struct nvme_ns *ns, struct sg_io_hdr *hdr,
					u8 *cmd, u16 parm_list_len, u8 pf,
					u8 sp, u8 cdb10)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	u8 *parm_list;
	u16 bd_len;
	u8 llbaa = 0;
	u16 index, saved_index;
	u8 page_code;
	u16 mp_size;

	/* Get parm list from data-in/out buffer */
	parm_list = kmalloc(parm_list_len, GFP_KERNEL);
	if (parm_list == NULL) {
		res = -ENOMEM;
		goto out;
	}

	res = nvme_trans_copy_from_user(hdr, parm_list, parm_list_len);
	if (res != SNTI_TRANSLATION_SUCCESS)
		goto out_mem;

	nvme_trans_modesel_get_bd_len(parm_list, cdb10, &bd_len, &llbaa);
	index = (cdb10) ? (MODE_SELECT_10_MPH_SIZE) : (MODE_SELECT_6_MPH_SIZE);

	if (bd_len != 0) {
		/* Block Descriptors present, parse */
		nvme_trans_modesel_save_bd(ns, parm_list, index, bd_len, llbaa);
		index += bd_len;
	}
	saved_index = index;

	/* Multiple mode pages may be present; iterate through all */
	/* In 1st Iteration, don't do NVME Command, only check for CDB errors */
	do {
		page_code = parm_list[index] & MODE_SELECT_PAGE_CODE_MASK;
		mp_size = parm_list[index + 1] + 2;
		if ((page_code != MODE_PAGE_CACHING) &&
		    (page_code != MODE_PAGE_CONTROL) &&
		    (page_code != MODE_PAGE_POWER_CONDITION)) {
			res = nvme_trans_completion(hdr,
						SAM_STAT_CHECK_CONDITION,
						ILLEGAL_REQUEST,
						SCSI_ASC_INVALID_CDB,
						SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
			goto out_mem;
		}
		index += mp_size;
	} while (index < parm_list_len);

	/* In 2nd Iteration, do the NVME Commands */
	index = saved_index;
	do {
		page_code = parm_list[index] & MODE_SELECT_PAGE_CODE_MASK;
		mp_size = parm_list[index + 1] + 2;
		res = nvme_trans_modesel_get_mp(ns, hdr, &parm_list[index],
								page_code);
		if (res != SNTI_TRANSLATION_SUCCESS)
			break;
		index += mp_size;
	} while (index < parm_list_len);

 out_mem:
	kfree(parm_list);
 out:
	return res;
}

/* Format Unit Helper Functions */

static int nvme_trans_fmt_set_blk_size_count(struct nvme_ns *ns,
					     struct sg_io_hdr *hdr)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	int nvme_sc;
	struct nvme_dev *dev = ns->dev;
	dma_addr_t dma_addr;
	void *mem;
	struct nvme_id_ns *id_ns;
	u8 flbas;

	/*
	 * SCSI expects a MODE SELECT to have been issued prior to a
	 * FORMAT UNIT, with the block size and count taken from the
	 * block descriptor in it. If no MODE SELECT has been issued,
	 * FORMAT shall use the current values for both.
	 */

	if (ns->mode_select_num_blocks == 0 || ns->mode_select_block_len == 0) {
		mem = dma_alloc_coherent(&dev->pci_dev->dev,
			sizeof(struct nvme_id_ns), &dma_addr, GFP_KERNEL);
		if (mem == NULL) {
			res = -ENOMEM;
			goto out;
		}
		/* nvme ns identify */
		nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
		res = nvme_trans_status_code(hdr, nvme_sc);
		if (res)
			goto out_dma;
		if (nvme_sc) {
			res = nvme_sc;
			goto out_dma;
		}
		id_ns = mem;

		if (ns->mode_select_num_blocks == 0)
			ns->mode_select_num_blocks = le64_to_cpu(id_ns->ncap);
		if (ns->mode_select_block_len == 0) {
			flbas = (id_ns->flbas) & 0x0F;
			ns->mode_select_block_len =
						(1 << (id_ns->lbaf[flbas].ds));
		}
 out_dma:
		dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
				  mem, dma_addr);
	}
 out:
	return res;
}

static int nvme_trans_fmt_get_parm_header(struct sg_io_hdr *hdr, u8 len,
					u8 format_prot_info, u8 *nvme_pf_code)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	u8 *parm_list;
	u8 pf_usage, pf_code;

	parm_list = kmalloc(len, GFP_KERNEL);
	if (parm_list == NULL) {
		res = -ENOMEM;
		goto out;
	}
	res = nvme_trans_copy_from_user(hdr, parm_list, len);
	if (res != SNTI_TRANSLATION_SUCCESS)
		goto out_mem;

	if ((parm_list[FORMAT_UNIT_IMMED_OFFSET] &
				FORMAT_UNIT_IMMED_MASK) != 0) {
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		goto out_mem;
	}

	if (len == FORMAT_UNIT_LONG_PARM_LIST_LEN &&
	    (parm_list[FORMAT_UNIT_PROT_INT_OFFSET] & 0x0F) != 0) {
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		goto out_mem;
	}
	pf_usage = parm_list[FORMAT_UNIT_PROT_FIELD_USAGE_OFFSET] &
			FORMAT_UNIT_PROT_FIELD_USAGE_MASK;
	pf_code = (pf_usage << 2) | format_prot_info;
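	/*
	 * pf_code packs PROTECT FIELD USAGE (upper bits) with the CDB's
	 * FMTPINFO field.  Only the combinations below are defined; they
	 * select the NVMe protection information type for the format:
	 *
	 *	pf_usage  fmtpinfo  ->  NVMe PI
	 *	    0        0          0 (none)
	 *	    0        2          1 (Type 1)
	 *	    0        3          2 (Type 2)
	 *	    1        3          3 (Type 3)
	 */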
	switch (pf_code) {
	case 0:
		*nvme_pf_code = 0;
		break;
	case 2:
		*nvme_pf_code = 1;
		break;
	case 3:
		*nvme_pf_code = 2;
		break;
	case 7:
		*nvme_pf_code = 3;
		break;
	default:
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		break;
	}

 out_mem:
	kfree(parm_list);
 out:
	return res;
}

static int nvme_trans_fmt_send_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr,
				   u8 prot_info)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	int nvme_sc;
	struct nvme_dev *dev = ns->dev;
	dma_addr_t dma_addr;
	void *mem;
	struct nvme_id_ns *id_ns;
	u8 i;
	u8 flbas, nlbaf;
	u8 selected_lbaf = 0xFF;
	u32 cdw10 = 0;
	struct nvme_command c;

	/* Loop through the LBA formats in id_ns to match the required LBAF,
	 * then put it in cdw10 */
	mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
							&dma_addr, GFP_KERNEL);
	if (mem == NULL) {
		res = -ENOMEM;
		goto out;
	}
	/* nvme ns identify */
	nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
	res = nvme_trans_status_code(hdr, nvme_sc);
	if (res)
		goto out_dma;
	if (nvme_sc) {
		res = nvme_sc;
		goto out_dma;
	}
	id_ns = mem;
	flbas = (id_ns->flbas) & 0x0F;
	nlbaf = id_ns->nlbaf;

	for (i = 0; i < nlbaf; i++) {
		if (ns->mode_select_block_len == (1 << (id_ns->lbaf[i].ds))) {
			selected_lbaf = i;
			break;
		}
	}
	if (selected_lbaf > 0x0F) {
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
				ILLEGAL_REQUEST, SCSI_ASC_INVALID_PARAMETER,
				SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		goto out_dma;
	}
	if (ns->mode_select_num_blocks != le64_to_cpu(id_ns->ncap)) {
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
				ILLEGAL_REQUEST, SCSI_ASC_INVALID_PARAMETER,
				SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		goto out_dma;
	}

	cdw10 |= prot_info << 5;
	cdw10 |= selected_lbaf & 0x0F;
	memset(&c, 0, sizeof(c));
	c.format.opcode = nvme_admin_format_nvm;
	c.format.nsid = cpu_to_le32(ns->ns_id);
	c.format.cdw10 = cpu_to_le32(cdw10);

	nvme_sc = nvme_submit_admin_cmd(dev, &c, NULL);
	res = nvme_trans_status_code(hdr, nvme_sc);
	if (res)
		goto out_dma;
	if (nvme_sc)
		res = nvme_sc;

 out_dma:
	dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
			  dma_addr);
 out:
	return res;
}

/* Read/Write Helper Functions */

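/*
 * The four helpers below extract LBA and TRANSFER LENGTH (plus, where
 * the CDB carries them, the FUA and WRPROTECT bits) from the 6-, 10-,
 * 12- and 16-byte read/write CDBs into a common nvme_trans_io_cdb, so
 * the I/O submission path below is CDB-size agnostic.
 */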
static inline void nvme_trans_get_io_cdb6(u8 *cmd,
					struct nvme_trans_io_cdb *cdb_info)
{
	cdb_info->fua = 0;
	cdb_info->prot_info = 0;
	cdb_info->lba = GET_U32_FROM_CDB(cmd, IO_6_CDB_LBA_OFFSET) &
					IO_6_CDB_LBA_MASK;
	cdb_info->xfer_len = GET_U8_FROM_CDB(cmd, IO_6_CDB_TX_LEN_OFFSET);

	/* sbc3r27 sec 5.32 - TRANSFER LEN of 0 implies a 256 Block transfer */
	if (cdb_info->xfer_len == 0)
		cdb_info->xfer_len = IO_6_DEFAULT_TX_LEN;
}

static inline void nvme_trans_get_io_cdb10(u8 *cmd,
					struct nvme_trans_io_cdb *cdb_info)
{
	cdb_info->fua = GET_U8_FROM_CDB(cmd, IO_10_CDB_FUA_OFFSET) &
					IO_CDB_FUA_MASK;
	/* Parenthesized: & binds more loosely than >>, so the previous
	 * unparenthesized form masked with (IO_CDB_WP_MASK >> IO_CDB_WP_SHIFT)
	 * and extracted the wrong bits */
	cdb_info->prot_info = (GET_U8_FROM_CDB(cmd, IO_10_CDB_WP_OFFSET) &
					IO_CDB_WP_MASK) >> IO_CDB_WP_SHIFT;
	cdb_info->lba = GET_U32_FROM_CDB(cmd, IO_10_CDB_LBA_OFFSET);
	cdb_info->xfer_len = GET_U16_FROM_CDB(cmd, IO_10_CDB_TX_LEN_OFFSET);
}

static inline void nvme_trans_get_io_cdb12(u8 *cmd,
					struct nvme_trans_io_cdb *cdb_info)
{
	cdb_info->fua = GET_U8_FROM_CDB(cmd, IO_12_CDB_FUA_OFFSET) &
					IO_CDB_FUA_MASK;
	cdb_info->prot_info = (GET_U8_FROM_CDB(cmd, IO_12_CDB_WP_OFFSET) &
					IO_CDB_WP_MASK) >> IO_CDB_WP_SHIFT;
	cdb_info->lba = GET_U32_FROM_CDB(cmd, IO_12_CDB_LBA_OFFSET);
	cdb_info->xfer_len = GET_U32_FROM_CDB(cmd, IO_12_CDB_TX_LEN_OFFSET);
}

static inline void nvme_trans_get_io_cdb16(u8 *cmd,
					struct nvme_trans_io_cdb *cdb_info)
{
	cdb_info->fua = GET_U8_FROM_CDB(cmd, IO_16_CDB_FUA_OFFSET) &
					IO_CDB_FUA_MASK;
	cdb_info->prot_info = (GET_U8_FROM_CDB(cmd, IO_16_CDB_WP_OFFSET) &
					IO_CDB_WP_MASK) >> IO_CDB_WP_SHIFT;
	cdb_info->lba = GET_U64_FROM_CDB(cmd, IO_16_CDB_LBA_OFFSET);
	cdb_info->xfer_len = GET_U32_FROM_CDB(cmd, IO_16_CDB_TX_LEN_OFFSET);
}

static inline u32 nvme_trans_io_get_num_cmds(struct sg_io_hdr *hdr,
					struct nvme_trans_io_cdb *cdb_info,
					u32 max_blocks)
{
	/* If using iovecs, send one nvme command per vector */
	if (hdr->iovec_count > 0)
		return hdr->iovec_count;
	else if (cdb_info->xfer_len > max_blocks)
		return ((cdb_info->xfer_len - 1) / max_blocks) + 1;
	else
		return 1;
}
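/*
 * Illustrative example of the split computed above: with max_blocks =
 * 1024, a single-buffer transfer of 2500 blocks yields
 * ((2500 - 1) / 1024) + 1 = 3 nvme commands, issued as chunks of 1024,
 * 1024 and 452 blocks by nvme_trans_do_nvme_io() below.
 */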

static u16 nvme_trans_io_get_control(struct nvme_ns *ns,
					struct nvme_trans_io_cdb *cdb_info)
{
	u16 control = 0;

	/* When Protection information support is added, implement here */

	if (cdb_info->fua > 0)
		control |= NVME_RW_FUA;

	return control;
}

static int nvme_trans_do_nvme_io(struct nvme_ns *ns, struct sg_io_hdr *hdr,
				struct nvme_trans_io_cdb *cdb_info, u8 is_write)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	int nvme_sc;
	struct nvme_dev *dev = ns->dev;
	u32 num_cmds;
	struct nvme_iod *iod;
	u64 unit_len;
	u64 unit_num_blocks;	/* Number of blocks to xfer in each nvme cmd */
	u32 retcode;
	u32 i = 0;
	u64 nvme_offset = 0;
	void __user *next_mapping_addr;
	struct nvme_command c;
	u8 opcode = (is_write ? nvme_cmd_write : nvme_cmd_read);
	u16 control;
	u32 max_blocks = queue_max_hw_sectors(ns->queue);

	num_cmds = nvme_trans_io_get_num_cmds(hdr, cdb_info, max_blocks);

	/*
	 * This loop handles two cases.
	 * First, when an SGL is used in the form of an iovec list:
	 *   - Use iov_base as the next mapping address for the nvme command
	 *   - Use iov_len as the data transfer length for the command.
	 * Second, when we have a single buffer:
	 *   - If larger than max_blocks, split into chunks, offsetting
	 *     each nvme command accordingly.
	 */
	for (i = 0; i < num_cmds; i++) {
		memset(&c, 0, sizeof(c));
		if (hdr->iovec_count > 0) {
			struct sg_iovec sgl;

			retcode = copy_from_user(&sgl, hdr->dxferp +
					i * sizeof(struct sg_iovec),
					sizeof(struct sg_iovec));
			if (retcode)
				return -EFAULT;
			unit_len = sgl.iov_len;
			unit_num_blocks = unit_len >> ns->lba_shift;
			next_mapping_addr = sgl.iov_base;
		} else {
			unit_num_blocks = min((u64)max_blocks,
					(cdb_info->xfer_len - nvme_offset));
			unit_len = unit_num_blocks << ns->lba_shift;
			next_mapping_addr = hdr->dxferp +
					((1 << ns->lba_shift) * nvme_offset);
		}

		c.rw.opcode = opcode;
		c.rw.nsid = cpu_to_le32(ns->ns_id);
		c.rw.slba = cpu_to_le64(cdb_info->lba + nvme_offset);
		c.rw.length = cpu_to_le16(unit_num_blocks - 1);
		control = nvme_trans_io_get_control(ns, cdb_info);
		c.rw.control = cpu_to_le16(control);

		iod = nvme_map_user_pages(dev,
			(is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
			(unsigned long)next_mapping_addr, unit_len);
		if (IS_ERR(iod)) {
			res = PTR_ERR(iod);
			goto out;
		}
		retcode = nvme_setup_prps(dev, iod, unit_len, GFP_KERNEL);
		if (retcode != unit_len) {
			nvme_unmap_user_pages(dev,
				(is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
				iod);
			nvme_free_iod(dev, iod);
			res = -ENOMEM;
			goto out;
		}
		c.rw.prp1 = cpu_to_le64(sg_dma_address(iod->sg));
		c.rw.prp2 = cpu_to_le64(iod->first_dma);

		nvme_offset += unit_num_blocks;

		nvme_sc = nvme_submit_io_cmd(dev, ns, &c, NULL);
		if (nvme_sc != NVME_SC_SUCCESS) {
			nvme_unmap_user_pages(dev,
				(is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
				iod);
			nvme_free_iod(dev, iod);
			res = nvme_trans_status_code(hdr, nvme_sc);
			goto out;
		}
		nvme_unmap_user_pages(dev,
				(is_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE,
				iod);
		nvme_free_iod(dev, iod);
	}
	res = nvme_trans_status_code(hdr, NVME_SC_SUCCESS);

 out:
	return res;
}


/* SCSI Command Translation Functions */

static int nvme_trans_io(struct nvme_ns *ns, struct sg_io_hdr *hdr, u8 is_write,
							u8 *cmd)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	struct nvme_trans_io_cdb cdb_info;
	u8 opcode = cmd[0];
	u64 xfer_bytes;
	u64 sum_iov_len = 0;
	struct sg_iovec sgl;
	int i;
	size_t not_copied;

	/* Extract Fields from CDB */
	switch (opcode) {
	case WRITE_6:
	case READ_6:
		nvme_trans_get_io_cdb6(cmd, &cdb_info);
		break;
	case WRITE_10:
	case READ_10:
		nvme_trans_get_io_cdb10(cmd, &cdb_info);
		break;
	case WRITE_12:
	case READ_12:
		nvme_trans_get_io_cdb12(cmd, &cdb_info);
		break;
	case WRITE_16:
	case READ_16:
		nvme_trans_get_io_cdb16(cmd, &cdb_info);
		break;
	default:
		/* Will never really reach here */
		res = SNTI_INTERNAL_ERROR;
		goto out;
	}

	/* Calculate total length of transfer (in bytes) */
	if (hdr->iovec_count > 0) {
		for (i = 0; i < hdr->iovec_count; i++) {
			not_copied = copy_from_user(&sgl, hdr->dxferp +
						i * sizeof(struct sg_iovec),
						sizeof(struct sg_iovec));
			if (not_copied)
				return -EFAULT;
			sum_iov_len += sgl.iov_len;
			/* IO vector sizes should be multiples of block size */
			if (sgl.iov_len % (1 << ns->lba_shift) != 0) {
				res = nvme_trans_completion(hdr,
						SAM_STAT_CHECK_CONDITION,
						ILLEGAL_REQUEST,
						SCSI_ASC_INVALID_PARAMETER,
						SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
				goto out;
			}
		}
	} else {
		sum_iov_len = hdr->dxfer_len;
	}

	/* As per the sg ioctl howto, if the lengths differ, use the lower one */
	xfer_bytes = min(((u64)hdr->dxfer_len), sum_iov_len);

	/* If block count and actual data buffer size don't match, error out */
	if (xfer_bytes != ((u64)cdb_info.xfer_len << ns->lba_shift)) {
		res = -EINVAL;
		goto out;
	}

	/* Check for 0 length transfer - it is not illegal */
	if (cdb_info.xfer_len == 0)
		goto out;

	/* Send NVMe IO Command(s) */
	res = nvme_trans_do_nvme_io(ns, hdr, &cdb_info, is_write);
	if (res != SNTI_TRANSLATION_SUCCESS)
		goto out;

 out:
	return res;
}

static int nvme_trans_inquiry(struct nvme_ns *ns, struct sg_io_hdr *hdr,
							u8 *cmd)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	u8 evpd;
	u8 page_code;
	int alloc_len;
	u8 *inq_response;

	evpd = GET_INQ_EVPD_BIT(cmd);
	page_code = GET_INQ_PAGE_CODE(cmd);
	alloc_len = GET_INQ_ALLOC_LENGTH(cmd);

	inq_response = kmalloc(max(alloc_len, STANDARD_INQUIRY_LENGTH),
				GFP_KERNEL);
	if (inq_response == NULL) {
		res = -ENOMEM;
		goto out_mem;
	}

	if (evpd == 0) {
		if (page_code == INQ_STANDARD_INQUIRY_PAGE) {
			res = nvme_trans_standard_inquiry_page(ns, hdr,
						inq_response, alloc_len);
		} else {
			res = nvme_trans_completion(hdr,
						SAM_STAT_CHECK_CONDITION,
						ILLEGAL_REQUEST,
						SCSI_ASC_INVALID_CDB,
						SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		}
	} else {
		switch (page_code) {
		case VPD_SUPPORTED_PAGES:
			res = nvme_trans_supported_vpd_pages(ns, hdr,
						inq_response, alloc_len);
			break;
		case VPD_SERIAL_NUMBER:
			res = nvme_trans_unit_serial_page(ns, hdr, inq_response,
								alloc_len);
			break;
		case VPD_DEVICE_IDENTIFIERS:
			res = nvme_trans_device_id_page(ns, hdr, inq_response,
								alloc_len);
			break;
		case VPD_EXTENDED_INQUIRY:
			res = nvme_trans_ext_inq_page(ns, hdr, alloc_len);
			break;
		case VPD_BLOCK_LIMITS:
			res = nvme_trans_bdev_limits_page(ns, hdr, inq_response,
								alloc_len);
			break;
		case VPD_BLOCK_DEV_CHARACTERISTICS:
			res = nvme_trans_bdev_char_page(ns, hdr, alloc_len);
			break;
		default:
			res = nvme_trans_completion(hdr,
						SAM_STAT_CHECK_CONDITION,
						ILLEGAL_REQUEST,
						SCSI_ASC_INVALID_CDB,
						SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
			break;
		}
	}
	kfree(inq_response);
 out_mem:
	return res;
}

static int nvme_trans_log_sense(struct nvme_ns *ns, struct sg_io_hdr *hdr,
							u8 *cmd)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	u16 alloc_len;
	u8 sp;
	u8 pc;
	u8 page_code;

	sp = GET_U8_FROM_CDB(cmd, LOG_SENSE_CDB_SP_OFFSET);
	if (sp != LOG_SENSE_CDB_SP_NOT_ENABLED) {
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		goto out;
	}
	pc = GET_U8_FROM_CDB(cmd, LOG_SENSE_CDB_PC_OFFSET);
	page_code = pc & LOG_SENSE_CDB_PAGE_CODE_MASK;
	pc = (pc & LOG_SENSE_CDB_PC_MASK) >> LOG_SENSE_CDB_PC_SHIFT;
	if (pc != LOG_SENSE_CDB_PC_CUMULATIVE_VALUES) {
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		goto out;
	}
	alloc_len = GET_U16_FROM_CDB(cmd, LOG_SENSE_CDB_ALLOC_LENGTH_OFFSET);
	switch (page_code) {
	case LOG_PAGE_SUPPORTED_LOG_PAGES_PAGE:
		res = nvme_trans_log_supp_pages(ns, hdr, alloc_len);
		break;
	case LOG_PAGE_INFORMATIONAL_EXCEPTIONS_PAGE:
		res = nvme_trans_log_info_exceptions(ns, hdr, alloc_len);
		break;
	case LOG_PAGE_TEMPERATURE_PAGE:
		res = nvme_trans_log_temperature(ns, hdr, alloc_len);
		break;
	default:
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		break;
	}

 out:
	return res;
}

static int nvme_trans_mode_select(struct nvme_ns *ns, struct sg_io_hdr *hdr,
							u8 *cmd)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	u8 cdb10 = 0;
	u16 parm_list_len;
	u8 page_format;
	u8 save_pages;

	page_format = GET_U8_FROM_CDB(cmd, MODE_SELECT_CDB_PAGE_FORMAT_OFFSET);
	page_format &= MODE_SELECT_CDB_PAGE_FORMAT_MASK;

	save_pages = GET_U8_FROM_CDB(cmd, MODE_SELECT_CDB_SAVE_PAGES_OFFSET);
	save_pages &= MODE_SELECT_CDB_SAVE_PAGES_MASK;

	if (GET_OPCODE(cmd) == MODE_SELECT) {
		parm_list_len = GET_U8_FROM_CDB(cmd,
				MODE_SELECT_6_CDB_PARAM_LIST_LENGTH_OFFSET);
	} else {
		parm_list_len = GET_U16_FROM_CDB(cmd,
				MODE_SELECT_10_CDB_PARAM_LIST_LENGTH_OFFSET);
		cdb10 = 1;
	}

	if (parm_list_len != 0) {
		/*
		 * According to SPC-4 r24, a parameter list length field of 0
		 * shall not be considered an error
		 */
		res = nvme_trans_modesel_data(ns, hdr, cmd, parm_list_len,
						page_format, save_pages, cdb10);
	}

	return res;
}

static int nvme_trans_mode_sense(struct nvme_ns *ns, struct sg_io_hdr *hdr,
							u8 *cmd)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	u16 alloc_len;
	u8 cdb10 = 0;
	u8 page_code;
	u8 pc;

	if (GET_OPCODE(cmd) == MODE_SENSE) {
		alloc_len = GET_U8_FROM_CDB(cmd, MODE_SENSE6_ALLOC_LEN_OFFSET);
	} else {
		alloc_len = GET_U16_FROM_CDB(cmd,
						MODE_SENSE10_ALLOC_LEN_OFFSET);
		cdb10 = 1;
	}

	pc = GET_U8_FROM_CDB(cmd, MODE_SENSE_PAGE_CONTROL_OFFSET) &
						MODE_SENSE_PAGE_CONTROL_MASK;
	if (pc != MODE_SENSE_PC_CURRENT_VALUES) {
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		goto out;
	}

	page_code = GET_U8_FROM_CDB(cmd, MODE_SENSE_PAGE_CODE_OFFSET) &
					MODE_SENSE_PAGE_CODE_MASK;
	switch (page_code) {
	case MODE_PAGE_CACHING:
		res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
						cdb10,
						&nvme_trans_fill_caching_page,
						MODE_PAGE_CACHING_LEN);
		break;
	case MODE_PAGE_CONTROL:
		res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
						cdb10,
						&nvme_trans_fill_control_page,
						MODE_PAGE_CONTROL_LEN);
		break;
	case MODE_PAGE_POWER_CONDITION:
		res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
						cdb10,
						&nvme_trans_fill_pow_cnd_page,
						MODE_PAGE_POW_CND_LEN);
		break;
	case MODE_PAGE_INFO_EXCEP:
		res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
						cdb10,
						&nvme_trans_fill_inf_exc_page,
						MODE_PAGE_INF_EXC_LEN);
		break;
	case MODE_PAGE_RETURN_ALL:
		res = nvme_trans_mode_page_create(ns, hdr, cmd, alloc_len,
						cdb10,
						&nvme_trans_fill_all_pages,
						MODE_PAGE_ALL_LEN);
		break;
	default:
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		break;
	}

 out:
	return res;
}

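/*
 * READ CAPACITY(10) always gets the fixed 8-byte response (its CDB has
 * no allocation length field), while READ CAPACITY(16), arriving as a
 * SERVICE ACTION IN, honors its ALLOCATION LENGTH against the 32-byte
 * response.  Both are filled from Identify Namespace data.
 */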
static int nvme_trans_read_capacity(struct nvme_ns *ns, struct sg_io_hdr *hdr,
							u8 *cmd)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	int nvme_sc;
	u32 alloc_len = READ_CAP_10_RESP_SIZE;
	u32 resp_size = READ_CAP_10_RESP_SIZE;
	u32 xfer_len;
	u8 cdb16;
	struct nvme_dev *dev = ns->dev;
	dma_addr_t dma_addr;
	void *mem;
	struct nvme_id_ns *id_ns;
	u8 *response;

	cdb16 = IS_READ_CAP_16(cmd);
	if (cdb16) {
		alloc_len = GET_READ_CAP_16_ALLOC_LENGTH(cmd);
		resp_size = READ_CAP_16_RESP_SIZE;
	}

	mem = dma_alloc_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns),
							&dma_addr, GFP_KERNEL);
	if (mem == NULL) {
		res = -ENOMEM;
		goto out;
	}
	/* nvme ns identify */
	nvme_sc = nvme_identify(dev, ns->ns_id, 0, dma_addr);
	res = nvme_trans_status_code(hdr, nvme_sc);
	if (res)
		goto out_dma;
	if (nvme_sc) {
		res = nvme_sc;
		goto out_dma;
	}
	id_ns = mem;

	response = kzalloc(resp_size, GFP_KERNEL);
	if (response == NULL) {
		res = -ENOMEM;
		goto out_dma;
	}
	nvme_trans_fill_read_cap(response, id_ns, cdb16);

	xfer_len = min(alloc_len, resp_size);
	res = nvme_trans_copy_to_user(hdr, response, xfer_len);

	kfree(response);
 out_dma:
	dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ns), mem,
			  dma_addr);
 out:
	return res;
}

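/*
 * REPORT LUNS is synthesized from Identify Controller: the namespace
 * count (NN) sets the number of 8-byte LUN entries, and LUN IDs are
 * reported as the flat range 0..NN-1 (namespace ID n corresponding to
 * LUN n - 1).
 */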
static int nvme_trans_report_luns(struct nvme_ns *ns, struct sg_io_hdr *hdr,
							u8 *cmd)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	int nvme_sc;
	u32 alloc_len, xfer_len, resp_size;
	u8 select_report;
	u8 *response;
	struct nvme_dev *dev = ns->dev;
	dma_addr_t dma_addr;
	void *mem;
	struct nvme_id_ctrl *id_ctrl;
	u32 ll_length, lun_id;
	/* u32: with many namespaces the running offset can exceed a u8 */
	u32 lun_id_offset = REPORT_LUNS_FIRST_LUN_OFFSET;
	__be32 tmp_len;

	alloc_len = GET_REPORT_LUNS_ALLOC_LENGTH(cmd);
	select_report = GET_U8_FROM_CDB(cmd, REPORT_LUNS_SR_OFFSET);

	if ((select_report != ALL_LUNS_RETURNED) &&
	    (select_report != ALL_WELL_KNOWN_LUNS_RETURNED) &&
	    (select_report != RESTRICTED_LUNS_RETURNED)) {
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		goto out;
	} else {
		/* NVMe Controller Identify */
		mem = dma_alloc_coherent(&dev->pci_dev->dev,
					sizeof(struct nvme_id_ctrl),
					&dma_addr, GFP_KERNEL);
		if (mem == NULL) {
			res = -ENOMEM;
			goto out;
		}
		nvme_sc = nvme_identify(dev, 0, 1, dma_addr);
		res = nvme_trans_status_code(hdr, nvme_sc);
		if (res)
			goto out_dma;
		if (nvme_sc) {
			res = nvme_sc;
			goto out_dma;
		}
		id_ctrl = mem;
		ll_length = le32_to_cpu(id_ctrl->nn) * LUN_ENTRY_SIZE;
		resp_size = ll_length + LUN_DATA_HEADER_SIZE;

		if (alloc_len < resp_size) {
			res = nvme_trans_completion(hdr,
					SAM_STAT_CHECK_CONDITION,
					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
			goto out_dma;
		}

		response = kzalloc(resp_size, GFP_KERNEL);
		if (response == NULL) {
			res = -ENOMEM;
			goto out_dma;
		}

		/* The first LUN ID will always be 0 per the SAM spec */
		for (lun_id = 0; lun_id < le32_to_cpu(id_ctrl->nn); lun_id++) {
			/*
			 * Set the LUN Id and then increment to the next LUN
			 * location in the parameter data.
			 */
			__be64 tmp_id = cpu_to_be64(lun_id);
			memcpy(&response[lun_id_offset], &tmp_id, sizeof(u64));
			lun_id_offset += LUN_ENTRY_SIZE;
		}
		tmp_len = cpu_to_be32(ll_length);
		memcpy(response, &tmp_len, sizeof(u32));
	}

	xfer_len = min(alloc_len, resp_size);
	res = nvme_trans_copy_to_user(hdr, response, xfer_len);

	kfree(response);
 out_dma:
	dma_free_coherent(&dev->pci_dev->dev, sizeof(struct nvme_id_ctrl), mem,
			  dma_addr);
 out:
	return res;
}

static int nvme_trans_request_sense(struct nvme_ns *ns, struct sg_io_hdr *hdr,
							u8 *cmd)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	u8 alloc_len, xfer_len, resp_size;
	u8 desc_format;
	u8 *response;

	alloc_len = GET_REQUEST_SENSE_ALLOC_LENGTH(cmd);
	desc_format = GET_U8_FROM_CDB(cmd, REQUEST_SENSE_DESC_OFFSET);
	desc_format &= REQUEST_SENSE_DESC_MASK;

	resp_size = ((desc_format) ? (DESC_FMT_SENSE_DATA_SIZE) :
					(FIXED_FMT_SENSE_DATA_SIZE));
	response = kzalloc(resp_size, GFP_KERNEL);
	if (response == NULL) {
		res = -ENOMEM;
		goto out;
	}

	if (desc_format == DESCRIPTOR_FORMAT_SENSE_DATA_TYPE) {
		/* Descriptor Format Sense Data */
		response[0] = DESC_FORMAT_SENSE_DATA;
		response[1] = NO_SENSE;
		/* TODO How is LOW POWER CONDITION ON handled? (byte 2) */
		response[2] = SCSI_ASC_NO_SENSE;
		response[3] = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		/* SDAT_OVFL = 0 | Additional Sense Length = 0 */
	} else {
		/* Fixed Format Sense Data */
		response[0] = FIXED_SENSE_DATA;
		/* Byte 1 = Obsolete */
		response[2] = NO_SENSE; /* FM, EOM, ILI, SDAT_OVFL = 0 */
		/* Bytes 3-6 - Information - set to zero */
		response[7] = FIXED_SENSE_DATA_ADD_LENGTH;
		/* Bytes 8-11 - Cmd Specific Information - set to zero */
		response[12] = SCSI_ASC_NO_SENSE;
		response[13] = SCSI_ASCQ_CAUSE_NOT_REPORTABLE;
		/* Byte 14 = Field Replaceable Unit Code = 0 */
		/* Bytes 15-17 - SKSV=0; Sense Key Specific = 0 */
	}

	xfer_len = min(alloc_len, resp_size);
	res = nvme_trans_copy_to_user(hdr, response, xfer_len);

	kfree(response);
 out:
	return res;
}

static int nvme_trans_security_protocol(struct nvme_ns *ns,
					struct sg_io_hdr *hdr,
					u8 *cmd)
{
	return nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
				ILLEGAL_REQUEST, SCSI_ASC_ILLEGAL_COMMAND,
				SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
}

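/*
 * START STOP UNIT: IMMED must be 0 (completion is always waited for).
 * Unless NO_FLUSH is set, a Flush is first sent to the namespace, and
 * the POWER CONDITION / POWER CONDITION MODIFIER pair is then mapped
 * onto an NVMe power-state transition by nvme_trans_power_state().
 */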
static int nvme_trans_start_stop(struct nvme_ns *ns, struct sg_io_hdr *hdr,
							u8 *cmd)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	int nvme_sc;
	struct nvme_command c;
	u8 immed, pcmod, pc, no_flush, start;

	immed = GET_U8_FROM_CDB(cmd, START_STOP_UNIT_CDB_IMMED_OFFSET);
	pcmod = GET_U8_FROM_CDB(cmd, START_STOP_UNIT_CDB_POWER_COND_MOD_OFFSET);
	pc = GET_U8_FROM_CDB(cmd, START_STOP_UNIT_CDB_POWER_COND_OFFSET);
	no_flush = GET_U8_FROM_CDB(cmd, START_STOP_UNIT_CDB_NO_FLUSH_OFFSET);
	start = GET_U8_FROM_CDB(cmd, START_STOP_UNIT_CDB_START_OFFSET);

	immed &= START_STOP_UNIT_CDB_IMMED_MASK;
	pcmod &= START_STOP_UNIT_CDB_POWER_COND_MOD_MASK;
	pc = (pc & START_STOP_UNIT_CDB_POWER_COND_MASK) >> NIBBLE_SHIFT;
	no_flush &= START_STOP_UNIT_CDB_NO_FLUSH_MASK;
	start &= START_STOP_UNIT_CDB_START_MASK;

	if (immed != 0) {
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
	} else {
		if (no_flush == 0) {
			/* Issue NVME FLUSH command prior to START STOP UNIT */
			memset(&c, 0, sizeof(c));
			c.common.opcode = nvme_cmd_flush;
			c.common.nsid = cpu_to_le32(ns->ns_id);

			nvme_sc = nvme_submit_io_cmd(ns->dev, ns, &c, NULL);
			res = nvme_trans_status_code(hdr, nvme_sc);
			if (res)
				goto out;
			if (nvme_sc) {
				res = nvme_sc;
				goto out;
			}
		}
		/* Setup the expected power state transition */
		res = nvme_trans_power_state(ns, hdr, pc, pcmod, start);
	}

 out:
	return res;
}

static int nvme_trans_synchronize_cache(struct nvme_ns *ns,
					struct sg_io_hdr *hdr, u8 *cmd)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	int nvme_sc;
	struct nvme_command c;

	memset(&c, 0, sizeof(c));
	c.common.opcode = nvme_cmd_flush;
	c.common.nsid = cpu_to_le32(ns->ns_id);

	nvme_sc = nvme_submit_io_cmd(ns->dev, ns, &c, NULL);

	res = nvme_trans_status_code(hdr, nvme_sc);
	if (res)
		goto out;
	if (nvme_sc)
		res = nvme_sc;

 out:
	return res;
}

static int nvme_trans_format_unit(struct nvme_ns *ns, struct sg_io_hdr *hdr,
							u8 *cmd)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	u8 parm_hdr_len = 0;
	u8 nvme_pf_code = 0;
	u8 format_prot_info, long_list, format_data;

	format_prot_info = GET_U8_FROM_CDB(cmd,
				FORMAT_UNIT_CDB_FORMAT_PROT_INFO_OFFSET);
	long_list = GET_U8_FROM_CDB(cmd, FORMAT_UNIT_CDB_LONG_LIST_OFFSET);
	format_data = GET_U8_FROM_CDB(cmd, FORMAT_UNIT_CDB_FORMAT_DATA_OFFSET);

	format_prot_info = (format_prot_info &
				FORMAT_UNIT_CDB_FORMAT_PROT_INFO_MASK) >>
				FORMAT_UNIT_CDB_FORMAT_PROT_INFO_SHIFT;
	long_list &= FORMAT_UNIT_CDB_LONG_LIST_MASK;
	format_data &= FORMAT_UNIT_CDB_FORMAT_DATA_MASK;

	if (format_data != 0) {
		if (format_prot_info != 0) {
			if (long_list == 0)
				parm_hdr_len = FORMAT_UNIT_SHORT_PARM_LIST_LEN;
			else
				parm_hdr_len = FORMAT_UNIT_LONG_PARM_LIST_LEN;
		}
	} else if (format_prot_info != 0) {
		/* FMTPINFO without FMTDATA is invalid */
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		goto out;
	}

	/* Get parm header from data-in/out buffer */
	/*
	 * According to the translation spec, the only fields in the parameter
	 * list we are concerned with are in the header. So allocate only that.
	 */
	if (parm_hdr_len > 0) {
		res = nvme_trans_fmt_get_parm_header(hdr, parm_hdr_len,
					format_prot_info, &nvme_pf_code);
		if (res != SNTI_TRANSLATION_SUCCESS)
			goto out;
	}

	/* Attempt to activate any previously downloaded firmware image */
	res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_activate_fw, 0, 0, 0);

	/* Determine Block size and count and send format command */
	res = nvme_trans_fmt_set_blk_size_count(ns, hdr);
	if (res != SNTI_TRANSLATION_SUCCESS)
		goto out;

	res = nvme_trans_fmt_send_cmd(ns, hdr, nvme_pf_code);

 out:
	return res;
}

static int nvme_trans_test_unit_ready(struct nvme_ns *ns,
					struct sg_io_hdr *hdr,
					u8 *cmd)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	struct nvme_dev *dev = ns->dev;

	if (!(readl(&dev->bar->csts) & NVME_CSTS_RDY))
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
					    NOT_READY, SCSI_ASC_LUN_NOT_READY,
					    SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
	else
		res = nvme_trans_completion(hdr, SAM_STAT_GOOD, NO_SENSE, 0, 0);

	return res;
}

static int nvme_trans_write_buffer(struct nvme_ns *ns, struct sg_io_hdr *hdr,
							u8 *cmd)
{
	int res = SNTI_TRANSLATION_SUCCESS;
	u32 buffer_offset, parm_list_length;
	u8 buffer_id, mode;

	parm_list_length =
		GET_U24_FROM_CDB(cmd, WRITE_BUFFER_CDB_PARM_LIST_LENGTH_OFFSET);
	if (parm_list_length % BYTES_TO_DWORDS != 0) {
		/* NVMe expects Firmware file to be a whole number of DWORDS */
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		goto out;
	}
	buffer_id = GET_U8_FROM_CDB(cmd, WRITE_BUFFER_CDB_BUFFER_ID_OFFSET);
	if (buffer_id > NVME_MAX_FIRMWARE_SLOT) {
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		goto out;
	}
	mode = GET_U8_FROM_CDB(cmd, WRITE_BUFFER_CDB_MODE_OFFSET) &
						WRITE_BUFFER_CDB_MODE_MASK;
	buffer_offset =
		GET_U24_FROM_CDB(cmd, WRITE_BUFFER_CDB_BUFFER_OFFSET_OFFSET);

	switch (mode) {
	case DOWNLOAD_SAVE_ACTIVATE:
		res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_download_fw,
						parm_list_length, buffer_offset,
						buffer_id);
		if (res != SNTI_TRANSLATION_SUCCESS)
			goto out;
		res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_activate_fw,
						parm_list_length, buffer_offset,
						buffer_id);
		break;
	case DOWNLOAD_SAVE_DEFER_ACTIVATE:
		res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_download_fw,
						parm_list_length, buffer_offset,
						buffer_id);
		break;
	case ACTIVATE_DEFERRED_MICROCODE:
		res = nvme_trans_send_fw_cmd(ns, hdr, nvme_admin_activate_fw,
						parm_list_length, buffer_offset,
						buffer_id);
		break;
	default:
		res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
					ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB,
					SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		break;
	}

 out:
	return res;
}

struct scsi_unmap_blk_desc {
	__be64	slba;
	__be32	nlb;
	u32	resv;
};

struct scsi_unmap_parm_list {
	__be16	unmap_data_len;
	__be16	unmap_blk_desc_data_len;
	u32	resv;
	struct scsi_unmap_blk_desc desc[0];
};
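/*
 * UNMAP translation: each 16-byte SCSI block descriptor above becomes
 * one NVMe DSM range (hence ndesc is the block descriptor data length
 * divided by 16), and the whole list is sent as a single Dataset
 * Management command with the Deallocate attribute set.
 */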

static int nvme_trans_unmap(struct nvme_ns *ns, struct sg_io_hdr *hdr,
							u8 *cmd)
{
	struct nvme_dev *dev = ns->dev;
	struct scsi_unmap_parm_list *plist;
	struct nvme_dsm_range *range;
	struct nvme_command c;
	int i, nvme_sc, res = -ENOMEM;
	u16 ndesc, list_len;
	dma_addr_t dma_addr;

	list_len = GET_U16_FROM_CDB(cmd, UNMAP_CDB_PARAM_LIST_LENGTH_OFFSET);
	if (!list_len)
		return -EINVAL;

	plist = kmalloc(list_len, GFP_KERNEL);
	if (!plist)
		return -ENOMEM;

	res = nvme_trans_copy_from_user(hdr, plist, list_len);
	if (res != SNTI_TRANSLATION_SUCCESS)
		goto out;

	ndesc = be16_to_cpu(plist->unmap_blk_desc_data_len) >> 4;
	if (!ndesc || ndesc > 256) {
		res = -EINVAL;
		goto out;
	}

	range = dma_alloc_coherent(&dev->pci_dev->dev, ndesc * sizeof(*range),
							&dma_addr, GFP_KERNEL);
	if (!range) {
		/* res is 0 after a successful copy_from_user; reset it */
		res = -ENOMEM;
		goto out;
	}

	for (i = 0; i < ndesc; i++) {
		range[i].nlb = cpu_to_le32(be32_to_cpu(plist->desc[i].nlb));
		range[i].slba = cpu_to_le64(be64_to_cpu(plist->desc[i].slba));
		range[i].cattr = 0;
	}

	memset(&c, 0, sizeof(c));
	c.dsm.opcode = nvme_cmd_dsm;
	c.dsm.nsid = cpu_to_le32(ns->ns_id);
	c.dsm.prp1 = cpu_to_le64(dma_addr);
	c.dsm.nr = cpu_to_le32(ndesc - 1);
	c.dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);

	nvme_sc = nvme_submit_io_cmd(dev, ns, &c, NULL);
	res = nvme_trans_status_code(hdr, nvme_sc);

	dma_free_coherent(&dev->pci_dev->dev, ndesc * sizeof(*range),
							range, dma_addr);
 out:
	kfree(plist);
	return res;
}

static int nvme_scsi_translate(struct nvme_ns *ns, struct sg_io_hdr *hdr)
{
	u8 cmd[BLK_MAX_CDB];
	int retcode;
	unsigned int opcode;

	if (hdr->cmdp == NULL)
		return -EMSGSIZE;
	if (copy_from_user(cmd, hdr->cmdp, hdr->cmd_len))
		return -EFAULT;

	/*
	 * Prime the hdr with good status for scsi commands that don't require
	 * an nvme command for translation.
	 */
	retcode = nvme_trans_status_code(hdr, NVME_SC_SUCCESS);
	if (retcode)
		return retcode;

	opcode = cmd[0];

	switch (opcode) {
	case READ_6:
	case READ_10:
	case READ_12:
	case READ_16:
		retcode = nvme_trans_io(ns, hdr, 0, cmd);
		break;
	case WRITE_6:
	case WRITE_10:
	case WRITE_12:
	case WRITE_16:
		retcode = nvme_trans_io(ns, hdr, 1, cmd);
		break;
	case INQUIRY:
		retcode = nvme_trans_inquiry(ns, hdr, cmd);
		break;
	case LOG_SENSE:
		retcode = nvme_trans_log_sense(ns, hdr, cmd);
		break;
	case MODE_SELECT:
	case MODE_SELECT_10:
		retcode = nvme_trans_mode_select(ns, hdr, cmd);
		break;
	case MODE_SENSE:
	case MODE_SENSE_10:
		retcode = nvme_trans_mode_sense(ns, hdr, cmd);
		break;
	case READ_CAPACITY:
		retcode = nvme_trans_read_capacity(ns, hdr, cmd);
		break;
	case SERVICE_ACTION_IN_16:
		if (IS_READ_CAP_16(cmd))
			retcode = nvme_trans_read_capacity(ns, hdr, cmd);
		else
			goto out;
		break;
	case REPORT_LUNS:
		retcode = nvme_trans_report_luns(ns, hdr, cmd);
		break;
	case REQUEST_SENSE:
		retcode = nvme_trans_request_sense(ns, hdr, cmd);
		break;
	case SECURITY_PROTOCOL_IN:
	case SECURITY_PROTOCOL_OUT:
		retcode = nvme_trans_security_protocol(ns, hdr, cmd);
		break;
	case START_STOP:
		retcode = nvme_trans_start_stop(ns, hdr, cmd);
		break;
	case SYNCHRONIZE_CACHE:
		retcode = nvme_trans_synchronize_cache(ns, hdr, cmd);
		break;
	case FORMAT_UNIT:
		retcode = nvme_trans_format_unit(ns, hdr, cmd);
		break;
	case TEST_UNIT_READY:
		retcode = nvme_trans_test_unit_ready(ns, hdr, cmd);
		break;
	case WRITE_BUFFER:
		retcode = nvme_trans_write_buffer(ns, hdr, cmd);
		break;
	case UNMAP:
		retcode = nvme_trans_unmap(ns, hdr, cmd);
		break;
	default:
 out:
		retcode = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION,
				ILLEGAL_REQUEST, SCSI_ASC_ILLEGAL_COMMAND,
				SCSI_ASCQ_CAUSE_NOT_REPORTABLE);
		break;
	}
	return retcode;
}

int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr)
{
	struct sg_io_hdr hdr;
	int retcode;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	if (copy_from_user(&hdr, u_hdr, sizeof(hdr)))
		return -EFAULT;
	if (hdr.interface_id != 'S')
		return -EINVAL;
	if (hdr.cmd_len > BLK_MAX_CDB)
		return -EINVAL;

	retcode = nvme_scsi_translate(ns, &hdr);
	if (retcode < 0)
		return retcode;
	if (retcode > 0)
		retcode = SNTI_TRANSLATION_SUCCESS;
	if (copy_to_user(u_hdr, &hdr, sizeof(sg_io_hdr_t)) > 0)
		return -EFAULT;

	return retcode;
}

int nvme_sg_get_version_num(int __user *ip)
{
	return put_user(sg_version_num, ip);
}