/*
 * SCSI Primary Commands (SPC) parsing and emulation.
 *
 * (c) Copyright 2002-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <asm/unaligned.h>

#include <scsi/scsi.h>
#include <scsi/scsi_tcq.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>
#include <target/target_core_fabric.h>

#include "target_core_internal.h"
#include "target_core_alua.h"
#include "target_core_pr.h"
#include "target_core_ua.h"
#include "target_core_xcopy.h"

static void spc_fill_alua_data(struct se_port *port, unsigned char *buf)
{
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;

	/*
	 * Set SCCS for MAINTENANCE_IN + REPORT_TARGET_PORT_GROUPS.
	 */
	buf[5] = 0x80;

	/*
	 * Set TPGS field for explicit and/or implicit ALUA access type
	 * and operation.
	 *
	 * See spc4r17 section 6.4.2 Table 135
	 */
	if (!port)
		return;
	tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
	if (!tg_pt_gp_mem)
		return;

	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
	if (tg_pt_gp)
		buf[5] |= tg_pt_gp->tg_pt_gp_alua_access_type;
	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
}

sense_reason_t
spc_emulate_inquiry_std(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_lun *lun = cmd->se_lun;
	struct se_device *dev = cmd->se_dev;
	struct se_session *sess = cmd->se_sess;

	/* Set RMB (removable media) for tape devices */
	if (dev->transport->get_device_type(dev) == TYPE_TAPE)
		buf[1] = 0x80;

	buf[2] = 0x05; /* SPC-3 */

	/*
	 * NORMACA and HISUP = 0, RESPONSE DATA FORMAT = 2
	 *
	 * SPC4 says:
	 *   A RESPONSE DATA FORMAT field set to 2h indicates that the
	 *   standard INQUIRY data is in the format defined in this
	 *   standard. Response data format values less than 2h are
	 *   obsolete. Response data format values greater than 2h are
	 *   reserved.
	 */
	buf[3] = 2;

	/*
	 * Enable SCCS and TPGS fields for Emulated ALUA
	 */
	spc_fill_alua_data(lun->lun_sep, buf);

	/*
	 * Set Third-Party Copy (3PC) bit to indicate support for EXTENDED_COPY
	 */
	if (dev->dev_attrib.emulate_3pc)
		buf[5] |= 0x8;
	/*
	 * Set Protection (PROTECT) bit when DIF has been enabled on the
	 * device, and the fabric supports VERIFY + PASS.  Also report
	 * PROTECT=1 if sess_prot_type has been configured to allow T10-PI
	 * to unprotected devices.
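	 * (PROTECT is bit 0 of byte 5 in the standard INQUIRY data, see
	 * spc4r17 section 6.4.2.)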
	 */
	if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
		if (dev->dev_attrib.pi_prot_type || cmd->se_sess->sess_prot_type)
			buf[5] |= 0x1;
	}

	buf[7] = 0x2; /* CmdQue=1 */

	memcpy(&buf[8], "LIO-ORG ", 8);
	memset(&buf[16], 0x20, 16);
	memcpy(&buf[16], dev->t10_wwn.model,
	       min_t(size_t, strlen(dev->t10_wwn.model), 16));
	memcpy(&buf[32], dev->t10_wwn.revision,
	       min_t(size_t, strlen(dev->t10_wwn.revision), 4));
	buf[4] = 31; /* Set additional length to 31 */

	return 0;
}
EXPORT_SYMBOL(spc_emulate_inquiry_std);

/* unit serial number */
static sense_reason_t
spc_emulate_evpd_80(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_dev;
	u16 len;

	if (dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
		len = sprintf(&buf[4], "%s", dev->t10_wwn.unit_serial);
		len++; /* Extra Byte for NULL Terminator */
		buf[3] = len;
	}
	return 0;
}

void spc_parse_naa_6h_vendor_specific(struct se_device *dev,
				      unsigned char *buf)
{
	unsigned char *p = &dev->t10_wwn.unit_serial[0];
	int cnt;
	bool next = true;

	/*
	 * Generate up to 36 bits of VENDOR SPECIFIC IDENTIFIER starting on
	 * byte 3 bit 3-0 for NAA IEEE Registered Extended DESIGNATOR field
	 * format, followed by 64 bits of VENDOR SPECIFIC IDENTIFIER EXTENSION
	 * to complete the payload.  These are based on the VPD=0x80 PRODUCT
	 * SERIAL NUMBER set via vpd_unit_serial in target_core_configfs.c to
	 * ensure per device uniqueness.
	 */
	for (cnt = 0; *p && cnt < 13; p++) {
		int val = hex_to_bin(*p);

		if (val < 0)
			continue;

		if (next) {
			next = false;
			buf[cnt++] |= val;
		} else {
			next = true;
			buf[cnt] = val << 4;
		}
	}
}

/*
 * Device identification VPD, for a complete list of
 * DESIGNATOR TYPEs see spc4r17 Table 459.
 */
sense_reason_t
spc_emulate_evpd_83(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_dev;
	struct se_lun *lun = cmd->se_lun;
	struct se_port *port = NULL;
	struct se_portal_group *tpg = NULL;
	struct t10_alua_lu_gp_member *lu_gp_mem;
	struct t10_alua_tg_pt_gp *tg_pt_gp;
	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
	unsigned char *prod = &dev->t10_wwn.model[0];
	u32 prod_len;
	u32 unit_serial_len, off = 0;
	u16 len = 0, id_len;

	off = 4;

	/*
	 * NAA IEEE Registered Extended Assigned designator format, see
	 * spc4r17 section 7.7.3.6.5
	 *
	 * We depend upon a target_core_mod/ConfigFS provided
	 * /sys/kernel/config/target/core/$HBA/$DEV/wwn/vpd_unit_serial
	 * value in order to return the NAA id.
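	 * If vpd_unit_serial has not been provided, the NAA descriptor is
	 * skipped and only the descriptors built below are returned.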
	 */
	if (!(dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL))
		goto check_t10_vend_desc;

	/* CODE SET == Binary */
	buf[off++] = 0x1;

	/* Set ASSOCIATION == addressed logical unit: 00b */
	buf[off] = 0x00;

	/* Identifier/Designator type == NAA identifier */
	buf[off++] |= 0x3;
	off++;

	/* Identifier/Designator length */
	buf[off++] = 0x10;

	/*
	 * Start NAA IEEE Registered Extended Identifier/Designator
	 */
	buf[off++] = (0x6 << 4);

	/*
	 * Use OpenFabrics IEEE Company ID: 00 14 05
	 */
	buf[off++] = 0x01;
	buf[off++] = 0x40;
	buf[off] = (0x5 << 4);

	/*
	 * Return ConfigFS Unit Serial Number information for
	 * VENDOR_SPECIFIC_IDENTIFIER and
	 * VENDOR_SPECIFIC_IDENTIFIER_EXTENSION
	 */
	spc_parse_naa_6h_vendor_specific(dev, &buf[off]);

	len = 20;
	off = (len + 4);

check_t10_vend_desc:
	/*
	 * T10 Vendor Identifier Page, see spc4r17 section 7.7.3.4
	 */
	id_len = 8; /* For Vendor field */
	prod_len = 4; /* For VPD Header */
	prod_len += 8; /* For Vendor field */
	prod_len += strlen(prod);
	prod_len++; /* For : */

	if (dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
		unit_serial_len = strlen(&dev->t10_wwn.unit_serial[0]);
		unit_serial_len++; /* For NULL Terminator */

		id_len += sprintf(&buf[off+12], "%s:%s", prod,
				  &dev->t10_wwn.unit_serial[0]);
	}
	buf[off] = 0x2; /* ASCII */
	buf[off+1] = 0x1; /* T10 Vendor ID */
	buf[off+2] = 0x0;
	memcpy(&buf[off+4], "LIO-ORG", 8);
	/* Extra Byte for NULL Terminator */
	id_len++;
	/* Identifier Length */
	buf[off+3] = id_len;
	/* Header size for Designation descriptor */
	len += (id_len + 4);
	off += (id_len + 4);
	/*
	 * struct se_port is only set for INQUIRY VPD=1 through $FABRIC_MOD
	 */
	port = lun->lun_sep;
	if (port) {
		struct t10_alua_lu_gp *lu_gp;
		u32 padding, scsi_name_len, scsi_target_len;
		u16 lu_gp_id = 0;
		u16 tg_pt_gp_id = 0;
		u16 tpgt;

		tpg = port->sep_tpg;
		/*
		 * Relative target port identifier, see spc4r17
		 * section 7.7.3.7
		 *
		 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
		 * section 7.5.1 Table 362
		 */
		buf[off] =
			(tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
		buf[off++] |= 0x1; /* CODE SET == Binary */
		buf[off] = 0x80; /* Set PIV=1 */
		/* Set ASSOCIATION == target port: 01b */
		buf[off] |= 0x10;
		/* DESIGNATOR TYPE == Relative target port identifier */
		buf[off++] |= 0x4;
		off++; /* Skip over Reserved */
		buf[off++] = 4; /* DESIGNATOR LENGTH */
		/* Skip over Obsolete field in RTPI payload
		 * in Table 472 */
		off += 2;
		buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
		buf[off++] = (port->sep_rtpi & 0xff);
		len += 8; /* Header size + Designation descriptor */
		/*
		 * Target port group identifier, see spc4r17
		 * section 7.7.3.8
		 *
		 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
		 * section 7.5.1 Table 362
		 */
		tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
		if (!tg_pt_gp_mem)
			goto check_lu_gp;

		spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
		tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
		if (!tg_pt_gp) {
			spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
			goto check_lu_gp;
		}
		tg_pt_gp_id = tg_pt_gp->tg_pt_gp_id;
		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);

		buf[off] =
			(tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
		buf[off++] |= 0x1; /* CODE SET == Binary */
		buf[off] = 0x80; /* Set PIV=1 */
		/* Set ASSOCIATION == target port: 01b */
		buf[off] |= 0x10;
		/* DESIGNATOR TYPE == Target port group identifier */
		buf[off++] |= 0x5;
		off++; /* Skip over Reserved */
		buf[off++] = 4; /* DESIGNATOR LENGTH */
		off += 2; /* Skip over Reserved Field */
		buf[off++] = ((tg_pt_gp_id >> 8) & 0xff);
		buf[off++] = (tg_pt_gp_id & 0xff);
		len += 8; /* Header size + Designation descriptor */
		/*
		 * Logical Unit Group identifier, see spc4r17
		 * section 7.7.3.8
		 */
check_lu_gp:
		lu_gp_mem = dev->dev_alua_lu_gp_mem;
		if (!lu_gp_mem)
			goto check_scsi_name;

		spin_lock(&lu_gp_mem->lu_gp_mem_lock);
		lu_gp = lu_gp_mem->lu_gp;
		if (!lu_gp) {
			spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
			goto check_scsi_name;
		}
		lu_gp_id = lu_gp->lu_gp_id;
		spin_unlock(&lu_gp_mem->lu_gp_mem_lock);

		buf[off++] |= 0x1; /* CODE SET == Binary */
		/* DESIGNATOR TYPE == Logical Unit Group identifier */
		buf[off++] |= 0x6;
		off++; /* Skip over Reserved */
		buf[off++] = 4; /* DESIGNATOR LENGTH */
		off += 2; /* Skip over Reserved Field */
		buf[off++] = ((lu_gp_id >> 8) & 0xff);
		buf[off++] = (lu_gp_id & 0xff);
		len += 8; /* Header size + Designation descriptor */
		/*
		 * SCSI name string designator, see spc4r17
		 * section 7.7.3.11
		 *
		 * Get the PROTOCOL IDENTIFIER as defined by spc4r17
		 * section 7.5.1 Table 362
		 */
check_scsi_name:
		buf[off] =
			(tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
		buf[off++] |= 0x3; /* CODE SET == UTF-8 */
		buf[off] = 0x80; /* Set PIV=1 */
		/* Set ASSOCIATION == target port: 01b */
		buf[off] |= 0x10;
		/* DESIGNATOR TYPE == SCSI name string */
		buf[off++] |= 0x8;
		off += 2; /* Skip over Reserved and length */
		/*
		 * SCSI name string identifier containing $FABRIC_MOD
		 * dependent information.  For LIO-Target and iSCSI
		 * Target Port, this means "<iSCSI name>,t,0x<TPGT>" in
		 * UTF-8 encoding.
		 */
		tpgt = tpg->se_tpg_tfo->tpg_get_tag(tpg);
		scsi_name_len = sprintf(&buf[off], "%s,t,0x%04x",
					tpg->se_tpg_tfo->tpg_get_wwn(tpg), tpgt);
		scsi_name_len += 1 /* Include NULL terminator */;
		/*
		 * The null-terminated, null-padded (see 4.4.2) SCSI
		 * NAME STRING field contains a UTF-8 format string.
		 * The number of bytes in the SCSI NAME STRING field
		 * (i.e., the value in the DESIGNATOR LENGTH field)
		 * shall be no larger than 256 and shall be a multiple
		 * of four.
		 */
		padding = ((-scsi_name_len) & 3);
		if (padding)
			scsi_name_len += padding;
		if (scsi_name_len > 256)
			scsi_name_len = 256;

		buf[off-1] = scsi_name_len;
		off += scsi_name_len;
		/* Header size + Designation descriptor */
		len += (scsi_name_len + 4);

		/*
		 * Target device designator
		 */
		buf[off] =
			(tpg->se_tpg_tfo->get_fabric_proto_ident(tpg) << 4);
		buf[off++] |= 0x3; /* CODE SET == UTF-8 */
		buf[off] = 0x80; /* Set PIV=1 */
		/* Set ASSOCIATION == target device: 10b */
		buf[off] |= 0x20;
		/* DESIGNATOR TYPE == SCSI name string */
		buf[off++] |= 0x8;
		off += 2; /* Skip over Reserved and length */
		/*
		 * SCSI name string identifier containing $FABRIC_MOD
		 * dependent information.  For LIO-Target and iSCSI
		 * Target Port, this means "<iSCSI name>" in
		 * UTF-8 encoding.
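		 * Unlike the target port designator above, the target
		 * device SCSI name string omits the ",t,0x<TPGT>" suffix.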
		 */
		scsi_target_len = sprintf(&buf[off], "%s",
					  tpg->se_tpg_tfo->tpg_get_wwn(tpg));
		scsi_target_len += 1 /* Include NULL terminator */;
		/*
		 * The null-terminated, null-padded (see 4.4.2) SCSI
		 * NAME STRING field contains a UTF-8 format string.
		 * The number of bytes in the SCSI NAME STRING field
		 * (i.e., the value in the DESIGNATOR LENGTH field)
		 * shall be no larger than 256 and shall be a multiple
		 * of four.
		 */
		padding = ((-scsi_target_len) & 3);
		if (padding)
			scsi_target_len += padding;
		if (scsi_target_len > 256)
			scsi_target_len = 256;

		buf[off-1] = scsi_target_len;
		off += scsi_target_len;

		/* Header size + Designation descriptor */
		len += (scsi_target_len + 4);
	}
	buf[2] = ((len >> 8) & 0xff);
	buf[3] = (len & 0xff); /* Page Length for VPD 0x83 */
	return 0;
}
EXPORT_SYMBOL(spc_emulate_evpd_83);

/* Extended INQUIRY Data VPD Page */
static sense_reason_t
spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_dev;
	struct se_session *sess = cmd->se_sess;

	buf[3] = 0x3c;
	/*
	 * Set GRD_CHK + REF_CHK for TYPE1 protection, or GRD_CHK
	 * only for TYPE3 protection.
	 */
	if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
		if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE1_PROT ||
		    cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE1_PROT)
			buf[4] = 0x5;
		else if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE3_PROT ||
			 cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE3_PROT)
			buf[4] = 0x4;
	}

	/* Set HEADSUP, ORDSUP, SIMPSUP */
	buf[5] = 0x07;

	/* If WriteCache emulation is enabled, set V_SUP */
	if (se_dev_check_wce(dev))
		buf[6] = 0x01;
	/* If an LBA map is present set R_SUP */
	spin_lock(&cmd->se_dev->t10_alua.lba_map_lock);
	if (!list_empty(&dev->t10_alua.lba_map_list))
		buf[8] = 0x10;
	spin_unlock(&cmd->se_dev->t10_alua.lba_map_lock);
	return 0;
}

/* Block Limits VPD page */
static sense_reason_t
spc_emulate_evpd_b0(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_dev;
	int have_tp = 0;
	int opt, min;

	/*
	 * Following spc3r22 section 6.5.3 Block Limits VPD page, when
	 * emulate_tpu=1 or emulate_tpws=1 we expect a different page
	 * length for Thin Provisioning.
	 */
	if (dev->dev_attrib.emulate_tpu || dev->dev_attrib.emulate_tpws)
		have_tp = 1;

	buf[0] = dev->transport->get_device_type(dev);
	buf[3] = have_tp ? 0x3c : 0x10;

	/* Set WSNZ to 1 */
	buf[4] = 0x01;
	/*
	 * Set MAXIMUM COMPARE AND WRITE LENGTH
	 */
	if (dev->dev_attrib.emulate_caw)
		buf[5] = 0x01;

	/*
	 * Set OPTIMAL TRANSFER LENGTH GRANULARITY
	 */
	if (dev->transport->get_io_min && (min = dev->transport->get_io_min(dev)))
		put_unaligned_be16(min / dev->dev_attrib.block_size, &buf[6]);
	else
		put_unaligned_be16(1, &buf[6]);

	/*
	 * Set MAXIMUM TRANSFER LENGTH
	 */
	put_unaligned_be32(dev->dev_attrib.hw_max_sectors, &buf[8]);

	/*
	 * Set OPTIMAL TRANSFER LENGTH
	 */
	if (dev->transport->get_io_opt && (opt = dev->transport->get_io_opt(dev)))
		put_unaligned_be32(opt / dev->dev_attrib.block_size, &buf[12]);
	else
		put_unaligned_be32(dev->dev_attrib.optimal_sectors, &buf[12]);

	/*
	 * Exit now if we don't support TP.
	 */
	if (!have_tp)
		goto max_write_same;

	/*
	 * Set MAXIMUM UNMAP LBA COUNT
	 */
	put_unaligned_be32(dev->dev_attrib.max_unmap_lba_count, &buf[20]);

	/*
	 * Set MAXIMUM UNMAP BLOCK DESCRIPTOR COUNT
	 */
	put_unaligned_be32(dev->dev_attrib.max_unmap_block_desc_count,
			   &buf[24]);

	/*
	 * Set OPTIMAL UNMAP GRANULARITY
	 */
	put_unaligned_be32(dev->dev_attrib.unmap_granularity, &buf[28]);

	/*
	 * UNMAP GRANULARITY ALIGNMENT
	 */
	put_unaligned_be32(dev->dev_attrib.unmap_granularity_alignment,
			   &buf[32]);
	if (dev->dev_attrib.unmap_granularity_alignment != 0)
		buf[32] |= 0x80; /* Set the UGAVALID bit */

	/*
	 * MAXIMUM WRITE SAME LENGTH
	 */
max_write_same:
	put_unaligned_be64(dev->dev_attrib.max_write_same_len, &buf[36]);

	return 0;
}

/* Block Device Characteristics VPD page */
static sense_reason_t
spc_emulate_evpd_b1(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_dev;

	buf[0] = dev->transport->get_device_type(dev);
	buf[3] = 0x3c;
	buf[5] = dev->dev_attrib.is_nonrot ? 1 : 0;

	return 0;
}

/* Thin Provisioning VPD */
static sense_reason_t
spc_emulate_evpd_b2(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_dev;

	/*
	 * From spc3r22 section 6.5.4 Thin Provisioning VPD page:
	 *
	 * The PAGE LENGTH field is defined in SPC-4. If the DP bit is set to
	 * zero, then the page length shall be set to 0004h.  If the DP bit
	 * is set to one, then the page length shall be set to the value
	 * defined in table 162.
	 */
	buf[0] = dev->transport->get_device_type(dev);

	/*
	 * Set Hardcoded length mentioned above for DP=0
	 */
	put_unaligned_be16(0x0004, &buf[2]);

	/*
	 * The THRESHOLD EXPONENT field indicates the threshold set size in
	 * LBAs as a power of 2 (i.e., the threshold set size is equal to
	 * 2^(threshold exponent)).
	 *
	 * Note that this is currently set to 0x00 as mkp says it will be
	 * changing again.  We can enable this once it has settled in T10
	 * and is actually used by Linux/SCSI ML code.
	 */
	buf[4] = 0x00;

	/*
	 * A TPU bit set to one indicates that the device server supports
	 * the UNMAP command (see 5.25). A TPU bit set to zero indicates
	 * that the device server does not support the UNMAP command.
	 */
	if (dev->dev_attrib.emulate_tpu != 0)
		buf[5] = 0x80;

	/*
	 * A TPWS bit set to one indicates that the device server supports
	 * the use of the WRITE SAME (16) command (see 5.42) to unmap LBAs.
	 * A TPWS bit set to zero indicates that the device server does not
	 * support the use of the WRITE SAME (16) command to unmap LBAs.
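	 * Setting 0x20 (LBPWS10) as well advertises support for
	 * WRITE SAME (10) with the UNMAP bit.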
	 */
	if (dev->dev_attrib.emulate_tpws != 0)
		buf[5] |= 0x40 | 0x20;

	return 0;
}

/* Referrals VPD page */
static sense_reason_t
spc_emulate_evpd_b3(struct se_cmd *cmd, unsigned char *buf)
{
	struct se_device *dev = cmd->se_dev;

	buf[0] = dev->transport->get_device_type(dev);
	buf[3] = 0x0c;
	put_unaligned_be32(dev->t10_alua.lba_map_segment_size, &buf[8]);
	put_unaligned_be32(dev->t10_alua.lba_map_segment_multiplier, &buf[12]);

	return 0;
}

static sense_reason_t
spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf);

static struct {
	uint8_t page;
	sense_reason_t (*emulate)(struct se_cmd *, unsigned char *);
} evpd_handlers[] = {
	{ .page = 0x00, .emulate = spc_emulate_evpd_00 },
	{ .page = 0x80, .emulate = spc_emulate_evpd_80 },
	{ .page = 0x83, .emulate = spc_emulate_evpd_83 },
	{ .page = 0x86, .emulate = spc_emulate_evpd_86 },
	{ .page = 0xb0, .emulate = spc_emulate_evpd_b0 },
	{ .page = 0xb1, .emulate = spc_emulate_evpd_b1 },
	{ .page = 0xb2, .emulate = spc_emulate_evpd_b2 },
	{ .page = 0xb3, .emulate = spc_emulate_evpd_b3 },
};

/* supported vital product data pages */
static sense_reason_t
spc_emulate_evpd_00(struct se_cmd *cmd, unsigned char *buf)
{
	int p;

	/*
	 * Only report the INQUIRY EVPD=1 pages after a valid NAA
	 * Registered Extended LUN WWN has been set via ConfigFS
	 * during device creation/restart.
	 */
	if (cmd->se_dev->dev_flags & DF_EMULATED_VPD_UNIT_SERIAL) {
		buf[3] = ARRAY_SIZE(evpd_handlers);
		for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p)
			buf[p + 4] = evpd_handlers[p].page;
	}

	return 0;
}

static sense_reason_t
spc_emulate_inquiry(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	struct se_portal_group *tpg = cmd->se_lun->lun_sep->sep_tpg;
	unsigned char *rbuf;
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned char *buf;
	sense_reason_t ret;
	int p;
	int len = 0;

	buf = kzalloc(SE_INQUIRY_BUF, GFP_KERNEL);
	if (!buf) {
		pr_err("Unable to allocate response buffer for INQUIRY\n");
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	if (dev == tpg->tpg_virt_lun0.lun_se_dev)
		buf[0] = 0x3f; /* Not connected */
	else
		buf[0] = dev->transport->get_device_type(dev);

	if (!(cdb[1] & 0x1)) {
		if (cdb[2]) {
			pr_err("INQUIRY with EVPD==0 but PAGE CODE=%02x\n",
			       cdb[2]);
			ret = TCM_INVALID_CDB_FIELD;
			goto out;
		}

		ret = spc_emulate_inquiry_std(cmd, buf);
		len = buf[4] + 5;
		goto out;
	}

	for (p = 0; p < ARRAY_SIZE(evpd_handlers); ++p) {
		if (cdb[2] == evpd_handlers[p].page) {
			buf[1] = cdb[2];
			ret = evpd_handlers[p].emulate(cmd, buf);
			len = get_unaligned_be16(&buf[2]) + 4;
			goto out;
		}
	}

	pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]);
	ret = TCM_INVALID_CDB_FIELD;

out:
	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, SE_INQUIRY_BUF, cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}
	kfree(buf);

	if (!ret)
		target_complete_cmd_with_length(cmd, GOOD, len);
	return ret;
}

static int spc_modesense_rwrecovery(struct se_cmd *cmd, u8 pc, u8 *p)
{
	p[0] = 0x01;
	p[1] = 0x0a;

	/* No changeable values for now */
	if (pc == 1)
		goto out;

out:
	return 12;
}

static int spc_modesense_control(struct se_cmd *cmd, u8 pc, u8 *p)
772{ 773 struct se_device *dev = cmd->se_dev; 774 struct se_session *sess = cmd->se_sess; 775 776 p[0] = 0x0a; 777 p[1] = 0x0a; 778 779 /* No changeable values for now */ 780 if (pc == 1) 781 goto out; 782 783 p[2] = 2; 784 /* 785 * From spc4r23, 7.4.7 Control mode page 786 * 787 * The QUEUE ALGORITHM MODIFIER field (see table 368) specifies 788 * restrictions on the algorithm used for reordering commands 789 * having the SIMPLE task attribute (see SAM-4). 790 * 791 * Table 368 -- QUEUE ALGORITHM MODIFIER field 792 * Code Description 793 * 0h Restricted reordering 794 * 1h Unrestricted reordering allowed 795 * 2h to 7h Reserved 796 * 8h to Fh Vendor specific 797 * 798 * A value of zero in the QUEUE ALGORITHM MODIFIER field specifies that 799 * the device server shall order the processing sequence of commands 800 * having the SIMPLE task attribute such that data integrity is maintained 801 * for that I_T nexus (i.e., if the transmission of new SCSI transport protocol 802 * requests is halted at any time, the final value of all data observable 803 * on the medium shall be the same as if all the commands had been processed 804 * with the ORDERED task attribute). 805 * 806 * A value of one in the QUEUE ALGORITHM MODIFIER field specifies that the 807 * device server may reorder the processing sequence of commands having the 808 * SIMPLE task attribute in any manner. Any data integrity exposures related to 809 * command sequence order shall be explicitly handled by the application client 810 * through the selection of appropriate ommands and task attributes. 811 */ 812 p[3] = (dev->dev_attrib.emulate_rest_reord == 1) ? 0x00 : 0x10; 813 /* 814 * From spc4r17, section 7.4.6 Control mode Page 815 * 816 * Unit Attention interlocks control (UN_INTLCK_CTRL) to code 00b 817 * 818 * 00b: The logical unit shall clear any unit attention condition 819 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION 820 * status and shall not establish a unit attention condition when a com- 821 * mand is completed with BUSY, TASK SET FULL, or RESERVATION CONFLICT 822 * status. 823 * 824 * 10b: The logical unit shall not clear any unit attention condition 825 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION 826 * status and shall not establish a unit attention condition when 827 * a command is completed with BUSY, TASK SET FULL, or RESERVATION 828 * CONFLICT status. 829 * 830 * 11b a The logical unit shall not clear any unit attention condition 831 * reported in the same I_T_L_Q nexus transaction as a CHECK CONDITION 832 * status and shall establish a unit attention condition for the 833 * initiator port associated with the I_T nexus on which the BUSY, 834 * TASK SET FULL, or RESERVATION CONFLICT status is being returned. 835 * Depending on the status, the additional sense code shall be set to 836 * PREVIOUS BUSY STATUS, PREVIOUS TASK SET FULL STATUS, or PREVIOUS 837 * RESERVATION CONFLICT STATUS. Until it is cleared by a REQUEST SENSE 838 * command, a unit attention condition shall be established only once 839 * for a BUSY, TASK SET FULL, or RESERVATION CONFLICT status regardless 840 * to the number of commands completed with one of those status codes. 841 */ 842 p[4] = (dev->dev_attrib.emulate_ua_intlck_ctrl == 2) ? 0x30 : 843 (dev->dev_attrib.emulate_ua_intlck_ctrl == 1) ? 0x20 : 0x00; 844 /* 845 * From spc4r17, section 7.4.6 Control mode Page 846 * 847 * Task Aborted Status (TAS) bit set to zero. 
	 *
	 * A task aborted status (TAS) bit set to zero specifies that aborted
	 * tasks shall be terminated by the device server without any response
	 * to the application client. A TAS bit set to one specifies that tasks
	 * aborted by the actions of an I_T nexus other than the I_T nexus on
	 * which the command was received shall be completed with TASK ABORTED
	 * status (see SAM-4).
	 */
	p[5] = (dev->dev_attrib.emulate_tas) ? 0x40 : 0x00;
	/*
	 * From spc4r30, section 7.5.7 Control mode page
	 *
	 * Application Tag Owner (ATO) bit set to one.
	 *
	 * If the ATO bit is set to one the device server shall not modify the
	 * LOGICAL BLOCK APPLICATION TAG field and, depending on the protection
	 * type, shall not modify the contents of the LOGICAL BLOCK REFERENCE
	 * TAG field.
	 */
	if (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) {
		if (dev->dev_attrib.pi_prot_type || sess->sess_prot_type)
			p[5] |= 0x80;
	}

	p[8] = 0xff;
	p[9] = 0xff;
	p[11] = 30;

out:
	return 12;
}

static int spc_modesense_caching(struct se_cmd *cmd, u8 pc, u8 *p)
{
	struct se_device *dev = cmd->se_dev;

	p[0] = 0x08;
	p[1] = 0x12;

	/* No changeable values for now */
	if (pc == 1)
		goto out;

	if (se_dev_check_wce(dev))
		p[2] = 0x04; /* Write Cache Enable */
	p[12] = 0x20; /* Disabled Read Ahead */

out:
	return 20;
}

static int spc_modesense_informational_exceptions(struct se_cmd *cmd, u8 pc, unsigned char *p)
{
	p[0] = 0x1c;
	p[1] = 0x0a;

	/* No changeable values for now */
	if (pc == 1)
		goto out;

out:
	return 12;
}

static struct {
	uint8_t page;
	uint8_t subpage;
	int (*emulate)(struct se_cmd *, u8, unsigned char *);
} modesense_handlers[] = {
	{ .page = 0x01, .subpage = 0x00, .emulate = spc_modesense_rwrecovery },
	{ .page = 0x08, .subpage = 0x00, .emulate = spc_modesense_caching },
	{ .page = 0x0a, .subpage = 0x00, .emulate = spc_modesense_control },
	{ .page = 0x1c, .subpage = 0x00, .emulate = spc_modesense_informational_exceptions },
};

static void spc_modesense_write_protect(unsigned char *buf, int type)
{
	/*
	 * I believe that the WP bit (bit 7) in the mode header is the same for
	 * all device types.
	 */
	switch (type) {
	case TYPE_DISK:
	case TYPE_TAPE:
	default:
		buf[0] |= 0x80; /* WP bit */
		break;
	}
}

static void spc_modesense_dpofua(unsigned char *buf, int type)
{
	switch (type) {
	case TYPE_DISK:
		buf[0] |= 0x10; /* DPOFUA bit */
		break;
	default:
		break;
	}
}

static int spc_modesense_blockdesc(unsigned char *buf, u64 blocks, u32 block_size)
{
	*buf++ = 8;
	put_unaligned_be32(min(blocks, 0xffffffffull), buf);
	buf += 4;
	put_unaligned_be32(block_size, buf);
	return 9;
}

static int spc_modesense_long_blockdesc(unsigned char *buf, u64 blocks, u32 block_size)
{
	if (blocks <= 0xffffffff)
		return spc_modesense_blockdesc(buf + 3, blocks, block_size) + 3;

	*buf++ = 1;		/* LONGLBA */
	buf += 2;
	*buf++ = 16;
	put_unaligned_be64(blocks, buf);
	buf += 12;
	put_unaligned_be32(block_size, buf);

	return 17;
}

static sense_reason_t spc_emulate_modesense(struct se_cmd *cmd)
{
	struct se_device *dev = cmd->se_dev;
	char *cdb = cmd->t_task_cdb;
	unsigned char buf[SE_MODE_PAGE_BUF], *rbuf;
	int type = dev->transport->get_device_type(dev);
	int ten = (cmd->t_task_cdb[0] == MODE_SENSE_10);
	bool dbd = !!(cdb[1] & 0x08);
	bool llba = ten ? !!(cdb[1] & 0x10) : false;
	u8 pc = cdb[2] >> 6;
	u8 page = cdb[2] & 0x3f;
	u8 subpage = cdb[3];
	int length = 0;
	int ret;
	int i;

	memset(buf, 0, SE_MODE_PAGE_BUF);

	/*
	 * Skip over MODE DATA LENGTH + MEDIUM TYPE fields to byte 3 for
	 * MODE_SENSE_10 and byte 2 for MODE_SENSE (6).
	 */
	length = ten ? 3 : 2;

	/* DEVICE-SPECIFIC PARAMETER */
	if ((cmd->se_lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) ||
	    (cmd->se_deve &&
	     (cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)))
		spc_modesense_write_protect(&buf[length], type);

	if ((se_dev_check_wce(dev)) &&
	    (dev->dev_attrib.emulate_fua_write > 0))
		spc_modesense_dpofua(&buf[length], type);

	++length;

	/* BLOCK DESCRIPTOR */

	/*
	 * For now we only include a block descriptor for disk (SBC)
	 * devices; other command sets use a slightly different format.
	 */
	if (!dbd && type == TYPE_DISK) {
		u64 blocks = dev->transport->get_blocks(dev);
		u32 block_size = dev->dev_attrib.block_size;

		if (ten) {
			if (llba) {
				length += spc_modesense_long_blockdesc(&buf[length],
								       blocks, block_size);
			} else {
				length += 3;
				length += spc_modesense_blockdesc(&buf[length],
								  blocks, block_size);
			}
		} else {
			length += spc_modesense_blockdesc(&buf[length], blocks,
							  block_size);
		}
	} else {
		if (ten)
			length += 4;
		else
			length += 1;
	}

	if (page == 0x3f) {
		if (subpage != 0x00 && subpage != 0xff) {
			pr_warn("MODE_SENSE: Invalid subpage code: 0x%02x\n", subpage);
			return TCM_INVALID_CDB_FIELD;
		}

		for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i) {
			/*
			 * Tricky way to say all subpage 00h for
			 * subpage==0, all subpages for subpage==0xff
			 * (and we just checked above that those are
			 * the only two possibilities).
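			 * For MODE SENSE (6), the loop below also stops
			 * adding pages once the response would no longer
			 * fit in the one-byte MODE DATA LENGTH field.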
			 */
			if ((modesense_handlers[i].subpage & ~subpage) == 0) {
				ret = modesense_handlers[i].emulate(cmd, pc, &buf[length]);
				if (!ten && length + ret >= 255)
					break;
				length += ret;
			}
		}

		goto set_length;
	}

	for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i)
		if (modesense_handlers[i].page == page &&
		    modesense_handlers[i].subpage == subpage) {
			length += modesense_handlers[i].emulate(cmd, pc, &buf[length]);
			goto set_length;
		}

	/*
	 * We don't intend to implement:
	 *  - obsolete page 03h "format parameters" (checked by Solaris)
	 */
	if (page != 0x03)
		pr_err("MODE SENSE: unimplemented page/subpage: 0x%02x/0x%02x\n",
		       page, subpage);

	return TCM_UNKNOWN_MODE_PAGE;

set_length:
	if (ten)
		put_unaligned_be16(length - 2, buf);
	else
		buf[0] = length - 1;

	rbuf = transport_kmap_data_sg(cmd);
	if (rbuf) {
		memcpy(rbuf, buf, min_t(u32, SE_MODE_PAGE_BUF, cmd->data_length));
		transport_kunmap_data_sg(cmd);
	}

	target_complete_cmd_with_length(cmd, GOOD, length);
	return 0;
}

static sense_reason_t spc_emulate_modeselect(struct se_cmd *cmd)
{
	char *cdb = cmd->t_task_cdb;
	bool ten = cdb[0] == MODE_SELECT_10;
	int off = ten ? 8 : 4;
	bool pf = !!(cdb[1] & 0x10);
	u8 page, subpage;
	unsigned char *buf;
	unsigned char tbuf[SE_MODE_PAGE_BUF];
	int length;
	sense_reason_t ret = 0;
	int i;

	if (!cmd->data_length) {
		target_complete_cmd(cmd, GOOD);
		return 0;
	}

	if (cmd->data_length < off + 2)
		return TCM_PARAMETER_LIST_LENGTH_ERROR;

	buf = transport_kmap_data_sg(cmd);
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	if (!pf) {
		ret = TCM_INVALID_CDB_FIELD;
		goto out;
	}

	page = buf[off] & 0x3f;
	subpage = buf[off] & 0x40 ?
		  buf[off + 1] : 0;

	for (i = 0; i < ARRAY_SIZE(modesense_handlers); ++i)
		if (modesense_handlers[i].page == page &&
		    modesense_handlers[i].subpage == subpage) {
			memset(tbuf, 0, SE_MODE_PAGE_BUF);
			length = modesense_handlers[i].emulate(cmd, 0, tbuf);
			goto check_contents;
		}

	ret = TCM_UNKNOWN_MODE_PAGE;
	goto out;

check_contents:
	if (cmd->data_length < off + length) {
		ret = TCM_PARAMETER_LIST_LENGTH_ERROR;
		goto out;
	}

	if (memcmp(buf + off, tbuf, length))
		ret = TCM_INVALID_PARAMETER_LIST;

out:
	transport_kunmap_data_sg(cmd);

	if (!ret)
		target_complete_cmd(cmd, GOOD);
	return ret;
}

static sense_reason_t spc_emulate_request_sense(struct se_cmd *cmd)
{
	unsigned char *cdb = cmd->t_task_cdb;
	unsigned char *rbuf;
	u8 ua_asc = 0, ua_ascq = 0;
	unsigned char buf[SE_SENSE_BUF];

	memset(buf, 0, SE_SENSE_BUF);

	if (cdb[1] & 0x01) {
		pr_err("REQUEST_SENSE description emulation not"
		       " supported\n");
		return TCM_INVALID_CDB_FIELD;
	}

	rbuf = transport_kmap_data_sg(cmd);
	if (!rbuf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq)) {
		/*
		 * CURRENT ERROR, UNIT ATTENTION
		 */
		buf[0] = 0x70;
		buf[SPC_SENSE_KEY_OFFSET] = UNIT_ATTENTION;

		/*
		 * The Additional Sense Code (ASC) from the UNIT ATTENTION
		 */
		buf[SPC_ASC_KEY_OFFSET] = ua_asc;
		buf[SPC_ASCQ_KEY_OFFSET] = ua_ascq;
		buf[7] = 0x0A;
	} else {
		/*
		 * CURRENT ERROR, NO SENSE
		 */
		buf[0] = 0x70;
		buf[SPC_SENSE_KEY_OFFSET] = NO_SENSE;

		/*
		 * NO ADDITIONAL SENSE INFORMATION
		 */
		buf[SPC_ASC_KEY_OFFSET] = 0x00;
		buf[7] = 0x0A;
	}

	memcpy(rbuf, buf, min_t(u32, sizeof(buf), cmd->data_length));
	transport_kunmap_data_sg(cmd);

	target_complete_cmd(cmd, GOOD);
	return 0;
}

sense_reason_t spc_emulate_report_luns(struct se_cmd *cmd)
{
	struct se_dev_entry *deve;
	struct se_session *sess = cmd->se_sess;
	unsigned char *buf;
	u32 lun_count = 0, offset = 8, i;

	if (cmd->data_length < 16) {
		pr_warn("REPORT LUNS allocation length %u too small\n",
			cmd->data_length);
		return TCM_INVALID_CDB_FIELD;
	}

	buf = transport_kmap_data_sg(cmd);
	if (!buf)
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;

	/*
	 * If no struct se_session pointer is present, this struct se_cmd is
	 * coming via a target_core_mod PASSTHROUGH op, and not through
	 * a $FABRIC_MOD.  In that case, report LUN=0 only.
	 */
	if (!sess) {
		int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
		lun_count = 1;
		goto done;
	}

	spin_lock_irq(&sess->se_node_acl->device_list_lock);
	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
		deve = sess->se_node_acl->device_list[i];
		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
			continue;
		/*
		 * We determine the correct LUN LIST LENGTH even once we
		 * have reached the initial allocation length.
		 * See SPC2-R20 7.19.
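		 * lun_count keeps incrementing below even when a descriptor
		 * no longer fits in the allocated buffer.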
		 */
		lun_count++;
		if ((offset + 8) > cmd->data_length)
			continue;

		int_to_scsilun(deve->mapped_lun, (struct scsi_lun *)&buf[offset]);
		offset += 8;
	}
	spin_unlock_irq(&sess->se_node_acl->device_list_lock);

	/*
	 * See SPC3 r07, page 159.
	 */
done:
	lun_count *= 8;
	buf[0] = ((lun_count >> 24) & 0xff);
	buf[1] = ((lun_count >> 16) & 0xff);
	buf[2] = ((lun_count >> 8) & 0xff);
	buf[3] = (lun_count & 0xff);
	transport_kunmap_data_sg(cmd);

	target_complete_cmd_with_length(cmd, GOOD, 8 + lun_count * 8);
	return 0;
}
EXPORT_SYMBOL(spc_emulate_report_luns);

static sense_reason_t
spc_emulate_testunitready(struct se_cmd *cmd)
{
	target_complete_cmd(cmd, GOOD);
	return 0;
}

sense_reason_t
spc_parse_cdb(struct se_cmd *cmd, unsigned int *size)
{
	struct se_device *dev = cmd->se_dev;
	unsigned char *cdb = cmd->t_task_cdb;

	switch (cdb[0]) {
	case MODE_SELECT:
		*size = cdb[4];
		cmd->execute_cmd = spc_emulate_modeselect;
		break;
	case MODE_SELECT_10:
		*size = (cdb[7] << 8) + cdb[8];
		cmd->execute_cmd = spc_emulate_modeselect;
		break;
	case MODE_SENSE:
		*size = cdb[4];
		cmd->execute_cmd = spc_emulate_modesense;
		break;
	case MODE_SENSE_10:
		*size = (cdb[7] << 8) + cdb[8];
		cmd->execute_cmd = spc_emulate_modesense;
		break;
	case LOG_SELECT:
	case LOG_SENSE:
		*size = (cdb[7] << 8) + cdb[8];
		break;
	case PERSISTENT_RESERVE_IN:
		*size = (cdb[7] << 8) + cdb[8];
		cmd->execute_cmd = target_scsi3_emulate_pr_in;
		break;
	case PERSISTENT_RESERVE_OUT:
		*size = (cdb[7] << 8) + cdb[8];
		cmd->execute_cmd = target_scsi3_emulate_pr_out;
		break;
	case RELEASE:
	case RELEASE_10:
		if (cdb[0] == RELEASE_10)
			*size = (cdb[7] << 8) | cdb[8];
		else
			*size = cmd->data_length;

		cmd->execute_cmd = target_scsi2_reservation_release;
		break;
	case RESERVE:
	case RESERVE_10:
		/*
		 * The SPC-2 RESERVE does not contain a size in the SCSI CDB.
		 * Assume the passthrough or $FABRIC_MOD will tell us about it.
		 */
		if (cdb[0] == RESERVE_10)
			*size = (cdb[7] << 8) | cdb[8];
		else
			*size = cmd->data_length;

		cmd->execute_cmd = target_scsi2_reservation_reserve;
		break;
	case REQUEST_SENSE:
		*size = cdb[4];
		cmd->execute_cmd = spc_emulate_request_sense;
		break;
	case INQUIRY:
		*size = (cdb[3] << 8) + cdb[4];

		/*
		 * Do implicit HEAD_OF_QUEUE processing for INQUIRY.
		 * See spc4r17 section 5.3
		 */
		cmd->sam_task_attr = TCM_HEAD_TAG;
		cmd->execute_cmd = spc_emulate_inquiry;
		break;
	case SECURITY_PROTOCOL_IN:
	case SECURITY_PROTOCOL_OUT:
		*size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
		break;
	case EXTENDED_COPY:
		*size = get_unaligned_be32(&cdb[10]);
		cmd->execute_cmd = target_do_xcopy;
		break;
	case RECEIVE_COPY_RESULTS:
		*size = get_unaligned_be32(&cdb[10]);
		cmd->execute_cmd = target_do_receive_copy_results;
		break;
	case READ_ATTRIBUTE:
	case WRITE_ATTRIBUTE:
		*size = (cdb[10] << 24) | (cdb[11] << 16) |
			(cdb[12] << 8) | cdb[13];
		break;
	case RECEIVE_DIAGNOSTIC:
	case SEND_DIAGNOSTIC:
		*size = (cdb[3] << 8) | cdb[4];
		break;
	case WRITE_BUFFER:
		*size = (cdb[6] << 16) + (cdb[7] << 8) + cdb[8];
		break;
	case REPORT_LUNS:
		cmd->execute_cmd = spc_emulate_report_luns;
		*size = (cdb[6] << 24) | (cdb[7] << 16) | (cdb[8] << 8) | cdb[9];
		/*
		 * Do implicit HEAD_OF_QUEUE processing for REPORT_LUNS
		 * See spc4r17 section 5.3
		 */
		cmd->sam_task_attr = TCM_HEAD_TAG;
		break;
	case TEST_UNIT_READY:
		cmd->execute_cmd = spc_emulate_testunitready;
		*size = 0;
		break;
	case MAINTENANCE_IN:
		if (dev->transport->get_device_type(dev) != TYPE_ROM) {
			/*
			 * MAINTENANCE_IN from SCC-2
			 * Check for emulated MI_REPORT_TARGET_PGS
			 */
			if ((cdb[1] & 0x1f) == MI_REPORT_TARGET_PGS) {
				cmd->execute_cmd =
					target_emulate_report_target_port_groups;
			}
			*size = get_unaligned_be32(&cdb[6]);
		} else {
			/*
			 * GPCMD_SEND_KEY from multi media commands
			 */
			*size = get_unaligned_be16(&cdb[8]);
		}
		break;
	case MAINTENANCE_OUT:
		if (dev->transport->get_device_type(dev) != TYPE_ROM) {
			/*
			 * MAINTENANCE_OUT from SCC-2
			 * Check for emulated MO_SET_TARGET_PGS.
			 */
			if (cdb[1] == MO_SET_TARGET_PGS) {
				cmd->execute_cmd =
					target_emulate_set_target_port_groups;
			}
			*size = get_unaligned_be32(&cdb[6]);
		} else {
			/*
			 * GPCMD_SEND_KEY from multi media commands
			 */
			*size = get_unaligned_be16(&cdb[8]);
		}
		break;
	default:
		pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode"
			" 0x%02x, sending CHECK_CONDITION.\n",
			cmd->se_tfo->get_fabric_name(), cdb[0]);
		return TCM_UNSUPPORTED_SCSI_OPCODE;
	}

	return 0;
}
EXPORT_SYMBOL(spc_parse_cdb);