"Fossies" - the Fresh Open Source Software Archive  

Source code changes of the file "lib/sg_pt_linux_nvme.c" between
sdparm-1.11.tgz and sdparm-1.12.tgz

About: sdparm lets you access SCSI mode pages, read VPD pages and send simple SCSI commands (providing, for SCSI disks, functionality similar to what "hdparm" provides for ATA disks).

sg_pt_linux_nvme.c (sdparm-1.11.tgz) vs. sg_pt_linux_nvme.c (sdparm-1.12.tgz)
/* /*
* Copyright (c) 2017-2019 Douglas Gilbert. * Copyright (c) 2017-2021 Douglas Gilbert.
* All rights reserved. * All rights reserved.
* Use of this source code is governed by a BSD-style * Use of this source code is governed by a BSD-style
* license that can be found in the BSD_LICENSE file. * license that can be found in the BSD_LICENSE file.
* *
* SPDX-License-Identifier: BSD-2-Clause * SPDX-License-Identifier: BSD-2-Clause
* *
* The code to use the NVMe Management Interface (MI) SES pass-through * The code to use the NVMe Management Interface (MI) SES pass-through
* was provided by WDC in November 2017. * was provided by WDC in November 2017.
*/ */
skipping to change at line 44 skipping to change at line 44
* but WITHOUT ANY WARRANTY; without even the implied warranty of * but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details. * GNU General Public License for more details.
* *
* You should have received a copy of the GNU General Public License * You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software * along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301, USA. * MA 02110-1301, USA.
*/ */
/* sg_pt_linux_nvme version 1.09 20190303 */ /* sg_pt_linux_nvme version 1.15 20210102 */
/* This file contains a small "SPC-only" SNTL to support the SES pass-through /* This file contains a small "SPC-only" SNTL to support the SES pass-through
* of SEND DIAGNOSTIC and RECEIVE DIAGNOSTIC RESULTS through NVME-MI * of SEND DIAGNOSTIC and RECEIVE DIAGNOSTIC RESULTS through NVME-MI
* SES Send and SES Receive. */ * SES Send and SES Receive. */
#include <stdio.h> #include <stdio.h>
#include <stdlib.h> #include <stdlib.h>
#include <stdarg.h> #include <stdarg.h>
#include <stdbool.h> #include <stdbool.h>
#include <string.h> #include <string.h>
skipping to change at line 88 skipping to change at line 88
#include "sg_unaligned.h" #include "sg_unaligned.h"
#include "sg_pr2serr.h" #include "sg_pr2serr.h"
#define SCSI_INQUIRY_OPC 0x12 #define SCSI_INQUIRY_OPC 0x12
#define SCSI_REPORT_LUNS_OPC 0xa0 #define SCSI_REPORT_LUNS_OPC 0xa0
#define SCSI_TEST_UNIT_READY_OPC 0x0 #define SCSI_TEST_UNIT_READY_OPC 0x0
#define SCSI_REQUEST_SENSE_OPC 0x3 #define SCSI_REQUEST_SENSE_OPC 0x3
#define SCSI_SEND_DIAGNOSTIC_OPC 0x1d #define SCSI_SEND_DIAGNOSTIC_OPC 0x1d
#define SCSI_RECEIVE_DIAGNOSTIC_OPC 0x1c #define SCSI_RECEIVE_DIAGNOSTIC_OPC 0x1c
#define SCSI_MAINT_IN_OPC 0xa3 #define SCSI_MAINT_IN_OPC 0xa3
#define SCSI_READ10_OPC 0x28
#define SCSI_READ16_OPC 0x88
#define SCSI_REP_SUP_OPCS_OPC 0xc #define SCSI_REP_SUP_OPCS_OPC 0xc
#define SCSI_REP_SUP_TMFS_OPC 0xd #define SCSI_REP_SUP_TMFS_OPC 0xd
#define SCSI_MODE_SENSE10_OPC 0x5a #define SCSI_MODE_SENSE10_OPC 0x5a
#define SCSI_MODE_SELECT10_OPC 0x55 #define SCSI_MODE_SELECT10_OPC 0x55
#define SCSI_READ_CAPACITY10_OPC 0x25 #define SCSI_READ_CAPACITY10_OPC 0x25
#define SCSI_START_STOP_OPC 0x1b
#define SCSI_SYNC_CACHE10_OPC 0x35
#define SCSI_SYNC_CACHE16_OPC 0x91
#define SCSI_VERIFY10_OPC 0x2f
#define SCSI_VERIFY16_OPC 0x8f
#define SCSI_WRITE10_OPC 0x2a
#define SCSI_WRITE16_OPC 0x8a
#define SCSI_WRITE_SAME10_OPC 0x41
#define SCSI_WRITE_SAME16_OPC 0x93
#define SCSI_SERVICE_ACT_IN_OPC 0x9e #define SCSI_SERVICE_ACT_IN_OPC 0x9e
#define SCSI_READ_CAPACITY16_SA 0x10 #define SCSI_READ_CAPACITY16_SA 0x10
#define SCSI_SA_MSK 0x1f #define SCSI_SA_MSK 0x1f
/* Additional Sense Code (ASC) */ /* Additional Sense Code (ASC) */
#define NO_ADDITIONAL_SENSE 0x0 #define NO_ADDITIONAL_SENSE 0x0
#define LOGICAL_UNIT_NOT_READY 0x4 #define LOGICAL_UNIT_NOT_READY 0x4
#define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8 #define LOGICAL_UNIT_COMMUNICATION_FAILURE 0x8
#define UNRECOVERED_READ_ERR 0x11 #define UNRECOVERED_READ_ERR 0x11
#define PARAMETER_LIST_LENGTH_ERR 0x1a #define PARAMETER_LIST_LENGTH_ERR 0x1a
skipping to change at line 125 skipping to change at line 136
#define BUS_RESET_ASCQ 0x2 /* scsi bus reset occurred */ #define BUS_RESET_ASCQ 0x2 /* scsi bus reset occurred */
#define MODE_CHANGED_ASCQ 0x1 /* mode parameters changed */ #define MODE_CHANGED_ASCQ 0x1 /* mode parameters changed */
#define CAPACITY_CHANGED_ASCQ 0x9 #define CAPACITY_CHANGED_ASCQ 0x9
#define SAVING_PARAMS_UNSUP 0x39 #define SAVING_PARAMS_UNSUP 0x39
#define TRANSPORT_PROBLEM 0x4b #define TRANSPORT_PROBLEM 0x4b
#define THRESHOLD_EXCEEDED 0x5d #define THRESHOLD_EXCEEDED 0x5d
#define LOW_POWER_COND_ON 0x5e #define LOW_POWER_COND_ON 0x5e
#define MISCOMPARE_VERIFY_ASC 0x1d #define MISCOMPARE_VERIFY_ASC 0x1d
#define MICROCODE_CHANGED_ASCQ 0x1 /* with TARGET_CHANGED_ASC */ #define MICROCODE_CHANGED_ASCQ 0x1 /* with TARGET_CHANGED_ASC */
#define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16 #define MICROCODE_CHANGED_WO_RESET_ASCQ 0x16
#define PCIE_ERR_ASC 0x4b
#define PCIE_UNSUPP_REQ_ASCQ 0x13
/* NVMe Admin commands */
#define SG_NVME_AD_GET_FEATURE 0xa
#define SG_NVME_AD_SET_FEATURE 0x9
#define SG_NVME_AD_IDENTIFY 0x6 /* similar to SCSI INQUIRY */
#define SG_NVME_AD_MI_RECEIVE 0x1e /* MI: Management Interface */
#define SG_NVME_AD_MI_SEND 0x1d /* hmmm, same opcode as SEND DIAG */
/* NVMe NVM (Non-Volatile Memory) commands */
#define SG_NVME_NVM_FLUSH 0x0 /* SCSI SYNCHRONIZE CACHE */
#define SG_NVME_NVM_COMPARE 0x5 /* SCSI VERIFY(BYTCHK=1) */
#define SG_NVME_NVM_READ 0x2
#define SG_NVME_NVM_VERIFY 0xc /* SCSI VERIFY(BYTCHK=0) */
#define SG_NVME_NVM_WRITE 0x1
#define SG_NVME_NVM_WRITE_ZEROES 0x8 /* SCSI WRITE SAME */
#define SG_NVME_NVM_CDW12_FUA (1 << 30) /* Force Unit Access bit */
#if (HAVE_NVME && (! IGNORE_NVME)) #if (HAVE_NVME && (! IGNORE_NVME))
/* This trims given NVMe block device name in Linux (e.g. /dev/nvme0n1p5) /* This trims given NVMe block device name in Linux (e.g. /dev/nvme0n1p5)
* to the name of its associated char device (e.g. /dev/nvme0). If this * to the name of its associated char device (e.g. /dev/nvme0). If this
* occurs true is returned and the char device name is placed in 'b' (as * occurs true is returned and the char device name is placed in 'b' (as
* long as b_len is sufficient). Otherwise false is returned. */ * long as b_len is sufficient). Otherwise false is returned. */
bool bool
sg_get_nvme_char_devname(const char * nvme_block_devname, uint32_t b_len, sg_get_nvme_char_devname(const char * nvme_block_devname, uint32_t b_len,
char * b) char * b)
skipping to change at line 280 skipping to change at line 310
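For readers skimming the diff, a minimal usage sketch of the helper documented in the hunk above (illustrative only, not part of the sdparm source; the buffer name and size are arbitrary and the declaration is assumed to come from sg_pt_linux.h):

    #include <stdio.h>
    #include <stdbool.h>
    #include <stdint.h>

    /* declaration as shown above; the implementation is in this file */
    bool sg_get_nvme_char_devname(const char * nvme_block_devname,
                                  uint32_t b_len, char * b);

    int main(void)
    {
        char b[64];

        /* e.g. "/dev/nvme0n1p5" should yield "/dev/nvme0" */
        if (sg_get_nvme_char_devname("/dev/nvme0n1p5", sizeof(b), b))
            printf("char device: %s\n", b);
        else
            printf("not recognized as an NVMe block device name\n");
        return 0;
    }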
int time_secs, int vb) int time_secs, int vb)
{ {
const uint32_t cmd_len = sizeof(struct sg_nvme_passthru_cmd); const uint32_t cmd_len = sizeof(struct sg_nvme_passthru_cmd);
int res; int res;
uint32_t n; uint32_t n;
uint16_t sct_sc; uint16_t sct_sc;
const uint8_t * up = ((const uint8_t *)cmdp) + SG_NVME_PT_OPCODE; const uint8_t * up = ((const uint8_t *)cmdp) + SG_NVME_PT_OPCODE;
char nam[64]; char nam[64];
if (vb) if (vb)
sg_get_nvme_opcode_name(*up, true, sizeof(nam), nam); sg_get_nvme_opcode_name(*up, true /* ADMIN */, sizeof(nam), nam);
else else
nam[0] = '\0'; nam[0] = '\0';
cmdp->timeout_ms = (time_secs < 0) ? (-time_secs) : (1000 * time_secs); cmdp->timeout_ms = (time_secs < 0) ? (-time_secs) : (1000 * time_secs);
ptp->os_err = 0; ptp->os_err = 0;
if (vb > 2) { if (vb > 2) {
pr2ws("NVMe Admin command: %s\n", nam); pr2ws("NVMe Admin command: %s\n", nam);
hex2stderr((const uint8_t *)cmdp, cmd_len, 1); hex2stderr((const uint8_t *)cmdp, cmd_len, 1);
if ((vb > 3) && (! is_read) && dp) { if ((vb > 3) && (! is_read) && dp) {
uint32_t len = sg_get_unaligned_le32(up + SG_NVME_PT_DATA_LEN); uint32_t len = sg_get_unaligned_le32(up + SG_NVME_PT_DATA_LEN);
skipping to change at line 315 skipping to change at line 345
ptp->os_err = -res; ptp->os_err = -res;
if (vb > 1) { if (vb > 1) {
pr2ws("%s: ioctl for %s [0x%x] failed: %s " pr2ws("%s: ioctl for %s [0x%x] failed: %s "
"(errno=%d)\n", __func__, nam, *up, strerror(-res), -res); "(errno=%d)\n", __func__, nam, *up, strerror(-res), -res);
} }
return res; return res;
} }
/* Now res contains NVMe completion queue CDW3 31:17 (15 bits) */ /* Now res contains NVMe completion queue CDW3 31:17 (15 bits) */
ptp->nvme_result = cmdp->result; ptp->nvme_result = cmdp->result;
if (ptp->nvme_direct && ptp->io_hdr.response && if ((! ptp->nvme_our_sntl) && ptp->io_hdr.response &&
(ptp->io_hdr.max_response_len > 3)) { (ptp->io_hdr.max_response_len > 3)) {
/* build 32 byte "sense" buffer */ /* build 32 byte "sense" buffer */
uint8_t * sbp = (uint8_t *)(sg_uintptr_t)ptp->io_hdr.response; uint8_t * sbp = (uint8_t *)(sg_uintptr_t)ptp->io_hdr.response;
uint16_t st = (uint16_t)res; uint16_t st = (uint16_t)res;
n = ptp->io_hdr.max_response_len; n = ptp->io_hdr.max_response_len;
n = (n < 32) ? n : 32; n = (n < 32) ? n : 32;
memset(sbp, 0 , n); memset(sbp, 0 , n);
ptp->io_hdr.response_len = n; ptp->io_hdr.response_len = n;
sg_put_unaligned_le32(cmdp->result, sg_put_unaligned_le32(cmdp->result,
skipping to change at line 362 skipping to change at line 392
else { else {
pr2ws("\nData-in buffer (first 1024 of %u bytes):\n", n); pr2ws("\nData-in buffer (first 1024 of %u bytes):\n", n);
n = 1024; n = 1024;
} }
hex2stderr((const uint8_t *)dp, n, 0); hex2stderr((const uint8_t *)dp, n, 0);
} }
} }
return 0; return 0;
} }
/* see NVME MI document, NVMSR is NVM Subsystem Report */
static void static void
sntl_check_enclosure_override(struct sg_pt_linux_scsi * ptp, int vb) sntl_check_enclosure_override(struct sg_pt_linux_scsi * ptp, int vb)
{ {
uint8_t * up = ptp->nvme_id_ctlp; uint8_t * up = ptp->nvme_id_ctlp;
uint8_t nvmsr; uint8_t nvmsr;
if (NULL == up) if (NULL == up)
return; return;
nvmsr = up[253]; nvmsr = up[253];
if (vb > 3) if (vb > 3)
skipping to change at line 422 skipping to change at line 453
} }
} }
static int static int
sntl_do_identify(struct sg_pt_linux_scsi * ptp, int cns, int nsid, sntl_do_identify(struct sg_pt_linux_scsi * ptp, int cns, int nsid,
int time_secs, int u_len, uint8_t * up, int vb) int time_secs, int u_len, uint8_t * up, int vb)
{ {
struct sg_nvme_passthru_cmd cmd; struct sg_nvme_passthru_cmd cmd;
memset(&cmd, 0, sizeof(cmd)); memset(&cmd, 0, sizeof(cmd));
cmd.opcode = 0x6; /* Identify */ cmd.opcode = SG_NVME_AD_IDENTIFY;
cmd.nsid = nsid; cmd.nsid = nsid;
cmd.cdw10 = cns; cmd.cdw10 = cns;
cmd.addr = (uint64_t)(sg_uintptr_t)up; cmd.addr = (uint64_t)(sg_uintptr_t)up;
cmd.data_len = u_len; cmd.data_len = u_len;
return sg_nvme_admin_cmd(ptp, &cmd, up, true, time_secs, vb); return sg_nvme_admin_cmd(ptp, &cmd, up, true, time_secs, vb);
} }
/* Currently only caches associated identify controller response (4096 bytes). /* Currently only caches associated identify controller response (4096 bytes).
* Returns 0 on success; otherwise a positive value is returned */ * Returns 0 on success; otherwise a positive value is returned */
static int static int
sntl_cache_identity(struct sg_pt_linux_scsi * ptp, int time_secs, int vb) sntl_cache_identify(struct sg_pt_linux_scsi * ptp, int time_secs, int vb)
{ {
int ret; int ret;
uint32_t pg_sz = sg_get_page_size(); uint32_t pg_sz = sg_get_page_size();
uint8_t * up; uint8_t * up;
up = sg_memalign(pg_sz, pg_sz, &ptp->free_nvme_id_ctlp, false); up = sg_memalign(pg_sz, pg_sz, &ptp->free_nvme_id_ctlp, false);
ptp->nvme_id_ctlp = up; ptp->nvme_id_ctlp = up;
if (NULL == up) { if (NULL == up) {
pr2ws("%s: sg_memalign() failed to get memory\n", __func__); pr2ws("%s: sg_memalign() failed to get memory\n", __func__);
return sg_convert_errno(ENOMEM); return sg_convert_errno(ENOMEM);
} }
ret = sntl_do_identify(ptp, 0x1 /* CNS */, 0 /* nsid */, time_secs, ret = sntl_do_identify(ptp, 0x1 /* CNS */, 0 /* nsid */, time_secs,
pg_sz, up, vb); pg_sz, up, vb);
if (0 == ret) if (0 == ret)
sntl_check_enclosure_override(ptp, vb); sntl_check_enclosure_override(ptp, vb);
return (ret < 0) ? sg_convert_errno(-ret) : ret; return (ret < 0) ? sg_convert_errno(-ret) : ret;
} }
/* If nsid==0 then set cmdp->nsid to SG_NVME_BROADCAST_NSID. */
static int
sntl_get_features(struct sg_pt_linux_scsi * ptp, int feature_id, int select,
uint32_t nsid, uint64_t din_addr, int time_secs, int vb)
{
int res;
struct sg_nvme_passthru_cmd cmd;
struct sg_nvme_passthru_cmd * cmdp = &cmd;
if (vb > 4)
pr2ws("%s: feature_id=0x%x, select=%d\n", __func__, feature_id,
select);
memset(cmdp, 0, sizeof(*cmdp));
cmdp->opcode = SG_NVME_AD_GET_FEATURE;
cmdp->nsid = nsid ? nsid : SG_NVME_BROADCAST_NSID;
select &= 0x7;
feature_id &= 0xff;
cmdp->cdw10 = (select << 8) | feature_id;
if (din_addr)
cmdp->addr = din_addr;
cmdp->timeout_ms = (time_secs < 0) ? 0 : (1000 * time_secs);
res = sg_nvme_admin_cmd(ptp, cmdp, NULL, false, time_secs, vb);
if (res)
return res;
ptp->os_err = 0;
ptp->nvme_status = 0;
return 0;
}
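As a worked example of the cdw10 packing done by sntl_get_features() above (a sketch, not part of the source; the helper name is hypothetical): requesting the current value of the Power Management feature uses feature_id 0x2 and select 0, and the power state then comes back in the low 5 bits of completion dword 0 (ptp->nvme_result), which is how sntl_tur() and sntl_req_sense() below read it.

    #include <stdint.h>

    /* Pack Get Features CDW10 the same way sntl_get_features() does:
     * bits 7:0 = Feature Identifier, bits 10:8 = SEL. */
    static uint32_t get_features_cdw10(int feature_id, int select)
    {
        return (uint32_t)(((select & 0x7) << 8) | (feature_id & 0xff));
    }

    /* get_features_cdw10(0x2, 0) == 0x0002 : current Power Management;
     * the 5 bit power state is then in bits 4:0 of the returned result. */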
static const char * nvme_scsi_vendor_str = "NVMe "; static const char * nvme_scsi_vendor_str = "NVMe ";
static const uint16_t inq_resp_len = 36; static const uint16_t inq_resp_len = 36;
static int static int
sntl_inq(struct sg_pt_linux_scsi * ptp, const uint8_t * cdbp, int time_secs, sntl_inq(struct sg_pt_linux_scsi * ptp, const uint8_t * cdbp, int time_secs,
int vb) int vb)
{ {
bool evpd; bool evpd;
bool cp_id_ctl = false; bool cp_id_ctl = false;
int res; int res;
skipping to change at line 476 skipping to change at line 536
uint8_t inq_dout[256]; uint8_t inq_dout[256];
if (vb > 3) if (vb > 3)
pr2ws("%s: time_secs=%d\n", __func__, time_secs); pr2ws("%s: time_secs=%d\n", __func__, time_secs);
if (0x2 & cdbp[1]) { /* Reject CmdDt=1 */ if (0x2 & cdbp[1]) { /* Reject CmdDt=1 */
mk_sense_invalid_fld(ptp, true, 1, 1, vb); mk_sense_invalid_fld(ptp, true, 1, 1, vb);
return 0; return 0;
} }
if (NULL == ptp->nvme_id_ctlp) { if (NULL == ptp->nvme_id_ctlp) {
res = sntl_cache_identity(ptp, time_secs, vb); res = sntl_cache_identify(ptp, time_secs, vb);
if (SG_LIB_NVME_STATUS == res) { if (SG_LIB_NVME_STATUS == res) {
mk_sense_from_nvme_status(ptp, vb); mk_sense_from_nvme_status(ptp, vb);
return 0; return 0;
} else if (res) /* should be negative errno */ } else if (res) /* should be negative errno */
return res; return res;
} }
memset(inq_dout, 0, sizeof(inq_dout)); memset(inq_dout, 0, sizeof(inq_dout));
alloc_len = sg_get_unaligned_be16(cdbp + 3); alloc_len = sg_get_unaligned_be16(cdbp + 3);
evpd = !!(0x1 & cdbp[1]); evpd = !!(0x1 & cdbp[1]);
pg_cd = cdbp[2]; pg_cd = cdbp[2];
skipping to change at line 586 skipping to change at line 646
memcpy(dp, inq_dout, (n < 16 ? n : 16)); memcpy(dp, inq_dout, (n < 16 ? n : 16));
if (n > 16) if (n > 16)
memcpy(dp + 16, ptp->nvme_id_ctlp, n - 16); memcpy(dp + 16, ptp->nvme_id_ctlp, n - 16);
} else } else
memcpy(dp, inq_dout, n); memcpy(dp, inq_dout, n);
} }
} }
} else { /* Standard INQUIRY response */ } else { /* Standard INQUIRY response */
/* pdt=0 --> disk; pdt=0xd --> SES; pdt=3 --> processor (safte) */ /* pdt=0 --> disk; pdt=0xd --> SES; pdt=3 --> processor (safte) */
inq_dout[0] = (0x1f & ptp->dev_stat.pdt); /* (PQ=0)<<5 */ inq_dout[0] = (0x1f & ptp->dev_stat.pdt); /* (PQ=0)<<5 */
/* inq_dout[1] = (RMD=0)<<7 | (LU_CONG=0)<<6; rest reserved */ /* inq_dout[1] = (RMD=0)<<7 | (LU_CONG=0)<<6 | (HOT_PLUG=0)<<4; */
inq_dout[2] = 6; /* version: SPC-4 */ inq_dout[2] = 6; /* version: SPC-4 */
inq_dout[3] = 2; /* NORMACA=0, HISUP=0, response data format: 2 */ inq_dout[3] = 2; /* NORMACA=0, HISUP=0, response data format: 2 */
inq_dout[4] = 31; /* so response length is (or could be) 36 bytes */ inq_dout[4] = 31; /* so response length is (or could be) 36 bytes */
inq_dout[6] = ptp->dev_stat.enc_serv ? 0x40 : 0; inq_dout[6] = ptp->dev_stat.enc_serv ? 0x40 : 0;
inq_dout[7] = 0x2; /* CMDQUE=1 */ inq_dout[7] = 0x2; /* CMDQUE=1 */
memcpy(inq_dout + 8, nvme_scsi_vendor_str, 8); /* NVMe not Intel */ memcpy(inq_dout + 8, nvme_scsi_vendor_str, 8); /* NVMe not Intel */
memcpy(inq_dout + 16, ptp->nvme_id_ctlp + 24, 16); /* Prod <-- MN */ memcpy(inq_dout + 16, ptp->nvme_id_ctlp + 24, 16); /* Prod <-- MN */
memcpy(inq_dout + 32, ptp->nvme_id_ctlp + 64, 4); /* Rev <-- FR */ memcpy(inq_dout + 32, ptp->nvme_id_ctlp + 64, 4); /* Rev <-- FR */
if (alloc_len > 0) { if (alloc_len > 0) {
n = (alloc_len < inq_resp_len) ? alloc_len : inq_resp_len; n = (alloc_len < inq_resp_len) ? alloc_len : inq_resp_len;
skipping to change at line 623 skipping to change at line 683
uint32_t alloc_len, k, n, num, max_nsid; uint32_t alloc_len, k, n, num, max_nsid;
uint8_t * rl_doutp; uint8_t * rl_doutp;
uint8_t * up; uint8_t * up;
if (vb > 3) if (vb > 3)
pr2ws("%s: time_secs=%d\n", __func__, time_secs); pr2ws("%s: time_secs=%d\n", __func__, time_secs);
sel_report = cdbp[2]; sel_report = cdbp[2];
alloc_len = sg_get_unaligned_be32(cdbp + 6); alloc_len = sg_get_unaligned_be32(cdbp + 6);
if (NULL == ptp->nvme_id_ctlp) { if (NULL == ptp->nvme_id_ctlp) {
res = sntl_cache_identity(ptp, time_secs, vb); res = sntl_cache_identify(ptp, time_secs, vb);
if (SG_LIB_NVME_STATUS == res) { if (SG_LIB_NVME_STATUS == res) {
mk_sense_from_nvme_status(ptp, vb); mk_sense_from_nvme_status(ptp, vb);
return 0; return 0;
} else if (res) } else if (res)
return res; return res;
} }
max_nsid = sg_get_unaligned_le32(ptp->nvme_id_ctlp + 516); max_nsid = sg_get_unaligned_le32(ptp->nvme_id_ctlp + 516);
switch (sel_report) { switch (sel_report) {
case 0: case 0:
case 2: case 2:
skipping to change at line 679 skipping to change at line 739
res = 0; res = 0;
free(rl_doutp); free(rl_doutp);
return res; return res;
} }
static int static int
sntl_tur(struct sg_pt_linux_scsi * ptp, int time_secs, int vb) sntl_tur(struct sg_pt_linux_scsi * ptp, int time_secs, int vb)
{ {
int res; int res;
uint32_t pow_state; uint32_t pow_state;
struct sg_nvme_passthru_cmd cmd;
if (vb > 4) if (vb > 4)
pr2ws("%s: time_secs=%d\n", __func__, time_secs); pr2ws("%s: start\n", __func__);
if (NULL == ptp->nvme_id_ctlp) { if (NULL == ptp->nvme_id_ctlp) {
res = sntl_cache_identity(ptp, time_secs, vb); res = sntl_cache_identify(ptp, time_secs, vb);
if (SG_LIB_NVME_STATUS == res) { if (SG_LIB_NVME_STATUS == res) {
mk_sense_from_nvme_status(ptp, vb); mk_sense_from_nvme_status(ptp, vb);
return 0; return 0;
} else if (res) } else if (res)
return res; return res;
} }
memset(&cmd, 0, sizeof(cmd)); res = sntl_get_features(ptp, 2 /* Power Management */, 0 /* current */,
cmd.opcode = 0xa; /* Get feature */ 0, 0, time_secs, vb);
cmd.nsid = SG_NVME_BROADCAST_NSID;
cmd.cdw10 = 0x2; /* SEL=0 (current), Feature=2 Power Management */
cmd.timeout_ms = (time_secs < 0) ? 0 : (1000 * time_secs);
res = sg_nvme_admin_cmd(ptp, &cmd, NULL, false, time_secs, vb);
if (0 != res) { if (0 != res) {
if (SG_LIB_NVME_STATUS == res) { if (SG_LIB_NVME_STATUS == res) {
mk_sense_from_nvme_status(ptp, vb); mk_sense_from_nvme_status(ptp, vb);
return 0; return 0;
} else } else
return res; return res;
} else {
ptp->os_err = 0;
ptp->nvme_status = 0;
} }
pow_state = (0x1f & ptp->nvme_result); pow_state = (0x1f & ptp->nvme_result);
if (vb > 3) if (vb > 3)
pr2ws("%s: pow_state=%u\n", __func__, pow_state); pr2ws("%s: pow_state=%u\n", __func__, pow_state);
#if 0 /* pow_state bounces around too much on laptop */ #if 0 /* pow_state bounces around too much on laptop */
if (pow_state) if (pow_state)
mk_sense_asc_ascq(ptp, SPC_SK_NOT_READY, LOW_POWER_COND_ON_ASC, 0, mk_sense_asc_ascq(ptp, SPC_SK_NOT_READY, LOW_POWER_COND_ON_ASC, 0,
vb); vb);
#endif #endif
return 0; return 0;
} }
static int static int
sntl_req_sense(struct sg_pt_linux_scsi * ptp, const uint8_t * cdbp, sntl_req_sense(struct sg_pt_linux_scsi * ptp, const uint8_t * cdbp,
int time_secs, int vb) int time_secs, int vb)
{ {
bool desc; bool desc;
int res; int res;
uint32_t pow_state, alloc_len, n; uint32_t pow_state, alloc_len, n;
struct sg_nvme_passthru_cmd cmd;
uint8_t rs_dout[64]; uint8_t rs_dout[64];
if (vb > 3) if (vb > 3)
pr2ws("%s: time_secs=%d\n", __func__, time_secs); pr2ws("%s: time_secs=%d\n", __func__, time_secs);
if (NULL == ptp->nvme_id_ctlp) { if (NULL == ptp->nvme_id_ctlp) {
res = sntl_cache_identity(ptp, time_secs, vb); res = sntl_cache_identify(ptp, time_secs, vb);
if (SG_LIB_NVME_STATUS == res) { if (SG_LIB_NVME_STATUS == res) {
mk_sense_from_nvme_status(ptp, vb); mk_sense_from_nvme_status(ptp, vb);
return 0; return 0;
} else if (res) } else if (res)
return res; return res;
} }
desc = !!(0x1 & cdbp[1]); desc = !!(0x1 & cdbp[1]);
alloc_len = cdbp[4]; alloc_len = cdbp[4];
memset(&cmd, 0, sizeof(cmd)); res = sntl_get_features(ptp, 0x2 /* Power Management */, 0 /* current */,
cmd.opcode = 0xa; /* Get feature */ 0, 0, time_secs, vb);
cmd.nsid = SG_NVME_BROADCAST_NSID;
cmd.cdw10 = 0x2; /* SEL=0 (current), Feature=2 Power Management */
cmd.timeout_ms = (time_secs < 0) ? 0 : (1000 * time_secs);
res = sg_nvme_admin_cmd(ptp, &cmd, NULL, false, time_secs, vb);
if (0 != res) { if (0 != res) {
if (SG_LIB_NVME_STATUS == res) { if (SG_LIB_NVME_STATUS == res) {
mk_sense_from_nvme_status(ptp, vb); mk_sense_from_nvme_status(ptp, vb);
return 0; return 0;
} else } else
return res; return res;
} else {
ptp->os_err = 0;
ptp->nvme_status = 0;
} }
ptp->io_hdr.response_len = 0; ptp->io_hdr.response_len = 0;
pow_state = (0x1f & ptp->nvme_result); pow_state = (0x1f & ptp->nvme_result);
if (vb > 3) if (vb > 3)
pr2ws("%s: pow_state=%u\n", __func__, pow_state); pr2ws("%s: pow_state=%u\n", __func__, pow_state);
memset(rs_dout, 0, sizeof(rs_dout)); memset(rs_dout, 0, sizeof(rs_dout));
if (pow_state) if (pow_state)
sg_build_sense_buffer(desc, rs_dout, SPC_SK_NO_SENSE, sg_build_sense_buffer(desc, rs_dout, SPC_SK_NO_SENSE,
LOW_POWER_COND_ON_ASC, 0); LOW_POWER_COND_ON_ASC, 0);
else else
skipping to change at line 776 skipping to change at line 820
NO_ADDITIONAL_SENSE, 0); NO_ADDITIONAL_SENSE, 0);
n = desc ? 8 : 18; n = desc ? 8 : 18;
n = (n < alloc_len) ? n : alloc_len; n = (n < alloc_len) ? n : alloc_len;
n = (n < ptp->io_hdr.din_xfer_len) ? n : ptp->io_hdr.din_xfer_len; n = (n < ptp->io_hdr.din_xfer_len) ? n : ptp->io_hdr.din_xfer_len;
ptp->io_hdr.din_resid = ptp->io_hdr.din_xfer_len - n; ptp->io_hdr.din_resid = ptp->io_hdr.din_xfer_len - n;
if (n > 0) if (n > 0)
memcpy((uint8_t *)(sg_uintptr_t)ptp->io_hdr.din_xferp, rs_dout, n); memcpy((uint8_t *)(sg_uintptr_t)ptp->io_hdr.din_xferp, rs_dout, n);
return 0; return 0;
} }
static uint8_t pc_t10_2_select[] = {0, 3, 1, 2};
/* For MODE SENSE(10) and MODE SELECT(10). 6 byte variants not supported */
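A gloss on this lookup table (assuming the usual encodings of both fields): the index is the SCSI MODE SENSE(10) page control (PC) value (0 current, 1 changeable, 2 default, 3 saved) and the entry is the NVMe Get Features SEL value (0 current, 1 default, 2 saved, 3 supported capabilities). For example, a MODE SENSE(10) with PC=2 (default values) is translated below into a Get Features with SEL = pc_t10_2_select[2] = 1 (default).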
static int static int
sntl_mode_ss(struct sg_pt_linux_scsi * ptp, const uint8_t * cdbp, sntl_mode_ss(struct sg_pt_linux_scsi * ptp, const uint8_t * cdbp,
int time_secs, int vb) int time_secs, int vb)
{ {
bool is_msense = (SCSI_MODE_SENSE10_OPC == cdbp[0]); bool is_msense = (SCSI_MODE_SENSE10_OPC == cdbp[0]);
int res, n, len; int res, n, len;
uint8_t * bp; uint8_t * bp;
struct sg_sntl_result_t sntl_result; struct sg_sntl_result_t sntl_result;
if (vb > 3) if (vb > 3)
pr2ws("%s: mse%s, time_secs=%d\n", __func__, pr2ws("%s: mode se%s\n", __func__, (is_msense ? "nse" : "lect"));
(is_msense ? "nse" : "lect"), time_secs);
if (NULL == ptp->nvme_id_ctlp) { if (NULL == ptp->nvme_id_ctlp) {
res = sntl_cache_identity(ptp, time_secs, vb); res = sntl_cache_identify(ptp, time_secs, vb);
if (SG_LIB_NVME_STATUS == res) { if (SG_LIB_NVME_STATUS == res) {
mk_sense_from_nvme_status(ptp, vb); mk_sense_from_nvme_status(ptp, vb);
return 0; return 0;
} else if (res) } else if (res)
return res; return res;
} }
if (is_msense) { /* MODE SENSE(10) */ if (is_msense) { /* MODE SENSE(10) */
uint8_t pc_t10 = (cdbp[2] >> 6) & 0x3;
int mp_t10 = (cdbp[2] & 0x3f);
if ((0x3f == mp_t10) || (0x8 /* caching mpage */ == mp_t10)) {
/* 0x6 is "Volatile write cache" feature id */
res = sntl_get_features(ptp, 0x6, pc_t10_2_select[pc_t10], 0,
0, time_secs, vb);
if (0 != res) {
if (SG_LIB_NVME_STATUS == res) {
mk_sense_from_nvme_status(ptp, vb);
return 0;
} else
return res;
}
ptp->dev_stat.wce = !!(0x1 & ptp->nvme_result);
}
len = ptp->io_hdr.din_xfer_len; len = ptp->io_hdr.din_xfer_len;
bp = (uint8_t *)(sg_uintptr_t)ptp->io_hdr.din_xferp; bp = (uint8_t *)(sg_uintptr_t)ptp->io_hdr.din_xferp;
n = sntl_resp_mode_sense10(&ptp->dev_stat, cdbp, bp, len, n = sntl_resp_mode_sense10(&ptp->dev_stat, cdbp, bp, len,
&sntl_result); &sntl_result);
ptp->io_hdr.din_resid = (n >= 0) ? len - n : len; ptp->io_hdr.din_resid = (n >= 0) ? len - n : len;
} else { /* MODE SELECT(10) */ } else { /* MODE SELECT(10) */
bool sp = !!(0x1 & cdbp[1]); /* Save Page indication */
uint8_t pre_enc_ov = ptp->dev_stat.enclosure_override; uint8_t pre_enc_ov = ptp->dev_stat.enclosure_override;
len = ptp->io_hdr.dout_xfer_len; len = ptp->io_hdr.dout_xfer_len;
bp = (uint8_t *)(sg_uintptr_t)ptp->io_hdr.dout_xferp; bp = (uint8_t *)(sg_uintptr_t)ptp->io_hdr.dout_xferp;
ptp->dev_stat.wce_changed = false;
n = sntl_resp_mode_select10(&ptp->dev_stat, cdbp, bp, len, n = sntl_resp_mode_select10(&ptp->dev_stat, cdbp, bp, len,
&sntl_result); &sntl_result);
if (ptp->dev_stat.wce_changed) {
uint32_t nsid = ptp->nvme_nsid;
struct sg_nvme_passthru_cmd cmd;
struct sg_nvme_passthru_cmd * cmdp = &cmd;
ptp->dev_stat.wce_changed = false;
memset(cmdp, 0, sizeof(*cmdp));
cmdp->opcode = SG_NVME_AD_SET_FEATURE;
cmdp->nsid = nsid ? nsid : SG_NVME_BROADCAST_NSID;
cmdp->cdw10 = 0x6; /* "Volatile write cache" feature id */
if (sp)
cmdp->cdw10 |= (1 << 31);
cmdp->cdw11 = (uint32_t)ptp->dev_stat.wce;
cmdp->timeout_ms = (time_secs < 0) ? 0 : (1000 * time_secs);
res = sg_nvme_admin_cmd(ptp, cmdp, NULL, false, time_secs, vb);
if (0 != res) {
if (SG_LIB_NVME_STATUS == res) {
mk_sense_from_nvme_status(ptp, vb);
return 0;
} else
return res;
}
ptp->os_err = 0;
ptp->nvme_status = 0;
}
if (pre_enc_ov != ptp->dev_stat.enclosure_override) if (pre_enc_ov != ptp->dev_stat.enclosure_override)
sntl_check_enclosure_override(ptp, vb); /* ENC_OV has changed */ sntl_check_enclosure_override(ptp, vb); /* ENC_OV has changed */
} }
if (n < 0) { if (n < 0) {
int in_bit = (255 == sntl_result.in_bit) ? (int)sntl_result.in_bit : int in_bit = (255 == sntl_result.in_bit) ? (int)sntl_result.in_bit :
-1; -1;
if ((SAM_STAT_CHECK_CONDITION == sntl_result.sstatus) && if ((SAM_STAT_CHECK_CONDITION == sntl_result.sstatus) &&
(SPC_SK_ILLEGAL_REQUEST == sntl_result.sk)) { (SPC_SK_ILLEGAL_REQUEST == sntl_result.sk)) {
if (INVALID_FIELD_IN_CDB == sntl_result.asc) if (INVALID_FIELD_IN_CDB == sntl_result.asc)
mk_sense_invalid_fld(ptp, true, sntl_result.in_byte, in_bit, mk_sense_invalid_fld(ptp, true, sntl_result.in_byte, in_bit,
skipping to change at line 931 skipping to change at line 1020
} }
dpg_cd = dop[0]; dpg_cd = dop[0];
dpg_len = sg_get_unaligned_be16(dop + 2) + 4; dpg_len = sg_get_unaligned_be16(dop + 2) + 4;
/* should we allow for more than one D_PG in dout ?? */ /* should we allow for more than one D_PG in dout ?? */
n = (n < dpg_len) ? n : dpg_len; /* not yet ... */ n = (n < dpg_len) ? n : dpg_len; /* not yet ... */
if (vb) if (vb)
pr2ws("%s: passing through d_pg=0x%x, len=%u to NVME_MI SES send\n", pr2ws("%s: passing through d_pg=0x%x, len=%u to NVME_MI SES send\n",
__func__, dpg_cd, dpg_len); __func__, dpg_cd, dpg_len);
memset(&cmd, 0, sizeof(cmd)); memset(&cmd, 0, sizeof(cmd));
cmd.opcode = 0x1d; /* MI send; hmmm same opcode as SEND DIAG */ cmd.opcode = SG_NVME_AD_MI_SEND;
cmd.addr = (uint64_t)(sg_uintptr_t)dop; cmd.addr = (uint64_t)(sg_uintptr_t)dop;
cmd.data_len = 0x1000; /* NVMe 4k page size. Maybe determine this? */ cmd.data_len = 0x1000; /* NVMe 4k page size. Maybe determine this? */
/* dout_len > 0x1000, is this a problem?? */ /* dout_len > 0x1000, is this a problem?? */
cmd.cdw10 = 0x0804; /* NVMe Message Header */ cmd.cdw10 = 0x0804; /* NVMe Message Header */
cmd.cdw11 = 0x9; /* nvme_mi_ses_send; (0x8 -> mi_ses_recv) */ cmd.cdw11 = 0x9; /* nvme_mi_ses_send; (0x8 -> mi_ses_recv) */
cmd.cdw13 = n; cmd.cdw13 = n;
res = sg_nvme_admin_cmd(ptp, &cmd, dop, false, time_secs, vb); res = sg_nvme_admin_cmd(ptp, &cmd, dop, false, time_secs, vb);
if (0 != res) { if (0 != res) {
if (SG_LIB_NVME_STATUS == res) { if (SG_LIB_NVME_STATUS == res) {
mk_sense_from_nvme_status(ptp, vb); mk_sense_from_nvme_status(ptp, vb);
skipping to change at line 986 skipping to change at line 1075
if (vb) if (vb)
pr2ws("%s: din [0x%" PRIx64 "] not page aligned\n", __func__, pr2ws("%s: din [0x%" PRIx64 "] not page aligned\n", __func__,
(uint64_t)ptp->io_hdr.din_xferp); (uint64_t)ptp->io_hdr.din_xferp);
return SCSI_PT_DO_BAD_PARAMS; return SCSI_PT_DO_BAD_PARAMS;
} }
if (vb) if (vb)
pr2ws("%s: expecting d_pg=0x%x from NVME_MI SES receive\n", __func__, pr2ws("%s: expecting d_pg=0x%x from NVME_MI SES receive\n", __func__,
dpg_cd); dpg_cd);
memset(&cmd, 0, sizeof(cmd)); memset(&cmd, 0, sizeof(cmd));
cmd.opcode = 0x1e; /* MI receive */ cmd.opcode = SG_NVME_AD_MI_RECEIVE;
cmd.addr = (uint64_t)(sg_uintptr_t)dip; cmd.addr = (uint64_t)(sg_uintptr_t)dip;
cmd.data_len = 0x1000; /* NVMe 4k page size. Maybe determine this? */ cmd.data_len = 0x1000; /* NVMe 4k page size. Maybe determine this? */
/* din_len > 0x1000, is this a problem?? */ /* din_len > 0x1000, is this a problem?? */
cmd.cdw10 = 0x0804; /* NVMe Message Header */ cmd.cdw10 = 0x0804; /* NVMe Message Header */
cmd.cdw11 = 0x8; /* nvme_mi_ses_receive */ cmd.cdw11 = 0x8; /* nvme_mi_ses_receive */
cmd.cdw12 = dpg_cd; cmd.cdw12 = dpg_cd;
cmd.cdw13 = n; cmd.cdw13 = n;
res = sg_nvme_admin_cmd(ptp, &cmd, dip, true, time_secs, vb); res = sg_nvme_admin_cmd(ptp, &cmd, dip, true, time_secs, vb);
if (0 != res) { if (0 != res) {
if (SG_LIB_NVME_STATUS == res) { if (SG_LIB_NVME_STATUS == res) {
skipping to change at line 1163 skipping to change at line 1252
if (len > 0) if (len > 0)
memcpy((uint8_t *)(sg_uintptr_t)ptp->io_hdr.din_xferp, arr, len); memcpy((uint8_t *)(sg_uintptr_t)ptp->io_hdr.din_xferp, arr, len);
return 0; return 0;
} }
/* Note that the "Returned logical block address" (RLBA) field in the SCSI /* Note that the "Returned logical block address" (RLBA) field in the SCSI
* READ CAPACITY (10+16) command's response provides the address of the _last_ * READ CAPACITY (10+16) command's response provides the address of the _last_
* LBA (counting origin 0) which will be one less than the "size" in the * LBA (counting origin 0) which will be one less than the "size" in the
* NVMe Identify command response's NSZE field. One problem is that in * NVMe Identify command response's NSZE field. One problem is that in
* some situations NSZE can be zero: temporarily set RLBA field to 0 * some situations NSZE can be zero: temporarily set RLBA field to 0
* (implying a 1 LB logical units size) pending further research. */ * (implying a 1 LB logical units size) pending further research. The LBLIB
* is the "Logical Block Length In Bytes" field in the RCAP response. */
static int static int
sntl_readcap(struct sg_pt_linux_scsi * ptp, const uint8_t * cdbp, sntl_readcap(struct sg_pt_linux_scsi * ptp, const uint8_t * cdbp,
int time_secs, int vb) int time_secs, int vb)
{ {
bool is_rcap10 = (SCSI_READ_CAPACITY10_OPC == cdbp[0]); bool is_rcap10 = (SCSI_READ_CAPACITY10_OPC == cdbp[0]);
int res, n, len, alloc_len, dps; int res, n, len, alloc_len, dps;
uint8_t flbas, index, lbads; uint8_t flbas, index, lbads; /* NVMe: 2**LBADS --> Logical Block size */
uint32_t lbafx; /* "x" is 0 to 15 in NVMe spec */ uint32_t lbafx; /* NVME: LBAF0...LBAF15, each 16 bytes */
uint32_t pg_sz = sg_get_page_size(); uint32_t pg_sz = sg_get_page_size();
uint64_t nsze; uint64_t nsze;
uint8_t * bp; uint8_t * bp;
uint8_t * up; uint8_t * up;
uint8_t * free_up = NULL; uint8_t * free_up = NULL;
uint8_t resp[32]; uint8_t resp[32];
if (vb > 3) if (vb > 3)
pr2ws("%s: RCAP%d, time_secs=%d\n", __func__, pr2ws("%s: RCAP%d, time_secs=%d\n", __func__,
(is_rcap10 ? 10 : 16), time_secs); (is_rcap10 ? 10 : 16), time_secs);
skipping to change at line 1193 skipping to change at line 1283
if (NULL == up) { if (NULL == up) {
pr2ws("%s: sg_memalign() failed to get memory\n", __func__); pr2ws("%s: sg_memalign() failed to get memory\n", __func__);
return sg_convert_errno(ENOMEM); return sg_convert_errno(ENOMEM);
} }
res = sntl_do_identify(ptp, 0x0 /* CNS */, ptp->nvme_nsid, time_secs, res = sntl_do_identify(ptp, 0x0 /* CNS */, ptp->nvme_nsid, time_secs,
pg_sz, up, vb); pg_sz, up, vb);
if (res < 0) { if (res < 0) {
res = sg_convert_errno(-res); res = sg_convert_errno(-res);
goto fini; goto fini;
} }
memset(resp, 0, sizeof(*resp)); memset(resp, 0, sizeof(resp));
nsze = sg_get_unaligned_le64(up + 0); nsze = sg_get_unaligned_le64(up + 0);
flbas = up[26]; flbas = up[26]; /* NVME FLBAS field from Identify, want LBAF[flbas] */
index = 128 + (4 * (flbas & 0xf)); index = 128 + (4 * (flbas & 0xf));
lbafx = sg_get_unaligned_le32(up + index); lbafx = sg_get_unaligned_le32(up + index);
lbads = (lbafx >> 16) & 0xff; /* bits 16 to 23 inclusive, pow2 */ lbads = (lbafx >> 16) & 0xff; /* bits 16 to 23 inclusive, pow2 */
if (is_rcap10) { if (is_rcap10) {
alloc_len = 8; /* implicit, not in cdb */ alloc_len = 8; /* implicit, not in cdb */
if (nsze > 0xffffffff) if (nsze > 0xffffffff)
sg_put_unaligned_be32(0xffffffff, resp + 0); sg_put_unaligned_be32(0xffffffff, resp + 0);
else if (0 == nsze) /* no good answer here */ else if (0 == nsze) /* no good answer here */
sg_put_unaligned_be32(0, resp + 0); sg_put_unaligned_be32(0, resp + 0); /* SCSI RLBA field */
else else
sg_put_unaligned_be32((uint32_t)(nsze - 1), resp + 0); sg_put_unaligned_be32((uint32_t)(nsze - 1), resp + 0);
sg_put_unaligned_be32(1 << lbads, resp + 4); /* RLBA field */ sg_put_unaligned_be32(1 << lbads, resp + 4); /* SCSI LBLIB field */
} else { } else {
alloc_len = sg_get_unaligned_be32(cdbp + 10); alloc_len = sg_get_unaligned_be32(cdbp + 10);
dps = up[29]; dps = up[29];
if (0x7 & dps) { if (0x7 & dps) {
resp[12] = 0x1; resp[12] = 0x1;
n = (0x7 & dps) - 1; n = (0x7 & dps) - 1;
if (n > 0) if (n > 0)
resp[12] |= (n + n); resp[12] |= (n + n);
} }
if (0 == nsze) /* no good answer here */ if (0 == nsze) /* no good answer here */
sg_put_unaligned_be64(0, resp + 0); sg_put_unaligned_be64(0, resp + 0);
else else
sg_put_unaligned_be64(nsze - 1, resp + 0); sg_put_unaligned_be64(nsze - 1, resp + 0);
sg_put_unaligned_be32(1 << lbads, resp + 8); /* RLBA field */ sg_put_unaligned_be32(1 << lbads, resp + 8); /* SCSI LBLIB field */
} }
len = ptp->io_hdr.din_xfer_len; len = ptp->io_hdr.din_xfer_len;
bp = (uint8_t *)(sg_uintptr_t)ptp->io_hdr.din_xferp; bp = (uint8_t *)(sg_uintptr_t)ptp->io_hdr.din_xferp;
n = 16; n = 32;
n = (n < alloc_len) ? n : alloc_len; n = (n < alloc_len) ? n : alloc_len;
n = (n < len) ? n : len; n = (n < len) ? n : len;
ptp->io_hdr.din_resid = len - n; ptp->io_hdr.din_resid = len - n;
if (n > 0) if (n > 0)
memcpy(bp, resp, n); memcpy(bp, resp, n);
fini: fini:
if (free_up) if (free_up)
free(free_up); free(free_up);
return res; return res;
} }
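The Identify Namespace arithmetic used by sntl_readcap() above can be summarized in a compact sketch (illustrative only; the helper name is hypothetical, and the byte offsets are the ones the code itself uses: FLBAS at byte 26, LBAF descriptors from byte 128, LBADS in bits 23:16 of the selected descriptor):

    #include <stdint.h>

    /* 'id_ns' points at a CNS=0 Identify Namespace response (4096 bytes).
     * Returns the SCSI READ CAPACITY "last LBA" (RLBA) and logical block
     * length in bytes (LBLIB), mirroring sntl_readcap() above. */
    static void nvme_id_ns_to_rcap(const uint8_t * id_ns, uint64_t * rlbap,
                                   uint32_t * lblibp)
    {
        uint64_t nsze = 0;
        uint8_t flbas, lbads;
        int index, k;

        for (k = 7; k >= 0; --k)            /* NSZE: bytes 0..7, little-endian */
            nsze = (nsze << 8) | id_ns[k];
        flbas = id_ns[26];                  /* selects the active LBA format */
        index = 128 + (4 * (flbas & 0xf));  /* LBAF[flbas], 4 bytes each */
        lbads = id_ns[index + 2];           /* bits 23:16 of LBAFx, a power of 2 */

        *lblibp = (uint32_t)1 << lbads;     /* e.g. LBADS=9 --> 512 bytes */
        *rlbap = (nsze > 0) ? nsze - 1 : 0; /* last LBA, counting from 0 */
    }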
static int
do_nvm_pt_low(struct sg_pt_linux_scsi * ptp,
struct sg_nvme_passthru_cmd *cmdp, void * dp, int dlen,
bool is_read, int time_secs, int vb)
{
const uint32_t cmd_len = sizeof(struct sg_nvme_passthru_cmd);
int res;
uint32_t n;
uint16_t sct_sc;
const uint8_t * up = ((const uint8_t *)cmdp) + SG_NVME_PT_OPCODE;
char nam[64];
if (vb)
sg_get_nvme_opcode_name(*up, false /* NVM */ , sizeof(nam), nam);
else
nam[0] = '\0';
cmdp->timeout_ms = (time_secs < 0) ? (-time_secs) : (1000 * time_secs);
ptp->os_err = 0;
if (vb > 2) {
pr2ws("NVMe NVM command: %s\n", nam);
hex2stderr((const uint8_t *)cmdp, cmd_len, 1);
if ((vb > 3) && (! is_read) && dp) {
if (dlen > 0) {
n = dlen;
if ((dlen < 512) || (vb > 5))
pr2ws("\nData-out buffer (%u bytes):\n", n);
else {
pr2ws("\nData-out buffer (first 512 of %u bytes):\n", n);
n = 512;
}
hex2stderr((const uint8_t *)dp, n, 0);
}
}
}
res = ioctl(ptp->dev_fd, NVME_IOCTL_IO_CMD, cmdp);
if (res < 0) { /* OS error (errno negated) */
ptp->os_err = -res;
if (vb > 1) {
pr2ws("%s: ioctl for %s [0x%x] failed: %s "
"(errno=%d)\n", __func__, nam, *up, strerror(-res), -res);
}
return res;
}
/* Now res contains NVMe completion queue CDW3 31:17 (15 bits) */
ptp->nvme_result = cmdp->result;
if ((! ptp->nvme_our_sntl) && ptp->io_hdr.response &&
(ptp->io_hdr.max_response_len > 3)) {
/* build 32 byte "sense" buffer */
uint8_t * sbp = (uint8_t *)(sg_uintptr_t)ptp->io_hdr.response;
uint16_t st = (uint16_t)res;
n = ptp->io_hdr.max_response_len;
n = (n < 32) ? n : 32;
memset(sbp, 0 , n);
ptp->io_hdr.response_len = n;
sg_put_unaligned_le32(cmdp->result,
sbp + SG_NVME_PT_CQ_RESULT);
if (n > 15) /* LSBit will be 0 (Phase bit) after (st << 1) */
sg_put_unaligned_le16(st << 1, sbp + SG_NVME_PT_CQ_STATUS_P);
}
/* clear upper bits (DNR and More) leaving ((SCT << 8) | SC) */
sct_sc = 0x7ff & res; /* 11 bits */
ptp->nvme_status = sct_sc;
ptp->nvme_stat_dnr = !!(0x4000 & res);
ptp->nvme_stat_more = !!(0x2000 & res);
if (sct_sc) { /* when non-zero, treat as command error */
if (vb > 1) {
char b[80];
pr2ws("%s: ioctl for %s [0x%x] failed, status: %s [0x%x]\n",
__func__, nam, *up,
sg_get_nvme_cmd_status_str(sct_sc, sizeof(b), b), sct_sc);
}
return SG_LIB_NVME_STATUS; /* == SCSI_PT_DO_NVME_STATUS */
}
if ((vb > 3) && is_read && dp) {
if (dlen > 0) {
n = dlen;
if ((dlen < 1024) || (vb > 5))
pr2ws("\nData-in buffer (%u bytes):\n", n);
else {
pr2ws("\nData-in buffer (first 1024 of %u bytes):\n", n);
n = 1024;
}
hex2stderr((const uint8_t *)dp, n, 0);
}
}
return 0;
}
/* Since ptp can be a char device (e.g. /dev/nvme0) or a blocks device
* (e.g. /dev/nvme0n1 or /dev/nvme0n1p3) use NVME_IOCTL_IO_CMD which is
* common to both (and takes a timeout). The difficulty is that
* NVME_IOCTL_IO_CMD takes a nvme_passthru_cmd object pointer. */
static int
sntl_do_nvm_cmd(struct sg_pt_linux_scsi * ptp, struct sg_nvme_user_io * iop,
uint32_t dlen, bool is_read, int time_secs, int vb)
{
struct sg_nvme_passthru_cmd nvme_pt_cmd;
struct sg_nvme_passthru_cmd *cmdp = &nvme_pt_cmd;
void * dp = (void *)(sg_uintptr_t)iop->addr;
memset(cmdp, 0, sizeof(*cmdp));
cmdp->opcode = iop->opcode;
cmdp->flags = iop->flags;
cmdp->nsid = ptp->nvme_nsid;
cmdp->addr = iop->addr;
cmdp->data_len = dlen;
cmdp->cdw10 = iop->slba & 0xffffffff;
cmdp->cdw11 = (iop->slba >> 32) & 0xffffffff;
cmdp->cdw12 = iop->nblocks; /* lower 16 bits already "0's based" count */
return do_nvm_pt_low(ptp, cmdp, dp, dlen, is_read, time_secs, vb);
}
static int
sntl_read(struct sg_pt_linux_scsi * ptp, const uint8_t * cdbp,
int time_secs, int vb)
{
bool is_read10 = (SCSI_READ10_OPC == cdbp[0]);
bool have_fua = !!(cdbp[1] & 0x8);
int res;
int nblks_t10 = 0;
struct sg_nvme_user_io io;
struct sg_nvme_user_io * iop = &io;
if (vb > 3)
pr2ws("%s: fua=%d, time_secs=%d\n", __func__, (int)have_fua,
time_secs);
memset(iop, 0, sizeof(*iop));
iop->opcode = SG_NVME_NVM_READ;
if (is_read10) {
iop->slba = sg_get_unaligned_be32(cdbp + 2);
nblks_t10 = sg_get_unaligned_be16(cdbp + 7);
} else {
uint32_t num = sg_get_unaligned_be32(cdbp + 10);
iop->slba = sg_get_unaligned_be64(cdbp + 2);
if (num > (UINT16_MAX + 1)) {
mk_sense_invalid_fld(ptp, true, 11, -1, vb);
return 0;
} else
nblks_t10 = num;
}
if (0 == nblks_t10) { /* NOP in SCSI */
if (vb > 4)
pr2ws("%s: nblks_t10 is 0, a NOP in SCSI, can't map to NVMe\n",
__func__);
return 0;
}
iop->nblocks = nblks_t10 - 1; /* crazy "0's based" */
if (have_fua)
iop->nblocks |= SG_NVME_NVM_CDW12_FUA;
iop->addr = (uint64_t)ptp->io_hdr.din_xferp;
res = sntl_do_nvm_cmd(ptp, iop, ptp->io_hdr.din_xfer_len,
true /* is_read */, time_secs, vb);
if (SG_LIB_NVME_STATUS == res) {
mk_sense_from_nvme_status(ptp, vb);
return 0;
}
return res;
}
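To make the "0's based" block count and the FUA handling above concrete, a small worked sketch (illustrative; the function name is hypothetical): a SCSI READ(10) with LBA 0x1000, transfer length 8 and FUA=1 maps to an NVMe Read with SLBA 0x1000 (CDW10/CDW11) and CDW12 = 0x40000007.

    #include <stdint.h>

    /* Build the NVMe Read/Write CDW12 from a non-zero SCSI transfer length
     * and the FUA bit, mirroring the nblocks handling in sntl_read() and
     * sntl_write() above (a zero length is a SCSI NOP and never gets here). */
    static uint32_t scsi_len_to_cdw12(uint32_t transfer_len_t10, int fua)
    {
        uint32_t cdw12 = (transfer_len_t10 - 1) & 0xffff;   /* "0's based" NLB */

        if (fua)
            cdw12 |= (uint32_t)1 << 30;     /* SG_NVME_NVM_CDW12_FUA */
        return cdw12;
    }

    /* scsi_len_to_cdw12(8, 1) == 0x40000007 */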
static int
sntl_write(struct sg_pt_linux_scsi * ptp, const uint8_t * cdbp,
int time_secs, int vb)
{
bool is_write10 = (SCSI_WRITE10_OPC == cdbp[0]);
bool have_fua = !!(cdbp[1] & 0x8);
int res;
int nblks_t10 = 0;
struct sg_nvme_user_io io;
struct sg_nvme_user_io * iop = &io;
if (vb > 3)
pr2ws("%s: fua=%d, time_secs=%d\n", __func__, (int)have_fua,
time_secs);
memset(iop, 0, sizeof(*iop));
iop->opcode = SG_NVME_NVM_WRITE;
if (is_write10) {
iop->slba = sg_get_unaligned_be32(cdbp + 2);
nblks_t10 = sg_get_unaligned_be16(cdbp + 7);
} else {
uint32_t num = sg_get_unaligned_be32(cdbp + 10);
iop->slba = sg_get_unaligned_be64(cdbp + 2);
if (num > (UINT16_MAX + 1)) {
mk_sense_invalid_fld(ptp, true, 11, -1, vb);
return 0;
} else
nblks_t10 = num;
}
if (0 == nblks_t10) { /* NOP in SCSI */
if (vb > 4)
pr2ws("%s: nblks_t10 is 0, a NOP in SCSI, can't map to NVMe\n",
__func__);
return 0;
}
iop->nblocks = nblks_t10 - 1;
if (have_fua)
iop->nblocks |= SG_NVME_NVM_CDW12_FUA;
iop->addr = (uint64_t)ptp->io_hdr.dout_xferp;
res = sntl_do_nvm_cmd(ptp, iop, ptp->io_hdr.dout_xfer_len, false,
time_secs, vb);
if (SG_LIB_NVME_STATUS == res) {
mk_sense_from_nvme_status(ptp, vb);
return 0;
}
return res;
}
static int
sntl_verify(struct sg_pt_linux_scsi * ptp, const uint8_t * cdbp,
int time_secs, int vb)
{
bool is_verify10 = (SCSI_VERIFY10_OPC == cdbp[0]);
uint8_t bytchk = (cdbp[1] >> 1) & 0x3;
uint32_t dlen = 0;
int res;
int nblks_t10 = 0;
struct sg_nvme_user_io io;
struct sg_nvme_user_io * iop = &io;
if (vb > 3)
pr2ws("%s: bytchk=%d, time_secs=%d\n", __func__, bytchk, time_secs);
if (bytchk > 1) {
mk_sense_invalid_fld(ptp, true, 1, 2, vb);
return 0;
}
memset(iop, 0, sizeof(*iop));
iop->opcode = bytchk ? SG_NVME_NVM_COMPARE : SG_NVME_NVM_WRITE;
if (is_verify10) {
iop->slba = sg_get_unaligned_be32(cdbp + 2);
nblks_t10 = sg_get_unaligned_be16(cdbp + 7);
} else {
uint32_t num = sg_get_unaligned_be32(cdbp + 10);
iop->slba = sg_get_unaligned_be64(cdbp + 2);
if (num > (UINT16_MAX + 1)) {
mk_sense_invalid_fld(ptp, true, 11, -1, vb);
return 0;
} else
nblks_t10 = num;
}
if (0 == nblks_t10) { /* NOP in SCSI */
if (vb > 4)
pr2ws("%s: nblks_t10 is 0, a NOP in SCSI, can't map to NVMe\n",
__func__);
return 0;
}
iop->nblocks = nblks_t10 - 1;
if (bytchk) {
iop->addr = (uint64_t)ptp->io_hdr.dout_xferp;
dlen = ptp->io_hdr.dout_xfer_len;
}
res = sntl_do_nvm_cmd(ptp, iop, dlen, false, time_secs, vb);
if (SG_LIB_NVME_STATUS == res) {
mk_sense_from_nvme_status(ptp, vb);
return 0;
}
return res;
}
static int
sntl_write_same(struct sg_pt_linux_scsi * ptp, const uint8_t * cdbp,
int time_secs, int vb)
{
bool is_ws10 = (SCSI_WRITE_SAME10_OPC == cdbp[0]);
bool ndob = is_ws10 ? false : !!(0x1 & cdbp[1]);
int res;
int nblks_t10 = 0;
struct sg_nvme_user_io io;
struct sg_nvme_user_io * iop = &io;
if (vb > 3)
pr2ws("%s: ndob=%d, time_secs=%d\n", __func__, (int)ndob, time_secs);
if (! ndob) {
int flbas, index, lbafx, lbads, lbsize;
uint8_t * up;
uint8_t * dp;
dp = (uint8_t *)(sg_uintptr_t)ptp->io_hdr.dout_xferp;
if (dp == NULL)
return sg_convert_errno(ENOMEM);
if (NULL == ptp->nvme_id_ctlp) {
res = sntl_cache_identify(ptp, time_secs, vb);
if (SG_LIB_NVME_STATUS == res) {
mk_sense_from_nvme_status(ptp, vb);
return 0;
} else if (res)
return res;
}
up = ptp->nvme_id_ctlp;
flbas = up[26]; /* NVME FLBAS field from Identify */
index = 128 + (4 * (flbas & 0xf));
lbafx = sg_get_unaligned_le32(up + index);
lbads = (lbafx >> 16) & 0xff; /* bits 16 to 23 inclusive, pow2 */
lbsize = 1 << lbads;
if (! sg_all_zeros(dp, lbsize)) {
mk_sense_asc_ascq(ptp, SPC_SK_ILLEGAL_REQUEST, PCIE_ERR_ASC,
PCIE_UNSUPP_REQ_ASCQ, vb);
return 0;
}
/* so given single LB full of zeros, can translate .... */
}
memset(iop, 0, sizeof(*iop));
iop->opcode = SG_NVME_NVM_WRITE_ZEROES;
if (is_ws10) {
iop->slba = sg_get_unaligned_be32(cdbp + 2);
nblks_t10 = sg_get_unaligned_be16(cdbp + 7);
} else {
uint32_t num = sg_get_unaligned_be32(cdbp + 10);
iop->slba = sg_get_unaligned_be64(cdbp + 2);
if (num > (UINT16_MAX + 1)) {
mk_sense_invalid_fld(ptp, true, 11, -1, vb);
return 0;
} else
nblks_t10 = num;
}
if (0 == nblks_t10) { /* NOP in SCSI */
if (vb > 4)
pr2ws("%s: nblks_t10 is 0, a NOP in SCSI, can't map to NVMe\n",
__func__);
return 0;
}
iop->nblocks = nblks_t10 - 1;
res = sntl_do_nvm_cmd(ptp, iop, 0, false, time_secs, vb);
if (SG_LIB_NVME_STATUS == res) {
mk_sense_from_nvme_status(ptp, vb);
return 0;
}
return res;
}
static int
sntl_sync_cache(struct sg_pt_linux_scsi * ptp, const uint8_t * cdbp,
int time_secs, int vb)
{
bool immed = !!(0x2 & cdbp[1]);
struct sg_nvme_user_io io;
struct sg_nvme_user_io * iop = &io;
int res;
if (vb > 3)
pr2ws("%s: immed=%d, time_secs=%d\n", __func__, (int)immed,
time_secs);
memset(iop, 0, sizeof(*iop));
iop->opcode = SG_NVME_NVM_FLUSH;
if (vb > 4)
pr2ws("%s: immed bit, lba and num_lbs fields ignored\n", __func__);
res = sntl_do_nvm_cmd(ptp, iop, 0, false, time_secs, vb);
if (SG_LIB_NVME_STATUS == res) {
mk_sense_from_nvme_status(ptp, vb);
return 0;
}
return res;
}
static int
sntl_start_stop(struct sg_pt_linux_scsi * ptp, const uint8_t * cdbp,
int time_secs, int vb)
{
bool immed = !!(0x1 & cdbp[1]);
if (vb > 3)
pr2ws("%s: immed=%d, time_secs=%d, ignore\n", __func__, (int)immed,
time_secs);
if (ptp) { } /* suppress warning */
return 0;
}
/* Executes NVMe Admin command (or at least forwards it to lower layers). /* Executes NVMe Admin command (or at least forwards it to lower layers).
* Returns 0 for success, negative numbers are negated 'errno' values from * Returns 0 for success, negative numbers are negated 'errno' values from
* OS system calls. Positive return values are errors from this package. * OS system calls. Positive return values are errors from this package.
* When time_secs is 0 the Linux NVMe Admin command default of 60 seconds * When time_secs is 0 the Linux NVMe Admin command default of 60 seconds
* is used. */ * is used. */
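A minimal caller-side sketch of those return conventions (a sketch only, with a hypothetical wrapper name; it assumes the sg_pt headers are included and that 'vp' already carries the command and an open file descriptor from create time):

    #include <stdio.h>
    #include <string.h>

    static int run_nvme_pt(struct sg_pt_base * vp, int vb)
    {
        /* fd -1: keep the file descriptor given at create time;
         * time_secs 0: use the Linux NVMe Admin default of 60 seconds */
        int res = sg_do_nvme_pt(vp, -1, 0, vb);

        if (res < 0)            /* negated errno from an OS system call */
            fprintf(stderr, "OS error: %s\n", strerror(-res));
        else if (res > 0)       /* positive: an error from this package,
                                 * e.g. SG_LIB_NVME_STATUS */
            fprintf(stderr, "pass-through error code: %d\n", res);
        return res;
    }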
int int
sg_do_nvme_pt(struct sg_pt_base * vp, int fd, int time_secs, int vb) sg_do_nvme_pt(struct sg_pt_base * vp, int fd, int time_secs, int vb)
{ {
bool scsi_cdb; bool scsi_cdb;
bool is_read = false; bool is_read = false;
int n, len, hold_dev_fd; int n, len, hold_dev_fd;
uint16_t sa; uint16_t sa;
struct sg_pt_linux_scsi * ptp = &vp->impl; struct sg_pt_linux_scsi * ptp = &vp->impl;
struct sg_nvme_passthru_cmd cmd; struct sg_nvme_passthru_cmd cmd;
const uint8_t * cdbp; const uint8_t * cdbp;
void * dp = NULL; void * dp = NULL;
;
if (! ptp->io_hdr.request) { if (! ptp->io_hdr.request) {
if (vb) if (vb)
pr2ws("No NVMe command given (set_scsi_pt_cdb())\n"); pr2ws("No NVMe command given (set_scsi_pt_cdb())\n");
return SCSI_PT_DO_BAD_PARAMS; return SCSI_PT_DO_BAD_PARAMS;
} }
hold_dev_fd = ptp->dev_fd; hold_dev_fd = ptp->dev_fd;
if (fd >= 0) { if (fd >= 0) {
if ((ptp->dev_fd >= 0) && (fd != ptp->dev_fd)) { if ((ptp->dev_fd >= 0) && (fd != ptp->dev_fd)) {
if (vb) if (vb)
pr2ws("%s: file descriptor given to create() and here " pr2ws("%s: file descriptor given to create() and here "
skipping to change at line 1280 skipping to change at line 1744
pr2ws("%s: invalid file descriptors\n", __func__); pr2ws("%s: invalid file descriptors\n", __func__);
return SCSI_PT_DO_BAD_PARAMS; return SCSI_PT_DO_BAD_PARAMS;
} }
n = ptp->io_hdr.request_len; n = ptp->io_hdr.request_len;
cdbp = (const uint8_t *)(sg_uintptr_t)ptp->io_hdr.request; cdbp = (const uint8_t *)(sg_uintptr_t)ptp->io_hdr.request;
if (vb > 3) if (vb > 3)
pr2ws("%s: opcode=0x%x, fd=%d (dev_fd=%d), time_secs=%d\n", __func__, pr2ws("%s: opcode=0x%x, fd=%d (dev_fd=%d), time_secs=%d\n", __func__,
cdbp[0], fd, hold_dev_fd, time_secs); cdbp[0], fd, hold_dev_fd, time_secs);
scsi_cdb = sg_is_scsi_cdb(cdbp, n); scsi_cdb = sg_is_scsi_cdb(cdbp, n);
/* direct NVMe command (i.e. 64 bytes long) or SNTL */ /* direct NVMe command (i.e. 64 bytes long) or SNTL */
ptp->nvme_direct = ! scsi_cdb; ptp->nvme_our_sntl = scsi_cdb;
if (scsi_cdb) { if (scsi_cdb) {
switch (cdbp[0]) { switch (cdbp[0]) {
case SCSI_INQUIRY_OPC: case SCSI_INQUIRY_OPC:
return sntl_inq(ptp, cdbp, time_secs, vb); return sntl_inq(ptp, cdbp, time_secs, vb);
case SCSI_REPORT_LUNS_OPC: case SCSI_REPORT_LUNS_OPC:
return sntl_rluns(ptp, cdbp, time_secs, vb); return sntl_rluns(ptp, cdbp, time_secs, vb);
case SCSI_TEST_UNIT_READY_OPC: case SCSI_TEST_UNIT_READY_OPC:
return sntl_tur(ptp, time_secs, vb); return sntl_tur(ptp, time_secs, vb);
case SCSI_REQUEST_SENSE_OPC: case SCSI_REQUEST_SENSE_OPC:
return sntl_req_sense(ptp, cdbp, time_secs, vb); return sntl_req_sense(ptp, cdbp, time_secs, vb);
case SCSI_READ10_OPC:
case SCSI_READ16_OPC:
return sntl_read(ptp, cdbp, time_secs, vb);
case SCSI_WRITE10_OPC:
case SCSI_WRITE16_OPC:
return sntl_write(ptp, cdbp, time_secs, vb);
case SCSI_START_STOP_OPC:
return sntl_start_stop(ptp, cdbp, time_secs, vb);
case SCSI_SEND_DIAGNOSTIC_OPC: case SCSI_SEND_DIAGNOSTIC_OPC:
return sntl_senddiag(ptp, cdbp, time_secs, vb); return sntl_senddiag(ptp, cdbp, time_secs, vb);
case SCSI_RECEIVE_DIAGNOSTIC_OPC: case SCSI_RECEIVE_DIAGNOSTIC_OPC:
return sntl_recvdiag(ptp, cdbp, time_secs, vb); return sntl_recvdiag(ptp, cdbp, time_secs, vb);
case SCSI_MODE_SENSE10_OPC: case SCSI_MODE_SENSE10_OPC:
case SCSI_MODE_SELECT10_OPC: case SCSI_MODE_SELECT10_OPC:
return sntl_mode_ss(ptp, cdbp, time_secs, vb); return sntl_mode_ss(ptp, cdbp, time_secs, vb);
case SCSI_READ_CAPACITY10_OPC: case SCSI_READ_CAPACITY10_OPC:
return sntl_readcap(ptp, cdbp, time_secs, vb); return sntl_readcap(ptp, cdbp, time_secs, vb);
case SCSI_VERIFY10_OPC:
case SCSI_VERIFY16_OPC:
return sntl_verify(ptp, cdbp, time_secs, vb);
case SCSI_WRITE_SAME10_OPC:
case SCSI_WRITE_SAME16_OPC:
return sntl_write_same(ptp, cdbp, time_secs, vb);
case SCSI_SYNC_CACHE10_OPC:
case SCSI_SYNC_CACHE16_OPC:
return sntl_sync_cache(ptp, cdbp, time_secs, vb);
case SCSI_SERVICE_ACT_IN_OPC: case SCSI_SERVICE_ACT_IN_OPC:
if (SCSI_READ_CAPACITY16_SA == (cdbp[1] & SCSI_SA_MSK)) if (SCSI_READ_CAPACITY16_SA == (cdbp[1] & SCSI_SA_MSK))
return sntl_readcap(ptp, cdbp, time_secs, vb); return sntl_readcap(ptp, cdbp, time_secs, vb);
goto fini; goto fini;
case SCSI_MAINT_IN_OPC: case SCSI_MAINT_IN_OPC:
sa = SCSI_SA_MSK & cdbp[1]; /* service action */ sa = SCSI_SA_MSK & cdbp[1]; /* service action */
if (SCSI_REP_SUP_OPCS_OPC == sa) if (SCSI_REP_SUP_OPCS_OPC == sa)
return sntl_rep_opcodes(ptp, cdbp, time_secs, vb); return sntl_rep_opcodes(ptp, cdbp, time_secs, vb);
else if (SCSI_REP_SUP_TMFS_OPC == sa) else if (SCSI_REP_SUP_TMFS_OPC == sa)
return sntl_rep_tmfs(ptp, cdbp, time_secs, vb); return sntl_rep_tmfs(ptp, cdbp, time_secs, vb);
skipping to change at line 1377 skipping to change at line 1858
#endif #endif
pr2ws("\n"); pr2ws("\n");
} }
if (vp) { ; } /* suppress warning */ if (vp) { ; } /* suppress warning */
if (fd) { ; } /* suppress warning */ if (fd) { ; } /* suppress warning */
if (time_secs) { ; } /* suppress warning */ if (time_secs) { ; } /* suppress warning */
return -ENOTTY; /* inappropriate ioctl error */ return -ENOTTY; /* inappropriate ioctl error */
} }
#endif /* (HAVE_NVME && (! IGNORE_NVME)) */ #endif /* (HAVE_NVME && (! IGNORE_NVME)) */
#if (HAVE_NVME && (! IGNORE_NVME))
int
do_nvm_pt(struct sg_pt_base * vp, int submq, int timeout_secs, int vb)
{
bool is_read = false;
int dlen;
struct sg_pt_linux_scsi * ptp = &vp->impl;
struct sg_nvme_passthru_cmd cmd;
uint8_t * cmdp = (uint8_t *)&cmd;
void * dp = NULL;
if (vb && (submq != 0))
pr2ws("%s: warning, uses submit queue 0\n", __func__);
if (ptp->dev_fd < 0) {
if (vb > 1)
pr2ws("%s: no NVMe file descriptor given\n", __func__);
return SCSI_PT_DO_BAD_PARAMS;
}
if (! ptp->is_nvme) {
if (vb > 1)
pr2ws("%s: file descriptor is not NVMe device\n", __func__);
return SCSI_PT_DO_BAD_PARAMS;
}
if ((! ptp->io_hdr.request) || (64 != ptp->io_hdr.request_len)) {
if (vb > 1)
pr2ws("%s: no NVMe 64 byte command present\n", __func__);
return SCSI_PT_DO_BAD_PARAMS;
}
if (sizeof(cmd) > 64)
memset(cmdp + 64, 0, sizeof(cmd) - 64);
memcpy(cmdp, (uint8_t *)(sg_uintptr_t)ptp->io_hdr.request, 64);
ptp->nvme_our_sntl = false;
dlen = ptp->io_hdr.din_xfer_len;
if (dlen > 0) {
is_read = true;
dp = (void *)(sg_uintptr_t)ptp->io_hdr.din_xferp;
} else {
dlen = ptp->io_hdr.dout_xfer_len;
if (dlen > 0)
dp = (void *)(sg_uintptr_t)ptp->io_hdr.dout_xferp;
}
return do_nvm_pt_low(ptp, &cmd, dp, dlen, is_read, timeout_secs, vb);
}
#else /* (HAVE_NVME && (! IGNORE_NVME)) */
int
do_nvm_pt(struct sg_pt_base * vp, int submq, int timeout_secs, int vb)
{
if (vb) {
pr2ws("%s: not supported, ", __func__);
#ifdef HAVE_NVME
pr2ws("HAVE_NVME, ");
#else
pr2ws("don't HAVE_NVME, ");
#endif
#ifdef IGNORE_NVME
pr2ws("IGNORE_NVME");
#else
pr2ws("don't IGNORE_NVME");
#endif
pr2ws("\n");
}
if (vp) { }
if (submq) { }
if (timeout_secs) { }
return SCSI_PT_DO_NOT_SUPPORTED;
}
#endif /* (HAVE_NVME && (! IGNORE_NVME)) */
End of changes. 47 change blocks. 49 lines changed or deleted, 530 lines changed or added.
