/*
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2017 Shunsuke Mie
 * Copyright (c) 2018 Leon Dang
 * Copyright (c) 2020 Chuck Tuffli
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * bhyve PCIe-NVMe device emulation.
 *
 * Usage:
 * -s <n>,nvme,devpath,maxq=#,qsz=#,ioslots=#,sectsz=#,ser=A-Z,eui64=#,dsm=<opt>
 *
 *  maxq    = max number of queues
 *  qsz     = max elements in each queue
 *  ioslots = max number of concurrent io requests
 *  sectsz  = sector size (defaults to blockif sector size)
 *  ser     = serial number (20-chars max)
 *  eui64   = IEEE Extended Unique Identifier (8 byte value)
 *  dsm     = Dataset Management support. Option is one of auto, enable, disable
 *
 * TODO:
 *  - create async event for smart and log
 */
#include <sys/cdefs.h>
#include <sys/errno.h>
#include <sys/types.h>
#include <sys/crc16.h>
#include <net/ieee_oui.h>

#include <pthread_np.h>
#include <semaphore.h>

#include <machine/atomic.h>
#include <machine/vmm.h>

#include <dev/nvme/nvme.h>
static int nvme_debug = 0;
#define	DPRINTF(fmt, args...) if (nvme_debug) PRINTLN(fmt, ##args)
#define	WPRINTF(fmt, args...) PRINTLN(fmt, ##args)
/* defaults; can be overridden */
#define	NVME_MSIX_BAR		4

#define	NVME_IOSLOTS		8

/* The NVMe spec defines bits 13:4 in BAR0 as reserved */
#define	NVME_MMIO_SPACE_MIN	(1 << 14)

#define	NVME_QUEUES		16
#define	NVME_MAX_QENTRIES	2048
/* Memory Page size Minimum reported in CAP register */
#define	NVME_MPSMIN		0
/* MPSMIN converted to bytes */
#define	NVME_MPSMIN_BYTES	(1 << (12 + NVME_MPSMIN))

#define	NVME_PRP2_ITEMS		(PAGE_SIZE/sizeof(uint64_t))

/* Note the + 1 allows for the initial descriptor to not be page aligned */
#define	NVME_MAX_IOVEC		((1 << NVME_MDTS) + 1)
#define	NVME_MAX_DATA_SIZE	((1 << NVME_MDTS) * NVME_MPSMIN_BYTES)
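/*
 * Worked example (illustrative, assuming NVME_MDTS is defined as 9
 * elsewhere in this file): with NVME_MPSMIN = 0, NVME_MPSMIN_BYTES is
 * 1 << (12 + 0) = 4096, so NVME_MAX_DATA_SIZE = (1 << 9) * 4096 = 2 MiB,
 * and a maximal transfer needs 512 page descriptors plus the extra
 * descriptor in NVME_MAX_IOVEC for an unaligned first page.
 */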
/* This is a synthetic status code to indicate there is no status */
#define	NVME_NO_STATUS		0xffff
#define	NVME_COMPLETION_VALID(c)	((c).status != NVME_NO_STATUS)

/* Reported temperature in Kelvin (i.e. room temperature, roughly 23 C) */
#define	NVME_TEMPERATURE	296
/* Convert a zero-based value into a one-based value */
#define	ONE_BASED(zero)		((zero) + 1)
/* Convert a one-based value into a zero-based value */
#define	ZERO_BASED(one)		((one) - 1)

/* Encode number of SQ's and CQ's for Set/Get Features */
#define	NVME_FEATURE_NUM_QUEUES(sc) \
	(ZERO_BASED((sc)->num_squeues) & 0xffff) | \
	(ZERO_BASED((sc)->num_cqueues) & 0xffff) << 16
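/*
 * Worked example (illustrative): with num_squeues = 8 and num_cqueues = 8,
 * NVME_FEATURE_NUM_QUEUES() evaluates to
 *   (ZERO_BASED(8) & 0xffff) | ((ZERO_BASED(8) & 0xffff) << 16)
 *   = 0x7 | (0x7 << 16) = 0x00070007
 * i.e. both halves of the Get/Set Features result carry zero-based counts.
 */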
#define	NVME_DOORBELL_OFFSET	offsetof(struct nvme_registers, doorbell)

enum nvme_controller_register_offsets {
	NVME_CR_CAP_LOW = 0x00,
	NVME_CR_CAP_HI  = 0x04,
	NVME_CR_INTMS   = 0x0c,
	NVME_CR_INTMC   = 0x10,
	NVME_CR_ASQ_LOW = 0x28,
	NVME_CR_ASQ_HI  = 0x2c,
	NVME_CR_ACQ_LOW = 0x30,
	NVME_CR_ACQ_HI  = 0x34,

enum nvme_cmd_cdw11 {
	NVME_CMD_CDW11_PC  = 0x0001,
	NVME_CMD_CDW11_IEN = 0x0002,
	NVME_CMD_CDW11_IV  = 0xFFFF0000,
#define	NVME_CQ_INTEN	0x01
#define	NVME_CQ_INTCOAL	0x02

struct nvme_completion_queue {
	struct nvme_completion *qbase;
	uint16_t	tail;	/* nvme progress */
	uint16_t	head;	/* guest progress */

struct nvme_submission_queue {
	struct nvme_command *qbase;
	uint16_t	head;	/* nvme progress */
	uint16_t	tail;	/* guest progress */
	uint16_t	cqid;	/* completion queue id */

enum nvme_storage_type {
	NVME_STOR_BLOCKIF = 0,

struct pci_nvme_blockstore {
	enum nvme_storage_type type;
	uint32_t	sectsz_bits;
	uint32_t	deallocate:1;
/*
 * Calculate the number of additional page descriptors for guest IO requests
 * based on the advertised Max Data Transfer (MDTS) and given the number of
 * default iovec's in a struct blockif_req.
 */
#define MDTS_PAD_SIZE \
	( NVME_MAX_IOVEC > BLOCKIF_IOV_MAX ? \
	  NVME_MAX_IOVEC - BLOCKIF_IOV_MAX : \
	  0 )
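/*
 * Worked example (illustrative, assuming an MDTS of 9 and a
 * BLOCKIF_IOV_MAX of 128): NVME_MAX_IOVEC = (1 << 9) + 1 = 513, so
 * MDTS_PAD_SIZE = 513 - 128 = 385, and iovpadding[] below extends the
 * iovec array embedded in struct blockif_req to cover a maximal
 * transfer. Were BLOCKIF_IOV_MAX >= NVME_MAX_IOVEC, no padding would
 * be reserved.
 */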
struct pci_nvme_ioreq {
	struct pci_nvme_softc *sc;
	STAILQ_ENTRY(pci_nvme_ioreq) link;
	struct nvme_submission_queue *nvme_sq;

	/* command information */
	uint64_t	prev_gpaddr;

	struct blockif_req io_req;

	struct iovec	iovpadding[MDTS_PAD_SIZE];
enum nvme_dsm_type {
	/* Dataset Management bit in ONCS reflects backing storage capability */
	NVME_DATASET_MANAGEMENT_AUTO,
	/* Unconditionally set Dataset Management bit in ONCS */
	NVME_DATASET_MANAGEMENT_ENABLE,
	/* Unconditionally clear Dataset Management bit in ONCS */
	NVME_DATASET_MANAGEMENT_DISABLE,
};
struct pci_nvme_softc;
struct nvme_feature_obj;

typedef void (*nvme_feature_cb)(struct pci_nvme_softc *,
    struct nvme_feature_obj *,
    struct nvme_command *,
    struct nvme_completion *);

struct nvme_feature_obj {
	bool namespace_specific;

#define NVME_FID_MAX	(NVME_FEAT_ENDURANCE_GROUP_EVENT_CONFIGURATION + 1)
typedef enum {
	PCI_NVME_AE_TYPE_ERROR = 0,
	PCI_NVME_AE_TYPE_SMART,
	PCI_NVME_AE_TYPE_NOTICE,
	PCI_NVME_AE_TYPE_IO_CMD = 6,
	PCI_NVME_AE_TYPE_VENDOR = 7,
	PCI_NVME_AE_TYPE_MAX		/* Must be last */
} pci_nvme_async_type;
/* Asynchronous Event Requests */
struct pci_nvme_aer {
	STAILQ_ENTRY(pci_nvme_aer) link;
	uint16_t	cid;	/* Command ID of the submitted AER */
/** Asynchronous Event Information - Notice */
typedef enum {
	PCI_NVME_AEI_NOTICE_NS_ATTR_CHANGED = 0,
	PCI_NVME_AEI_NOTICE_FW_ACTIVATION,
	PCI_NVME_AEI_NOTICE_TELEMETRY_CHANGE,
	PCI_NVME_AEI_NOTICE_ANA_CHANGE,
	PCI_NVME_AEI_NOTICE_PREDICT_LATENCY_CHANGE,
	PCI_NVME_AEI_NOTICE_LBA_STATUS_ALERT,
	PCI_NVME_AEI_NOTICE_ENDURANCE_GROUP_CHANGE,
	PCI_NVME_AEI_NOTICE_MAX,
} pci_nvme_async_event_info_notice;
#define	PCI_NVME_AEI_NOTICE_SHIFT	8
#define	PCI_NVME_AEI_NOTICE_MASK(event)	(1 << (event + PCI_NVME_AEI_NOTICE_SHIFT))
/* Asynchronous Event Notifications */
struct pci_nvme_aen {
	pci_nvme_async_type atype;

/*
 * By default, enable all Asynchronous Event Notifications:
 *	SMART / Health Critical Warnings
 *	Namespace Attribute Notices
 */
#define	PCI_NVME_AEN_DEFAULT_MASK	0x11f
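/*
 * Decoding the default (a sketch following the Asynchronous Event
 * Configuration layout used above): bits 4:0 (0x1f) enable the SMART /
 * Health critical warning bits, and bit 8 (0x100, i.e.
 * PCI_NVME_AEI_NOTICE_MASK(PCI_NVME_AEI_NOTICE_NS_ATTR_CHANGED))
 * enables Namespace Attribute Notices; 0x100 | 0x1f = 0x11f.
 */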
typedef enum {
	NVME_CNTRLTYPE_IO = 1,
	NVME_CNTRLTYPE_DISCOVERY = 2,
	NVME_CNTRLTYPE_ADMIN = 3,
} pci_nvme_cntrl_type;
struct pci_nvme_softc {
	struct pci_devinst *nsc_pi;

	struct nvme_registers regs;

	struct nvme_namespace_data  nsdata;
	struct nvme_controller_data ctrldata;
	struct nvme_error_information_entry err_log;
	struct nvme_health_information_page health_log;
	struct nvme_firmware_page fw_log;
	struct nvme_ns_list ns_log;

	struct pci_nvme_blockstore nvstore;

	uint16_t	max_qentries;	/* max entries per queue */
	uint32_t	max_queues;	/* max number of IO SQ's or CQ's */
	uint32_t	num_cqueues;
	uint32_t	num_squeues;
	bool		num_q_is_set;	/* Has host set Number of Queues */

	struct pci_nvme_ioreq *ioreqs;
	STAILQ_HEAD(, pci_nvme_ioreq) ioreqs_free; /* free list of ioreqs */
	uint32_t	pending_ios;

	/*
	 * Memory mapped Submission and Completion queues
	 * Each array includes both Admin and IO queues
	 */
	struct nvme_completion_queue *compl_queues;
	struct nvme_submission_queue *submit_queues;

	struct nvme_feature_obj feat[NVME_FID_MAX];

	enum nvme_dsm_type dataset_management;

	/* Accounting for SMART data */
	__uint128_t	read_data_units;
	__uint128_t	write_data_units;
	__uint128_t	read_commands;
	__uint128_t	write_commands;
	uint32_t	read_dunits_remainder;
	uint32_t	write_dunits_remainder;

	STAILQ_HEAD(, pci_nvme_aer) aer_list;
	pthread_mutex_t	aer_mtx;

	struct pci_nvme_aen aen[PCI_NVME_AE_TYPE_MAX];
	pthread_mutex_t	aen_mtx;
	pthread_cond_t	aen_cond;
static void pci_nvme_cq_update(struct pci_nvme_softc *sc,
    struct nvme_completion_queue *cq,
static struct pci_nvme_ioreq *pci_nvme_get_ioreq(struct pci_nvme_softc *);
static void pci_nvme_release_ioreq(struct pci_nvme_softc *, struct pci_nvme_ioreq *);
static void pci_nvme_io_done(struct blockif_req *, int);

/* Controller Configuration utils */
#define	NVME_CC_GET_EN(cc) \
	((cc) >> NVME_CC_REG_EN_SHIFT & NVME_CC_REG_EN_MASK)
#define	NVME_CC_GET_CSS(cc) \
	((cc) >> NVME_CC_REG_CSS_SHIFT & NVME_CC_REG_CSS_MASK)
#define	NVME_CC_GET_SHN(cc) \
	((cc) >> NVME_CC_REG_SHN_SHIFT & NVME_CC_REG_SHN_MASK)
#define	NVME_CC_GET_IOSQES(cc) \
	((cc) >> NVME_CC_REG_IOSQES_SHIFT & NVME_CC_REG_IOSQES_MASK)
#define	NVME_CC_GET_IOCQES(cc) \
	((cc) >> NVME_CC_REG_IOCQES_SHIFT & NVME_CC_REG_IOCQES_MASK)

#define	NVME_CC_WRITE_MASK \
	((NVME_CC_REG_EN_MASK << NVME_CC_REG_EN_SHIFT) | \
	 (NVME_CC_REG_IOSQES_MASK << NVME_CC_REG_IOSQES_SHIFT) | \
	 (NVME_CC_REG_IOCQES_MASK << NVME_CC_REG_IOCQES_SHIFT))

#define	NVME_CC_NEN_WRITE_MASK \
	((NVME_CC_REG_CSS_MASK << NVME_CC_REG_CSS_SHIFT) | \
	 (NVME_CC_REG_MPS_MASK << NVME_CC_REG_MPS_SHIFT) | \
	 (NVME_CC_REG_AMS_MASK << NVME_CC_REG_AMS_SHIFT))

/* Controller Status utils */
#define	NVME_CSTS_GET_RDY(sts) \
	((sts) >> NVME_CSTS_REG_RDY_SHIFT & NVME_CSTS_REG_RDY_MASK)

#define	NVME_CSTS_RDY	(1 << NVME_CSTS_REG_RDY_SHIFT)
#define	NVME_CSTS_CFS	(1 << NVME_CSTS_REG_CFS_SHIFT)

/* Completion Queue status word utils */
#define	NVME_STATUS_P	(1 << NVME_STATUS_P_SHIFT)
#define	NVME_STATUS_MASK \
	((NVME_STATUS_SCT_MASK << NVME_STATUS_SCT_SHIFT) | \
	 (NVME_STATUS_SC_MASK << NVME_STATUS_SC_SHIFT))

#define	NVME_ONCS_DSM	(NVME_CTRLR_DATA_ONCS_DSM_MASK << \
	NVME_CTRLR_DATA_ONCS_DSM_SHIFT)

static void nvme_feature_invalid_cb(struct pci_nvme_softc *,
    struct nvme_feature_obj *,
    struct nvme_command *,
    struct nvme_completion *);
static void nvme_feature_temperature(struct pci_nvme_softc *,
    struct nvme_feature_obj *,
    struct nvme_command *,
    struct nvme_completion *);
static void nvme_feature_num_queues(struct pci_nvme_softc *,
    struct nvme_feature_obj *,
    struct nvme_command *,
    struct nvme_completion *);
static void nvme_feature_iv_config(struct pci_nvme_softc *,
    struct nvme_feature_obj *,
    struct nvme_command *,
    struct nvme_completion *);
static void nvme_feature_async_event(struct pci_nvme_softc *,
    struct nvme_feature_obj *,
    struct nvme_command *,
    struct nvme_completion *);

static void *aen_thr(void *arg);
cpywithpad(char *dst, size_t dst_size, const char *src, char pad)
	len = strnlen(src, dst_size);
	memset(dst, pad, dst_size);
	memcpy(dst, src, len);
pci_nvme_status_tc(uint16_t *status, uint16_t type, uint16_t code)
	*status &= ~NVME_STATUS_MASK;
	*status |= (type & NVME_STATUS_SCT_MASK) << NVME_STATUS_SCT_SHIFT |
	    (code & NVME_STATUS_SC_MASK) << NVME_STATUS_SC_SHIFT;

pci_nvme_status_genc(uint16_t *status, uint16_t code)
	pci_nvme_status_tc(status, NVME_SCT_GENERIC, code);
/*
 * Initialize the requested number of IO Submission and Completion Queues.
 * Admin queues are allocated implicitly.
 */
pci_nvme_init_queues(struct pci_nvme_softc *sc, uint32_t nsq, uint32_t ncq)
	/*
	 * Allocate and initialize the Submission Queues
	 */
	if (nsq > NVME_QUEUES) {
		WPRINTF("%s: clamping number of SQ from %u to %u",
		    __func__, nsq, NVME_QUEUES);

	sc->num_squeues = nsq;

	sc->submit_queues = calloc(sc->num_squeues + 1,
	    sizeof(struct nvme_submission_queue));
	if (sc->submit_queues == NULL) {
		WPRINTF("%s: SQ allocation failed", __func__);

	struct nvme_submission_queue *sq = sc->submit_queues;

	for (i = 0; i < sc->num_squeues + 1; i++)
		pthread_mutex_init(&sq[i].mtx, NULL);

	/*
	 * Allocate and initialize the Completion Queues
	 */
	if (ncq > NVME_QUEUES) {
		WPRINTF("%s: clamping number of CQ from %u to %u",
		    __func__, ncq, NVME_QUEUES);

	sc->num_cqueues = ncq;

	sc->compl_queues = calloc(sc->num_cqueues + 1,
	    sizeof(struct nvme_completion_queue));
	if (sc->compl_queues == NULL) {
		WPRINTF("%s: CQ allocation failed", __func__);

	struct nvme_completion_queue *cq = sc->compl_queues;

	for (i = 0; i < sc->num_cqueues + 1; i++)
		pthread_mutex_init(&cq[i].mtx, NULL);
pci_nvme_init_ctrldata(struct pci_nvme_softc *sc)
	struct nvme_controller_data *cd = &sc->ctrldata;

	cpywithpad((char *)cd->mn, sizeof(cd->mn), "bhyve-NVMe", ' ');
	cpywithpad((char *)cd->fr, sizeof(cd->fr), "1.0", ' ');

	/* Num of submission commands that we can handle at a time (2^rab) */

	cd->mdts = NVME_MDTS;	/* max data transfer size (2^mdts * CAP.MPSMIN) */

	cd->ver = NVME_REV(1,4);

	cd->cntrltype = NVME_CNTRLTYPE_IO;
	cd->oacs = 1 << NVME_CTRLR_DATA_OACS_FORMAT_SHIFT;
	cd->oaes = NVMEB(NVME_CTRLR_DATA_OAES_NS_ATTR);

	/* Advertise 1, Read-only firmware slot */
	cd->frmw = NVMEB(NVME_CTRLR_DATA_FRMW_SLOT1_RO) |
	    (1 << NVME_CTRLR_DATA_FRMW_NUM_SLOTS_SHIFT);
	cd->lpa = 0;	/* TODO: support some simple things like SMART */
	cd->elpe = 0;	/* max error log page entries */
	/*
	 * Report a single power state (zero-based value)
	 * power_state[] values are left as zero to indicate "Not reported"
	 */

	/* Warning Composite Temperature Threshold */

	/* SANICAP must not be 0 for Revision 1.4 and later NVMe Controllers */
	cd->sanicap = (NVME_CTRLR_DATA_SANICAP_NODMMAS_NO <<
	    NVME_CTRLR_DATA_SANICAP_NODMMAS_SHIFT);

	cd->sqes = (6 << NVME_CTRLR_DATA_SQES_MAX_SHIFT) |
	    (6 << NVME_CTRLR_DATA_SQES_MIN_SHIFT);
	cd->cqes = (4 << NVME_CTRLR_DATA_CQES_MAX_SHIFT) |
	    (4 << NVME_CTRLR_DATA_CQES_MIN_SHIFT);
	cd->nn = 1;	/* number of namespaces */

	switch (sc->dataset_management) {
	case NVME_DATASET_MANAGEMENT_AUTO:
		if (sc->nvstore.deallocate)
			cd->oncs |= NVME_ONCS_DSM;
	case NVME_DATASET_MANAGEMENT_ENABLE:
		cd->oncs |= NVME_ONCS_DSM;

	cd->fna = NVME_CTRLR_DATA_FNA_FORMAT_ALL_MASK <<
	    NVME_CTRLR_DATA_FNA_FORMAT_ALL_SHIFT;

	cd->vwc = NVME_CTRLR_DATA_VWC_ALL_NO << NVME_CTRLR_DATA_VWC_ALL_SHIFT;

	ret = snprintf(cd->subnqn, sizeof(cd->subnqn),
	    "nqn.2013-12.org.freebsd:bhyve-%s-%u-%u-%u",
	    get_config_value("name"), sc->nsc_pi->pi_bus,
	    sc->nsc_pi->pi_slot, sc->nsc_pi->pi_func);
	if ((ret < 0) || ((unsigned)ret > sizeof(cd->subnqn)))
		EPRINTLN("%s: error setting subnqn (%d)", __func__, ret);
pci_nvme_init_nsdata_size(struct pci_nvme_blockstore *nvstore,
    struct nvme_namespace_data *nd)
	/* Get capacity and block size information from backing store */
	nd->nsze = nvstore->size / nvstore->sectsz;

pci_nvme_init_nsdata(struct pci_nvme_softc *sc,
    struct nvme_namespace_data *nd, uint32_t nsid,
    struct pci_nvme_blockstore *nvstore)

	pci_nvme_init_nsdata_size(nvstore, nd);

	if (nvstore->type == NVME_STOR_BLOCKIF)
		nvstore->deallocate = blockif_candelete(nvstore->ctx);

	nd->nlbaf = 0;	/* NLBAF is a zero-based value (i.e. 1 LBA Format) */

	/* Create an EUI-64 if user did not provide one */
	if (nvstore->eui64 == 0) {
		uint64_t eui64 = nvstore->eui64;

		asprintf(&data, "%s%u%u%u", get_config_value("name"),
		    sc->nsc_pi->pi_bus, sc->nsc_pi->pi_slot,
		    sc->nsc_pi->pi_func);
			eui64 = OUI_FREEBSD_NVME_LOW | crc16(0, data, strlen(data));
		nvstore->eui64 = (eui64 << 16) | (nsid & 0xffff);
	be64enc(nd->eui64, nvstore->eui64);

	/* LBA data-sz = 2^lbads */
	nd->lbaf[0] = nvstore->sectsz_bits << NVME_NS_DATA_LBAF_LBADS_SHIFT;
pci_nvme_init_logpages(struct pci_nvme_softc *sc)
	__uint128_t power_cycles = 1;

	memset(&sc->err_log, 0, sizeof(sc->err_log));
	memset(&sc->health_log, 0, sizeof(sc->health_log));
	memset(&sc->fw_log, 0, sizeof(sc->fw_log));
	memset(&sc->ns_log, 0, sizeof(sc->ns_log));

	/* Set read/write remainder to round up according to spec */
	sc->read_dunits_remainder = 999;
	sc->write_dunits_remainder = 999;

	/* Set nominal Health values checked by implementations */
	sc->health_log.temperature = NVME_TEMPERATURE;
	sc->health_log.available_spare = 100;
	sc->health_log.available_spare_threshold = 10;

	/* Set Active Firmware Info to slot 1 */
	sc->fw_log.afi = (1 << NVME_FIRMWARE_PAGE_AFI_SLOT_SHIFT);
	memcpy(&sc->fw_log.revision[0], sc->ctrldata.fr,
	    sizeof(sc->fw_log.revision[0]));

	memcpy(&sc->health_log.power_cycles, &power_cycles,
	    sizeof(sc->health_log.power_cycles));
pci_nvme_init_features(struct pci_nvme_softc *sc)
	enum nvme_feature fid;

	for (fid = 0; fid < NVME_FID_MAX; fid++) {
		case NVME_FEAT_ARBITRATION:
		case NVME_FEAT_POWER_MANAGEMENT:
		case NVME_FEAT_INTERRUPT_COALESCING: //XXX
		case NVME_FEAT_WRITE_ATOMICITY:
			/* Mandatory but no special handling required */
		//XXX hang - case NVME_FEAT_PREDICTABLE_LATENCY_MODE_CONFIG:
		//XXX hang - case NVME_FEAT_HOST_BEHAVIOR_SUPPORT:
		//	      this returns a data buffer
		case NVME_FEAT_TEMPERATURE_THRESHOLD:
			sc->feat[fid].set = nvme_feature_temperature;
		case NVME_FEAT_ERROR_RECOVERY:
			sc->feat[fid].namespace_specific = true;
		case NVME_FEAT_NUMBER_OF_QUEUES:
			sc->feat[fid].set = nvme_feature_num_queues;
		case NVME_FEAT_INTERRUPT_VECTOR_CONFIGURATION:
			sc->feat[fid].set = nvme_feature_iv_config;
		case NVME_FEAT_ASYNC_EVENT_CONFIGURATION:
			sc->feat[fid].set = nvme_feature_async_event;
			/* Enable all AENs by default */
			sc->feat[fid].cdw11 = PCI_NVME_AEN_DEFAULT_MASK;
			sc->feat[fid].set = nvme_feature_invalid_cb;
			sc->feat[fid].get = nvme_feature_invalid_cb;
pci_nvme_aer_reset(struct pci_nvme_softc *sc)

	STAILQ_INIT(&sc->aer_list);

pci_nvme_aer_init(struct pci_nvme_softc *sc)

	pthread_mutex_init(&sc->aer_mtx, NULL);
	pci_nvme_aer_reset(sc);

pci_nvme_aer_destroy(struct pci_nvme_softc *sc)
	struct pci_nvme_aer *aer = NULL;

	pthread_mutex_lock(&sc->aer_mtx);
	while (!STAILQ_EMPTY(&sc->aer_list)) {
		aer = STAILQ_FIRST(&sc->aer_list);
		STAILQ_REMOVE_HEAD(&sc->aer_list, link);
	pthread_mutex_unlock(&sc->aer_mtx);

	pci_nvme_aer_reset(sc);

pci_nvme_aer_available(struct pci_nvme_softc *sc)

	return (sc->aer_count != 0);

pci_nvme_aer_limit_reached(struct pci_nvme_softc *sc)
	struct nvme_controller_data *cd = &sc->ctrldata;

	/* AERL is a zero-based value while aer_count is one-based */
	return (sc->aer_count == (cd->aerl + 1U));
/*
 * Add an Async Event Request
 *
 * Stores an AER to be returned later if the Controller needs to notify the
 * host of an event.
 * Note that while the NVMe spec doesn't require Controllers to return AERs
 * in order, this implementation does preserve the order.
 */
pci_nvme_aer_add(struct pci_nvme_softc *sc, uint16_t cid)
	struct pci_nvme_aer *aer = NULL;

	aer = calloc(1, sizeof(struct pci_nvme_aer));

	/* Save the Command ID for use in the completion message */

	pthread_mutex_lock(&sc->aer_mtx);
	STAILQ_INSERT_TAIL(&sc->aer_list, aer, link);
	pthread_mutex_unlock(&sc->aer_mtx);

/*
 * Get an Async Event Request structure
 *
 * Returns a pointer to an AER previously submitted by the host or NULL if
 * no AERs exist. Caller is responsible for freeing the returned struct.
 */
static struct pci_nvme_aer *
pci_nvme_aer_get(struct pci_nvme_softc *sc)
	struct pci_nvme_aer *aer = NULL;

	pthread_mutex_lock(&sc->aer_mtx);
	aer = STAILQ_FIRST(&sc->aer_list);
		STAILQ_REMOVE_HEAD(&sc->aer_list, link);
	pthread_mutex_unlock(&sc->aer_mtx);
pci_nvme_aen_reset(struct pci_nvme_softc *sc)

	memset(sc->aen, 0, PCI_NVME_AE_TYPE_MAX * sizeof(struct pci_nvme_aen));

	for (atype = 0; atype < PCI_NVME_AE_TYPE_MAX; atype++) {
		sc->aen[atype].atype = atype;

pci_nvme_aen_init(struct pci_nvme_softc *sc)

	pci_nvme_aen_reset(sc);

	pthread_mutex_init(&sc->aen_mtx, NULL);
	pthread_create(&sc->aen_tid, NULL, aen_thr, sc);
	snprintf(nstr, sizeof(nstr), "nvme-aen-%d:%d", sc->nsc_pi->pi_slot,
	    sc->nsc_pi->pi_func);
	pthread_set_name_np(sc->aen_tid, nstr);

pci_nvme_aen_destroy(struct pci_nvme_softc *sc)

	pci_nvme_aen_reset(sc);

/* Notify the AEN thread of pending work */
pci_nvme_aen_notify(struct pci_nvme_softc *sc)

	pthread_cond_signal(&sc->aen_cond);
/*
 * Post an Asynchronous Event Notification
 */
pci_nvme_aen_post(struct pci_nvme_softc *sc, pci_nvme_async_type atype,
	struct pci_nvme_aen *aen;

	if (atype >= PCI_NVME_AE_TYPE_MAX) {

	pthread_mutex_lock(&sc->aen_mtx);
	aen = &sc->aen[atype];

	/* Has the controller already posted an event of this type? */
		pthread_mutex_unlock(&sc->aen_mtx);

	aen->event_data = event_data;

	pthread_mutex_unlock(&sc->aen_mtx);

	pci_nvme_aen_notify(sc);
pci_nvme_aen_process(struct pci_nvme_softc *sc)
	struct pci_nvme_aer *aer;
	struct pci_nvme_aen *aen;
	pci_nvme_async_type atype;

	assert(pthread_mutex_isowned_np(&sc->aen_mtx));
	for (atype = 0; atype < PCI_NVME_AE_TYPE_MAX; atype++) {
		aen = &sc->aen[atype];
		/* Previous iterations may have depleted the available AERs */
		if (!pci_nvme_aer_available(sc)) {
			DPRINTF("%s: no AER", __func__);

			DPRINTF("%s: no AEN posted for atype=%#x", __func__, atype);

		status = NVME_SC_SUCCESS;

		/* Is the event masked? */
		    sc->feat[NVME_FEAT_ASYNC_EVENT_CONFIGURATION].cdw11;

		DPRINTF("%s: atype=%#x mask=%#x event_data=%#x", __func__, atype, mask, aen->event_data);
		case PCI_NVME_AE_TYPE_ERROR:
			lid = NVME_LOG_ERROR;
		case PCI_NVME_AE_TYPE_SMART:
			if ((mask & aen->event_data) == 0)
			lid = NVME_LOG_HEALTH_INFORMATION;
		case PCI_NVME_AE_TYPE_NOTICE:
			if (aen->event_data >= PCI_NVME_AEI_NOTICE_MAX) {
				EPRINTLN("%s unknown AEN notice type %u",
				    __func__, aen->event_data);
				status = NVME_SC_INTERNAL_DEVICE_ERROR;
			if ((PCI_NVME_AEI_NOTICE_MASK(aen->event_data) & mask) == 0)
			switch (aen->event_data) {
			case PCI_NVME_AEI_NOTICE_NS_ATTR_CHANGED:
				lid = NVME_LOG_CHANGED_NAMESPACE;
			case PCI_NVME_AEI_NOTICE_FW_ACTIVATION:
				lid = NVME_LOG_FIRMWARE_SLOT;
			case PCI_NVME_AEI_NOTICE_TELEMETRY_CHANGE:
				lid = NVME_LOG_TELEMETRY_CONTROLLER_INITIATED;
			case PCI_NVME_AEI_NOTICE_ANA_CHANGE:
				lid = NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS;
			case PCI_NVME_AEI_NOTICE_PREDICT_LATENCY_CHANGE:
				lid = NVME_LOG_PREDICTABLE_LATENCY_EVENT_AGGREGATE;
			case PCI_NVME_AEI_NOTICE_LBA_STATUS_ALERT:
				lid = NVME_LOG_LBA_STATUS_INFORMATION;
			case PCI_NVME_AEI_NOTICE_ENDURANCE_GROUP_CHANGE:
				lid = NVME_LOG_ENDURANCE_GROUP_EVENT_AGGREGATE;
			EPRINTLN("%s unknown AEN type %u", __func__, atype);
			status = NVME_SC_INTERNAL_DEVICE_ERROR;

		aer = pci_nvme_aer_get(sc);

		DPRINTF("%s: CID=%#x CDW0=%#x", __func__, aer->cid, (lid << 16) | (aen->event_data << 8) | atype);
		pci_nvme_cq_update(sc, &sc->compl_queues[0],
		    (lid << 16) | (aen->event_data << 8) | atype, /* cdw0 */

		pci_generate_msix(sc->nsc_pi, 0);
	struct pci_nvme_softc *sc;

	pthread_mutex_lock(&sc->aen_mtx);
		pci_nvme_aen_process(sc);
		pthread_cond_wait(&sc->aen_cond, &sc->aen_mtx);
	pthread_mutex_unlock(&sc->aen_mtx);
pci_nvme_reset_locked(struct pci_nvme_softc *sc)

	DPRINTF("%s", __func__);

	sc->regs.cap_lo = (ZERO_BASED(sc->max_qentries) & NVME_CAP_LO_REG_MQES_MASK) |
	    (1 << NVME_CAP_LO_REG_CQR_SHIFT) |
	    (60 << NVME_CAP_LO_REG_TO_SHIFT);

	sc->regs.cap_hi = 1 << NVME_CAP_HI_REG_CSS_NVM_SHIFT;

	sc->regs.vs = NVME_REV(1,4);	/* NVMe v1.4 */

	assert(sc->submit_queues != NULL);

	for (i = 0; i < sc->num_squeues + 1; i++) {
		sc->submit_queues[i].qbase = NULL;
		sc->submit_queues[i].size = 0;
		sc->submit_queues[i].cqid = 0;
		sc->submit_queues[i].tail = 0;
		sc->submit_queues[i].head = 0;

	assert(sc->compl_queues != NULL);

	for (i = 0; i < sc->num_cqueues + 1; i++) {
		sc->compl_queues[i].qbase = NULL;
		sc->compl_queues[i].size = 0;
		sc->compl_queues[i].tail = 0;
		sc->compl_queues[i].head = 0;

	sc->num_q_is_set = false;

	pci_nvme_aer_destroy(sc);
	pci_nvme_aen_destroy(sc);

	/*
	 * Clear CSTS.RDY last to prevent the host from enabling Controller
	 * before cleanup completes
	 */
pci_nvme_reset(struct pci_nvme_softc *sc)
	pthread_mutex_lock(&sc->mtx);
	pci_nvme_reset_locked(sc);
	pthread_mutex_unlock(&sc->mtx);
pci_nvme_init_controller(struct pci_nvme_softc *sc)
	uint16_t acqs, asqs;

	DPRINTF("%s", __func__);

	/*
	 * NVMe 2.0 states that "enabling a controller while this field is
	 * cleared to 0h produces undefined results" for both ACQS and
	 * ASQS. If zero, set CFS and do not become ready.
	 */
	asqs = ONE_BASED(sc->regs.aqa & NVME_AQA_REG_ASQS_MASK);
		EPRINTLN("%s: illegal ASQS value %#x (aqa=%#x)", __func__,
		    asqs - 1, sc->regs.aqa);
		sc->regs.csts |= NVME_CSTS_CFS;
	sc->submit_queues[0].size = asqs;
	sc->submit_queues[0].qbase = vm_map_gpa(sc->nsc_pi->pi_vmctx,
	    sc->regs.asq, sizeof(struct nvme_command) * asqs);
	if (sc->submit_queues[0].qbase == NULL) {
		EPRINTLN("%s: ASQ vm_map_gpa(%lx) failed", __func__,
		sc->regs.csts |= NVME_CSTS_CFS;

	DPRINTF("%s mapping Admin-SQ guest 0x%lx, host: %p",
	    __func__, sc->regs.asq, sc->submit_queues[0].qbase);

	acqs = ONE_BASED((sc->regs.aqa >> NVME_AQA_REG_ACQS_SHIFT) &
	    NVME_AQA_REG_ACQS_MASK);
		EPRINTLN("%s: illegal ACQS value %#x (aqa=%#x)", __func__,
		    acqs - 1, sc->regs.aqa);
		sc->regs.csts |= NVME_CSTS_CFS;
	sc->compl_queues[0].size = acqs;
	sc->compl_queues[0].qbase = vm_map_gpa(sc->nsc_pi->pi_vmctx,
	    sc->regs.acq, sizeof(struct nvme_completion) * acqs);
	if (sc->compl_queues[0].qbase == NULL) {
		EPRINTLN("%s: ACQ vm_map_gpa(%lx) failed", __func__,
		sc->regs.csts |= NVME_CSTS_CFS;
	sc->compl_queues[0].intr_en = NVME_CQ_INTEN;

	DPRINTF("%s mapping Admin-CQ guest 0x%lx, host: %p",
	    __func__, sc->regs.acq, sc->compl_queues[0].qbase);
nvme_prp_memcpy(struct vmctx *ctx, uint64_t prp1, uint64_t prp2, uint8_t *b,
    size_t len, enum nvme_copy_dir dir)

	if (len > (8 * 1024)) {

	/* Copy from the start of prp1 to the end of the physical page */
	bytes = PAGE_SIZE - (prp1 & PAGE_MASK);
	bytes = MIN(bytes, len);
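	/*
	 * Worked example (illustrative): for a prp1 whose page offset is
	 * 0xf00 and len = 512, bytes = 4096 - 0xf00 = 256, so the first
	 * 256 bytes come from the tail of the prp1 page and the remaining
	 * 256 from the start of the prp2 page; the 8 KiB cap above means
	 * at most these two pages are ever touched.
	 */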
	p = vm_map_gpa(ctx, prp1, bytes);

	if (dir == NVME_COPY_TO_PRP)
		memcpy(p, b, bytes);
	else
		memcpy(b, p, bytes);

	len = MIN(len, PAGE_SIZE);

	p = vm_map_gpa(ctx, prp2, len);

	if (dir == NVME_COPY_TO_PRP)
/*
 * Write a Completion Queue Entry update
 *
 * Write the completion and update the doorbell value
 */
pci_nvme_cq_update(struct pci_nvme_softc *sc,
    struct nvme_completion_queue *cq,
	struct nvme_submission_queue *sq = &sc->submit_queues[sqid];
	struct nvme_completion *cqe;

	assert(cq->qbase != NULL);

	pthread_mutex_lock(&cq->mtx);

	cqe = &cq->qbase[cq->tail];

	/* Flip the phase bit */
	status |= (cqe->status ^ NVME_STATUS_P) & NVME_STATUS_P_MASK;

	cqe->sqhd = sq->head;
	cqe->status = status;

	if (cq->tail >= cq->size) {

	pthread_mutex_unlock(&cq->mtx);
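/*
 * Phase Tag sketch (illustrative): the host zeroes CQ memory and starts
 * with an expected phase of 1. Because the XOR above inverts the entry's
 * previous Phase Tag on every write, each full pass of the emulation
 * over the ring toggles the bit the host observes, letting the host
 * detect new completions without a producer index. A hypothetical
 * host-side consumer might look like:
 *
 *	while ((cqe->status & NVME_STATUS_P) == expected_phase)
 *		consume(cqe++);	// consume() is a hypothetical handler
 */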
nvme_opc_delete_io_sq(struct pci_nvme_softc* sc, struct nvme_command* command,
    struct nvme_completion* compl)
	uint16_t qid = command->cdw10 & 0xffff;

	DPRINTF("%s DELETE_IO_SQ %u", __func__, qid);
	if (qid == 0 || qid > sc->num_squeues ||
	    (sc->submit_queues[qid].qbase == NULL)) {
		WPRINTF("%s NOT PERMITTED queue id %u / num_squeues %u",
		    __func__, qid, sc->num_squeues);
		pci_nvme_status_tc(&compl->status, NVME_SCT_COMMAND_SPECIFIC,
		    NVME_SC_INVALID_QUEUE_IDENTIFIER);

	sc->submit_queues[qid].qbase = NULL;
	sc->submit_queues[qid].cqid = 0;
	pci_nvme_status_genc(&compl->status, NVME_SC_SUCCESS);
nvme_opc_create_io_sq(struct pci_nvme_softc* sc, struct nvme_command* command,
    struct nvme_completion* compl)
	if (command->cdw11 & NVME_CMD_CDW11_PC) {
		uint16_t qid = command->cdw10 & 0xffff;
		struct nvme_submission_queue *nsq;

		if ((qid == 0) || (qid > sc->num_squeues) ||
		    (sc->submit_queues[qid].qbase != NULL)) {
			WPRINTF("%s queue index %u > num_squeues %u",
			    __func__, qid, sc->num_squeues);
			pci_nvme_status_tc(&compl->status,
			    NVME_SCT_COMMAND_SPECIFIC,
			    NVME_SC_INVALID_QUEUE_IDENTIFIER);

		nsq = &sc->submit_queues[qid];
		nsq->size = ONE_BASED((command->cdw10 >> 16) & 0xffff);
		DPRINTF("%s size=%u (max=%u)", __func__, nsq->size, sc->max_qentries);
		if ((nsq->size < 2) || (nsq->size > sc->max_qentries)) {
			/*
			 * Queues must specify at least two entries
			 * NOTE: "MAXIMUM QUEUE SIZE EXCEEDED" was renamed to
			 * "INVALID QUEUE SIZE" in the NVM Express 1.3 Spec
			 */
			pci_nvme_status_tc(&compl->status,
			    NVME_SCT_COMMAND_SPECIFIC,
			    NVME_SC_MAXIMUM_QUEUE_SIZE_EXCEEDED);

		nsq->head = nsq->tail = 0;

		nsq->cqid = (command->cdw11 >> 16) & 0xffff;
		if ((nsq->cqid == 0) || (nsq->cqid > sc->num_cqueues)) {
			pci_nvme_status_tc(&compl->status,
			    NVME_SCT_COMMAND_SPECIFIC,
			    NVME_SC_INVALID_QUEUE_IDENTIFIER);

		if (sc->compl_queues[nsq->cqid].qbase == NULL) {
			pci_nvme_status_tc(&compl->status,
			    NVME_SCT_COMMAND_SPECIFIC,
			    NVME_SC_COMPLETION_QUEUE_INVALID);

		nsq->qpriority = (command->cdw11 >> 1) & 0x03;

		nsq->qbase = vm_map_gpa(sc->nsc_pi->pi_vmctx, command->prp1,
		    sizeof(struct nvme_command) * (size_t)nsq->size);

		DPRINTF("%s sq %u size %u gaddr %p cqid %u", __func__,
		    qid, nsq->size, nsq->qbase, nsq->cqid);

		pci_nvme_status_genc(&compl->status, NVME_SC_SUCCESS);

		DPRINTF("%s completed creating IOSQ qid %u",
		/*
		 * Guest sent a non-contiguous submission queue request.
		 * This setting is unsupported by this emulation.
		 */
		WPRINTF("%s unsupported non-contig (list-based) "
		    "create i/o submission queue", __func__);

		pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD);
nvme_opc_delete_io_cq(struct pci_nvme_softc* sc, struct nvme_command* command,
    struct nvme_completion* compl)
	uint16_t qid = command->cdw10 & 0xffff;

	DPRINTF("%s DELETE_IO_CQ %u", __func__, qid);
	if (qid == 0 || qid > sc->num_cqueues ||
	    (sc->compl_queues[qid].qbase == NULL)) {
		WPRINTF("%s queue index %u / num_cqueues %u",
		    __func__, qid, sc->num_cqueues);
		pci_nvme_status_tc(&compl->status, NVME_SCT_COMMAND_SPECIFIC,
		    NVME_SC_INVALID_QUEUE_IDENTIFIER);

	/* Deleting an Active CQ is an error */
	for (sqid = 1; sqid < sc->num_squeues + 1; sqid++)
		if (sc->submit_queues[sqid].cqid == qid) {
			pci_nvme_status_tc(&compl->status,
			    NVME_SCT_COMMAND_SPECIFIC,
			    NVME_SC_INVALID_QUEUE_DELETION);

	sc->compl_queues[qid].qbase = NULL;
	pci_nvme_status_genc(&compl->status, NVME_SC_SUCCESS);
nvme_opc_create_io_cq(struct pci_nvme_softc* sc, struct nvme_command* command,
    struct nvme_completion* compl)
	struct nvme_completion_queue *ncq;
	uint16_t qid = command->cdw10 & 0xffff;

	/* Only support Physically Contiguous queues */
	if ((command->cdw11 & NVME_CMD_CDW11_PC) == 0) {
		WPRINTF("%s unsupported non-contig (list-based) "
		    "create i/o completion queue",
		    __func__);

		pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD);

	if ((qid == 0) || (qid > sc->num_cqueues) ||
	    (sc->compl_queues[qid].qbase != NULL)) {
		WPRINTF("%s queue index %u > num_cqueues %u",
		    __func__, qid, sc->num_cqueues);
		pci_nvme_status_tc(&compl->status,
		    NVME_SCT_COMMAND_SPECIFIC,
		    NVME_SC_INVALID_QUEUE_IDENTIFIER);

	ncq = &sc->compl_queues[qid];
	ncq->intr_en = (command->cdw11 & NVME_CMD_CDW11_IEN) >> 1;
	ncq->intr_vec = (command->cdw11 >> 16) & 0xffff;
	if (ncq->intr_vec > (sc->max_queues + 1)) {
		pci_nvme_status_tc(&compl->status,
		    NVME_SCT_COMMAND_SPECIFIC,
		    NVME_SC_INVALID_INTERRUPT_VECTOR);

	ncq->size = ONE_BASED((command->cdw10 >> 16) & 0xffff);
	if ((ncq->size < 2) || (ncq->size > sc->max_qentries)) {
		/*
		 * Queues must specify at least two entries
		 * NOTE: "MAXIMUM QUEUE SIZE EXCEEDED" was renamed to
		 * "INVALID QUEUE SIZE" in the NVM Express 1.3 Spec
		 */
		pci_nvme_status_tc(&compl->status,
		    NVME_SCT_COMMAND_SPECIFIC,
		    NVME_SC_MAXIMUM_QUEUE_SIZE_EXCEEDED);
	ncq->head = ncq->tail = 0;
	ncq->qbase = vm_map_gpa(sc->nsc_pi->pi_vmctx,
	    command->prp1,
	    sizeof(struct nvme_command) * (size_t)ncq->size);

	pci_nvme_status_genc(&compl->status, NVME_SC_SUCCESS);
nvme_opc_get_log_page(struct pci_nvme_softc* sc, struct nvme_command* command,
    struct nvme_completion* compl)

	pci_nvme_status_genc(&compl->status, NVME_SC_SUCCESS);

	/*
	 * Command specifies the number of dwords to return in fields NUMDU
	 * and NUMDL. This is a zero-based value.
	 */
	logpage = command->cdw10 & 0xFF;
	logsize = ((command->cdw11 << 16) | (command->cdw10 >> 16)) + 1;
	logsize *= sizeof(uint32_t);
	logoff  = ((uint64_t)(command->cdw13) << 32) | command->cdw12;
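	/*
	 * Worked example (illustrative): a host reading a full 4 KiB log
	 * page sets NUMDL = 0x3ff in CDW10[31:16] with NUMDU = 0 in
	 * CDW11[15:0], giving
	 *   logsize = (((0 << 16) | 0x3ff) + 1) * 4 = 4096 bytes.
	 */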
	DPRINTF("%s log page %u len %u", __func__, logpage, logsize);

	case NVME_LOG_ERROR:
		if (logoff >= sizeof(sc->err_log)) {
			pci_nvme_status_genc(&compl->status,
			    NVME_SC_INVALID_FIELD);

		nvme_prp_memcpy(sc->nsc_pi->pi_vmctx, command->prp1,
		    command->prp2, (uint8_t *)&sc->err_log + logoff,
		    MIN(logsize - logoff, sizeof(sc->err_log)),
	case NVME_LOG_HEALTH_INFORMATION:
		if (logoff >= sizeof(sc->health_log)) {
			pci_nvme_status_genc(&compl->status,
			    NVME_SC_INVALID_FIELD);

		pthread_mutex_lock(&sc->mtx);
		memcpy(&sc->health_log.data_units_read, &sc->read_data_units,
		    sizeof(sc->health_log.data_units_read));
		memcpy(&sc->health_log.data_units_written, &sc->write_data_units,
		    sizeof(sc->health_log.data_units_written));
		memcpy(&sc->health_log.host_read_commands, &sc->read_commands,
		    sizeof(sc->health_log.host_read_commands));
		memcpy(&sc->health_log.host_write_commands, &sc->write_commands,
		    sizeof(sc->health_log.host_write_commands));
		pthread_mutex_unlock(&sc->mtx);

		nvme_prp_memcpy(sc->nsc_pi->pi_vmctx, command->prp1,
		    command->prp2, (uint8_t *)&sc->health_log + logoff,
		    MIN(logsize - logoff, sizeof(sc->health_log)),
	case NVME_LOG_FIRMWARE_SLOT:
		if (logoff >= sizeof(sc->fw_log)) {
			pci_nvme_status_genc(&compl->status,
			    NVME_SC_INVALID_FIELD);

		nvme_prp_memcpy(sc->nsc_pi->pi_vmctx, command->prp1,
		    command->prp2, (uint8_t *)&sc->fw_log + logoff,
		    MIN(logsize - logoff, sizeof(sc->fw_log)),
	case NVME_LOG_CHANGED_NAMESPACE:
		if (logoff >= sizeof(sc->ns_log)) {
			pci_nvme_status_genc(&compl->status,
			    NVME_SC_INVALID_FIELD);

		nvme_prp_memcpy(sc->nsc_pi->pi_vmctx, command->prp1,
		    command->prp2, (uint8_t *)&sc->ns_log + logoff,
		    MIN(logsize - logoff, sizeof(sc->ns_log)),
		memset(&sc->ns_log, 0, sizeof(sc->ns_log));
		DPRINTF("%s get log page %x command not supported",
		pci_nvme_status_tc(&compl->status, NVME_SCT_COMMAND_SPECIFIC,
		    NVME_SC_INVALID_LOG_PAGE);
nvme_opc_identify(struct pci_nvme_softc* sc, struct nvme_command* command,
    struct nvme_completion* compl)

	DPRINTF("%s identify 0x%x nsid 0x%x", __func__,
	    command->cdw10 & 0xFF, command->nsid);

	pci_nvme_status_genc(&status, NVME_SC_SUCCESS);

	switch (command->cdw10 & 0xFF) {
	case 0x00: /* return Identify Namespace data structure */
		/* Global NS only valid with NS Management */
		if (command->nsid == NVME_GLOBAL_NAMESPACE_TAG) {
			pci_nvme_status_genc(&status,
			    NVME_SC_INVALID_NAMESPACE_OR_FORMAT);

		nvme_prp_memcpy(sc->nsc_pi->pi_vmctx, command->prp1,
		    command->prp2, (uint8_t *)&sc->nsdata, sizeof(sc->nsdata),
	case 0x01: /* return Identify Controller data structure */
		nvme_prp_memcpy(sc->nsc_pi->pi_vmctx, command->prp1,
		    command->prp2, (uint8_t *)&sc->ctrldata,
		    sizeof(sc->ctrldata),
	case 0x02: /* list of 1024 active NSIDs > CDW1.NSID */
		dest = vm_map_gpa(sc->nsc_pi->pi_vmctx, command->prp1,
		    sizeof(uint32_t) * 1024);
		/* All unused entries shall be zero */
		memset(dest, 0, sizeof(uint32_t) * 1024);
		((uint32_t *)dest)[0] = 1;
	case 0x03: /* list of NSID structures in CDW1.NSID, 4096 bytes */
		if (command->nsid != 1) {
			pci_nvme_status_genc(&status,
			    NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
		dest = vm_map_gpa(sc->nsc_pi->pi_vmctx, command->prp1,
		    sizeof(uint32_t) * 1024);
		/* All bytes after the descriptor shall be zero */
		memset(dest, 0, sizeof(uint32_t) * 1024);

		/* Return NIDT=1 (i.e. EUI64) descriptor */
		((uint8_t *)dest)[0] = 1;
		((uint8_t *)dest)[1] = sizeof(uint64_t);
		memcpy(((uint8_t *)dest) + 4, sc->nsdata.eui64, sizeof(uint64_t));
		/*
		 * Controller list is optional but used by UNH tests. Return
		 * a valid but empty list.
		 */
		dest = vm_map_gpa(sc->nsc_pi->pi_vmctx, command->prp1,
		    sizeof(uint16_t) * 2048);
		memset(dest, 0, sizeof(uint16_t) * 2048);
	default:
		DPRINTF("%s unsupported identify command requested 0x%x",
		    __func__, command->cdw10 & 0xFF);
		pci_nvme_status_genc(&status, NVME_SC_INVALID_FIELD);

	compl->status = status;
nvme_fid_to_name(uint8_t fid)

	case NVME_FEAT_ARBITRATION:
		name = "Arbitration";
	case NVME_FEAT_POWER_MANAGEMENT:
		name = "Power Management";
	case NVME_FEAT_LBA_RANGE_TYPE:
		name = "LBA Range Type";
	case NVME_FEAT_TEMPERATURE_THRESHOLD:
		name = "Temperature Threshold";
	case NVME_FEAT_ERROR_RECOVERY:
		name = "Error Recovery";
	case NVME_FEAT_VOLATILE_WRITE_CACHE:
		name = "Volatile Write Cache";
	case NVME_FEAT_NUMBER_OF_QUEUES:
		name = "Number of Queues";
	case NVME_FEAT_INTERRUPT_COALESCING:
		name = "Interrupt Coalescing";
	case NVME_FEAT_INTERRUPT_VECTOR_CONFIGURATION:
		name = "Interrupt Vector Configuration";
	case NVME_FEAT_WRITE_ATOMICITY:
		name = "Write Atomicity Normal";
	case NVME_FEAT_ASYNC_EVENT_CONFIGURATION:
		name = "Asynchronous Event Configuration";
	case NVME_FEAT_AUTONOMOUS_POWER_STATE_TRANSITION:
		name = "Autonomous Power State Transition";
	case NVME_FEAT_HOST_MEMORY_BUFFER:
		name = "Host Memory Buffer";
	case NVME_FEAT_TIMESTAMP:
	case NVME_FEAT_KEEP_ALIVE_TIMER:
		name = "Keep Alive Timer";
	case NVME_FEAT_HOST_CONTROLLED_THERMAL_MGMT:
		name = "Host Controlled Thermal Management";
	case NVME_FEAT_NON_OP_POWER_STATE_CONFIG:
		name = "Non-Operation Power State Config";
	case NVME_FEAT_READ_RECOVERY_LEVEL_CONFIG:
		name = "Read Recovery Level Config";
	case NVME_FEAT_PREDICTABLE_LATENCY_MODE_CONFIG:
		name = "Predictable Latency Mode Config";
	case NVME_FEAT_PREDICTABLE_LATENCY_MODE_WINDOW:
		name = "Predictable Latency Mode Window";
	case NVME_FEAT_LBA_STATUS_INFORMATION_ATTRIBUTES:
		name = "LBA Status Information Report Interval";
	case NVME_FEAT_HOST_BEHAVIOR_SUPPORT:
		name = "Host Behavior Support";
	case NVME_FEAT_SANITIZE_CONFIG:
		name = "Sanitize Config";
	case NVME_FEAT_ENDURANCE_GROUP_EVENT_CONFIGURATION:
		name = "Endurance Group Event Configuration";
	case NVME_FEAT_SOFTWARE_PROGRESS_MARKER:
		name = "Software Progress Marker";
	case NVME_FEAT_HOST_IDENTIFIER:
		name = "Host Identifier";
	case NVME_FEAT_RESERVATION_NOTIFICATION_MASK:
		name = "Reservation Notification Mask";
	case NVME_FEAT_RESERVATION_PERSISTENCE:
		name = "Reservation Persistence";
	case NVME_FEAT_NAMESPACE_WRITE_PROTECTION_CONFIG:
		name = "Namespace Write Protection Config";
nvme_feature_invalid_cb(struct pci_nvme_softc *sc __unused,
    struct nvme_feature_obj *feat __unused,
    struct nvme_command *command __unused,
    struct nvme_completion *compl)
	pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD);

nvme_feature_iv_config(struct pci_nvme_softc *sc,
    struct nvme_feature_obj *feat __unused,
    struct nvme_command *command,
    struct nvme_completion *compl)
	uint32_t cdw11 = command->cdw11;

	pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD);

	iv = cdw11 & 0xffff;
	cd = cdw11 & (1 << 16);

	if (iv > (sc->max_queues + 1)) {

	/* No Interrupt Coalescing (i.e. not Coalescing Disable) for Admin Q */
	if ((iv == 0) && !cd)

	/* Requested Interrupt Vector must be used by a CQ */
	for (i = 0; i < sc->num_cqueues + 1; i++) {
		if (sc->compl_queues[i].intr_vec == iv) {
			pci_nvme_status_genc(&compl->status, NVME_SC_SUCCESS);

#define NVME_ASYNC_EVENT_ENDURANCE_GROUP	(0x4000)
nvme_feature_async_event(struct pci_nvme_softc *sc __unused,
    struct nvme_feature_obj *feat __unused,
    struct nvme_command *command,
    struct nvme_completion *compl)
	if (command->cdw11 & NVME_ASYNC_EVENT_ENDURANCE_GROUP)
		pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD);
#define NVME_TEMP_THRESH_OVER	0
#define NVME_TEMP_THRESH_UNDER	1
nvme_feature_temperature(struct pci_nvme_softc *sc,
    struct nvme_feature_obj *feat __unused,
    struct nvme_command *command,
    struct nvme_completion *compl)
	uint16_t	tmpth;	/* Temperature Threshold */
	uint8_t		tmpsel;	/* Threshold Temperature Select */
	uint8_t		thsel;	/* Threshold Type Select */
	bool		set_crit = false;

	tmpth  = command->cdw11 & 0xffff;
	tmpsel = (command->cdw11 >> 16) & 0xf;
	thsel  = (command->cdw11 >> 20) & 0x3;

	DPRINTF("%s: tmpth=%#x tmpsel=%#x thsel=%#x", __func__, tmpth, tmpsel, thsel);

	/* Check for unsupported values */
	if (((tmpsel != 0) && (tmpsel != 0xf)) ||
	    (thsel > NVME_TEMP_THRESH_UNDER)) {
		pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD);

	if (((thsel == NVME_TEMP_THRESH_OVER) && (NVME_TEMPERATURE >= tmpth)) ||
	    ((thsel == NVME_TEMP_THRESH_UNDER) && (NVME_TEMPERATURE <= tmpth)))

	pthread_mutex_lock(&sc->mtx);
		sc->health_log.critical_warning |=
		    NVME_CRIT_WARN_ST_TEMPERATURE;
		sc->health_log.critical_warning &=
		    ~NVME_CRIT_WARN_ST_TEMPERATURE;
	pthread_mutex_unlock(&sc->mtx);

	report_crit = sc->feat[NVME_FEAT_ASYNC_EVENT_CONFIGURATION].cdw11 &
	    NVME_CRIT_WARN_ST_TEMPERATURE;

	if (set_crit && report_crit)
		pci_nvme_aen_post(sc, PCI_NVME_AE_TYPE_SMART,
		    sc->health_log.critical_warning);

	DPRINTF("%s: set_crit=%c critical_warning=%#x status=%#x", __func__, set_crit ? 'T':'F', sc->health_log.critical_warning, compl->status);
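/*
 * Worked example (illustrative): with the fixed NVME_TEMPERATURE of 296 K,
 * a Set Features with thsel = NVME_TEMP_THRESH_OVER and tmpth = 300 does
 * not trip (296 < 300), while tmpth = 290 does (296 >= 290); the critical
 * warning bit is then set and, if unmasked in the Async Event
 * Configuration, a SMART AEN is posted.
 */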
nvme_feature_num_queues(struct pci_nvme_softc *sc,
    struct nvme_feature_obj *feat __unused,
    struct nvme_command *command,
    struct nvme_completion *compl)
	uint16_t nqr;	/* Number of Queues Requested */

	if (sc->num_q_is_set) {
		WPRINTF("%s: Number of Queues already set", __func__);
		pci_nvme_status_genc(&compl->status,
		    NVME_SC_COMMAND_SEQUENCE_ERROR);

	nqr = command->cdw11 & 0xFFFF;
	if (nqr == 0xffff) {
		WPRINTF("%s: Illegal NSQR value %#x", __func__, nqr);
		pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD);

	sc->num_squeues = ONE_BASED(nqr);
	if (sc->num_squeues > sc->max_queues) {
		DPRINTF("NSQR=%u is greater than max %u", sc->num_squeues,
		sc->num_squeues = sc->max_queues;

	nqr = (command->cdw11 >> 16) & 0xFFFF;
	if (nqr == 0xffff) {
		WPRINTF("%s: Illegal NCQR value %#x", __func__, nqr);
		pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD);

	sc->num_cqueues = ONE_BASED(nqr);
	if (sc->num_cqueues > sc->max_queues) {
		DPRINTF("NCQR=%u is greater than max %u", sc->num_cqueues,
		sc->num_cqueues = sc->max_queues;

	/* Patch the command value which will be saved on callback's return */
	command->cdw11 = NVME_FEATURE_NUM_QUEUES(sc);
	compl->cdw0 = NVME_FEATURE_NUM_QUEUES(sc);

	sc->num_q_is_set = true;
nvme_opc_set_features(struct pci_nvme_softc *sc, struct nvme_command *command,
    struct nvme_completion *compl)
	struct nvme_feature_obj *feat;
	uint32_t nsid = command->nsid;
	uint8_t fid = NVMEV(NVME_FEAT_SET_FID, command->cdw10);
	bool sv = NVMEV(NVME_FEAT_SET_SV, command->cdw10);

	DPRINTF("%s: Feature ID 0x%x (%s)", __func__, fid, nvme_fid_to_name(fid));

	if (fid >= NVME_FID_MAX) {
		DPRINTF("%s invalid feature 0x%x", __func__, fid);
		pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD);

		pci_nvme_status_tc(&compl->status, NVME_SCT_COMMAND_SPECIFIC,
		    NVME_SC_FEATURE_NOT_SAVEABLE);

	feat = &sc->feat[fid];

	if (feat->namespace_specific && (nsid == NVME_GLOBAL_NAMESPACE_TAG)) {
		pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD);

	if (!feat->namespace_specific &&
	    !((nsid == 0) || (nsid == NVME_GLOBAL_NAMESPACE_TAG))) {
		pci_nvme_status_tc(&compl->status, NVME_SCT_COMMAND_SPECIFIC,
		    NVME_SC_FEATURE_NOT_NS_SPECIFIC);

	pci_nvme_status_genc(&compl->status, NVME_SC_SUCCESS);

		feat->set(sc, feat, command, compl);
		pci_nvme_status_tc(&compl->status, NVME_SCT_COMMAND_SPECIFIC,
		    NVME_SC_FEATURE_NOT_CHANGEABLE);

	DPRINTF("%s: status=%#x cdw11=%#x", __func__, compl->status, command->cdw11);
	if (compl->status == NVME_SC_SUCCESS) {
		feat->cdw11 = command->cdw11;
		if ((fid == NVME_FEAT_ASYNC_EVENT_CONFIGURATION) &&
		    (command->cdw11 != 0))
			pci_nvme_aen_notify(sc);
#define NVME_FEATURES_SEL_SUPPORTED	0x3
#define NVME_FEATURES_NS_SPECIFIC	(1 << 1)

nvme_opc_get_features(struct pci_nvme_softc* sc, struct nvme_command* command,
    struct nvme_completion* compl)
	struct nvme_feature_obj *feat;
	uint8_t fid = command->cdw10 & 0xFF;
	uint8_t sel = (command->cdw10 >> 8) & 0x7;

	DPRINTF("%s: Feature ID 0x%x (%s)", __func__, fid, nvme_fid_to_name(fid));

	if (fid >= NVME_FID_MAX) {
		DPRINTF("%s invalid feature 0x%x", __func__, fid);
		pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD);

	pci_nvme_status_genc(&compl->status, NVME_SC_SUCCESS);

	feat = &sc->feat[fid];
		feat->get(sc, feat, command, compl);

	if (compl->status == NVME_SC_SUCCESS) {
		if ((sel == NVME_FEATURES_SEL_SUPPORTED) && feat->namespace_specific)
			compl->cdw0 = NVME_FEATURES_NS_SPECIFIC;
		else
			compl->cdw0 = feat->cdw11;
nvme_opc_format_nvm(struct pci_nvme_softc* sc, struct nvme_command* command,
    struct nvme_completion* compl)
	uint8_t	ses, lbaf, pi;

	/* Only supports Secure Erase Setting - User Data Erase */
	ses = (command->cdw10 >> 9) & 0x7;
		pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD);

	/* Only supports a single LBA Format */
	lbaf = command->cdw10 & 0xf;
		pci_nvme_status_tc(&compl->status, NVME_SCT_COMMAND_SPECIFIC,
		    NVME_SC_INVALID_FORMAT);

	/* Doesn't support Protection Information */
	pi = (command->cdw10 >> 5) & 0x7;
		pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD);

	if (sc->nvstore.type == NVME_STOR_RAM) {
		if (sc->nvstore.ctx)
			free(sc->nvstore.ctx);
		sc->nvstore.ctx = calloc(1, sc->nvstore.size);
		pci_nvme_status_genc(&compl->status, NVME_SC_SUCCESS);
		struct pci_nvme_ioreq *req;

		req = pci_nvme_get_ioreq(sc);
			pci_nvme_status_genc(&compl->status,
			    NVME_SC_INTERNAL_DEVICE_ERROR);
			WPRINTF("%s: unable to allocate IO req", __func__);
		req->nvme_sq = &sc->submit_queues[0];
		req->opc = command->opc;
		req->cid = command->cid;
		req->nsid = command->nsid;

		req->io_req.br_offset = 0;
		req->io_req.br_resid = sc->nvstore.size;
		req->io_req.br_callback = pci_nvme_io_done;

		err = blockif_delete(sc->nvstore.ctx, &req->io_req);
			pci_nvme_status_genc(&compl->status,
			    NVME_SC_INTERNAL_DEVICE_ERROR);
			pci_nvme_release_ioreq(sc, req);
			compl->status = NVME_NO_STATUS;
nvme_opc_abort(struct pci_nvme_softc *sc __unused, struct nvme_command *command,
    struct nvme_completion *compl)
	DPRINTF("%s submission queue %u, command ID 0x%x", __func__,
	    command->cdw10 & 0xFFFF, (command->cdw10 >> 16) & 0xFFFF);

	/* TODO: search for the command ID and abort it */

	pci_nvme_status_genc(&compl->status, NVME_SC_SUCCESS);
nvme_opc_async_event_req(struct pci_nvme_softc* sc,
    struct nvme_command* command, struct nvme_completion* compl)
	DPRINTF("%s async event request count=%u aerl=%u cid=%#x", __func__,
	    sc->aer_count, sc->ctrldata.aerl, command->cid);

	/* Don't exceed the Async Event Request Limit (AERL). */
	if (pci_nvme_aer_limit_reached(sc)) {
		pci_nvme_status_tc(&compl->status, NVME_SCT_COMMAND_SPECIFIC,
		    NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED);

	if (pci_nvme_aer_add(sc, command->cid)) {
		pci_nvme_status_tc(&compl->status, NVME_SCT_GENERIC,
		    NVME_SC_INTERNAL_DEVICE_ERROR);

	/*
	 * Raise events when they happen based on the Set Features cmd.
	 * These events happen asynchronously, so only set a completion
	 * status here if there is an event reflective of the request to
	 * get an event.
	 */
	compl->status = NVME_NO_STATUS;
	pci_nvme_aen_notify(sc);
pci_nvme_handle_admin_cmd(struct pci_nvme_softc* sc, uint64_t value)
	struct nvme_completion compl;
	struct nvme_command *cmd;
	struct nvme_submission_queue *sq;
	struct nvme_completion_queue *cq;

	DPRINTF("%s index %u", __func__, (uint32_t)value);

	sq = &sc->submit_queues[0];
	cq = &sc->compl_queues[0];

	pthread_mutex_lock(&sq->mtx);

	DPRINTF("sqhead %u, tail %u", sqhead, sq->tail);

	while (sqhead != atomic_load_acq_short(&sq->tail)) {
		cmd = &(sq->qbase)[sqhead];

		case NVME_OPC_DELETE_IO_SQ:
			DPRINTF("%s command DELETE_IO_SQ", __func__);
			nvme_opc_delete_io_sq(sc, cmd, &compl);
		case NVME_OPC_CREATE_IO_SQ:
			DPRINTF("%s command CREATE_IO_SQ", __func__);
			nvme_opc_create_io_sq(sc, cmd, &compl);
		case NVME_OPC_DELETE_IO_CQ:
			DPRINTF("%s command DELETE_IO_CQ", __func__);
			nvme_opc_delete_io_cq(sc, cmd, &compl);
		case NVME_OPC_CREATE_IO_CQ:
			DPRINTF("%s command CREATE_IO_CQ", __func__);
			nvme_opc_create_io_cq(sc, cmd, &compl);
		case NVME_OPC_GET_LOG_PAGE:
			DPRINTF("%s command GET_LOG_PAGE", __func__);
			nvme_opc_get_log_page(sc, cmd, &compl);
		case NVME_OPC_IDENTIFY:
			DPRINTF("%s command IDENTIFY", __func__);
			nvme_opc_identify(sc, cmd, &compl);
		case NVME_OPC_ABORT:
			DPRINTF("%s command ABORT", __func__);
			nvme_opc_abort(sc, cmd, &compl);
		case NVME_OPC_SET_FEATURES:
			DPRINTF("%s command SET_FEATURES", __func__);
			nvme_opc_set_features(sc, cmd, &compl);
		case NVME_OPC_GET_FEATURES:
			DPRINTF("%s command GET_FEATURES", __func__);
			nvme_opc_get_features(sc, cmd, &compl);
		case NVME_OPC_FIRMWARE_ACTIVATE:
			DPRINTF("%s command FIRMWARE_ACTIVATE", __func__);
			pci_nvme_status_tc(&compl.status,
			    NVME_SCT_COMMAND_SPECIFIC,
			    NVME_SC_INVALID_FIRMWARE_SLOT);
		case NVME_OPC_ASYNC_EVENT_REQUEST:
			DPRINTF("%s command ASYNC_EVENT_REQ", __func__);
			nvme_opc_async_event_req(sc, cmd, &compl);
		case NVME_OPC_FORMAT_NVM:
			DPRINTF("%s command FORMAT_NVM", __func__);
			if ((sc->ctrldata.oacs &
			    (1 << NVME_CTRLR_DATA_OACS_FORMAT_SHIFT)) == 0) {
				pci_nvme_status_genc(&compl.status, NVME_SC_INVALID_OPCODE);
			nvme_opc_format_nvm(sc, cmd, &compl);
		case NVME_OPC_SECURITY_SEND:
		case NVME_OPC_SECURITY_RECEIVE:
		case NVME_OPC_SANITIZE:
		case NVME_OPC_GET_LBA_STATUS:
			DPRINTF("%s command OPC=%#x (unsupported)", __func__,
			/* Valid but unsupported opcodes */
			pci_nvme_status_genc(&compl.status, NVME_SC_INVALID_FIELD);
			DPRINTF("%s command OPC=%#X (not implemented)",
			pci_nvme_status_genc(&compl.status, NVME_SC_INVALID_OPCODE);
		sqhead = (sqhead + 1) % sq->size;

		if (NVME_COMPLETION_VALID(compl)) {
			pci_nvme_cq_update(sc, &sc->compl_queues[0],

	DPRINTF("setting sqhead %u", sqhead);

	if (cq->head != cq->tail)
		pci_generate_msix(sc->nsc_pi, 0);

	pthread_mutex_unlock(&sq->mtx);
/*
 * Update the Write and Read statistics reported in SMART data
 *
 * NVMe defines a "data unit" as thousands of 512 byte blocks, rounded up.
 * E.g. 1 data unit covers 1 - 1,000 512 byte blocks, and 3 data units cover
 * 2,001 - 3,000 512 byte blocks. Rounding up is achieved by initializing
 * the remainder to 999.
 */
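/*
 * Worked example (illustrative): a 4 KiB write adds 8 blocks to the
 * remainder; 999 + 8 = 1007 crosses 1000, so one data unit is counted
 * and the remainder becomes 7. A second 4 KiB write only advances the
 * remainder to 15, so 16 blocks total still report as 1 data unit,
 * matching the spec's round-up semantics.
 */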
pci_nvme_stats_write_read_update(struct pci_nvme_softc *sc, uint8_t opc,
    size_t bytes, uint16_t status)

	pthread_mutex_lock(&sc->mtx);
	case NVME_OPC_WRITE:
		sc->write_commands++;
		if (status != NVME_SC_SUCCESS)
		sc->write_dunits_remainder += (bytes / 512);
		while (sc->write_dunits_remainder >= 1000) {
			sc->write_data_units++;
			sc->write_dunits_remainder -= 1000;
		sc->read_commands++;
		if (status != NVME_SC_SUCCESS)
		sc->read_dunits_remainder += (bytes / 512);
		while (sc->read_dunits_remainder >= 1000) {
			sc->read_data_units++;
			sc->read_dunits_remainder -= 1000;
		DPRINTF("%s: Invalid OPC 0x%02x for stats", __func__, opc);

	pthread_mutex_unlock(&sc->mtx);
/*
 * Check if the combination of Starting LBA (slba) and number of blocks
 * exceeds the range of the underlying storage.
 *
 * Because NVMe specifies the SLBA in blocks as a uint64_t and blockif stores
 * the capacity in bytes as a uint64_t, care must be taken to avoid integer
 * overflow.
 */
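/*
 * Worked example (illustrative, 512-byte sectors, so sectsz_bits = 9):
 * slba = 1ULL << 55 has bits at or above (64 - 9) = 55, so
 * slba >> (64 - sectsz_bits) is non-zero and the request is rejected
 * before slba << sectsz_bits could wrap a uint64_t.
 */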
pci_nvme_out_of_range(struct pci_nvme_blockstore *nvstore, uint64_t slba,
	size_t	offset, bytes;

	/* Overflow check of multiplying Starting LBA by the sector size */
	if (slba >> (64 - nvstore->sectsz_bits))

	offset = slba << nvstore->sectsz_bits;
	bytes = nblocks << nvstore->sectsz_bits;

	/* Overflow check of Number of Logical Blocks */
	if ((nvstore->size <= offset) || ((nvstore->size - offset) < bytes))
pci_nvme_append_iov_req(struct pci_nvme_softc *sc __unused,
    struct pci_nvme_ioreq *req, uint64_t gpaddr, size_t size, uint64_t offset)
	bool range_is_contiguous;

	if (req->io_req.br_iovcnt == NVME_MAX_IOVEC) {

	/*
	 * Minimize the number of IOVs by concatenating contiguous address
	 * ranges. If the IOV count is zero, there is no previous range to
	 * concatenate.
	 */
	if (req->io_req.br_iovcnt == 0)
		range_is_contiguous = false;
	else
		range_is_contiguous = (req->prev_gpaddr + req->prev_size) == gpaddr;

	if (range_is_contiguous) {
		iovidx = req->io_req.br_iovcnt - 1;

		req->io_req.br_iov[iovidx].iov_base =
		    paddr_guest2host(req->sc->nsc_pi->pi_vmctx,
		    req->prev_gpaddr, size);
		if (req->io_req.br_iov[iovidx].iov_base == NULL)

		req->prev_size += size;
		req->io_req.br_resid += size;

		req->io_req.br_iov[iovidx].iov_len = req->prev_size;
	} else {
		iovidx = req->io_req.br_iovcnt;
			req->io_req.br_offset = offset;
			req->io_req.br_resid = 0;
			req->io_req.br_param = req;

		req->io_req.br_iov[iovidx].iov_base =
		    paddr_guest2host(req->sc->nsc_pi->pi_vmctx,
		if (req->io_req.br_iov[iovidx].iov_base == NULL)

		req->io_req.br_iov[iovidx].iov_len = size;

		req->prev_gpaddr = gpaddr;
		req->prev_size = size;
		req->io_req.br_resid += size;

		req->io_req.br_iovcnt++;
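	/*
	 * Worked example (illustrative): PRP entries 0x200000 and 0x201000
	 * describe adjacent guest pages, so the second call takes the
	 * range_is_contiguous branch (0x200000 + 0x1000 == 0x201000) and
	 * both pages share a single 8 KiB iovec; a later entry at 0x300000
	 * starts a new iovec.
	 */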
pci_nvme_set_completion(struct pci_nvme_softc *sc,
    struct nvme_submission_queue *sq, int sqid, uint16_t cid, uint16_t status)
	struct nvme_completion_queue *cq = &sc->compl_queues[sq->cqid];

	DPRINTF("%s sqid %d cqid %u cid %u status: 0x%x 0x%x",
	    __func__, sqid, sq->cqid, cid, NVME_STATUS_GET_SCT(status),
	    NVME_STATUS_GET_SC(status));

	pci_nvme_cq_update(sc, cq, 0, cid, sqid, status);

	if (cq->head != cq->tail) {
		if (cq->intr_en & NVME_CQ_INTEN) {
			pci_generate_msix(sc->nsc_pi, cq->intr_vec);
		} else {
			DPRINTF("%s: CQ%u interrupt disabled",
			    __func__, sq->cqid);

static void
pci_nvme_release_ioreq(struct pci_nvme_softc *sc, struct pci_nvme_ioreq *req)
{
	req->sc = NULL;
	req->nvme_sq = NULL;
	req->sqid = 0;

	pthread_mutex_lock(&sc->mtx);

	STAILQ_INSERT_TAIL(&sc->ioreqs_free, req, link);
	sc->pending_ios--;

	/* when no more IO pending, can set to ready if device reset/enabled */
	if (sc->pending_ios == 0 &&
	    NVME_CC_GET_EN(sc->regs.cc) && !(NVME_CSTS_GET_RDY(sc->regs.csts)))
		sc->regs.csts |= NVME_CSTS_RDY;

	pthread_mutex_unlock(&sc->mtx);

	sem_post(&sc->iosemlock);
}
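
/*
 * Concurrency sketch: iosemlock is initialized to the ioslots count (see
 * pci_nvme_init below), so pci_nvme_get_ioreq() blocks in sem_wait() once
 * every slot is in flight, and the sem_post() above wakes the next waiter
 * when a request is returned to the free list.
 */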

static struct pci_nvme_ioreq *
pci_nvme_get_ioreq(struct pci_nvme_softc *sc)
{
	struct pci_nvme_ioreq *req = NULL;

	sem_wait(&sc->iosemlock);
	pthread_mutex_lock(&sc->mtx);

	req = STAILQ_FIRST(&sc->ioreqs_free);
	assert(req != NULL);
	STAILQ_REMOVE_HEAD(&sc->ioreqs_free, link);

	req->sc = sc;

	sc->pending_ios++;

	pthread_mutex_unlock(&sc->mtx);

	req->io_req.br_iovcnt = 0;
	req->io_req.br_offset = 0;
	req->io_req.br_resid = 0;
	req->io_req.br_param = req;
	req->prev_gpaddr = 0;
	req->prev_size = 0;

	return (req);
}

static void
pci_nvme_io_done(struct blockif_req *br, int err)
{
	struct pci_nvme_ioreq *req = br->br_param;
	struct nvme_submission_queue *sq = req->nvme_sq;
	uint16_t code, status;

	DPRINTF("%s error %d %s", __func__, err, strerror(err));

	/* TODO return correct error */
	code = err ? NVME_SC_DATA_TRANSFER_ERROR : NVME_SC_SUCCESS;
	status = 0;
	pci_nvme_status_genc(&status, code);

	pci_nvme_set_completion(req->sc, sq, req->sqid, req->cid, status);
	pci_nvme_stats_write_read_update(req->sc, req->opc,
	    req->bytes, status);
	pci_nvme_release_ioreq(req->sc, req);
}

/*
 * Implements the Flush command. The specification states:
 *    If a volatile write cache is not present, Flush commands complete
 *    successfully and have no effect
 * in the description of the Volatile Write Cache (VWC) field of the Identify
 * Controller data. Therefore, set status to Success if the command is
 * not supported (i.e. RAM or as indicated by the blockif).
 */
static bool
nvme_opc_flush(struct pci_nvme_softc *sc __unused,
    struct nvme_command *cmd __unused,
    struct pci_nvme_blockstore *nvstore,
    struct pci_nvme_ioreq *req,
    uint16_t *status)
{
	bool pending = false;

	if (nvstore->type == NVME_STOR_RAM) {
		pci_nvme_status_genc(status, NVME_SC_SUCCESS);
	} else {
		int err;

		req->io_req.br_callback = pci_nvme_io_done;

		err = blockif_flush(nvstore->ctx, &req->io_req);
		switch (err) {
		case 0:
			pending = true;
			break;
		case EOPNOTSUPP:
			pci_nvme_status_genc(status, NVME_SC_SUCCESS);
			break;
		default:
			pci_nvme_status_genc(status, NVME_SC_INTERNAL_DEVICE_ERROR);
		}
	}

	return (pending);
}

static uint16_t
nvme_write_read_ram(struct pci_nvme_softc *sc,
    struct pci_nvme_blockstore *nvstore,
    uint64_t prp1, uint64_t prp2,
    size_t offset, uint64_t bytes,
    bool is_write)
{
	uint8_t *buf = nvstore->ctx;
	enum nvme_copy_dir dir;
	uint16_t status;

	if (is_write)
		dir = NVME_COPY_TO_PRP;
	else
		dir = NVME_COPY_FROM_PRP;

	status = 0;
	if (nvme_prp_memcpy(sc->nsc_pi->pi_vmctx, prp1, prp2,
	    buf + offset, bytes, dir))
		pci_nvme_status_genc(&status,
		    NVME_SC_DATA_TRANSFER_ERROR);
	else
		pci_nvme_status_genc(&status, NVME_SC_SUCCESS);

	return (status);
}

static uint16_t
nvme_write_read_blockif(struct pci_nvme_softc *sc,
    struct pci_nvme_blockstore *nvstore,
    struct pci_nvme_ioreq *req,
    uint64_t prp1, uint64_t prp2,
    size_t offset, uint64_t bytes,
    bool is_write)
{
	uint64_t size;
	int err;
	uint16_t status = NVME_NO_STATUS;

	size = MIN(PAGE_SIZE - (prp1 % PAGE_SIZE), bytes);
	if (pci_nvme_append_iov_req(sc, req, prp1, size, offset)) {
		err = -1;
		goto out;
	}

	offset += size;
	bytes  -= size;

	if (bytes == 0) {
		;
	} else if (bytes <= PAGE_SIZE) {
		size = bytes;
		if (pci_nvme_append_iov_req(sc, req, prp2, size, offset)) {
			err = -1;
			goto out;
		}
	} else {
		void *vmctx = sc->nsc_pi->pi_vmctx;
		uint64_t *prp_list = &prp2;
		uint64_t *last = prp_list;

		/* PRP2 is pointer to a physical region page list */
		while (bytes) {
			/* Last entry in list points to the next list */
			if ((prp_list == last) && (bytes > PAGE_SIZE)) {
				uint64_t prp = *prp_list;

				prp_list = paddr_guest2host(vmctx, prp,
				    PAGE_SIZE - (prp % PAGE_SIZE));
				if (prp_list == NULL) {
					err = -1;
					goto out;
				}
				last = prp_list + (NVME_PRP2_ITEMS - 1);
			}

			size = MIN(bytes, PAGE_SIZE);

			if (pci_nvme_append_iov_req(sc, req, *prp_list, size,
			    offset)) {
				err = -1;
				goto out;
			}

			offset += size;
			bytes  -= size;

			prp_list++;
		}
	}
	req->io_req.br_callback = pci_nvme_io_done;
	if (is_write)
		err = blockif_write(nvstore->ctx, &req->io_req);
	else
		err = blockif_read(nvstore->ctx, &req->io_req);
out:
	if (err)
		pci_nvme_status_genc(&status, NVME_SC_DATA_TRANSFER_ERROR);

	return (status);
}
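
/*
 * PRP decoding sketch for the routine above, assuming a 4KiB PAGE_SIZE:
 * a transfer within one page is described by PRP1 alone (which may start
 * mid-page, hence the initial MIN()); a transfer of up to two pages uses
 * PRP2 as a second data pointer; anything larger treats PRP2 as the address
 * of a PRP list, where the final entry of each full list page points to the
 * next list page, which is why the walk reloads prp_list when it reaches
 * "last".
 */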

static bool
nvme_opc_write_read(struct pci_nvme_softc *sc,
    struct nvme_command *cmd,
    struct pci_nvme_blockstore *nvstore,
    struct pci_nvme_ioreq *req,
    uint16_t *status)
{
	uint64_t lba, nblocks, bytes;
	size_t offset;
	bool is_write = cmd->opc == NVME_OPC_WRITE;
	bool pending = false;

	lba = ((uint64_t)cmd->cdw11 << 32) | cmd->cdw10;
	nblocks = (cmd->cdw12 & 0xFFFF) + 1;
	bytes = nblocks << nvstore->sectsz_bits;
	if (bytes > NVME_MAX_DATA_SIZE) {
		WPRINTF("%s command would exceed MDTS", __func__);
		pci_nvme_status_genc(status, NVME_SC_INVALID_FIELD);
		goto out;
	}

	if (pci_nvme_out_of_range(nvstore, lba, nblocks)) {
		WPRINTF("%s command would exceed LBA range(slba=%#lx nblocks=%#lx)",
		    __func__, lba, nblocks);
		pci_nvme_status_genc(status, NVME_SC_LBA_OUT_OF_RANGE);
		goto out;
	}

	offset = lba << nvstore->sectsz_bits;

	req->bytes = bytes;
	req->io_req.br_offset = lba;

	/* PRP bits 1:0 must be zero */
	cmd->prp1 &= ~0x3UL;
	cmd->prp2 &= ~0x3UL;

	if (nvstore->type == NVME_STOR_RAM) {
		*status = nvme_write_read_ram(sc, nvstore, cmd->prp1,
		    cmd->prp2, offset, bytes, is_write);
	} else {
		*status = nvme_write_read_blockif(sc, nvstore, req,
		    cmd->prp1, cmd->prp2, offset, bytes, is_write);

		if (*status == NVME_NO_STATUS)
			pending = true;
	}
out:
	if (!pending)
		pci_nvme_stats_write_read_update(sc, cmd->opc, bytes, *status);

	return (pending);
}
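
/*
 * Field-decoding note for the routine above: NLB (cdw12[15:0]) is
 * zero-based, so a value of 0 means one logical block, and the 64-bit SLBA
 * is split across cdw11 (upper) and cdw10 (lower). E.g. with 512-byte
 * sectors, cdw12 = 7 transfers 8 blocks, i.e. 4KiB.
 */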

static void
pci_nvme_dealloc_sm(struct blockif_req *br, int err)
{
	struct pci_nvme_ioreq *req = br->br_param;
	struct pci_nvme_softc *sc = req->sc;
	bool done = true;
	uint16_t status;

	status = 0;
	if (err) {
		pci_nvme_status_genc(&status, NVME_SC_INTERNAL_DEVICE_ERROR);
	} else if ((req->prev_gpaddr + 1) == (req->prev_size)) {
		pci_nvme_status_genc(&status, NVME_SC_SUCCESS);
	} else {
		struct iovec *iov = req->io_req.br_iov;

		req->prev_gpaddr++;
		iov += req->prev_gpaddr;

		/* The iov_* values already include the sector size */
		req->io_req.br_offset = (off_t)iov->iov_base;
		req->io_req.br_resid = iov->iov_len;
		if (blockif_delete(sc->nvstore.ctx, &req->io_req)) {
			pci_nvme_status_genc(&status,
			    NVME_SC_INTERNAL_DEVICE_ERROR);
		} else
			done = false;
	}

	if (done) {
		pci_nvme_set_completion(sc, req->nvme_sq, req->sqid, req->cid,
		    status);
		pci_nvme_release_ioreq(sc, req);
	}
}

static bool
nvme_opc_dataset_mgmt(struct pci_nvme_softc *sc,
    struct nvme_command *cmd,
    struct pci_nvme_blockstore *nvstore,
    struct pci_nvme_ioreq *req,
    uint16_t *status)
{
	struct nvme_dsm_range *range = NULL;
	uint32_t nr, r, non_zero, dr;
	int err;
	bool pending = false;

	if ((sc->ctrldata.oncs & NVME_ONCS_DSM) == 0) {
		pci_nvme_status_genc(status, NVME_SC_INVALID_OPCODE);
		goto out;
	}

	nr = cmd->cdw10 & 0xff;

	/* copy locally because a range entry could straddle PRPs */
	range = calloc(1, NVME_MAX_DSM_TRIM);
	if (range == NULL) {
		pci_nvme_status_genc(status, NVME_SC_INTERNAL_DEVICE_ERROR);
		goto out;
	}
	nvme_prp_memcpy(sc->nsc_pi->pi_vmctx, cmd->prp1, cmd->prp2,
	    (uint8_t *)range, NVME_MAX_DSM_TRIM, NVME_COPY_FROM_PRP);

	/* Check for invalid ranges and the number of non-zero lengths */
	non_zero = 0;
	for (r = 0; r <= nr; r++) {
		if (pci_nvme_out_of_range(nvstore,
		    range[r].starting_lba, range[r].length)) {
			pci_nvme_status_genc(status, NVME_SC_LBA_OUT_OF_RANGE);
			goto out;
		}
		if (range[r].length != 0)
			non_zero++;
	}

	if (cmd->cdw11 & NVME_DSM_ATTR_DEALLOCATE) {
		size_t offset, bytes;
		int sectsz_bits = sc->nvstore.sectsz_bits;

		/*
		 * DSM calls are advisory only, and compliant controllers
		 * may choose to take no actions (i.e. return Success).
		 */
		if (!nvstore->deallocate) {
			pci_nvme_status_genc(status, NVME_SC_SUCCESS);
			goto out;
		}

		/* If all ranges have a zero length, return Success */
		if (non_zero == 0) {
			pci_nvme_status_genc(status, NVME_SC_SUCCESS);
			goto out;
		}

		if (req == NULL) {
			pci_nvme_status_genc(status, NVME_SC_INTERNAL_DEVICE_ERROR);
			goto out;
		}

		offset = range[0].starting_lba << sectsz_bits;
		bytes = range[0].length << sectsz_bits;

		/*
		 * If the request is for more than a single range, store
		 * the ranges in the br_iov. Optimize for the common case
		 * of a single range.
		 *
		 * Note that NVMe Number of Ranges is a zero based value
		 */
		req->io_req.br_iovcnt = 0;
		req->io_req.br_offset = offset;
		req->io_req.br_resid = bytes;

		if (nr == 0) {
			req->io_req.br_callback = pci_nvme_io_done;
		} else {
			struct iovec *iov = req->io_req.br_iov;

			for (r = 0, dr = 0; r <= nr; r++) {
				offset = range[r].starting_lba << sectsz_bits;
				bytes = range[r].length << sectsz_bits;
				if (bytes == 0)
					continue;

				if ((nvstore->size - offset) < bytes) {
					pci_nvme_status_genc(status,
					    NVME_SC_LBA_OUT_OF_RANGE);
					goto out;
				}
				iov[dr].iov_base = (void *)offset;
				iov[dr].iov_len = bytes;
				dr++;
			}
			req->io_req.br_callback = pci_nvme_dealloc_sm;

			/*
			 * Use prev_gpaddr to track the current entry and
			 * prev_size to track the number of entries
			 */
			req->prev_gpaddr = 0;
			req->prev_size = dr;
		}

		err = blockif_delete(nvstore->ctx, &req->io_req);
		if (err)
			pci_nvme_status_genc(status, NVME_SC_INTERNAL_DEVICE_ERROR);
		else
			pending = true;
	}
out:
	free(range);
	return (pending);
}
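
/*
 * Like NLB, the DSM Number of Ranges field (cdw10[7:0]) is zero-based:
 * nr == 0 above means a single range, which is why the loops run "r <= nr"
 * and the single-range case can be issued without building an iov list. A
 * Deallocate request whose ranges all have zero length completes
 * immediately with Success.
 */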

static void
pci_nvme_handle_io_cmd(struct pci_nvme_softc* sc, uint16_t idx)
{
	struct nvme_submission_queue *sq;
	uint16_t status;
	uint16_t sqhead;

	/* handle all submissions up to sq->tail index */
	sq = &sc->submit_queues[idx];

	pthread_mutex_lock(&sq->mtx);

	sqhead = sq->head;
	DPRINTF("nvme_handle_io qid %u head %u tail %u cmdlist %p",
	    idx, sqhead, sq->tail, sq->qbase);

	while (sqhead != atomic_load_acq_short(&sq->tail)) {
		struct nvme_command *cmd;
		struct pci_nvme_ioreq *req;
		uint32_t nsid;
		bool pending;

		pending = false;
		req = NULL;
		status = 0;

		cmd = &sq->qbase[sqhead];
		sqhead = (sqhead + 1) % sq->size;

		nsid = le32toh(cmd->nsid);
		if ((nsid == 0) || (nsid > sc->ctrldata.nn)) {
			pci_nvme_status_genc(&status,
			    NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
			status |=
			    NVME_STATUS_DNR_MASK << NVME_STATUS_DNR_SHIFT;
			goto complete;
		}

		req = pci_nvme_get_ioreq(sc);
		if (req == NULL) {
			pci_nvme_status_genc(&status,
			    NVME_SC_INTERNAL_DEVICE_ERROR);
			WPRINTF("%s: unable to allocate IO req", __func__);
			goto complete;
		}
		req->nvme_sq = sq;
		req->sqid = idx;
		req->opc = cmd->opc;
		req->cid = cmd->cid;
		req->nsid = cmd->nsid;

		switch (cmd->opc) {
		case NVME_OPC_FLUSH:
			pending = nvme_opc_flush(sc, cmd, &sc->nvstore,
			    req, &status);
			break;
		case NVME_OPC_WRITE:
		case NVME_OPC_READ:
			pending = nvme_opc_write_read(sc, cmd, &sc->nvstore,
			    req, &status);
			break;
		case NVME_OPC_WRITE_ZEROES:
			/* TODO: write zeroes
			WPRINTF("%s write zeroes lba 0x%lx blocks %u",
			    __func__, lba, cmd->cdw12 & 0xFFFF); */
			pci_nvme_status_genc(&status, NVME_SC_SUCCESS);
			break;
		case NVME_OPC_DATASET_MANAGEMENT:
			pending = nvme_opc_dataset_mgmt(sc, cmd, &sc->nvstore,
			    req, &status);
			break;
		default:
			WPRINTF("%s unhandled io command 0x%x",
			    __func__, cmd->opc);
			pci_nvme_status_genc(&status, NVME_SC_INVALID_OPCODE);
		}
complete:
		if (!pending) {
			pci_nvme_set_completion(sc, sq, idx, cmd->cid, status);
			if (req != NULL)
				pci_nvme_release_ioreq(sc, req);
		}
	}

	sq->head = sqhead;

	pthread_mutex_unlock(&sq->mtx);
}

static void
pci_nvme_handle_doorbell(struct pci_nvme_softc* sc,
    uint64_t idx, int is_sq, uint64_t value)
{
	DPRINTF("nvme doorbell %lu, %s, val 0x%lx",
	    idx, is_sq ? "SQ" : "CQ", value & 0xFFFF);

	if (is_sq) {
		if (idx > sc->num_squeues) {
			WPRINTF("%s queue index %lu overflow from "
			    "guest (max %u)",
			    __func__, idx, sc->num_squeues);
			return;
		}

		atomic_store_short(&sc->submit_queues[idx].tail,
		    (uint16_t)value);

		if (idx == 0) {
			pci_nvme_handle_admin_cmd(sc, value);
		} else {
			/* submission queue; handle new entries in SQ */
			if (idx > sc->num_squeues) {
				WPRINTF("%s SQ index %lu overflow from "
				    "guest (max %u)",
				    __func__, idx, sc->num_squeues);
				return;
			}
			pci_nvme_handle_io_cmd(sc, (uint16_t)idx);
		}
	} else {
		if (idx > sc->num_cqueues) {
			WPRINTF("%s queue index %lu overflow from "
			    "guest (max %u)",
			    __func__, idx, sc->num_cqueues);
			return;
		}

		atomic_store_short(&sc->compl_queues[idx].head,
		    (uint16_t)value);
	}
}
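
/*
 * Doorbell layout assumed here and in pci_nvme_write_bar_0 below: with a
 * 4-byte doorbell stride (CAP.DSTRD = 0), each queue pair owns an 8-byte
 * region starting at NVME_DOORBELL_OFFSET, the SQ tail doorbell in bytes
 * 0-3 and the CQ head doorbell in bytes 4-7; queue pair 0 is the admin
 * queue.
 */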

static void
pci_nvme_bar0_reg_dumps(const char *func, uint64_t offset, int iswrite)
{
	const char *s = iswrite ? "WRITE" : "READ";

	switch (offset) {
	case NVME_CR_CAP_LOW:
		DPRINTF("%s %s NVME_CR_CAP_LOW", func, s);
		break;
	case NVME_CR_CAP_HI:
		DPRINTF("%s %s NVME_CR_CAP_HI", func, s);
		break;
	case NVME_CR_VS:
		DPRINTF("%s %s NVME_CR_VS", func, s);
		break;
	case NVME_CR_INTMS:
		DPRINTF("%s %s NVME_CR_INTMS", func, s);
		break;
	case NVME_CR_INTMC:
		DPRINTF("%s %s NVME_CR_INTMC", func, s);
		break;
	case NVME_CR_CC:
		DPRINTF("%s %s NVME_CR_CC", func, s);
		break;
	case NVME_CR_CSTS:
		DPRINTF("%s %s NVME_CR_CSTS", func, s);
		break;
	case NVME_CR_NSSR:
		DPRINTF("%s %s NVME_CR_NSSR", func, s);
		break;
	case NVME_CR_AQA:
		DPRINTF("%s %s NVME_CR_AQA", func, s);
		break;
	case NVME_CR_ASQ_LOW:
		DPRINTF("%s %s NVME_CR_ASQ_LOW", func, s);
		break;
	case NVME_CR_ASQ_HI:
		DPRINTF("%s %s NVME_CR_ASQ_HI", func, s);
		break;
	case NVME_CR_ACQ_LOW:
		DPRINTF("%s %s NVME_CR_ACQ_LOW", func, s);
		break;
	case NVME_CR_ACQ_HI:
		DPRINTF("%s %s NVME_CR_ACQ_HI", func, s);
		break;
	default:
		DPRINTF("unknown nvme bar-0 offset 0x%lx", offset);
	}
}

static void
pci_nvme_write_bar_0(struct pci_nvme_softc *sc, uint64_t offset, int size,
    uint64_t value)
{
	uint32_t ccreg;

	if (offset >= NVME_DOORBELL_OFFSET) {
		uint64_t belloffset = offset - NVME_DOORBELL_OFFSET;
		uint64_t idx = belloffset / 8; /* door bell size = 2*int */
		int is_sq = (belloffset % 8) < 4;

		if ((sc->regs.csts & NVME_CSTS_RDY) == 0) {
			WPRINTF("doorbell write prior to RDY (offset=%#lx)\n",
			    offset);
			return;
		}

		if (belloffset > ((sc->max_queues+1) * 8 - 4)) {
			WPRINTF("guest attempted an overflow write offset "
			    "0x%lx, val 0x%lx in %s",
			    offset, value, __func__);
			return;
		}

		if (is_sq) {
			if (sc->submit_queues[idx].qbase == NULL)
				return;
		} else if (sc->compl_queues[idx].qbase == NULL)
			return;

		pci_nvme_handle_doorbell(sc, idx, is_sq, value);
		return;
	}

	DPRINTF("nvme-write offset 0x%lx, size %d, value 0x%lx",
	    offset, size, value);

	if (size != 4) {
		WPRINTF("guest wrote invalid size %d (offset 0x%lx, "
		    "val 0x%lx) to bar0 in %s",
		    size, offset, value, __func__);
		/* TODO: shutdown device */
		return;
	}

	pci_nvme_bar0_reg_dumps(__func__, offset, 1);

	pthread_mutex_lock(&sc->mtx);

	switch (offset) {
	case NVME_CR_CAP_LOW:
	case NVME_CR_CAP_HI:
		/* readonly */
		break;
	case NVME_CR_VS:
		/* readonly */
		break;
	case NVME_CR_INTMS:
		/* MSI-X, so ignore */
		break;
	case NVME_CR_INTMC:
		/* MSI-X, so ignore */
		break;
	case NVME_CR_CC:
		ccreg = (uint32_t)value;

		DPRINTF("%s NVME_CR_CC en %x css %x shn %x iosqes %u "
		    "iocqes %u",
		    __func__,
		    NVME_CC_GET_EN(ccreg), NVME_CC_GET_CSS(ccreg),
		    NVME_CC_GET_SHN(ccreg), NVME_CC_GET_IOSQES(ccreg),
		    NVME_CC_GET_IOCQES(ccreg));

		if (NVME_CC_GET_SHN(ccreg)) {
			/* perform shutdown - flush out data to backend */
			sc->regs.csts &= ~(NVME_CSTS_REG_SHST_MASK <<
			    NVME_CSTS_REG_SHST_SHIFT);
			sc->regs.csts |= NVME_SHST_COMPLETE <<
			    NVME_CSTS_REG_SHST_SHIFT;
		}
		if (NVME_CC_GET_EN(ccreg) != NVME_CC_GET_EN(sc->regs.cc)) {
			if (NVME_CC_GET_EN(ccreg) == 0)
				/* transition 1->0 causes controller reset */
				pci_nvme_reset_locked(sc);
			else
				pci_nvme_init_controller(sc);
		}

		/* Insert the iocqes, iosqes and en bits from the write */
		sc->regs.cc &= ~NVME_CC_WRITE_MASK;
		sc->regs.cc |= ccreg & NVME_CC_WRITE_MASK;
		if (NVME_CC_GET_EN(ccreg) == 0) {
			/* Insert the ams, mps and css bit fields */
			sc->regs.cc &= ~NVME_CC_NEN_WRITE_MASK;
			sc->regs.cc |= ccreg & NVME_CC_NEN_WRITE_MASK;
			sc->regs.csts &= ~NVME_CSTS_RDY;
		} else if ((sc->pending_ios == 0) &&
		    !(sc->regs.csts & NVME_CSTS_CFS)) {
			sc->regs.csts |= NVME_CSTS_RDY;
		}
		break;
	case NVME_CR_CSTS:
		break;
	case NVME_CR_NSSR:
		/* ignore writes; don't support subsystem reset */
		break;
	case NVME_CR_AQA:
		sc->regs.aqa = (uint32_t)value;
		break;
	case NVME_CR_ASQ_LOW:
		sc->regs.asq = (sc->regs.asq & (0xFFFFFFFF00000000)) |
		    (0xFFFFF000 & value);
		break;
	case NVME_CR_ASQ_HI:
		sc->regs.asq = (sc->regs.asq & (0x00000000FFFFFFFF)) |
		    (value << 32);
		break;
	case NVME_CR_ACQ_LOW:
		sc->regs.acq = (sc->regs.acq & (0xFFFFFFFF00000000)) |
		    (0xFFFFF000 & value);
		break;
	case NVME_CR_ACQ_HI:
		sc->regs.acq = (sc->regs.acq & (0x00000000FFFFFFFF)) |
		    (value << 32);
		break;
	default:
		DPRINTF("%s unknown offset 0x%lx, value 0x%lx size %d",
		    __func__, offset, value, size);
	}
	pthread_mutex_unlock(&sc->mtx);
}
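
/*
 * Enable-flow sketch implied by the CC handling above: the guest programs
 * AQA/ASQ/ACQ, sets CC.EN, and polls for CSTS.RDY; clearing CC.EN resets
 * controller state. Note that RDY assertion is deferred while I/O is
 * pending, so a re-enable during active I/O only reports ready once the
 * outstanding blockif requests drain (see pci_nvme_release_ioreq above).
 */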

static void
pci_nvme_write(struct pci_devinst *pi, int baridx, uint64_t offset, int size,
    uint64_t value)
{
	struct pci_nvme_softc* sc = pi->pi_arg;

	if (baridx == pci_msix_table_bar(pi) ||
	    baridx == pci_msix_pba_bar(pi)) {
		DPRINTF("nvme-write baridx %d, msix: off 0x%lx, size %d, "
		    " value 0x%lx", baridx, offset, size, value);

		pci_emul_msix_twrite(pi, offset, size, value);
		return;
	}

	switch (baridx) {
	case 0:
		pci_nvme_write_bar_0(sc, offset, size, value);
		break;

	default:
		DPRINTF("%s unknown baridx %d, val 0x%lx",
		    __func__, baridx, value);
	}
}

static uint64_t pci_nvme_read_bar_0(struct pci_nvme_softc* sc,
    uint64_t offset, int size)
{
	uint64_t value;

	pci_nvme_bar0_reg_dumps(__func__, offset, 0);

	if (offset < NVME_DOORBELL_OFFSET) {
		void *p = &(sc->regs);
		pthread_mutex_lock(&sc->mtx);
		memcpy(&value, (void *)((uintptr_t)p + offset), size);
		pthread_mutex_unlock(&sc->mtx);
	} else {
		value = 0;
		WPRINTF("pci_nvme: read invalid offset %ld", offset);
	}

	switch (size) {
	case 1:
		value &= 0xFF;
		break;
	case 2:
		value &= 0xFFFF;
		break;
	case 4:
		value &= 0xFFFFFFFF;
		break;
	}

	DPRINTF(" nvme-read offset 0x%lx, size %d -> value 0x%x",
	    offset, size, (uint32_t)value);

	return (value);
}

static uint64_t
pci_nvme_read(struct pci_devinst *pi, int baridx, uint64_t offset, int size)
{
	struct pci_nvme_softc* sc = pi->pi_arg;

	if (baridx == pci_msix_table_bar(pi) ||
	    baridx == pci_msix_pba_bar(pi)) {
		DPRINTF("nvme-read bar: %d, msix: regoff 0x%lx, size %d",
		    baridx, offset, size);

		return pci_emul_msix_tread(pi, offset, size);
	}

	switch (baridx) {
	case 0:
		return pci_nvme_read_bar_0(sc, offset, size);

	default:
		DPRINTF("unknown bar %d, 0x%lx", baridx, offset);
	}

	return (0);
}

static int
pci_nvme_parse_config(struct pci_nvme_softc *sc, nvlist_t *nvl)
{
	char bident[sizeof("XXX:XXX")];
	const char *value;
	uint32_t sectsz;

	sc->max_queues = NVME_QUEUES;
	sc->max_qentries = NVME_MAX_QENTRIES;
	sc->ioslots = NVME_IOSLOTS;
	sc->num_squeues = sc->max_queues;
	sc->num_cqueues = sc->max_queues;
	sc->dataset_management = NVME_DATASET_MANAGEMENT_AUTO;
	sectsz = 0;
	snprintf(sc->ctrldata.sn, sizeof(sc->ctrldata.sn),
	    "NVME-%d-%d", sc->nsc_pi->pi_slot, sc->nsc_pi->pi_func);

	value = get_config_value_node(nvl, "maxq");
	if (value != NULL)
		sc->max_queues = atoi(value);
	value = get_config_value_node(nvl, "qsz");
	if (value != NULL) {
		sc->max_qentries = atoi(value);
		if (sc->max_qentries <= 0) {
			EPRINTLN("nvme: Invalid qsz option %d",
			    sc->max_qentries);
			return (-1);
		}
	}
	value = get_config_value_node(nvl, "ioslots");
	if (value != NULL) {
		sc->ioslots = atoi(value);
		if (sc->ioslots <= 0) {
			EPRINTLN("Invalid ioslots option %d", sc->ioslots);
			return (-1);
		}
	}
	value = get_config_value_node(nvl, "sectsz");
	if (value != NULL)
		sectsz = atoi(value);
	value = get_config_value_node(nvl, "ser");
	if (value != NULL) {
		/*
		 * This field indicates the Product Serial Number in
		 * 7-bit ASCII, unused bytes should be space characters.
		 * Ref: NVMe v1.3c.
		 */
		cpywithpad((char *)sc->ctrldata.sn,
		    sizeof(sc->ctrldata.sn), value, ' ');
	}
	value = get_config_value_node(nvl, "eui64");
	if (value != NULL)
		sc->nvstore.eui64 = htobe64(strtoull(value, NULL, 0));
	value = get_config_value_node(nvl, "dsm");
	if (value != NULL) {
		if (strcmp(value, "auto") == 0)
			sc->dataset_management = NVME_DATASET_MANAGEMENT_AUTO;
		else if (strcmp(value, "enable") == 0)
			sc->dataset_management = NVME_DATASET_MANAGEMENT_ENABLE;
		else if (strcmp(value, "disable") == 0)
			sc->dataset_management = NVME_DATASET_MANAGEMENT_DISABLE;
	}

	value = get_config_value_node(nvl, "bootindex");
	if (value != NULL) {
		if (pci_emul_add_boot_device(sc->nsc_pi, atoi(value))) {
			EPRINTLN("Invalid bootindex %d", atoi(value));
			return (-1);
		}
	}

	value = get_config_value_node(nvl, "ram");
	if (value != NULL) {
		uint64_t sz = strtoull(value, NULL, 10);

		sc->nvstore.type = NVME_STOR_RAM;
		sc->nvstore.size = sz * 1024 * 1024;
		sc->nvstore.ctx = calloc(1, sc->nvstore.size);
		sc->nvstore.sectsz = 4096;
		sc->nvstore.sectsz_bits = 12;
		if (sc->nvstore.ctx == NULL) {
			EPRINTLN("nvme: Unable to allocate RAM");
			return (-1);
		}
	} else {
		snprintf(bident, sizeof(bident), "%u:%u",
		    sc->nsc_pi->pi_slot, sc->nsc_pi->pi_func);
		sc->nvstore.ctx = blockif_open(nvl, bident);
		if (sc->nvstore.ctx == NULL) {
			EPRINTLN("nvme: Could not open backing file: %s",
			    strerror(errno));
			return (-1);
		}
		sc->nvstore.type = NVME_STOR_BLOCKIF;
		sc->nvstore.size = blockif_size(sc->nvstore.ctx);
	}

	if (sectsz == 512 || sectsz == 4096 || sectsz == 8192)
		sc->nvstore.sectsz = sectsz;
	else if (sc->nvstore.type != NVME_STOR_RAM)
		sc->nvstore.sectsz = blockif_sectsz(sc->nvstore.ctx);
	for (sc->nvstore.sectsz_bits = 9;
	    (1U << sc->nvstore.sectsz_bits) < sc->nvstore.sectsz;
	    sc->nvstore.sectsz_bits++);

	if (sc->max_queues <= 0 || sc->max_queues > NVME_QUEUES)
		sc->max_queues = NVME_QUEUES;

	return (0);
}
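
/*
 * Illustrative invocations exercising the options parsed above (device
 * paths and slot numbers are placeholders):
 *
 *   bhyve ... -s 4,nvme,/dev/zvol/tank/nvmedisk,maxq=4,qsz=512,\
 *       ioslots=16,sectsz=512,ser=FOOBAR1234,dsm=auto ...
 *
 * or, for a 512MiB RAM-backed namespace:
 *
 *   bhyve ... -s 4,nvme,ram=512 ...
 */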

static void
pci_nvme_resized(struct blockif_ctxt *bctxt __unused, void *arg,
    size_t new_size)
{
	struct pci_nvme_softc *sc;
	struct pci_nvme_blockstore *nvstore;
	struct nvme_namespace_data *nd;

	sc = arg;
	nvstore = &sc->nvstore;
	nd = &sc->nsdata;

	nvstore->size = new_size;
	pci_nvme_init_nsdata_size(nvstore, nd);

	/* Add changed NSID to list */
	sc->ns_log.ns[0] = 1;
	sc->ns_log.ns[1] = 0;

	pci_nvme_aen_post(sc, PCI_NVME_AE_TYPE_NOTICE,
	    PCI_NVME_AEI_NOTICE_NS_ATTR_CHANGED);
}

static int
pci_nvme_init(struct pci_devinst *pi, nvlist_t *nvl)
{
	struct pci_nvme_softc *sc;
	uint32_t pci_membar_sz;
	int error;

	error = 0;

	sc = calloc(1, sizeof(struct pci_nvme_softc));
	pi->pi_arg = sc;
	sc->nsc_pi = pi;

	error = pci_nvme_parse_config(sc, nvl);
	if (error < 0)
		goto done;
	else
		error = 0;

	STAILQ_INIT(&sc->ioreqs_free);
	sc->ioreqs = calloc(sc->ioslots, sizeof(struct pci_nvme_ioreq));
	for (uint32_t i = 0; i < sc->ioslots; i++) {
		STAILQ_INSERT_TAIL(&sc->ioreqs_free, &sc->ioreqs[i], link);
	}

	pci_set_cfgdata16(pi, PCIR_DEVICE, 0x0A0A);
	pci_set_cfgdata16(pi, PCIR_VENDOR, 0xFB5D);
	pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_STORAGE);
	pci_set_cfgdata8(pi, PCIR_SUBCLASS, PCIS_STORAGE_NVM);
	pci_set_cfgdata8(pi, PCIR_PROGIF,
	    PCIP_STORAGE_NVM_ENTERPRISE_NVMHCI_1_0);

	/*
	 * Allocate size of NVMe registers + doorbell space for all queues.
	 *
	 * The specification requires a minimum memory I/O window size of 16K.
	 * The Windows driver will refuse to start a device with a smaller
	 * size.
	 */
	pci_membar_sz = sizeof(struct nvme_registers) +
	    2 * sizeof(uint32_t) * (sc->max_queues + 1);
	pci_membar_sz = MAX(pci_membar_sz, NVME_MMIO_SPACE_MIN);

	DPRINTF("nvme membar size: %u", pci_membar_sz);

	error = pci_emul_alloc_bar(pi, 0, PCIBAR_MEM64, pci_membar_sz);
	if (error) {
		WPRINTF("%s pci alloc mem bar failed", __func__);
		goto done;
	}

	error = pci_emul_add_msixcap(pi, sc->max_queues + 1, NVME_MSIX_BAR);
	if (error) {
		WPRINTF("%s pci add msixcap failed", __func__);
		goto done;
	}

	error = pci_emul_add_pciecap(pi, PCIEM_TYPE_ROOT_INT_EP);
	if (error) {
		WPRINTF("%s pci add Express capability failed", __func__);
		goto done;
	}

	pthread_mutex_init(&sc->mtx, NULL);
	sem_init(&sc->iosemlock, 0, sc->ioslots);
	blockif_register_resize_callback(sc->nvstore.ctx, pci_nvme_resized, sc);

	pci_nvme_init_queues(sc, sc->max_queues, sc->max_queues);
	/*
	 * Controller data depends on Namespace data so initialize Namespace
	 * data first.
	 */
	pci_nvme_init_nsdata(sc, &sc->nsdata, 1, &sc->nvstore);
	pci_nvme_init_ctrldata(sc);
	pci_nvme_init_logpages(sc);
	pci_nvme_init_features(sc);

	pci_nvme_aer_init(sc);
	pci_nvme_aen_init(sc);

	pci_nvme_reset(sc);

done:
	return (error);
}

static int
pci_nvme_legacy_config(nvlist_t *nvl, const char *opts)
{
	char *cp, *ram;

	if (opts == NULL)
		return (0);

	if (strncmp(opts, "ram=", 4) == 0) {
		cp = strchr(opts, ',');
		if (cp == NULL) {
			set_config_value_node(nvl, "ram", opts + 4);
			return (0);
		}
		ram = strndup(opts + 4, cp - opts - 4);
		set_config_value_node(nvl, "ram", ram);
		free(ram);
		return (pci_parse_legacy_config(nvl, cp + 1));
	} else
		return (blockif_legacy_config(nvl, opts));
}

static const struct pci_devemu pci_de_nvme = {
	.pe_emu =	"nvme",
	.pe_init =	pci_nvme_init,
	.pe_legacy_config = pci_nvme_legacy_config,
	.pe_barwrite =	pci_nvme_write,
	.pe_barread =	pci_nvme_read
};
PCI_EMUL_SET(pci_de_nvme);