/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2017 Shunsuke Mie
 * Copyright (c) 2018 Leon Dang
 * Copyright (c) 2020 Chuck Tuffli
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * bhyve PCIe-NVMe device emulation.
 *
 * options:
 *  -s <n>,nvme,devpath,maxq=#,qsz=#,ioslots=#,sectsz=#,ser=A-Z,eui64=#,dsm=<opt>
 *
 *  accepted devpath:
 *    /dev/blockdev
 *    /path/to/image
 *    ram=size_in_MiB
 *
 *  maxq    = max number of queues
 *  qsz     = max elements in each queue
 *  ioslots = max number of concurrent I/O requests
 *  sectsz  = sector size (defaults to blockif sector size)
 *  ser     = serial number (20 characters max)
 *  eui64   = IEEE Extended Unique Identifier (8 byte value)
 *  dsm     = Dataset Management support. Option is one of auto, enable, disable
 *
 * TODO:
 *    - create async event for smart and log
 *    - intr coalesce
 */
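/*
 * Example invocation (illustrative only; the device path, VM name, and
 * option values below are placeholders, not defaults defined in this file):
 *
 *   bhyve -s 4,nvme,/dev/zvol/pool/vm-disk,maxq=4,qsz=512,ioslots=16,\
 *       sectsz=512,ser=NVME0001,dsm=auto ... vmname
 */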
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/errno.h>
#include <sys/types.h>
#include <sys/crc16.h>
#include <sys/endian.h>
#include <net/ieee_oui.h>

#include <assert.h>
#include <pthread.h>
#include <pthread_np.h>
#include <semaphore.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include <machine/atomic.h>
#include <machine/vmm.h>
#include <vmmapi.h>

#include <dev/nvme/nvme.h>

#include "bhyverun.h"
#include "block_if.h"
#include "config.h"
#include "debug.h"
#include "pci_emul.h"
static int nvme_debug = 0;
#define	DPRINTF(fmt, args...) if (nvme_debug) PRINTLN(fmt, ##args)
#define	WPRINTF(fmt, args...) PRINTLN(fmt, ##args)
/* defaults; can be overridden */
#define	NVME_MSIX_BAR		4

#define	NVME_IOSLOTS		8

/* The NVMe spec defines bits 13:4 in BAR0 as reserved */
#define	NVME_MMIO_SPACE_MIN	(1 << 14)

#define	NVME_QUEUES		16
#define	NVME_MAX_QENTRIES	2048
/* Memory Page size Minimum reported in CAP register */
#define	NVME_MPSMIN		0
/* MPSMIN converted to bytes */
#define	NVME_MPSMIN_BYTES	(1 << (12 + NVME_MPSMIN))

#define	NVME_PRP2_ITEMS		(PAGE_SIZE/sizeof(uint64_t))
#define	NVME_MDTS		9
/* Note the + 1 allows for the initial descriptor to not be page aligned */
#define	NVME_MAX_IOVEC		((1 << NVME_MDTS) + 1)
#define	NVME_MAX_DATA_SIZE	((1 << NVME_MDTS) * NVME_MPSMIN_BYTES)
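/*
 * Worked example of the two macros above, assuming NVME_MDTS is 9 and
 * NVME_MPSMIN is 0 (4 KiB minimum memory page size):
 *   NVME_MAX_IOVEC     = (1 << 9) + 1    = 513 descriptors
 *   NVME_MAX_DATA_SIZE = (1 << 9) * 4096 = 2 MiB per transfer
 */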
/* This is a synthetic status code to indicate there is no status */
#define	NVME_NO_STATUS		0xffff
#define	NVME_COMPLETION_VALID(c)	((c).status != NVME_NO_STATUS)

/* Reported temperature in Kelvin (i.e. room temperature) */
#define	NVME_TEMPERATURE	296

/* Convert a zero-based value into a one-based value */
#define	ONE_BASED(zero)		((zero) + 1)
/* Convert a one-based value into a zero-based value */
#define	ZERO_BASED(one)		((one)  - 1)

/* Encode number of SQ's and CQ's for Set/Get Features */
#define	NVME_FEATURE_NUM_QUEUES(sc) \
	((ZERO_BASED((sc)->num_squeues) & 0xffff) | \
	 (ZERO_BASED((sc)->num_cqueues) & 0xffff) << 16)
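/*
 * For example, with num_squeues = 8 and num_cqueues = 8 (both one-based
 * counts), NVME_FEATURE_NUM_QUEUES() yields (7 & 0xffff) | ((7 & 0xffff)
 * << 16) == 0x00070007, i.e. the zero-based NSQA/NCQA pair returned in
 * CDW0 of a Get/Set Features (Number of Queues) completion.
 */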
#define	NVME_DOORBELL_OFFSET	offsetof(struct nvme_registers, doorbell)

enum nvme_controller_register_offsets {
	NVME_CR_CAP_LOW = 0x00,
	NVME_CR_CAP_HI  = 0x04,
	NVME_CR_VS      = 0x08,
	NVME_CR_INTMS   = 0x0c,
	NVME_CR_INTMC   = 0x10,
	NVME_CR_CC      = 0x14,
	NVME_CR_CSTS    = 0x1c,
	NVME_CR_NSSR    = 0x20,
	NVME_CR_AQA     = 0x24,
	NVME_CR_ASQ_LOW = 0x28,
	NVME_CR_ASQ_HI  = 0x2c,
	NVME_CR_ACQ_LOW = 0x30,
	NVME_CR_ACQ_HI  = 0x34,
};

enum nvme_cmd_cdw11 {
	NVME_CMD_CDW11_PC  = 0x0001,
	NVME_CMD_CDW11_IEN = 0x0002,
	NVME_CMD_CDW11_IV  = 0xFFFF0000,
};

enum nvme_copy_dir {
	NVME_COPY_TO_PRP,
	NVME_COPY_FROM_PRP,
};
#define	NVME_CQ_INTEN	0x01
#define	NVME_CQ_INTCOAL	0x02

struct nvme_completion_queue {
	struct nvme_completion *qbase;
	pthread_mutex_t	mtx;
	uint32_t	size;
	uint16_t	tail;	/* nvme progress */
	uint16_t	head;	/* guest progress */
	uint16_t	intr_vec;
	uint32_t	intr_en;
};

struct nvme_submission_queue {
	struct nvme_command *qbase;
	pthread_mutex_t	mtx;
	uint32_t	size;
	uint16_t	head;	/* nvme progress */
	uint16_t	tail;	/* guest progress */
	uint16_t	cqid;	/* completion queue id */
	int		qpriority;
};
enum nvme_storage_type {
	NVME_STOR_BLOCKIF = 0,
	NVME_STOR_RAM = 1,
};

struct pci_nvme_blockstore {
	enum nvme_storage_type type;
	void		*ctx;
	uint64_t	size;
	uint32_t	sectsz;
	uint32_t	sectsz_bits;
	uint64_t	eui64;
	uint32_t	deallocate:1;
};
/*
 * Calculate the number of additional page descriptors for guest IO requests
 * based on the advertised Max Data Transfer (MDTS) and given the number of
 * default iovec's in a struct blockif_req.
 */
#define	MDTS_PAD_SIZE \
	( NVME_MAX_IOVEC > BLOCKIF_IOV_MAX ? \
	  NVME_MAX_IOVEC - BLOCKIF_IOV_MAX : \
	  0 )
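/*
 * E.g. with NVME_MAX_IOVEC at 513 and a hypothetical BLOCKIF_IOV_MAX of 128
 * (the real value comes from block_if.h), MDTS_PAD_SIZE evaluates to 385,
 * and the iovpadding[] member below supplies those extra descriptors
 * immediately after io_req's built-in iovec array.
 */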
struct pci_nvme_ioreq {
	struct pci_nvme_softc *sc;
	STAILQ_ENTRY(pci_nvme_ioreq) link;
	struct nvme_submission_queue *nvme_sq;
	uint16_t	sqid;

	/* command information */
	uint16_t	opc;
	uint16_t	cid;
	uint32_t	nsid;

	uint64_t	prev_gpaddr;
	size_t		prev_size;
	size_t		bytes;

	struct blockif_req io_req;

	struct iovec iovpadding[MDTS_PAD_SIZE];
};
enum nvme_dsm_type {
	/* Dataset Management bit in ONCS reflects backing storage capability */
	NVME_DATASET_MANAGEMENT_AUTO,
	/* Unconditionally set Dataset Management bit in ONCS */
	NVME_DATASET_MANAGEMENT_ENABLE,
	/* Unconditionally clear Dataset Management bit in ONCS */
	NVME_DATASET_MANAGEMENT_DISABLE,
};
struct pci_nvme_softc;
struct nvme_feature_obj;

typedef void (*nvme_feature_cb)(struct pci_nvme_softc *,
    struct nvme_feature_obj *,
    struct nvme_command *,
    struct nvme_completion *);

struct nvme_feature_obj {
	uint32_t	cdw11;
	nvme_feature_cb	set;
	nvme_feature_cb	get;
	bool namespace_specific;
};

#define	NVME_FID_MAX	(NVME_FEAT_ENDURANCE_GROUP_EVENT_CONFIGURATION + 1)
typedef enum {
	PCI_NVME_AE_TYPE_ERROR = 0,
	PCI_NVME_AE_TYPE_SMART,
	PCI_NVME_AE_TYPE_NOTICE,
	PCI_NVME_AE_TYPE_IO_CMD = 6,
	PCI_NVME_AE_TYPE_VENDOR = 7,
	PCI_NVME_AE_TYPE_MAX		/* Must be last */
} pci_nvme_async_type;
/* Asynchronous Event Requests */
struct pci_nvme_aer {
	STAILQ_ENTRY(pci_nvme_aer) link;
	uint16_t	cid;	/* Command ID of the submitted AER */
};
/** Asynchronous Event Information - Notice */
typedef enum {
	PCI_NVME_AEI_NOTICE_NS_ATTR_CHANGED = 0,
	PCI_NVME_AEI_NOTICE_FW_ACTIVATION,
	PCI_NVME_AEI_NOTICE_TELEMETRY_CHANGE,
	PCI_NVME_AEI_NOTICE_ANA_CHANGE,
	PCI_NVME_AEI_NOTICE_PREDICT_LATENCY_CHANGE,
	PCI_NVME_AEI_NOTICE_LBA_STATUS_ALERT,
	PCI_NVME_AEI_NOTICE_ENDURANCE_GROUP_CHANGE,
	PCI_NVME_AEI_NOTICE_MAX,
} pci_nvme_async_event_info_notice;

#define	PCI_NVME_AEI_NOTICE_SHIFT	8
#define	PCI_NVME_AEI_NOTICE_MASK(event)	(1 << ((event) + PCI_NVME_AEI_NOTICE_SHIFT))
/* Asynchronous Event Notifications */
struct pci_nvme_aen {
	pci_nvme_async_type atype;
	uint32_t	event_data;
	bool		posted;
};

/*
 * By default, enable all Asynchronous Event Notifications:
 *     SMART / Health Critical Warnings
 *     Namespace Attribute Notices
 */
#define	PCI_NVME_AEN_DEFAULT_MASK	0x11f
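/*
 * Decoding the default mask: bits 0-4 (0x1f) enable the SMART / Health
 * Critical Warnings, and bit 8 (PCI_NVME_AEI_NOTICE_NS_ATTR_CHANGED shifted
 * left by PCI_NVME_AEI_NOTICE_SHIFT) enables the Namespace Attribute Changed
 * notice: 0x100 | 0x01f == 0x11f.
 */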
typedef enum {
	NVME_CNTRLTYPE_IO = 1,
	NVME_CNTRLTYPE_DISCOVERY = 2,
	NVME_CNTRLTYPE_ADMIN = 3,
} pci_nvme_cntrl_type;
struct pci_nvme_softc {
	struct pci_devinst *nsc_pi;

	pthread_mutex_t	mtx;

	struct nvme_registers regs;

	struct nvme_namespace_data  nsdata;
	struct nvme_controller_data ctrldata;
	struct nvme_error_information_entry err_log;
	struct nvme_health_information_page health_log;
	struct nvme_firmware_page fw_log;
	struct nvme_ns_list ns_log;

	struct pci_nvme_blockstore nvstore;

	uint16_t	max_qentries;	/* max entries per queue */
	uint32_t	max_queues;	/* max number of IO SQ's or CQ's */
	uint32_t	num_cqueues;
	uint32_t	num_squeues;
	bool		num_q_is_set;	/* Has host set Number of Queues */

	struct pci_nvme_ioreq *ioreqs;
	STAILQ_HEAD(, pci_nvme_ioreq) ioreqs_free; /* free list of ioreqs */
	uint32_t	pending_ios;
	uint32_t	ioslots;
	sem_t		iosemlock;

	/*
	 * Memory mapped Submission and Completion queues
	 * Each array includes both Admin and IO queues
	 */
	struct nvme_completion_queue *compl_queues;
	struct nvme_submission_queue *submit_queues;

	struct nvme_feature_obj feat[NVME_FID_MAX];

	enum nvme_dsm_type dataset_management;

	/* Accounting for SMART data */
	__uint128_t	read_data_units;
	__uint128_t	write_data_units;
	__uint128_t	read_commands;
	__uint128_t	write_commands;
	uint32_t	read_dunits_remainder;
	uint32_t	write_dunits_remainder;

	STAILQ_HEAD(, pci_nvme_aer) aer_list;
	pthread_mutex_t	aer_mtx;
	uint32_t	aer_count;

	struct pci_nvme_aen aen[PCI_NVME_AE_TYPE_MAX];
	pthread_t	aen_tid;
	pthread_mutex_t	aen_mtx;
	pthread_cond_t	aen_cond;
};
static void pci_nvme_cq_update(struct pci_nvme_softc *sc,
    struct nvme_completion_queue *cq,
    uint32_t cdw0,
    uint16_t cid,
    uint16_t sqid,
    uint16_t status);
static struct pci_nvme_ioreq *pci_nvme_get_ioreq(struct pci_nvme_softc *);
static void pci_nvme_release_ioreq(struct pci_nvme_softc *, struct pci_nvme_ioreq *);
static void pci_nvme_io_done(struct blockif_req *, int);
/* Controller Configuration utils */
#define	NVME_CC_GET_EN(cc) \
	((cc) >> NVME_CC_REG_EN_SHIFT & NVME_CC_REG_EN_MASK)
#define	NVME_CC_GET_CSS(cc) \
	((cc) >> NVME_CC_REG_CSS_SHIFT & NVME_CC_REG_CSS_MASK)
#define	NVME_CC_GET_SHN(cc) \
	((cc) >> NVME_CC_REG_SHN_SHIFT & NVME_CC_REG_SHN_MASK)
#define	NVME_CC_GET_IOSQES(cc) \
	((cc) >> NVME_CC_REG_IOSQES_SHIFT & NVME_CC_REG_IOSQES_MASK)
#define	NVME_CC_GET_IOCQES(cc) \
	((cc) >> NVME_CC_REG_IOCQES_SHIFT & NVME_CC_REG_IOCQES_MASK)

#define	NVME_CC_WRITE_MASK \
	((NVME_CC_REG_EN_MASK << NVME_CC_REG_EN_SHIFT) | \
	 (NVME_CC_REG_IOSQES_MASK << NVME_CC_REG_IOSQES_SHIFT) | \
	 (NVME_CC_REG_IOCQES_MASK << NVME_CC_REG_IOCQES_SHIFT))

#define	NVME_CC_NEN_WRITE_MASK \
	((NVME_CC_REG_CSS_MASK << NVME_CC_REG_CSS_SHIFT) | \
	 (NVME_CC_REG_MPS_MASK << NVME_CC_REG_MPS_SHIFT) | \
	 (NVME_CC_REG_AMS_MASK << NVME_CC_REG_AMS_SHIFT))

/* Controller Status utils */
#define	NVME_CSTS_GET_RDY(sts) \
	((sts) >> NVME_CSTS_REG_RDY_SHIFT & NVME_CSTS_REG_RDY_MASK)

#define	NVME_CSTS_RDY	(1 << NVME_CSTS_REG_RDY_SHIFT)
#define	NVME_CSTS_CFS	(1 << NVME_CSTS_REG_CFS_SHIFT)

/* Completion Queue status word utils */
#define	NVME_STATUS_P	(1 << NVME_STATUS_P_SHIFT)
#define	NVME_STATUS_MASK \
	((NVME_STATUS_SCT_MASK << NVME_STATUS_SCT_SHIFT) |\
	 (NVME_STATUS_SC_MASK << NVME_STATUS_SC_SHIFT))

#define	NVME_ONCS_DSM	(NVME_CTRLR_DATA_ONCS_DSM_MASK << \
	NVME_CTRLR_DATA_ONCS_DSM_SHIFT)
static void nvme_feature_invalid_cb(struct pci_nvme_softc *,
    struct nvme_feature_obj *,
    struct nvme_command *,
    struct nvme_completion *);
static void nvme_feature_temperature(struct pci_nvme_softc *,
    struct nvme_feature_obj *,
    struct nvme_command *,
    struct nvme_completion *);
static void nvme_feature_num_queues(struct pci_nvme_softc *,
    struct nvme_feature_obj *,
    struct nvme_command *,
    struct nvme_completion *);
static void nvme_feature_iv_config(struct pci_nvme_softc *,
    struct nvme_feature_obj *,
    struct nvme_command *,
    struct nvme_completion *);
static void nvme_feature_async_event(struct pci_nvme_softc *,
    struct nvme_feature_obj *,
    struct nvme_command *,
    struct nvme_completion *);

static void *aen_thr(void *arg);
static void
cpywithpad(char *dst, size_t dst_size, const char *src, char pad)
{
	size_t len;

	len = strnlen(src, dst_size);
	memset(dst, pad, dst_size);
	memcpy(dst, src, len);
}

static void
pci_nvme_status_tc(uint16_t *status, uint16_t type, uint16_t code)
{

	*status &= ~NVME_STATUS_MASK;
	*status |= (type & NVME_STATUS_SCT_MASK) << NVME_STATUS_SCT_SHIFT |
	    (code & NVME_STATUS_SC_MASK) << NVME_STATUS_SC_SHIFT;
}

static void
pci_nvme_status_genc(uint16_t *status, uint16_t code)
{

	pci_nvme_status_tc(status, NVME_SCT_GENERIC, code);
}
/*
 * Initialize the requested number of IO Submission and Completion Queues.
 * Admin queues are allocated implicitly.
 */
static void
pci_nvme_init_queues(struct pci_nvme_softc *sc, uint32_t nsq, uint32_t ncq)
{
	uint32_t i;

	/*
	 * Allocate and initialize the Submission Queues
	 */
	if (nsq > NVME_QUEUES) {
		WPRINTF("%s: clamping number of SQ from %u to %u",
		    __func__, nsq, NVME_QUEUES);
		nsq = NVME_QUEUES;
	}

	sc->num_squeues = nsq;

	sc->submit_queues = calloc(sc->num_squeues + 1,
	    sizeof(struct nvme_submission_queue));
	if (sc->submit_queues == NULL) {
		WPRINTF("%s: SQ allocation failed", __func__);
		sc->num_squeues = 0;
	} else {
		struct nvme_submission_queue *sq = sc->submit_queues;

		for (i = 0; i < sc->num_squeues + 1; i++)
			pthread_mutex_init(&sq[i].mtx, NULL);
	}

	/*
	 * Allocate and initialize the Completion Queues
	 */
	if (ncq > NVME_QUEUES) {
		WPRINTF("%s: clamping number of CQ from %u to %u",
		    __func__, ncq, NVME_QUEUES);
		ncq = NVME_QUEUES;
	}

	sc->num_cqueues = ncq;

	sc->compl_queues = calloc(sc->num_cqueues + 1,
	    sizeof(struct nvme_completion_queue));
	if (sc->compl_queues == NULL) {
		WPRINTF("%s: CQ allocation failed", __func__);
		sc->num_cqueues = 0;
	} else {
		struct nvme_completion_queue *cq = sc->compl_queues;

		for (i = 0; i < sc->num_cqueues + 1; i++)
			pthread_mutex_init(&cq[i].mtx, NULL);
	}
}
static void
pci_nvme_init_ctrldata(struct pci_nvme_softc *sc)
{
	struct nvme_controller_data *cd = &sc->ctrldata;

	cpywithpad((char *)cd->mn, sizeof(cd->mn), "bhyve-NVMe", ' ');
	cpywithpad((char *)cd->fr, sizeof(cd->fr), "1.0", ' ');

	/* Num of submission commands that we can handle at a time (2^rab) */
	cd->rab = 4;

	cd->mdts = NVME_MDTS;	/* max data transfer size (2^mdts * CAP.MPSMIN) */

	cd->ver = NVME_REV(1,4);

	cd->cntrltype = NVME_CNTRLTYPE_IO;
	cd->oacs = 1 << NVME_CTRLR_DATA_OACS_FORMAT_SHIFT;
	cd->oaes = NVMEB(NVME_CTRLR_DATA_OAES_NS_ATTR);

	/* Advertise 1, Read-only firmware slot */
	cd->frmw = NVMEB(NVME_CTRLR_DATA_FRMW_SLOT1_RO) |
	    (1 << NVME_CTRLR_DATA_FRMW_NUM_SLOTS_SHIFT);
	cd->lpa = 0;	/* TODO: support some simple things like SMART */
	cd->elpe = 0;	/* max error log page entries */

	/*
	 * Report a single power state (zero-based value)
	 * power_state[] values are left as zero to indicate "Not reported"
	 */
	cd->npss = 0;

	/* Warning Composite Temperature Threshold */
	cd->wctemp = 0x0157;
	cd->cctemp = 0x0157;

	/* SANICAP must not be 0 for Revision 1.4 and later NVMe Controllers */
	cd->sanicap = (NVME_CTRLR_DATA_SANICAP_NODMMAS_NO <<
	    NVME_CTRLR_DATA_SANICAP_NODMMAS_SHIFT);

	cd->sqes = (6 << NVME_CTRLR_DATA_SQES_MAX_SHIFT) |
	    (6 << NVME_CTRLR_DATA_SQES_MIN_SHIFT);
	cd->cqes = (4 << NVME_CTRLR_DATA_CQES_MAX_SHIFT) |
	    (4 << NVME_CTRLR_DATA_CQES_MIN_SHIFT);
	cd->nn = 1;	/* number of namespaces */

	cd->oncs = 0;
	switch (sc->dataset_management) {
	case NVME_DATASET_MANAGEMENT_AUTO:
		if (sc->nvstore.deallocate)
			cd->oncs |= NVME_ONCS_DSM;
		break;
	case NVME_DATASET_MANAGEMENT_ENABLE:
		cd->oncs |= NVME_ONCS_DSM;
		break;
	default:
		break;
	}

	cd->fna = NVME_CTRLR_DATA_FNA_FORMAT_ALL_MASK <<
	    NVME_CTRLR_DATA_FNA_FORMAT_ALL_SHIFT;

	cd->vwc = NVME_CTRLR_DATA_VWC_ALL_NO << NVME_CTRLR_DATA_VWC_ALL_SHIFT;
}
static void
pci_nvme_init_nsdata_size(struct pci_nvme_blockstore *nvstore,
    struct nvme_namespace_data *nd)
{

	/* Get capacity and block size information from backing store */
	nd->nsze = nvstore->size / nvstore->sectsz;
	nd->ncap = nd->nsze;
	nd->nuse = nd->nsze;
}

static void
pci_nvme_init_nsdata(struct pci_nvme_softc *sc,
    struct nvme_namespace_data *nd, uint32_t nsid,
    struct pci_nvme_blockstore *nvstore)
{

	pci_nvme_init_nsdata_size(nvstore, nd);

	if (nvstore->type == NVME_STOR_BLOCKIF)
		nvstore->deallocate = blockif_candelete(nvstore->ctx);

	nd->nlbaf = 0; /* NLBAF is a 0's based value (i.e. 1 LBA Format) */

	/* Create an EUI-64 if user did not provide one */
	if (nvstore->eui64 == 0) {
		char *data = NULL;
		uint64_t eui64 = nvstore->eui64;

		asprintf(&data, "%s%u%u%u", get_config_value("name"),
		    sc->nsc_pi->pi_bus, sc->nsc_pi->pi_slot,
		    sc->nsc_pi->pi_func);

		if (data != NULL) {
			eui64 = OUI_FREEBSD_NVME_LOW | crc16(0, data, strlen(data));
			free(data);
		}
		nvstore->eui64 = (eui64 << 16) | (nsid & 0xffff);
	}
	be64enc(nd->eui64, nvstore->eui64);

	/* LBA data-sz = 2^lbads */
	nd->lbaf[0] = nvstore->sectsz_bits << NVME_NS_DATA_LBAF_LBADS_SHIFT;
}
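/*
 * The generated EUI-64 above is ((OUI_FREEBSD_NVME_LOW | crc16(...)) << 16)
 * | nsid: a FreeBSD OUI-derived prefix (from net/ieee_oui.h) made unique by
 * a CRC of the VM name and PCI bus/slot/function, with the namespace ID in
 * the low 16 bits.
 */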
static void
pci_nvme_init_logpages(struct pci_nvme_softc *sc)
{
	__uint128_t power_cycles = 1;

	memset(&sc->err_log, 0, sizeof(sc->err_log));
	memset(&sc->health_log, 0, sizeof(sc->health_log));
	memset(&sc->fw_log, 0, sizeof(sc->fw_log));
	memset(&sc->ns_log, 0, sizeof(sc->ns_log));

	/* Set read/write remainder to round up according to spec */
	sc->read_dunits_remainder = 999;
	sc->write_dunits_remainder = 999;

	/* Set nominal Health values checked by implementations */
	sc->health_log.temperature = NVME_TEMPERATURE;
	sc->health_log.available_spare = 100;
	sc->health_log.available_spare_threshold = 10;

	/* Set Active Firmware Info to slot 1 */
	sc->fw_log.afi = (1 << NVME_FIRMWARE_PAGE_AFI_SLOT_SHIFT);
	memcpy(&sc->fw_log.revision[0], sc->ctrldata.fr,
	    sizeof(sc->fw_log.revision[0]));

	memcpy(&sc->health_log.power_cycles, &power_cycles,
	    sizeof(sc->health_log.power_cycles));
}
static void
pci_nvme_init_features(struct pci_nvme_softc *sc)
{
	enum nvme_feature fid;

	for (fid = 0; fid < NVME_FID_MAX; fid++) {
		switch (fid) {
		case NVME_FEAT_ARBITRATION:
		case NVME_FEAT_POWER_MANAGEMENT:
		case NVME_FEAT_INTERRUPT_COALESCING: //XXX
		case NVME_FEAT_WRITE_ATOMICITY:
			/* Mandatory but no special handling required */
		//XXX hang - case NVME_FEAT_PREDICTABLE_LATENCY_MODE_CONFIG:
		//XXX hang - case NVME_FEAT_HOST_BEHAVIOR_SUPPORT:
		//		  this returns a data buffer
			break;
		case NVME_FEAT_TEMPERATURE_THRESHOLD:
			sc->feat[fid].set = nvme_feature_temperature;
			break;
		case NVME_FEAT_ERROR_RECOVERY:
			sc->feat[fid].namespace_specific = true;
			break;
		case NVME_FEAT_NUMBER_OF_QUEUES:
			sc->feat[fid].set = nvme_feature_num_queues;
			break;
		case NVME_FEAT_INTERRUPT_VECTOR_CONFIGURATION:
			sc->feat[fid].set = nvme_feature_iv_config;
			break;
		case NVME_FEAT_ASYNC_EVENT_CONFIGURATION:
			sc->feat[fid].set = nvme_feature_async_event;
			/* Enable all AENs by default */
			sc->feat[fid].cdw11 = PCI_NVME_AEN_DEFAULT_MASK;
			break;
		default:
			sc->feat[fid].set = nvme_feature_invalid_cb;
			sc->feat[fid].get = nvme_feature_invalid_cb;
		}
	}
}
static void
pci_nvme_aer_reset(struct pci_nvme_softc *sc)
{

	STAILQ_INIT(&sc->aer_list);
	sc->aer_count = 0;
}

static void
pci_nvme_aer_init(struct pci_nvme_softc *sc)
{

	pthread_mutex_init(&sc->aer_mtx, NULL);
	pci_nvme_aer_reset(sc);
}

static void
pci_nvme_aer_destroy(struct pci_nvme_softc *sc)
{
	struct pci_nvme_aer *aer = NULL;

	pthread_mutex_lock(&sc->aer_mtx);
	while (!STAILQ_EMPTY(&sc->aer_list)) {
		aer = STAILQ_FIRST(&sc->aer_list);
		STAILQ_REMOVE_HEAD(&sc->aer_list, link);
		free(aer);
	}
	pthread_mutex_unlock(&sc->aer_mtx);

	pci_nvme_aer_reset(sc);
}
static bool
pci_nvme_aer_available(struct pci_nvme_softc *sc)
{

	return (sc->aer_count != 0);
}

static bool
pci_nvme_aer_limit_reached(struct pci_nvme_softc *sc)
{
	struct nvme_controller_data *cd = &sc->ctrldata;

	/* AERL is a zero-based value while aer_count is one-based */
	return (sc->aer_count == (cd->aerl + 1U));
}
/*
 * Add an Async Event Request
 *
 * Stores an AER to be returned later if the Controller needs to notify the
 * host of an event.
 * Note that while the NVMe spec doesn't require Controllers to return AER's
 * in order, this implementation does preserve the order.
 */
static int
pci_nvme_aer_add(struct pci_nvme_softc *sc, uint16_t cid)
{
	struct pci_nvme_aer *aer = NULL;

	aer = calloc(1, sizeof(struct pci_nvme_aer));
	if (aer == NULL)
		return (-1);

	/* Save the Command ID for use in the completion message */
	aer->cid = cid;

	pthread_mutex_lock(&sc->aer_mtx);
	sc->aer_count++;
	STAILQ_INSERT_TAIL(&sc->aer_list, aer, link);
	pthread_mutex_unlock(&sc->aer_mtx);

	return (0);
}
/*
 * Get an Async Event Request structure
 *
 * Returns a pointer to an AER previously submitted by the host or NULL if
 * no AER's exist. Caller is responsible for freeing the returned struct.
 */
static struct pci_nvme_aer *
pci_nvme_aer_get(struct pci_nvme_softc *sc)
{
	struct pci_nvme_aer *aer = NULL;

	pthread_mutex_lock(&sc->aer_mtx);
	aer = STAILQ_FIRST(&sc->aer_list);
	if (aer != NULL) {
		sc->aer_count--;
		STAILQ_REMOVE_HEAD(&sc->aer_list, link);
	}
	pthread_mutex_unlock(&sc->aer_mtx);

	return (aer);
}
static void
pci_nvme_aen_reset(struct pci_nvme_softc *sc)
{
	uint32_t atype;

	memset(sc->aen, 0, PCI_NVME_AE_TYPE_MAX * sizeof(struct pci_nvme_aen));

	for (atype = 0; atype < PCI_NVME_AE_TYPE_MAX; atype++) {
		sc->aen[atype].atype = atype;
	}
}

static void
pci_nvme_aen_init(struct pci_nvme_softc *sc)
{
	char nstr[80];

	pci_nvme_aen_reset(sc);

	pthread_mutex_init(&sc->aen_mtx, NULL);
	pthread_create(&sc->aen_tid, NULL, aen_thr, sc);
	snprintf(nstr, sizeof(nstr), "nvme-aen-%d:%d", sc->nsc_pi->pi_slot,
	    sc->nsc_pi->pi_func);
	pthread_set_name_np(sc->aen_tid, nstr);
}
static void
pci_nvme_aen_destroy(struct pci_nvme_softc *sc)
{

	pci_nvme_aen_reset(sc);
}

/* Notify the AEN thread of pending work */
static void
pci_nvme_aen_notify(struct pci_nvme_softc *sc)
{

	pthread_cond_signal(&sc->aen_cond);
}
/*
 * Post an Asynchronous Event Notification
 */
static void
pci_nvme_aen_post(struct pci_nvme_softc *sc, pci_nvme_async_type atype,
    uint32_t event_data)
{
	struct pci_nvme_aen *aen;

	if (atype >= PCI_NVME_AE_TYPE_MAX) {
		return;
	}

	pthread_mutex_lock(&sc->aen_mtx);
	aen = &sc->aen[atype];

	/* Has the controller already posted an event of this type? */
	if (aen->posted) {
		pthread_mutex_unlock(&sc->aen_mtx);
		return;
	}

	aen->event_data = event_data;
	aen->posted = true;
	pthread_mutex_unlock(&sc->aen_mtx);

	pci_nvme_aen_notify(sc);
}
static void
pci_nvme_aen_process(struct pci_nvme_softc *sc)
{
	struct pci_nvme_aer *aer;
	struct pci_nvme_aen *aen;
	pci_nvme_async_type atype;
	uint32_t mask;
	uint16_t status;
	uint8_t lid;

	assert(pthread_mutex_isowned_np(&sc->aen_mtx));
	for (atype = 0; atype < PCI_NVME_AE_TYPE_MAX; atype++) {
		aen = &sc->aen[atype];
		/* Previous iterations may have depleted the available AER's */
		if (!pci_nvme_aer_available(sc)) {
			DPRINTF("%s: no AER", __func__);
			break;
		}

		if (!aen->posted) {
			DPRINTF("%s: no AEN posted for atype=%#x", __func__, atype);
			continue;
		}

		status = NVME_SC_SUCCESS;

		/* Is the event masked? */
		mask =
		    sc->feat[NVME_FEAT_ASYNC_EVENT_CONFIGURATION].cdw11;

		DPRINTF("%s: atype=%#x mask=%#x event_data=%#x", __func__, atype, mask, aen->event_data);
		switch (atype) {
		case PCI_NVME_AE_TYPE_ERROR:
			lid = NVME_LOG_ERROR;
			break;
		case PCI_NVME_AE_TYPE_SMART:
			if ((mask & aen->event_data) == 0)
				continue;
			lid = NVME_LOG_HEALTH_INFORMATION;
			break;
		case PCI_NVME_AE_TYPE_NOTICE:
			if (aen->event_data >= PCI_NVME_AEI_NOTICE_MAX) {
				EPRINTLN("%s unknown AEN notice type %u",
				    __func__, aen->event_data);
				status = NVME_SC_INTERNAL_DEVICE_ERROR;
				lid = 0;
				break;
			}
			if ((PCI_NVME_AEI_NOTICE_MASK(aen->event_data) & mask) == 0)
				continue;
			switch (aen->event_data) {
			case PCI_NVME_AEI_NOTICE_NS_ATTR_CHANGED:
				lid = NVME_LOG_CHANGED_NAMESPACE;
				break;
			case PCI_NVME_AEI_NOTICE_FW_ACTIVATION:
				lid = NVME_LOG_FIRMWARE_SLOT;
				break;
			case PCI_NVME_AEI_NOTICE_TELEMETRY_CHANGE:
				lid = NVME_LOG_TELEMETRY_CONTROLLER_INITIATED;
				break;
			case PCI_NVME_AEI_NOTICE_ANA_CHANGE:
				lid = NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS;
				break;
			case PCI_NVME_AEI_NOTICE_PREDICT_LATENCY_CHANGE:
				lid = NVME_LOG_PREDICTABLE_LATENCY_EVENT_AGGREGATE;
				break;
			case PCI_NVME_AEI_NOTICE_LBA_STATUS_ALERT:
				lid = NVME_LOG_LBA_STATUS_INFORMATION;
				break;
			case PCI_NVME_AEI_NOTICE_ENDURANCE_GROUP_CHANGE:
				lid = NVME_LOG_ENDURANCE_GROUP_EVENT_AGGREGATE;
				break;
			default:
				lid = 0;
			}
			break;
		default:
			EPRINTLN("%s unknown AEN type %u", __func__, atype);
			status = NVME_SC_INTERNAL_DEVICE_ERROR;
			lid = 0;
			break;
		}

		aer = pci_nvme_aer_get(sc);
		assert(aer != NULL);

		DPRINTF("%s: CID=%#x CDW0=%#x", __func__, aer->cid, (lid << 16) | (aen->event_data << 8) | atype);
		pci_nvme_cq_update(sc, &sc->compl_queues[0],
		    (lid << 16) | (aen->event_data << 8) | atype, /* cdw0 */
		    aer->cid,
		    0,		/* SQID */
		    status);

		aen->event_data = 0;
		aen->posted = false;
		free(aer);

		pci_generate_msix(sc->nsc_pi, 0);
	}
}
static void *
aen_thr(void *arg)
{
	struct pci_nvme_softc *sc;

	sc = arg;
	pthread_mutex_lock(&sc->aen_mtx);
	for (;;) {
		pci_nvme_aen_process(sc);
		pthread_cond_wait(&sc->aen_cond, &sc->aen_mtx);
	}
	pthread_mutex_unlock(&sc->aen_mtx);

	pthread_exit(NULL);
}
static void
pci_nvme_reset_locked(struct pci_nvme_softc *sc)
{
	uint32_t i;

	DPRINTF("%s", __func__);

	sc->regs.cap_lo = (ZERO_BASED(sc->max_qentries) & NVME_CAP_LO_REG_MQES_MASK) |
	    (1 << NVME_CAP_LO_REG_CQR_SHIFT) |
	    (60 << NVME_CAP_LO_REG_TO_SHIFT);

	sc->regs.cap_hi = 1 << NVME_CAP_HI_REG_CSS_NVM_SHIFT;

	sc->regs.vs = NVME_REV(1,4);	/* NVMe v1.4 */

	sc->regs.cc = 0;

	assert(sc->submit_queues != NULL);

	for (i = 0; i < sc->num_squeues + 1; i++) {
		sc->submit_queues[i].qbase = NULL;
		sc->submit_queues[i].size = 0;
		sc->submit_queues[i].cqid = 0;
		sc->submit_queues[i].tail = 0;
		sc->submit_queues[i].head = 0;
	}

	assert(sc->compl_queues != NULL);

	for (i = 0; i < sc->num_cqueues + 1; i++) {
		sc->compl_queues[i].qbase = NULL;
		sc->compl_queues[i].size = 0;
		sc->compl_queues[i].tail = 0;
		sc->compl_queues[i].head = 0;
	}

	sc->num_q_is_set = false;

	pci_nvme_aer_destroy(sc);
	pci_nvme_aen_destroy(sc);

	/*
	 * Clear CSTS.RDY last to prevent the host from enabling Controller
	 * before cleanup completes
	 */
	sc->regs.csts = 0;
}
static void
pci_nvme_reset(struct pci_nvme_softc *sc)
{

	pthread_mutex_lock(&sc->mtx);
	pci_nvme_reset_locked(sc);
	pthread_mutex_unlock(&sc->mtx);
}
static int
pci_nvme_init_controller(struct pci_nvme_softc *sc)
{
	uint16_t acqs, asqs;

	DPRINTF("%s", __func__);

	/*
	 * NVMe 2.0 states that "enabling a controller while this field is
	 * cleared to 0h produces undefined results" for both ACQS and
	 * ASQS. If zero, set CFS and do not become ready.
	 */
	asqs = ONE_BASED(sc->regs.aqa & NVME_AQA_REG_ASQS_MASK);
	if (asqs < 2) {
		EPRINTLN("%s: illegal ASQS value %#x (aqa=%#x)", __func__,
		    asqs - 1, sc->regs.aqa);
		sc->regs.csts |= NVME_CSTS_CFS;
		return (-1);
	}
	sc->submit_queues[0].size = asqs;
	sc->submit_queues[0].qbase = vm_map_gpa(sc->nsc_pi->pi_vmctx,
	    sc->regs.asq, sizeof(struct nvme_command) * asqs);
	if (sc->submit_queues[0].qbase == NULL) {
		EPRINTLN("%s: ASQ vm_map_gpa(%lx) failed", __func__,
		    sc->regs.asq);
		sc->regs.csts |= NVME_CSTS_CFS;
		return (-1);
	}

	DPRINTF("%s mapping Admin-SQ guest 0x%lx, host: %p",
	    __func__, sc->regs.asq, sc->submit_queues[0].qbase);

	acqs = ONE_BASED((sc->regs.aqa >> NVME_AQA_REG_ACQS_SHIFT) &
	    NVME_AQA_REG_ACQS_MASK);
	if (acqs < 2) {
		EPRINTLN("%s: illegal ACQS value %#x (aqa=%#x)", __func__,
		    acqs - 1, sc->regs.aqa);
		sc->regs.csts |= NVME_CSTS_CFS;
		return (-1);
	}
	sc->compl_queues[0].size = acqs;
	sc->compl_queues[0].qbase = vm_map_gpa(sc->nsc_pi->pi_vmctx,
	    sc->regs.acq, sizeof(struct nvme_completion) * acqs);
	if (sc->compl_queues[0].qbase == NULL) {
		EPRINTLN("%s: ACQ vm_map_gpa(%lx) failed", __func__,
		    sc->regs.acq);
		sc->regs.csts |= NVME_CSTS_CFS;
		return (-1);
	}
	sc->compl_queues[0].intr_en = NVME_CQ_INTEN;

	DPRINTF("%s mapping Admin-CQ guest 0x%lx, host: %p",
	    __func__, sc->regs.acq, sc->compl_queues[0].qbase);

	return (0);
}
static int
nvme_prp_memcpy(struct vmctx *ctx, uint64_t prp1, uint64_t prp2, uint8_t *b,
    size_t len, enum nvme_copy_dir dir)
{
	uint8_t *p;
	size_t bytes;

	if (len > (8 * 1024)) {
		return (-1);
	}

	/* Copy from the start of prp1 to the end of the physical page */
	bytes = PAGE_SIZE - (prp1 & PAGE_MASK);
	bytes = MIN(bytes, len);

	p = vm_map_gpa(ctx, prp1, bytes);
	if (p == NULL) {
		return (-1);
	}

	if (dir == NVME_COPY_TO_PRP)
		memcpy(p, b, bytes);
	else
		memcpy(b, p, bytes);

	b += bytes;
	len -= bytes;
	if (len == 0)
		return (0);

	len = MIN(len, PAGE_SIZE);

	p = vm_map_gpa(ctx, prp2, len);
	if (p == NULL) {
		return (-1);
	}

	if (dir == NVME_COPY_TO_PRP)
		memcpy(p, b, len);
	else
		memcpy(b, p, len);

	return (0);
}
/*
 * Write a Completion Queue Entry update
 *
 * Write the completion and update the doorbell value
 */
static void
pci_nvme_cq_update(struct pci_nvme_softc *sc,
    struct nvme_completion_queue *cq,
    uint32_t cdw0,
    uint16_t cid,
    uint16_t sqid,
    uint16_t status)
{
	struct nvme_submission_queue *sq = &sc->submit_queues[sqid];
	struct nvme_completion *cqe;

	assert(cq->qbase != NULL);

	pthread_mutex_lock(&cq->mtx);

	cqe = &cq->qbase[cq->tail];

	/* Flip the phase bit */
	status |= (cqe->status ^ NVME_STATUS_P) & NVME_STATUS_P_MASK;

	cqe->cdw0 = cdw0;
	cqe->sqhd = sq->head;
	cqe->sqid = sqid;
	cqe->cid = cid;
	cqe->status = status;

	cq->tail++;
	if (cq->tail >= cq->size) {
		cq->tail = 0;
	}

	pthread_mutex_unlock(&cq->mtx);
}
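/*
 * A note on the phase bit flip above: the host treats a CQ entry as new only
 * when its Phase Tag differs from the value it last consumed at that slot,
 * so XOR'ing the previous entry's P bit guarantees each pass through the
 * ring toggles the tag. For example, entries written on the first pass carry
 * P=1 (queue memory starts zeroed) and on the second pass P=0.
 */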
static void
nvme_opc_delete_io_sq(struct pci_nvme_softc *sc, struct nvme_command *command,
    struct nvme_completion *compl)
{
	uint16_t qid = command->cdw10 & 0xffff;

	DPRINTF("%s DELETE_IO_SQ %u", __func__, qid);
	if (qid == 0 || qid > sc->num_squeues ||
	    (sc->submit_queues[qid].qbase == NULL)) {
		WPRINTF("%s NOT PERMITTED queue id %u / num_squeues %u",
		    __func__, qid, sc->num_squeues);
		pci_nvme_status_tc(&compl->status, NVME_SCT_COMMAND_SPECIFIC,
		    NVME_SC_INVALID_QUEUE_IDENTIFIER);
		return;
	}

	sc->submit_queues[qid].qbase = NULL;
	sc->submit_queues[qid].cqid = 0;
	pci_nvme_status_genc(&compl->status, NVME_SC_SUCCESS);
}
static void
nvme_opc_create_io_sq(struct pci_nvme_softc *sc, struct nvme_command *command,
    struct nvme_completion *compl)
{
	if (command->cdw11 & NVME_CMD_CDW11_PC) {
		uint16_t qid = command->cdw10 & 0xffff;
		struct nvme_submission_queue *nsq;

		if ((qid == 0) || (qid > sc->num_squeues) ||
		    (sc->submit_queues[qid].qbase != NULL)) {
			WPRINTF("%s queue index %u > num_squeues %u",
			    __func__, qid, sc->num_squeues);
			pci_nvme_status_tc(&compl->status,
			    NVME_SCT_COMMAND_SPECIFIC,
			    NVME_SC_INVALID_QUEUE_IDENTIFIER);
			return;
		}

		nsq = &sc->submit_queues[qid];
		nsq->size = ONE_BASED((command->cdw10 >> 16) & 0xffff);
		DPRINTF("%s size=%u (max=%u)", __func__, nsq->size, sc->max_qentries);
		if ((nsq->size < 2) || (nsq->size > sc->max_qentries)) {
			/*
			 * Queues must specify at least two entries
			 * NOTE: "MAXIMUM QUEUE SIZE EXCEEDED" was renamed to
			 * "INVALID QUEUE SIZE" in the NVM Express 1.3 Spec
			 */
			pci_nvme_status_tc(&compl->status,
			    NVME_SCT_COMMAND_SPECIFIC,
			    NVME_SC_MAXIMUM_QUEUE_SIZE_EXCEEDED);
			return;
		}
		nsq->head = nsq->tail = 0;

		nsq->cqid = (command->cdw11 >> 16) & 0xffff;
		if ((nsq->cqid == 0) || (nsq->cqid > sc->num_cqueues)) {
			pci_nvme_status_tc(&compl->status,
			    NVME_SCT_COMMAND_SPECIFIC,
			    NVME_SC_INVALID_QUEUE_IDENTIFIER);
			return;
		}

		if (sc->compl_queues[nsq->cqid].qbase == NULL) {
			pci_nvme_status_tc(&compl->status,
			    NVME_SCT_COMMAND_SPECIFIC,
			    NVME_SC_COMPLETION_QUEUE_INVALID);
			return;
		}

		nsq->qpriority = (command->cdw11 >> 1) & 0x03;

		nsq->qbase = vm_map_gpa(sc->nsc_pi->pi_vmctx, command->prp1,
		    sizeof(struct nvme_command) * (size_t)nsq->size);

		DPRINTF("%s sq %u size %u gaddr %p cqid %u", __func__,
		    qid, nsq->size, nsq->qbase, nsq->cqid);

		pci_nvme_status_genc(&compl->status, NVME_SC_SUCCESS);

		DPRINTF("%s completed creating IOSQ qid %u",
		    __func__, qid);
	} else {
		/*
		 * Guest sent non-cont submission queue request.
		 * This setting is unsupported by this emulation.
		 */
		WPRINTF("%s unsupported non-contig (list-based) "
		    "create i/o submission queue", __func__);

		pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD);
	}
}
static void
nvme_opc_delete_io_cq(struct pci_nvme_softc *sc, struct nvme_command *command,
    struct nvme_completion *compl)
{
	uint16_t qid = command->cdw10 & 0xffff;
	uint16_t sqid;

	DPRINTF("%s DELETE_IO_CQ %u", __func__, qid);
	if (qid == 0 || qid > sc->num_cqueues ||
	    (sc->compl_queues[qid].qbase == NULL)) {
		WPRINTF("%s queue index %u / num_cqueues %u",
		    __func__, qid, sc->num_cqueues);
		pci_nvme_status_tc(&compl->status, NVME_SCT_COMMAND_SPECIFIC,
		    NVME_SC_INVALID_QUEUE_IDENTIFIER);
		return;
	}

	/* Deleting an Active CQ is an error */
	for (sqid = 1; sqid < sc->num_squeues + 1; sqid++)
		if (sc->submit_queues[sqid].cqid == qid) {
			pci_nvme_status_tc(&compl->status,
			    NVME_SCT_COMMAND_SPECIFIC,
			    NVME_SC_INVALID_QUEUE_DELETION);
			return;
		}

	sc->compl_queues[qid].qbase = NULL;
	pci_nvme_status_genc(&compl->status, NVME_SC_SUCCESS);
}
static void
nvme_opc_create_io_cq(struct pci_nvme_softc *sc, struct nvme_command *command,
    struct nvme_completion *compl)
{
	struct nvme_completion_queue *ncq;
	uint16_t qid = command->cdw10 & 0xffff;

	/* Only support Physically Contiguous queues */
	if ((command->cdw11 & NVME_CMD_CDW11_PC) == 0) {
		WPRINTF("%s unsupported non-contig (list-based) "
		    "create i/o completion queue",
		    __func__);

		pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD);
		return;
	}

	if ((qid == 0) || (qid > sc->num_cqueues) ||
	    (sc->compl_queues[qid].qbase != NULL)) {
		WPRINTF("%s queue index %u > num_cqueues %u",
		    __func__, qid, sc->num_cqueues);
		pci_nvme_status_tc(&compl->status,
		    NVME_SCT_COMMAND_SPECIFIC,
		    NVME_SC_INVALID_QUEUE_IDENTIFIER);
		return;
	}

	ncq = &sc->compl_queues[qid];
	ncq->intr_en = (command->cdw11 & NVME_CMD_CDW11_IEN) >> 1;
	ncq->intr_vec = (command->cdw11 >> 16) & 0xffff;
	if (ncq->intr_vec > (sc->max_queues + 1)) {
		pci_nvme_status_tc(&compl->status,
		    NVME_SCT_COMMAND_SPECIFIC,
		    NVME_SC_INVALID_INTERRUPT_VECTOR);
		return;
	}

	ncq->size = ONE_BASED((command->cdw10 >> 16) & 0xffff);
	if ((ncq->size < 2) || (ncq->size > sc->max_qentries)) {
		/*
		 * Queues must specify at least two entries
		 * NOTE: "MAXIMUM QUEUE SIZE EXCEEDED" was renamed to
		 * "INVALID QUEUE SIZE" in the NVM Express 1.3 Spec
		 */
		pci_nvme_status_tc(&compl->status,
		    NVME_SCT_COMMAND_SPECIFIC,
		    NVME_SC_MAXIMUM_QUEUE_SIZE_EXCEEDED);
		return;
	}
	ncq->head = ncq->tail = 0;
	ncq->qbase = vm_map_gpa(sc->nsc_pi->pi_vmctx,
	    command->prp1,
	    sizeof(struct nvme_command) * (size_t)ncq->size);

	pci_nvme_status_genc(&compl->status, NVME_SC_SUCCESS);
}
static void
nvme_opc_get_log_page(struct pci_nvme_softc *sc, struct nvme_command *command,
    struct nvme_completion *compl)
{
	uint64_t logoff;
	uint32_t logsize;
	uint8_t logpage;

	pci_nvme_status_genc(&compl->status, NVME_SC_SUCCESS);

	/*
	 * Command specifies the number of dwords to return in fields NUMDU
	 * and NUMDL. This is a zero-based value.
	 */
	logpage = command->cdw10 & 0xFF;
	logsize = ((command->cdw11 << 16) | (command->cdw10 >> 16)) + 1;
	logsize *= sizeof(uint32_t);
	logoff = ((uint64_t)(command->cdw13) << 32) | command->cdw12;

	DPRINTF("%s log page %u len %u", __func__, logpage, logsize);

	switch (logpage) {
	case NVME_LOG_ERROR:
		if (logoff >= sizeof(sc->err_log)) {
			pci_nvme_status_genc(&compl->status,
			    NVME_SC_INVALID_FIELD);
			break;
		}

		nvme_prp_memcpy(sc->nsc_pi->pi_vmctx, command->prp1,
		    command->prp2, (uint8_t *)&sc->err_log + logoff,
		    MIN(logsize - logoff, sizeof(sc->err_log)),
		    NVME_COPY_TO_PRP);
		break;
	case NVME_LOG_HEALTH_INFORMATION:
		if (logoff >= sizeof(sc->health_log)) {
			pci_nvme_status_genc(&compl->status,
			    NVME_SC_INVALID_FIELD);
			break;
		}

		pthread_mutex_lock(&sc->mtx);
		memcpy(&sc->health_log.data_units_read, &sc->read_data_units,
		    sizeof(sc->health_log.data_units_read));
		memcpy(&sc->health_log.data_units_written, &sc->write_data_units,
		    sizeof(sc->health_log.data_units_written));
		memcpy(&sc->health_log.host_read_commands, &sc->read_commands,
		    sizeof(sc->health_log.host_read_commands));
		memcpy(&sc->health_log.host_write_commands, &sc->write_commands,
		    sizeof(sc->health_log.host_write_commands));
		pthread_mutex_unlock(&sc->mtx);

		nvme_prp_memcpy(sc->nsc_pi->pi_vmctx, command->prp1,
		    command->prp2, (uint8_t *)&sc->health_log + logoff,
		    MIN(logsize - logoff, sizeof(sc->health_log)),
		    NVME_COPY_TO_PRP);
		break;
	case NVME_LOG_FIRMWARE_SLOT:
		if (logoff >= sizeof(sc->fw_log)) {
			pci_nvme_status_genc(&compl->status,
			    NVME_SC_INVALID_FIELD);
			break;
		}

		nvme_prp_memcpy(sc->nsc_pi->pi_vmctx, command->prp1,
		    command->prp2, (uint8_t *)&sc->fw_log + logoff,
		    MIN(logsize - logoff, sizeof(sc->fw_log)),
		    NVME_COPY_TO_PRP);
		break;
	case NVME_LOG_CHANGED_NAMESPACE:
		if (logoff >= sizeof(sc->ns_log)) {
			pci_nvme_status_genc(&compl->status,
			    NVME_SC_INVALID_FIELD);
			break;
		}

		nvme_prp_memcpy(sc->nsc_pi->pi_vmctx, command->prp1,
		    command->prp2, (uint8_t *)&sc->ns_log + logoff,
		    MIN(logsize - logoff, sizeof(sc->ns_log)),
		    NVME_COPY_TO_PRP);
		memset(&sc->ns_log, 0, sizeof(sc->ns_log));
		break;
	default:
		DPRINTF("%s get log page %x command not supported",
		    __func__, logpage);

		pci_nvme_status_tc(&compl->status, NVME_SCT_COMMAND_SPECIFIC,
		    NVME_SC_INVALID_LOG_PAGE);
	}
}
static void
nvme_opc_identify(struct pci_nvme_softc *sc, struct nvme_command *command,
    struct nvme_completion *compl)
{
	void *dest;
	uint16_t status;

	DPRINTF("%s identify 0x%x nsid 0x%x", __func__,
	    command->cdw10 & 0xFF, command->nsid);

	status = 0;
	pci_nvme_status_genc(&status, NVME_SC_SUCCESS);

	switch (command->cdw10 & 0xFF) {
	case 0x00: /* return Identify Namespace data structure */
		/* Global NS only valid with NS Management */
		if (command->nsid == NVME_GLOBAL_NAMESPACE_TAG) {
			pci_nvme_status_genc(&status,
			    NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
			break;
		}

		nvme_prp_memcpy(sc->nsc_pi->pi_vmctx, command->prp1,
		    command->prp2, (uint8_t *)&sc->nsdata, sizeof(sc->nsdata),
		    NVME_COPY_TO_PRP);
		break;
	case 0x01: /* return Identify Controller data structure */
		nvme_prp_memcpy(sc->nsc_pi->pi_vmctx, command->prp1,
		    command->prp2, (uint8_t *)&sc->ctrldata,
		    sizeof(sc->ctrldata),
		    NVME_COPY_TO_PRP);
		break;
	case 0x02: /* list of 1024 active NSIDs > CDW1.NSID */
		dest = vm_map_gpa(sc->nsc_pi->pi_vmctx, command->prp1,
		    sizeof(uint32_t) * 1024);
		/* All unused entries shall be zero */
		memset(dest, 0, sizeof(uint32_t) * 1024);
		((uint32_t *)dest)[0] = 1;
		break;
	case 0x03: /* list of NSID structures in CDW1.NSID, 4096 bytes */
		if (command->nsid != 1) {
			pci_nvme_status_genc(&status,
			    NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
			break;
		}

		dest = vm_map_gpa(sc->nsc_pi->pi_vmctx, command->prp1,
		    sizeof(uint32_t) * 1024);
		/* All bytes after the descriptor shall be zero */
		memset(dest, 0, sizeof(uint32_t) * 1024);

		/* Return NIDT=1 (i.e. EUI64) descriptor */
		((uint8_t *)dest)[0] = 1;
		((uint8_t *)dest)[1] = sizeof(uint64_t);
		memcpy(((uint8_t *)dest) + 4, sc->nsdata.eui64, sizeof(uint64_t));
		break;
	case 0x13:
		/*
		 * Controller list is optional but used by UNH tests. Return
		 * a valid but empty list.
		 */
		dest = vm_map_gpa(sc->nsc_pi->pi_vmctx, command->prp1,
		    sizeof(uint16_t) * 2048);
		memset(dest, 0, sizeof(uint16_t) * 2048);
		break;
	default:
		DPRINTF("%s unsupported identify command requested 0x%x",
		    __func__, command->cdw10 & 0xFF);
		pci_nvme_status_genc(&status, NVME_SC_INVALID_FIELD);
		break;
	}

	compl->status = status;
}
static const char *
nvme_fid_to_name(uint8_t fid)
{
	const char *name;

	switch (fid) {
	case NVME_FEAT_ARBITRATION:
		name = "Arbitration";
		break;
	case NVME_FEAT_POWER_MANAGEMENT:
		name = "Power Management";
		break;
	case NVME_FEAT_LBA_RANGE_TYPE:
		name = "LBA Range Type";
		break;
	case NVME_FEAT_TEMPERATURE_THRESHOLD:
		name = "Temperature Threshold";
		break;
	case NVME_FEAT_ERROR_RECOVERY:
		name = "Error Recovery";
		break;
	case NVME_FEAT_VOLATILE_WRITE_CACHE:
		name = "Volatile Write Cache";
		break;
	case NVME_FEAT_NUMBER_OF_QUEUES:
		name = "Number of Queues";
		break;
	case NVME_FEAT_INTERRUPT_COALESCING:
		name = "Interrupt Coalescing";
		break;
	case NVME_FEAT_INTERRUPT_VECTOR_CONFIGURATION:
		name = "Interrupt Vector Configuration";
		break;
	case NVME_FEAT_WRITE_ATOMICITY:
		name = "Write Atomicity Normal";
		break;
	case NVME_FEAT_ASYNC_EVENT_CONFIGURATION:
		name = "Asynchronous Event Configuration";
		break;
	case NVME_FEAT_AUTONOMOUS_POWER_STATE_TRANSITION:
		name = "Autonomous Power State Transition";
		break;
	case NVME_FEAT_HOST_MEMORY_BUFFER:
		name = "Host Memory Buffer";
		break;
	case NVME_FEAT_TIMESTAMP:
		name = "Timestamp";
		break;
	case NVME_FEAT_KEEP_ALIVE_TIMER:
		name = "Keep Alive Timer";
		break;
	case NVME_FEAT_HOST_CONTROLLED_THERMAL_MGMT:
		name = "Host Controlled Thermal Management";
		break;
	case NVME_FEAT_NON_OP_POWER_STATE_CONFIG:
		name = "Non-Operational Power State Config";
		break;
	case NVME_FEAT_READ_RECOVERY_LEVEL_CONFIG:
		name = "Read Recovery Level Config";
		break;
	case NVME_FEAT_PREDICTABLE_LATENCY_MODE_CONFIG:
		name = "Predictable Latency Mode Config";
		break;
	case NVME_FEAT_PREDICTABLE_LATENCY_MODE_WINDOW:
		name = "Predictable Latency Mode Window";
		break;
	case NVME_FEAT_LBA_STATUS_INFORMATION_ATTRIBUTES:
		name = "LBA Status Information Report Interval";
		break;
	case NVME_FEAT_HOST_BEHAVIOR_SUPPORT:
		name = "Host Behavior Support";
		break;
	case NVME_FEAT_SANITIZE_CONFIG:
		name = "Sanitize Config";
		break;
	case NVME_FEAT_ENDURANCE_GROUP_EVENT_CONFIGURATION:
		name = "Endurance Group Event Configuration";
		break;
	case NVME_FEAT_SOFTWARE_PROGRESS_MARKER:
		name = "Software Progress Marker";
		break;
	case NVME_FEAT_HOST_IDENTIFIER:
		name = "Host Identifier";
		break;
	case NVME_FEAT_RESERVATION_NOTIFICATION_MASK:
		name = "Reservation Notification Mask";
		break;
	case NVME_FEAT_RESERVATION_PERSISTENCE:
		name = "Reservation Persistence";
		break;
	case NVME_FEAT_NAMESPACE_WRITE_PROTECTION_CONFIG:
		name = "Namespace Write Protection Config";
		break;
	default:
		name = "Unknown";
		break;
	}

	return (name);
}
static void
nvme_feature_invalid_cb(struct pci_nvme_softc *sc __unused,
    struct nvme_feature_obj *feat __unused,
    struct nvme_command *command __unused,
    struct nvme_completion *compl)
{

	pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD);
}
static void
nvme_feature_iv_config(struct pci_nvme_softc *sc,
    struct nvme_feature_obj *feat __unused,
    struct nvme_command *command,
    struct nvme_completion *compl)
{
	uint32_t i;
	uint32_t cdw11 = command->cdw11;
	uint16_t iv;
	bool cd;

	pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD);

	iv = cdw11 & 0xffff;
	cd = cdw11 & (1 << 16);

	if (iv > (sc->max_queues + 1)) {
		return;
	}

	/* No Interrupt Coalescing (i.e. not Coalescing Disable) for Admin Q */
	if ((iv == 0) && !cd)
		return;

	/* Requested Interrupt Vector must be used by a CQ */
	for (i = 0; i < sc->num_cqueues + 1; i++) {
		if (sc->compl_queues[i].intr_vec == iv) {
			pci_nvme_status_genc(&compl->status, NVME_SC_SUCCESS);
		}
	}
}
#define	NVME_ASYNC_EVENT_ENDURANCE_GROUP	(0x4000)
static void
nvme_feature_async_event(struct pci_nvme_softc *sc __unused,
    struct nvme_feature_obj *feat __unused,
    struct nvme_command *command,
    struct nvme_completion *compl)
{

	if (command->cdw11 & NVME_ASYNC_EVENT_ENDURANCE_GROUP)
		pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD);
}
#define	NVME_TEMP_THRESH_OVER	0
#define	NVME_TEMP_THRESH_UNDER	1
static void
nvme_feature_temperature(struct pci_nvme_softc *sc,
    struct nvme_feature_obj *feat __unused,
    struct nvme_command *command,
    struct nvme_completion *compl)
{
	uint16_t	tmpth;	/* Temperature Threshold */
	uint8_t		tmpsel;	/* Threshold Temperature Select */
	uint8_t		thsel;	/* Threshold Type Select */
	bool		set_crit = false;
	bool		report_crit;

	tmpth  = command->cdw11 & 0xffff;
	tmpsel = (command->cdw11 >> 16) & 0xf;
	thsel  = (command->cdw11 >> 20) & 0x3;

	DPRINTF("%s: tmpth=%#x tmpsel=%#x thsel=%#x", __func__, tmpth, tmpsel, thsel);

	/* Check for unsupported values */
	if (((tmpsel != 0) && (tmpsel != 0xf)) ||
	    (thsel > NVME_TEMP_THRESH_UNDER)) {
		pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD);
		return;
	}

	if (((thsel == NVME_TEMP_THRESH_OVER)  && (NVME_TEMPERATURE >= tmpth)) ||
	    ((thsel == NVME_TEMP_THRESH_UNDER) && (NVME_TEMPERATURE <= tmpth)))
		set_crit = true;

	pthread_mutex_lock(&sc->mtx);
	if (set_crit)
		sc->health_log.critical_warning |=
		    NVME_CRIT_WARN_ST_TEMPERATURE;
	else
		sc->health_log.critical_warning &=
		    ~NVME_CRIT_WARN_ST_TEMPERATURE;
	pthread_mutex_unlock(&sc->mtx);

	report_crit = sc->feat[NVME_FEAT_ASYNC_EVENT_CONFIGURATION].cdw11 &
	    NVME_CRIT_WARN_ST_TEMPERATURE;

	if (set_crit && report_crit)
		pci_nvme_aen_post(sc, PCI_NVME_AE_TYPE_SMART,
		    sc->health_log.critical_warning);

	DPRINTF("%s: set_crit=%c critical_warning=%#x status=%#x", __func__, set_crit ? 'T' : 'F', sc->health_log.critical_warning, compl->status);
}
static void
nvme_feature_num_queues(struct pci_nvme_softc *sc,
    struct nvme_feature_obj *feat __unused,
    struct nvme_command *command,
    struct nvme_completion *compl)
{
	uint16_t nqr;	/* Number of Queues Requested */

	if (sc->num_q_is_set) {
		WPRINTF("%s: Number of Queues already set", __func__);
		pci_nvme_status_genc(&compl->status,
		    NVME_SC_COMMAND_SEQUENCE_ERROR);
		return;
	}

	nqr = command->cdw11 & 0xFFFF;
	if (nqr == 0xffff) {
		WPRINTF("%s: Illegal NSQR value %#x", __func__, nqr);
		pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD);
		return;
	}

	sc->num_squeues = ONE_BASED(nqr);
	if (sc->num_squeues > sc->max_queues) {
		DPRINTF("NSQR=%u is greater than max %u", sc->num_squeues,
		    sc->max_queues);
		sc->num_squeues = sc->max_queues;
	}

	nqr = (command->cdw11 >> 16) & 0xFFFF;
	if (nqr == 0xffff) {
		WPRINTF("%s: Illegal NCQR value %#x", __func__, nqr);
		pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD);
		return;
	}

	sc->num_cqueues = ONE_BASED(nqr);
	if (sc->num_cqueues > sc->max_queues) {
		DPRINTF("NCQR=%u is greater than max %u", sc->num_cqueues,
		    sc->max_queues);
		sc->num_cqueues = sc->max_queues;
	}

	/* Patch the command value which will be saved on callback's return */
	command->cdw11 = NVME_FEATURE_NUM_QUEUES(sc);
	compl->cdw0 = NVME_FEATURE_NUM_QUEUES(sc);

	sc->num_q_is_set = true;
}
static void
nvme_opc_set_features(struct pci_nvme_softc *sc, struct nvme_command *command,
    struct nvme_completion *compl)
{
	struct nvme_feature_obj *feat;
	uint32_t nsid = command->nsid;
	uint8_t fid = NVMEV(NVME_FEAT_SET_FID, command->cdw10);
	bool sv = NVMEV(NVME_FEAT_SET_SV, command->cdw10);

	DPRINTF("%s: Feature ID 0x%x (%s)", __func__, fid, nvme_fid_to_name(fid));

	if (fid >= NVME_FID_MAX) {
		DPRINTF("%s invalid feature 0x%x", __func__, fid);
		pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD);
		return;
	}

	if (sv) {
		pci_nvme_status_tc(&compl->status, NVME_SCT_COMMAND_SPECIFIC,
		    NVME_SC_FEATURE_NOT_SAVEABLE);
		return;
	}

	feat = &sc->feat[fid];

	if (feat->namespace_specific && (nsid == NVME_GLOBAL_NAMESPACE_TAG)) {
		pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD);
		return;
	}

	if (!feat->namespace_specific &&
	    !((nsid == 0) || (nsid == NVME_GLOBAL_NAMESPACE_TAG))) {
		pci_nvme_status_tc(&compl->status, NVME_SCT_COMMAND_SPECIFIC,
		    NVME_SC_FEATURE_NOT_NS_SPECIFIC);
		return;
	}

	compl->cdw0 = 0;
	pci_nvme_status_genc(&compl->status, NVME_SC_SUCCESS);

	if (feat->set)
		feat->set(sc, feat, command, compl);
	else {
		pci_nvme_status_tc(&compl->status, NVME_SCT_COMMAND_SPECIFIC,
		    NVME_SC_FEATURE_NOT_CHANGEABLE);
		return;
	}

	DPRINTF("%s: status=%#x cdw11=%#x", __func__, compl->status, command->cdw11);
	if (compl->status == NVME_SC_SUCCESS) {
		feat->cdw11 = command->cdw11;
		if ((fid == NVME_FEAT_ASYNC_EVENT_CONFIGURATION) &&
		    (command->cdw11 != 0))
			pci_nvme_aen_notify(sc);
	}
}
#define	NVME_FEATURES_SEL_SUPPORTED	0x3
#define	NVME_FEATURES_NS_SPECIFIC	(1 << 1)

static void
nvme_opc_get_features(struct pci_nvme_softc *sc, struct nvme_command *command,
    struct nvme_completion *compl)
{
	struct nvme_feature_obj *feat;
	uint8_t fid = command->cdw10 & 0xFF;
	uint8_t sel = (command->cdw10 >> 8) & 0x7;

	DPRINTF("%s: Feature ID 0x%x (%s)", __func__, fid, nvme_fid_to_name(fid));

	if (fid >= NVME_FID_MAX) {
		DPRINTF("%s invalid feature 0x%x", __func__, fid);
		pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD);
		return;
	}

	compl->cdw0 = 0;
	pci_nvme_status_genc(&compl->status, NVME_SC_SUCCESS);

	feat = &sc->feat[fid];
	if (feat->get) {
		feat->get(sc, feat, command, compl);
	}

	if (compl->status == NVME_SC_SUCCESS) {
		if ((sel == NVME_FEATURES_SEL_SUPPORTED) && feat->namespace_specific)
			compl->cdw0 = NVME_FEATURES_NS_SPECIFIC;
		else
			compl->cdw0 = feat->cdw11;
	}
}
static void
nvme_opc_format_nvm(struct pci_nvme_softc *sc, struct nvme_command *command,
    struct nvme_completion *compl)
{
	uint8_t	ses, lbaf, pi;

	/* Only supports Secure Erase Setting - User Data Erase */
	ses = (command->cdw10 >> 9) & 0x7;
	if (ses > 0x1) {
		pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD);
		return;
	}

	/* Only supports a single LBA Format */
	lbaf = command->cdw10 & 0xf;
	if (lbaf != 0) {
		pci_nvme_status_tc(&compl->status, NVME_SCT_COMMAND_SPECIFIC,
		    NVME_SC_INVALID_FORMAT);
		return;
	}

	/* Doesn't support Protection Information */
	pi = (command->cdw10 >> 5) & 0x7;
	if (pi != 0) {
		pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD);
		return;
	}

	if (sc->nvstore.type == NVME_STOR_RAM) {
		if (sc->nvstore.ctx)
			free(sc->nvstore.ctx);
		sc->nvstore.ctx = calloc(1, sc->nvstore.size);
		pci_nvme_status_genc(&compl->status, NVME_SC_SUCCESS);
	} else {
		struct pci_nvme_ioreq *req;
		int err;

		req = pci_nvme_get_ioreq(sc);
		if (req == NULL) {
			pci_nvme_status_genc(&compl->status,
			    NVME_SC_INTERNAL_DEVICE_ERROR);
			WPRINTF("%s: unable to allocate IO req", __func__);
			return;
		}
		req->nvme_sq = &sc->submit_queues[0];
		req->sqid = 0;
		req->opc = command->opc;
		req->cid = command->cid;
		req->nsid = command->nsid;

		req->io_req.br_offset = 0;
		req->io_req.br_resid = sc->nvstore.size;
		req->io_req.br_callback = pci_nvme_io_done;

		err = blockif_delete(sc->nvstore.ctx, &req->io_req);
		if (err) {
			pci_nvme_status_genc(&compl->status,
			    NVME_SC_INTERNAL_DEVICE_ERROR);
			pci_nvme_release_ioreq(sc, req);
		} else
			compl->status = NVME_NO_STATUS;
	}
}
static void
nvme_opc_abort(struct pci_nvme_softc *sc __unused, struct nvme_command *command,
    struct nvme_completion *compl)
{

	DPRINTF("%s submission queue %u, command ID 0x%x", __func__,
	    command->cdw10 & 0xFFFF, (command->cdw10 >> 16) & 0xFFFF);

	/* TODO: search for the command ID and abort it */

	compl->cdw0 = 1;
	pci_nvme_status_genc(&compl->status, NVME_SC_SUCCESS);
}
static void
nvme_opc_async_event_req(struct pci_nvme_softc *sc,
    struct nvme_command *command, struct nvme_completion *compl)
{

	DPRINTF("%s async event request count=%u aerl=%u cid=%#x", __func__,
	    sc->aer_count, sc->ctrldata.aerl, command->cid);

	/* Don't exceed the Async Event Request Limit (AERL). */
	if (pci_nvme_aer_limit_reached(sc)) {
		pci_nvme_status_tc(&compl->status, NVME_SCT_COMMAND_SPECIFIC,
		    NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED);
		return;
	}

	if (pci_nvme_aer_add(sc, command->cid)) {
		pci_nvme_status_tc(&compl->status, NVME_SCT_GENERIC,
		    NVME_SC_INTERNAL_DEVICE_ERROR);
		return;
	}

	/*
	 * Raise events when they happen based on the Set Features cmd.
	 * These events happen async, so only set completion successful if
	 * there is an event reflective of the request to get event.
	 */
	compl->status = NVME_NO_STATUS;
	pci_nvme_aen_notify(sc);
}
static void
pci_nvme_handle_admin_cmd(struct pci_nvme_softc *sc, uint64_t value)
{
	struct nvme_completion compl;
	struct nvme_command *cmd;
	struct nvme_submission_queue *sq;
	struct nvme_completion_queue *cq;
	uint16_t sqhead;

	DPRINTF("%s index %u", __func__, (uint32_t)value);

	sq = &sc->submit_queues[0];
	cq = &sc->compl_queues[0];

	pthread_mutex_lock(&sq->mtx);

	sqhead = sq->head;
	DPRINTF("sqhead %u, tail %u", sqhead, sq->tail);

	while (sqhead != atomic_load_acq_short(&sq->tail)) {
		cmd = &(sq->qbase)[sqhead];
		compl.cdw0 = 0;
		compl.status = 0;

		switch (cmd->opc) {
		case NVME_OPC_DELETE_IO_SQ:
			DPRINTF("%s command DELETE_IO_SQ", __func__);
			nvme_opc_delete_io_sq(sc, cmd, &compl);
			break;
		case NVME_OPC_CREATE_IO_SQ:
			DPRINTF("%s command CREATE_IO_SQ", __func__);
			nvme_opc_create_io_sq(sc, cmd, &compl);
			break;
		case NVME_OPC_DELETE_IO_CQ:
			DPRINTF("%s command DELETE_IO_CQ", __func__);
			nvme_opc_delete_io_cq(sc, cmd, &compl);
			break;
		case NVME_OPC_CREATE_IO_CQ:
			DPRINTF("%s command CREATE_IO_CQ", __func__);
			nvme_opc_create_io_cq(sc, cmd, &compl);
			break;
		case NVME_OPC_GET_LOG_PAGE:
			DPRINTF("%s command GET_LOG_PAGE", __func__);
			nvme_opc_get_log_page(sc, cmd, &compl);
			break;
		case NVME_OPC_IDENTIFY:
			DPRINTF("%s command IDENTIFY", __func__);
			nvme_opc_identify(sc, cmd, &compl);
			break;
		case NVME_OPC_ABORT:
			DPRINTF("%s command ABORT", __func__);
			nvme_opc_abort(sc, cmd, &compl);
			break;
		case NVME_OPC_SET_FEATURES:
			DPRINTF("%s command SET_FEATURES", __func__);
			nvme_opc_set_features(sc, cmd, &compl);
			break;
		case NVME_OPC_GET_FEATURES:
			DPRINTF("%s command GET_FEATURES", __func__);
			nvme_opc_get_features(sc, cmd, &compl);
			break;
		case NVME_OPC_FIRMWARE_ACTIVATE:
			DPRINTF("%s command FIRMWARE_ACTIVATE", __func__);
			pci_nvme_status_tc(&compl.status,
			    NVME_SCT_COMMAND_SPECIFIC,
			    NVME_SC_INVALID_FIRMWARE_SLOT);
			break;
		case NVME_OPC_ASYNC_EVENT_REQUEST:
			DPRINTF("%s command ASYNC_EVENT_REQ", __func__);
			nvme_opc_async_event_req(sc, cmd, &compl);
			break;
		case NVME_OPC_FORMAT_NVM:
			DPRINTF("%s command FORMAT_NVM", __func__);
			if ((sc->ctrldata.oacs &
			    (1 << NVME_CTRLR_DATA_OACS_FORMAT_SHIFT)) == 0) {
				pci_nvme_status_genc(&compl.status, NVME_SC_INVALID_OPCODE);
				break;
			}
			nvme_opc_format_nvm(sc, cmd, &compl);
			break;
		case NVME_OPC_SECURITY_SEND:
		case NVME_OPC_SECURITY_RECEIVE:
		case NVME_OPC_SANITIZE:
		case NVME_OPC_GET_LBA_STATUS:
			DPRINTF("%s command OPC=%#x (unsupported)", __func__,
			    cmd->opc);
			/* Valid but unsupported opcodes */
			pci_nvme_status_genc(&compl.status, NVME_SC_INVALID_FIELD);
			break;
		default:
			DPRINTF("%s command OPC=%#X (not implemented)",
			    __func__,
			    cmd->opc);
			pci_nvme_status_genc(&compl.status, NVME_SC_INVALID_OPCODE);
		}
		sqhead = (sqhead + 1) % sq->size;

		if (NVME_COMPLETION_VALID(compl)) {
			pci_nvme_cq_update(sc, &sc->compl_queues[0],
			    compl.cdw0,
			    cmd->cid,
			    0,		/* SQID */
			    compl.status);
		}
	}

	DPRINTF("setting sqhead %u", sqhead);
	sq->head = sqhead;

	if (cq->head != cq->tail)
		pci_generate_msix(sc->nsc_pi, 0);

	pthread_mutex_unlock(&sq->mtx);
}
/*
 * Update the Write and Read statistics reported in SMART data
 *
 * NVMe defines "data unit" as thousands of 512 byte blocks and is rounded up.
 * E.g. 1 data unit is 1 - 1,000 512 byte blocks. 3 data units are 2,001 - 3,000
 * 512 byte blocks. Rounding up is achieved by initializing the remainder to 999.
 */
static void
pci_nvme_stats_write_read_update(struct pci_nvme_softc *sc, uint8_t opc,
    size_t bytes, uint16_t status)
{

	pthread_mutex_lock(&sc->mtx);
	switch (opc) {
	case NVME_OPC_WRITE:
		sc->write_commands++;
		if (status != NVME_SC_SUCCESS)
			break;
		sc->write_dunits_remainder += (bytes / 512);
		while (sc->write_dunits_remainder >= 1000) {
			sc->write_data_units++;
			sc->write_dunits_remainder -= 1000;
		}
		break;
	case NVME_OPC_READ:
		sc->read_commands++;
		if (status != NVME_SC_SUCCESS)
			break;
		sc->read_dunits_remainder += (bytes / 512);
		while (sc->read_dunits_remainder >= 1000) {
			sc->read_data_units++;
			sc->read_dunits_remainder -= 1000;
		}
		break;
	default:
		DPRINTF("%s: Invalid OPC 0x%02x for stats", __func__, opc);
		break;
	}
	pthread_mutex_unlock(&sc->mtx);
}
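/*
 * Worked example of the rounding described above: the very first 512 byte
 * write makes write_dunits_remainder 999 + 1 = 1000, which immediately
 * counts as one data unit. A subsequent 2,001 block write adds 2,001 to the
 * remainder, crossing two more 1,000 boundaries, for a running total of 3
 * data units over 2,002 blocks, consistent with the spec's "3 data units
 * are 2,001 - 3,000 blocks" example.
 */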
/*
 * Check if the combination of Starting LBA (slba) and number of blocks
 * exceeds the range of the underlying storage.
 *
 * Because NVMe specifies the SLBA in blocks as a uint64_t and blockif stores
 * the capacity in bytes as a uint64_t, care must be taken to avoid integer
 * overflows.
 */
static bool
pci_nvme_out_of_range(struct pci_nvme_blockstore *nvstore, uint64_t slba,
    uint32_t nblocks)
{
	size_t	offset, bytes;

	/* Overflow check of multiplying Starting LBA by the sector size */
	if (slba >> (64 - nvstore->sectsz_bits))
		return (true);

	offset = slba << nvstore->sectsz_bits;
	bytes = nblocks << nvstore->sectsz_bits;

	/* Overflow check of Number of Logical Blocks */
	if ((nvstore->size <= offset) || ((nvstore->size - offset) < bytes))
		return (true);

	return (false);
}
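/*
 * Example of the first overflow check above: with a 512 byte sector size
 * (sectsz_bits == 9), any slba with a bit set in its top 9 bits would wrap
 * when shifted left by 9, so a non-zero "slba >> (64 - 9)" rejects the
 * request before the shift can overflow.
 */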
static int
pci_nvme_append_iov_req(struct pci_nvme_softc *sc __unused,
    struct pci_nvme_ioreq *req, uint64_t gpaddr, size_t size, uint64_t offset)
{
	int iovidx;
	bool range_is_contiguous;

	if (req->io_req.br_iovcnt == NVME_MAX_IOVEC) {
		return (-1);
	}

	/*
	 * Minimize the number of IOVs by concatenating contiguous address
	 * ranges. If the IOV count is zero, there is no previous range to
	 * concatenate.
	 */
	if (req->io_req.br_iovcnt == 0)
		range_is_contiguous = false;
	else
		range_is_contiguous = (req->prev_gpaddr + req->prev_size) == gpaddr;

	if (range_is_contiguous) {
		iovidx = req->io_req.br_iovcnt - 1;

		req->io_req.br_iov[iovidx].iov_base =
		    paddr_guest2host(req->sc->nsc_pi->pi_vmctx,
		    req->prev_gpaddr, size);
		if (req->io_req.br_iov[iovidx].iov_base == NULL)
			return (-1);

		req->prev_size += size;
		req->io_req.br_resid += size;

		req->io_req.br_iov[iovidx].iov_len = req->prev_size;
	} else {
		iovidx = req->io_req.br_iovcnt;
		if (iovidx == 0) {
			req->io_req.br_offset = offset;
			req->io_req.br_resid = 0;
			req->io_req.br_param = req;
		}

		req->io_req.br_iov[iovidx].iov_base =
		    paddr_guest2host(req->sc->nsc_pi->pi_vmctx,
		    gpaddr, size);
		if (req->io_req.br_iov[iovidx].iov_base == NULL)
			return (-1);

		req->io_req.br_iov[iovidx].iov_len = size;

		req->prev_gpaddr = gpaddr;
		req->prev_size = size;
		req->io_req.br_resid += size;

		req->io_req.br_iovcnt++;
	}

	return (0);
}
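/*
 * Example of the concatenation above: guest pages at gpaddr 0x10000 and
 * 0x11000 (4 KiB apart) passed in consecutive calls collapse into a single
 * 8 KiB iovec, because the second range starts exactly at prev_gpaddr +
 * prev_size. Addresses here are illustrative.
 */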
static void
pci_nvme_set_completion(struct pci_nvme_softc *sc,
    struct nvme_submission_queue *sq, int sqid, uint16_t cid, uint16_t status)
{
	struct nvme_completion_queue *cq = &sc->compl_queues[sq->cqid];

	DPRINTF("%s sqid %d cqid %u cid %u status: 0x%x 0x%x",
	    __func__, sqid, sq->cqid, cid, NVME_STATUS_GET_SCT(status),
	    NVME_STATUS_GET_SC(status));

	pci_nvme_cq_update(sc, cq, 0, cid, sqid, status);

	if (cq->head != cq->tail) {
		if (cq->intr_en & NVME_CQ_INTEN) {
			pci_generate_msix(sc->nsc_pi, cq->intr_vec);
		} else {
			DPRINTF("%s: CQ%u interrupt disabled",
			    __func__, sq->cqid);
		}
	}
}
static void
pci_nvme_release_ioreq(struct pci_nvme_softc *sc, struct pci_nvme_ioreq *req)
{
	req->sc = NULL;
	req->nvme_sq = NULL;
	req->sqid = 0;

	pthread_mutex_lock(&sc->mtx);

	STAILQ_INSERT_TAIL(&sc->ioreqs_free, req, link);
	sc->pending_ios--;

	/* when no more IO pending, can set to ready if device reset/enabled */
	if (sc->pending_ios == 0 &&
	    NVME_CC_GET_EN(sc->regs.cc) && !(NVME_CSTS_GET_RDY(sc->regs.csts)))
		sc->regs.csts |= NVME_CSTS_RDY;

	pthread_mutex_unlock(&sc->mtx);

	sem_post(&sc->iosemlock);
}
static struct pci_nvme_ioreq *
pci_nvme_get_ioreq(struct pci_nvme_softc *sc)
{
	struct pci_nvme_ioreq *req = NULL;

	sem_wait(&sc->iosemlock);
	pthread_mutex_lock(&sc->mtx);

	req = STAILQ_FIRST(&sc->ioreqs_free);
	assert(req != NULL);
	STAILQ_REMOVE_HEAD(&sc->ioreqs_free, link);

	req->sc = sc;
	sc->pending_ios++;

	pthread_mutex_unlock(&sc->mtx);

	req->io_req.br_iovcnt = 0;
	req->io_req.br_offset = 0;
	req->io_req.br_resid = 0;
	req->io_req.br_param = req;
	req->prev_gpaddr = 0;
	req->prev_size = 0;

	return (req);
}
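
/*
 * Note: iosemlock is initialized to the number of ioslots (see
 * pci_nvme_init()), so pci_nvme_get_ioreq() blocks in sem_wait() when
 * every slot is in flight and pci_nvme_release_ioreq()'s sem_post()
 * wakes it once a slot frees up; the free list itself is protected by
 * sc->mtx.
 */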
static void
pci_nvme_io_done(struct blockif_req *br, int err)
{
	struct pci_nvme_ioreq *req = br->br_param;
	struct nvme_submission_queue *sq = req->nvme_sq;
	uint16_t code, status;

	DPRINTF("%s error %d %s", __func__, err, strerror(err));

	/* TODO return correct error */
	code = err ? NVME_SC_DATA_TRANSFER_ERROR : NVME_SC_SUCCESS;
	status = 0;
	pci_nvme_status_genc(&status, code);

	pci_nvme_set_completion(req->sc, sq, req->sqid, req->cid, status);
	pci_nvme_stats_write_read_update(req->sc, req->opc,
	    req->bytes, status);
	pci_nvme_release_ioreq(req->sc, req);
}
/*
 * Implements the Flush command. The specification states:
 *    If a volatile write cache is not present, Flush commands complete
 *    successfully and have no effect
 * in the description of the Volatile Write Cache (VWC) field of the Identify
 * Controller data. Therefore, set status to Success if the command is
 * not supported (i.e. RAM or as indicated by the blockif).
 */
static bool
nvme_opc_flush(struct pci_nvme_softc *sc __unused,
    struct nvme_command *cmd __unused,
    struct pci_nvme_blockstore *nvstore,
    struct pci_nvme_ioreq *req,
    uint16_t *status)
{
	bool pending = false;

	if (nvstore->type == NVME_STOR_RAM) {
		pci_nvme_status_genc(status, NVME_SC_SUCCESS);
	} else {
		int err;

		req->io_req.br_callback = pci_nvme_io_done;

		err = blockif_flush(nvstore->ctx, &req->io_req);
		switch (err) {
		case 0:
			pending = true;
			break;
		case EOPNOTSUPP:
			pci_nvme_status_genc(status, NVME_SC_SUCCESS);
			break;
		default:
			pci_nvme_status_genc(status, NVME_SC_INTERNAL_DEVICE_ERROR);
		}
	}

	return (pending);
}
static uint16_t
nvme_write_read_ram(struct pci_nvme_softc *sc,
    struct pci_nvme_blockstore *nvstore,
    uint64_t prp1, uint64_t prp2,
    size_t offset, uint64_t bytes,
    bool is_write)
{
	uint8_t *buf = nvstore->ctx;
	enum nvme_copy_dir dir;
	uint16_t status;

	if (is_write)
		dir = NVME_COPY_TO_PRP;
	else
		dir = NVME_COPY_FROM_PRP;

	status = 0;
	if (nvme_prp_memcpy(sc->nsc_pi->pi_vmctx, prp1, prp2,
	    buf + offset, bytes, dir))
		pci_nvme_status_genc(&status,
		    NVME_SC_DATA_TRANSFER_ERROR);
	else
		pci_nvme_status_genc(&status, NVME_SC_SUCCESS);

	return (status);
}
static uint16_t
nvme_write_read_blockif(struct pci_nvme_softc *sc,
    struct pci_nvme_blockstore *nvstore,
    struct pci_nvme_ioreq *req,
    uint64_t prp1, uint64_t prp2,
    size_t offset, uint64_t bytes,
    bool is_write)
{
	uint64_t size;
	int err;
	uint16_t status = NVME_NO_STATUS;

	size = MIN(PAGE_SIZE - (prp1 % PAGE_SIZE), bytes);
	if (pci_nvme_append_iov_req(sc, req, prp1, size, offset)) {
		err = -1;
		goto out;
	}

	offset += size;
	bytes  -= size;

	if (bytes == 0) {
		;
	} else if (bytes <= PAGE_SIZE) {
		size = bytes;
		if (pci_nvme_append_iov_req(sc, req, prp2, size, offset)) {
			err = -1;
			goto out;
		}
	} else {
		void *vmctx = sc->nsc_pi->pi_vmctx;
		uint64_t *prp_list = &prp2;
		uint64_t *last = prp_list;

		/* PRP2 is pointer to a physical region page list */
		while (bytes) {
			/* Last entry in list points to the next list */
			if ((prp_list == last) && (bytes > PAGE_SIZE)) {
				uint64_t prp = *prp_list;

				prp_list = paddr_guest2host(vmctx, prp,
				    PAGE_SIZE - (prp % PAGE_SIZE));
				if (prp_list == NULL) {
					err = -1;
					goto out;
				}
				last = prp_list + (NVME_PRP2_ITEMS - 1);
			}

			size = MIN(bytes, PAGE_SIZE);

			if (pci_nvme_append_iov_req(sc, req, *prp_list, size,
			    offset)) {
				err = -1;
				goto out;
			}

			offset += size;
			bytes  -= size;

			prp_list++;
		}
	}
	req->io_req.br_callback = pci_nvme_io_done;
	if (is_write)
		err = blockif_write(nvstore->ctx, &req->io_req);
	else
		err = blockif_read(nvstore->ctx, &req->io_req);
out:
	if (err)
		pci_nvme_status_genc(&status, NVME_SC_DATA_TRANSFER_ERROR);

	return (status);
}
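
/*
 * PRP recap (illustrative): PRP1 covers the first, possibly unaligned,
 * page of the transfer. If the remainder fits in one more page, PRP2 is
 * a second data pointer; otherwise PRP2 points to a page of 64-bit
 * entries (NVME_PRP2_ITEMS per page) whose final entry chains to the
 * next list page, which is why the loop above reloads prp_list whenever
 * it reaches "last" with bytes still remaining.
 */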
static bool
nvme_opc_write_read(struct pci_nvme_softc *sc,
    struct nvme_command *cmd,
    struct pci_nvme_blockstore *nvstore,
    struct pci_nvme_ioreq *req,
    uint16_t *status)
{
	uint64_t lba, nblocks, bytes;
	size_t offset;
	bool is_write = cmd->opc == NVME_OPC_WRITE;
	bool pending = false;

	lba = ((uint64_t)cmd->cdw11 << 32) | cmd->cdw10;
	nblocks = (cmd->cdw12 & 0xFFFF) + 1;
	bytes = nblocks << nvstore->sectsz_bits;
	if (bytes > NVME_MAX_DATA_SIZE) {
		WPRINTF("%s command would exceed MDTS", __func__);
		pci_nvme_status_genc(status, NVME_SC_INVALID_FIELD);
		goto out;
	}

	if (pci_nvme_out_of_range(nvstore, lba, nblocks)) {
		WPRINTF("%s command would exceed LBA range(slba=%#lx nblocks=%#lx)",
		    __func__, lba, nblocks);
		pci_nvme_status_genc(status, NVME_SC_LBA_OUT_OF_RANGE);
		goto out;
	}

	offset = lba << nvstore->sectsz_bits;

	req->bytes = bytes;
	req->io_req.br_offset = lba;

	/* PRP bits 1:0 must be zero */
	cmd->prp1 &= ~0x3UL;
	cmd->prp2 &= ~0x3UL;

	if (nvstore->type == NVME_STOR_RAM) {
		*status = nvme_write_read_ram(sc, nvstore, cmd->prp1,
		    cmd->prp2, offset, bytes, is_write);
	} else {
		*status = nvme_write_read_blockif(sc, nvstore, req,
		    cmd->prp1, cmd->prp2, offset, bytes, is_write);

		if (*status == NVME_NO_STATUS)
			pending = true;
	}
out:
	if (!pending)
		pci_nvme_stats_write_read_update(sc, cmd->opc, bytes, *status);

	return (pending);
}
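
/*
 * Example (illustrative): NLB in cdw12 is zero-based, so cdw12 = 0 means
 * one logical block. A Read with cdw10 = 0x10, cdw11 = 0, and cdw12 = 7
 * on a 512-byte-sector namespace becomes offset 0x2000 and bytes 0x1000
 * after the shifts above.
 */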
static void
pci_nvme_dealloc_sm(struct blockif_req *br, int err)
{
	struct pci_nvme_ioreq *req = br->br_param;
	struct pci_nvme_softc *sc = req->sc;
	bool done = true;
	uint16_t status;

	if (err) {
		pci_nvme_status_genc(&status, NVME_SC_INTERNAL_DEVICE_ERROR);
	} else if ((req->prev_gpaddr + 1) == (req->prev_size)) {
		pci_nvme_status_genc(&status, NVME_SC_SUCCESS);
	} else {
		struct iovec *iov = req->io_req.br_iov;

		req->prev_gpaddr++;
		iov += req->prev_gpaddr;

		/* The iov_* values already include the sector size */
		req->io_req.br_offset = (off_t)iov->iov_base;
		req->io_req.br_resid = iov->iov_len;
		if (blockif_delete(sc->nvstore.ctx, &req->io_req)) {
			pci_nvme_status_genc(&status,
			    NVME_SC_INTERNAL_DEVICE_ERROR);
		} else
			done = false;
	}

	if (done) {
		pci_nvme_set_completion(sc, req->nvme_sq, req->sqid, req->cid,
		    status);
		pci_nvme_release_ioreq(sc, req);
	}
}
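
/*
 * Note: pci_nvme_dealloc_sm() is the completion-side state machine for
 * multi-range deallocates. Each br_iov[] entry holds a byte offset in
 * iov_base and a length in iov_len (not host pointers), prev_gpaddr is
 * the index of the entry in flight, and prev_size is the entry count,
 * so each blockif_delete() completion kicks off the next range until
 * all are done or one fails.
 */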
static bool
nvme_opc_dataset_mgmt(struct pci_nvme_softc *sc,
    struct nvme_command *cmd,
    struct pci_nvme_blockstore *nvstore,
    struct pci_nvme_ioreq *req,
    uint16_t *status)
{
	struct nvme_dsm_range *range = NULL;
	uint32_t nr, r, non_zero, dr;
	int err;
	bool pending = false;

	if ((sc->ctrldata.oncs & NVME_ONCS_DSM) == 0) {
		pci_nvme_status_genc(status, NVME_SC_INVALID_OPCODE);
		goto out;
	}

	nr = cmd->cdw10 & 0xff;

	/* copy locally because a range entry could straddle PRPs */
	range = calloc(1, NVME_MAX_DSM_TRIM);
	if (range == NULL) {
		pci_nvme_status_genc(status, NVME_SC_INTERNAL_DEVICE_ERROR);
		goto out;
	}
	nvme_prp_memcpy(sc->nsc_pi->pi_vmctx, cmd->prp1, cmd->prp2,
	    (uint8_t *)range, NVME_MAX_DSM_TRIM, NVME_COPY_FROM_PRP);

	/* Check for invalid ranges and the number of non-zero lengths */
	non_zero = 0;
	for (r = 0; r <= nr; r++) {
		if (pci_nvme_out_of_range(nvstore,
		    range[r].starting_lba, range[r].length)) {
			pci_nvme_status_genc(status, NVME_SC_LBA_OUT_OF_RANGE);
			goto out;
		}
		if (range[r].length != 0)
			non_zero++;
	}

	if (cmd->cdw11 & NVME_DSM_ATTR_DEALLOCATE) {
		size_t offset, bytes;
		int sectsz_bits = sc->nvstore.sectsz_bits;

		/*
		 * DSM calls are advisory only, and compliant controllers
		 * may choose to take no actions (i.e. return Success).
		 */
		if (!nvstore->deallocate) {
			pci_nvme_status_genc(status, NVME_SC_SUCCESS);
			goto out;
		}

		/* If all ranges have a zero length, return Success */
		if (non_zero == 0) {
			pci_nvme_status_genc(status, NVME_SC_SUCCESS);
			goto out;
		}

		if (req == NULL) {
			pci_nvme_status_genc(status, NVME_SC_INTERNAL_DEVICE_ERROR);
			goto out;
		}

		offset = range[0].starting_lba << sectsz_bits;
		bytes = range[0].length << sectsz_bits;

		/*
		 * If the request is for more than a single range, store
		 * the ranges in the br_iov. Optimize for the common case
		 * of a single range.
		 *
		 * Note that NVMe Number of Ranges is a zero based value
		 */
		req->io_req.br_iovcnt = 0;
		req->io_req.br_offset = offset;
		req->io_req.br_resid = bytes;

		if (nr == 0) {
			req->io_req.br_callback = pci_nvme_io_done;
		} else {
			struct iovec *iov = req->io_req.br_iov;

			for (r = 0, dr = 0; r <= nr; r++) {
				offset = range[r].starting_lba << sectsz_bits;
				bytes = range[r].length << sectsz_bits;
				if (bytes == 0)
					continue;

				if ((nvstore->size - offset) < bytes) {
					pci_nvme_status_genc(status,
					    NVME_SC_LBA_OUT_OF_RANGE);
					goto out;
				}
				iov[dr].iov_base = (void *)offset;
				iov[dr].iov_len = bytes;
				dr++;
			}
			req->io_req.br_callback = pci_nvme_dealloc_sm;

			/*
			 * Use prev_gpaddr to track the current entry and
			 * prev_size to track the number of entries
			 */
			req->prev_gpaddr = 0;
			req->prev_size = dr;
		}

		err = blockif_delete(nvstore->ctx, &req->io_req);
		if (err)
			pci_nvme_status_genc(status, NVME_SC_INTERNAL_DEVICE_ERROR);
		else
			pending = true;
	}
out:
	free(range);
	return (pending);
}
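
/*
 * Range layout (illustrative): the guest passes an array of
 * struct nvme_dsm_range entries, each holding 32-bit context attributes
 * and length fields plus a 64-bit starting_lba. Number of Ranges in
 * cdw10 is zero-based, so a single-range TRIM has nr == 0 and the loops
 * above iterate r = 0..nr inclusive.
 */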
static void
pci_nvme_handle_io_cmd(struct pci_nvme_softc* sc, uint16_t idx)
{
	struct nvme_submission_queue *sq;
	uint16_t status;
	uint16_t sqhead;

	/* handle all submissions up to sq->tail index */
	sq = &sc->submit_queues[idx];

	pthread_mutex_lock(&sq->mtx);

	sqhead = sq->head;
	DPRINTF("nvme_handle_io qid %u head %u tail %u cmdlist %p",
	    idx, sqhead, sq->tail, sq->qbase);

	while (sqhead != atomic_load_acq_short(&sq->tail)) {
		struct nvme_command *cmd;
		struct pci_nvme_ioreq *req;
		uint32_t nsid;
		bool pending;

		pending = false;
		req = NULL;
		status = 0;

		cmd = &sq->qbase[sqhead];
		sqhead = (sqhead + 1) % sq->size;

		nsid = le32toh(cmd->nsid);
		if ((nsid == 0) || (nsid > sc->ctrldata.nn)) {
			pci_nvme_status_genc(&status,
			    NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
			status |=
			    NVME_STATUS_DNR_MASK << NVME_STATUS_DNR_SHIFT;
			goto complete;
		}

		req = pci_nvme_get_ioreq(sc);
		if (req == NULL) {
			pci_nvme_status_genc(&status,
			    NVME_SC_INTERNAL_DEVICE_ERROR);
			WPRINTF("%s: unable to allocate IO req", __func__);
			goto complete;
		}
		req->nvme_sq = sq;
		req->sqid = idx;
		req->opc = cmd->opc;
		req->cid = cmd->cid;
		req->nsid = cmd->nsid;

		switch (cmd->opc) {
		case NVME_OPC_FLUSH:
			pending = nvme_opc_flush(sc, cmd, &sc->nvstore,
			    req, &status);
			break;
		case NVME_OPC_WRITE:
		case NVME_OPC_READ:
			pending = nvme_opc_write_read(sc, cmd, &sc->nvstore,
			    req, &status);
			break;
		case NVME_OPC_WRITE_ZEROES:
			/* TODO: write zeroes
			WPRINTF("%s write zeroes lba 0x%lx blocks %u",
			    __func__, lba, cmd->cdw12 & 0xFFFF); */
			pci_nvme_status_genc(&status, NVME_SC_SUCCESS);
			break;
		case NVME_OPC_DATASET_MANAGEMENT:
			pending = nvme_opc_dataset_mgmt(sc, cmd, &sc->nvstore,
			    req, &status);
			break;
		default:
			WPRINTF("%s unhandled io command 0x%x",
			    __func__, cmd->opc);
			pci_nvme_status_genc(&status, NVME_SC_INVALID_OPCODE);
		}
complete:
		if (!pending) {
			pci_nvme_set_completion(sc, sq, idx, cmd->cid, status);
			if (req != NULL)
				pci_nvme_release_ioreq(sc, req);
		}
	}

	sq->head = sqhead;

	pthread_mutex_unlock(&sq->mtx);
}
static void
pci_nvme_handle_doorbell(struct pci_nvme_softc* sc,
    uint64_t idx, int is_sq, uint64_t value)
{
	DPRINTF("nvme doorbell %lu, %s, val 0x%lx",
	    idx, is_sq ? "SQ" : "CQ", value & 0xFFFF);

	if (is_sq) {
		if (idx > sc->num_squeues) {
			WPRINTF("%s queue index %lu overflow from "
			    "guest (max %u)",
			    __func__, idx, sc->num_squeues);
			return;
		}

		atomic_store_short(&sc->submit_queues[idx].tail,
		    (uint16_t)value);

		if (idx == 0)
			pci_nvme_handle_admin_cmd(sc, value);
		else {
			/* submission queue; handle new entries in SQ */
			if (idx > sc->num_squeues) {
				WPRINTF("%s SQ index %lu overflow from "
				    "guest (max %u)",
				    __func__, idx, sc->num_squeues);
				return;
			}
			pci_nvme_handle_io_cmd(sc, (uint16_t)idx);
		}
	} else {
		if (idx > sc->num_cqueues) {
			WPRINTF("%s queue index %lu overflow from "
			    "guest (max %u)",
			    __func__, idx, sc->num_cqueues);
			return;
		}

		atomic_store_short(&sc->compl_queues[idx].head,
		    (uint16_t)value);
	}
}
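
/*
 * Doorbell decode example (illustrative): with the 8-byte-per-queue
 * spacing implied by the "belloffset / 8" decode in the BAR0 write
 * handler below (i.e. a doorbell stride of 0), the SQ tail doorbell for
 * queue N lives at BAR0 offset NVME_DOORBELL_OFFSET + N * 8 and the
 * matching CQ head doorbell 4 bytes later. A write at offset 0x1008
 * therefore decodes to idx = 1, is_sq = 1: the tail doorbell of I/O
 * submission queue 1.
 */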
static void
pci_nvme_bar0_reg_dumps(const char *func, uint64_t offset, int iswrite)
{
	const char *s = iswrite ? "WRITE" : "READ";

	switch (offset) {
	case NVME_CR_CAP_LOW:
		DPRINTF("%s %s NVME_CR_CAP_LOW", func, s);
		break;
	case NVME_CR_CAP_HI:
		DPRINTF("%s %s NVME_CR_CAP_HI", func, s);
		break;
	case NVME_CR_VS:
		DPRINTF("%s %s NVME_CR_VS", func, s);
		break;
	case NVME_CR_INTMS:
		DPRINTF("%s %s NVME_CR_INTMS", func, s);
		break;
	case NVME_CR_INTMC:
		DPRINTF("%s %s NVME_CR_INTMC", func, s);
		break;
	case NVME_CR_CC:
		DPRINTF("%s %s NVME_CR_CC", func, s);
		break;
	case NVME_CR_CSTS:
		DPRINTF("%s %s NVME_CR_CSTS", func, s);
		break;
	case NVME_CR_NSSR:
		DPRINTF("%s %s NVME_CR_NSSR", func, s);
		break;
	case NVME_CR_AQA:
		DPRINTF("%s %s NVME_CR_AQA", func, s);
		break;
	case NVME_CR_ASQ_LOW:
		DPRINTF("%s %s NVME_CR_ASQ_LOW", func, s);
		break;
	case NVME_CR_ASQ_HI:
		DPRINTF("%s %s NVME_CR_ASQ_HI", func, s);
		break;
	case NVME_CR_ACQ_LOW:
		DPRINTF("%s %s NVME_CR_ACQ_LOW", func, s);
		break;
	case NVME_CR_ACQ_HI:
		DPRINTF("%s %s NVME_CR_ACQ_HI", func, s);
		break;
	default:
		DPRINTF("unknown nvme bar-0 offset 0x%lx", offset);
	}
}
static void
pci_nvme_write_bar_0(struct pci_nvme_softc *sc, uint64_t offset, int size,
    uint64_t value)
{
	uint32_t ccreg;

	if (offset >= NVME_DOORBELL_OFFSET) {
		uint64_t belloffset = offset - NVME_DOORBELL_OFFSET;
		uint64_t idx = belloffset / 8; /* door bell size = 2*int */
		int is_sq = (belloffset % 8) < 4;

		if ((sc->regs.csts & NVME_CSTS_RDY) == 0) {
			WPRINTF("doorbell write prior to RDY (offset=%#lx)\n",
			    offset);
			return;
		}

		if (belloffset > ((sc->max_queues+1) * 8 - 4)) {
			WPRINTF("guest attempted an overflow write offset "
			    "0x%lx, val 0x%lx in %s",
			    offset, value, __func__);
			return;
		}

		if (is_sq) {
			if (sc->submit_queues[idx].qbase == NULL)
				return;
		} else if (sc->compl_queues[idx].qbase == NULL)
			return;

		pci_nvme_handle_doorbell(sc, idx, is_sq, value);
		return;
	}

	DPRINTF("nvme-write offset 0x%lx, size %d, value 0x%lx",
	    offset, size, value);

	if (size != 4) {
		WPRINTF("guest wrote invalid size %d (offset 0x%lx, "
		    "val 0x%lx) to bar0 in %s",
		    size, offset, value, __func__);
		/* TODO: shutdown device */
		return;
	}

	pci_nvme_bar0_reg_dumps(__func__, offset, 1);

	pthread_mutex_lock(&sc->mtx);

	switch (offset) {
	case NVME_CR_CAP_LOW:
	case NVME_CR_CAP_HI:
		/* readonly */
		break;
	case NVME_CR_VS:
		/* readonly */
		break;
	case NVME_CR_INTMS:
		/* MSI-X, so ignore */
		break;
	case NVME_CR_INTMC:
		/* MSI-X, so ignore */
		break;
	case NVME_CR_CC:
		ccreg = (uint32_t)value;

		DPRINTF("%s NVME_CR_CC en %x css %x shn %x iosqes %u "
		    "iocqes %u",
		    __func__,
		    NVME_CC_GET_EN(ccreg), NVME_CC_GET_CSS(ccreg),
		    NVME_CC_GET_SHN(ccreg), NVME_CC_GET_IOSQES(ccreg),
		    NVME_CC_GET_IOCQES(ccreg));

		if (NVME_CC_GET_SHN(ccreg)) {
			/* perform shutdown - flush out data to backend */
			sc->regs.csts &= ~(NVME_CSTS_REG_SHST_MASK <<
			    NVME_CSTS_REG_SHST_SHIFT);
			sc->regs.csts |= NVME_SHST_COMPLETE <<
			    NVME_CSTS_REG_SHST_SHIFT;
		}
		if (NVME_CC_GET_EN(ccreg) != NVME_CC_GET_EN(sc->regs.cc)) {
			if (NVME_CC_GET_EN(ccreg) == 0)
				/* transition 1->0 causes controller reset */
				pci_nvme_reset_locked(sc);
			else
				pci_nvme_init_controller(sc);
		}

		/* Insert the iocqes, iosqes and en bits from the write */
		sc->regs.cc &= ~NVME_CC_WRITE_MASK;
		sc->regs.cc |= ccreg & NVME_CC_WRITE_MASK;
		if (NVME_CC_GET_EN(ccreg) == 0) {
			/* Insert the ams, mps and css bit fields */
			sc->regs.cc &= ~NVME_CC_NEN_WRITE_MASK;
			sc->regs.cc |= ccreg & NVME_CC_NEN_WRITE_MASK;
			sc->regs.csts &= ~NVME_CSTS_RDY;
		} else if ((sc->pending_ios == 0) &&
		    !(sc->regs.csts & NVME_CSTS_CFS)) {
			sc->regs.csts |= NVME_CSTS_RDY;
		}
		break;
	case NVME_CR_CSTS:
		break;
	case NVME_CR_NSSR:
		/* ignore writes; don't support subsystem reset */
		break;
	case NVME_CR_AQA:
		sc->regs.aqa = (uint32_t)value;
		break;
	case NVME_CR_ASQ_LOW:
		sc->regs.asq = (sc->regs.asq & (0xFFFFFFFF00000000)) |
		    (0xFFFFF000 & value);
		break;
	case NVME_CR_ASQ_HI:
		sc->regs.asq = (sc->regs.asq & (0x00000000FFFFFFFF)) |
		    (value << 32);
		break;
	case NVME_CR_ACQ_LOW:
		sc->regs.acq = (sc->regs.acq & (0xFFFFFFFF00000000)) |
		    (0xFFFFF000 & value);
		break;
	case NVME_CR_ACQ_HI:
		sc->regs.acq = (sc->regs.acq & (0x00000000FFFFFFFF)) |
		    (value << 32);
		break;
	default:
		DPRINTF("%s unknown offset 0x%lx, value 0x%lx size %d",
		    __func__, offset, value, size);
	}
	pthread_mutex_unlock(&sc->mtx);
}
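
/*
 * Note: the 0xFFFFF000 masks above keep the Admin Submission and
 * Completion Queue base addresses 4 KiB aligned; the spec reserves
 * bits 11:0 of ASQ and ACQ, and the high-dword writes are simply
 * shifted into place.
 */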
static void
pci_nvme_write(struct pci_devinst *pi, int baridx, uint64_t offset, int size,
    uint64_t value)
{
	struct pci_nvme_softc* sc = pi->pi_arg;

	if (baridx == pci_msix_table_bar(pi) ||
	    baridx == pci_msix_pba_bar(pi)) {
		DPRINTF("nvme-write baridx %d, msix: off 0x%lx, size %d, "
		    "value 0x%lx", baridx, offset, size, value);

		pci_emul_msix_twrite(pi, offset, size, value);
		return;
	}

	switch (baridx) {
	case 0:
		pci_nvme_write_bar_0(sc, offset, size, value);
		break;

	default:
		DPRINTF("%s unknown baridx %d, val 0x%lx",
		    __func__, baridx, value);
	}
}
static uint64_t
pci_nvme_read_bar_0(struct pci_nvme_softc* sc, uint64_t offset, int size)
{
	uint64_t value;

	pci_nvme_bar0_reg_dumps(__func__, offset, 0);

	if (offset < NVME_DOORBELL_OFFSET) {
		void *p = &(sc->regs);
		pthread_mutex_lock(&sc->mtx);
		memcpy(&value, (void *)((uintptr_t)p + offset), size);
		pthread_mutex_unlock(&sc->mtx);
	} else {
		value = 0;
		WPRINTF("pci_nvme: read invalid offset %ld", offset);
	}

	switch (size) {
	case 1:
		value &= 0xFF;
		break;
	case 2:
		value &= 0xFFFF;
		break;
	case 4:
		value &= 0xFFFFFFFF;
		break;
	}

	DPRINTF(" nvme-read offset 0x%lx, size %d -> value 0x%x",
	    offset, size, (uint32_t)value);

	return (value);
}
static uint64_t
pci_nvme_read(struct pci_devinst *pi, int baridx, uint64_t offset, int size)
{
	struct pci_nvme_softc* sc = pi->pi_arg;

	if (baridx == pci_msix_table_bar(pi) ||
	    baridx == pci_msix_pba_bar(pi)) {
		DPRINTF("nvme-read bar: %d, msix: regoff 0x%lx, size %d",
		    baridx, offset, size);

		return pci_emul_msix_tread(pi, offset, size);
	}

	switch (baridx) {
	case 0:
		return pci_nvme_read_bar_0(sc, offset, size);

	default:
		DPRINTF("unknown bar %d, 0x%lx", baridx, offset);
	}

	return (0);
}
static int
pci_nvme_parse_config(struct pci_nvme_softc *sc, nvlist_t *nvl)
{
	char bident[sizeof("XXX:XXX")];
	const char *value;
	uint32_t sectsz;

	sc->max_queues = NVME_QUEUES;
	sc->max_qentries = NVME_MAX_QENTRIES;
	sc->ioslots = NVME_IOSLOTS;
	sc->num_squeues = sc->max_queues;
	sc->num_cqueues = sc->max_queues;
	sc->dataset_management = NVME_DATASET_MANAGEMENT_AUTO;
	sectsz = 0;
	snprintf(sc->ctrldata.sn, sizeof(sc->ctrldata.sn),
	    "NVME-%d-%d", sc->nsc_pi->pi_slot, sc->nsc_pi->pi_func);

	value = get_config_value_node(nvl, "maxq");
	if (value != NULL)
		sc->max_queues = atoi(value);
	value = get_config_value_node(nvl, "qsz");
	if (value != NULL) {
		sc->max_qentries = atoi(value);
		if (sc->max_qentries <= 0) {
			EPRINTLN("nvme: Invalid qsz option %d",
			    sc->max_qentries);
			return (-1);
		}
	}
	value = get_config_value_node(nvl, "ioslots");
	if (value != NULL) {
		sc->ioslots = atoi(value);
		if (sc->ioslots <= 0) {
			EPRINTLN("Invalid ioslots option %d", sc->ioslots);
			return (-1);
		}
	}
	value = get_config_value_node(nvl, "sectsz");
	if (value != NULL)
		sectsz = atoi(value);
	value = get_config_value_node(nvl, "ser");
	if (value != NULL) {
		/*
		 * This field indicates the Product Serial Number in
		 * 7-bit ASCII, unused bytes should be space characters.
		 */
		cpywithpad((char *)sc->ctrldata.sn,
		    sizeof(sc->ctrldata.sn), value, ' ');
	}
	value = get_config_value_node(nvl, "eui64");
	if (value != NULL)
		sc->nvstore.eui64 = htobe64(strtoull(value, NULL, 0));
	value = get_config_value_node(nvl, "dsm");
	if (value != NULL) {
		if (strcmp(value, "auto") == 0)
			sc->dataset_management = NVME_DATASET_MANAGEMENT_AUTO;
		else if (strcmp(value, "enable") == 0)
			sc->dataset_management = NVME_DATASET_MANAGEMENT_ENABLE;
		else if (strcmp(value, "disable") == 0)
			sc->dataset_management = NVME_DATASET_MANAGEMENT_DISABLE;
	}

	value = get_config_value_node(nvl, "bootindex");
	if (value != NULL) {
		if (pci_emul_add_boot_device(sc->nsc_pi, atoi(value))) {
			EPRINTLN("Invalid bootindex %d", atoi(value));
			return (-1);
		}
	}

	value = get_config_value_node(nvl, "ram");
	if (value != NULL) {
		uint64_t sz = strtoull(value, NULL, 10);

		sc->nvstore.type = NVME_STOR_RAM;
		sc->nvstore.size = sz * 1024 * 1024;
		sc->nvstore.ctx = calloc(1, sc->nvstore.size);
		sc->nvstore.sectsz = 4096;
		sc->nvstore.sectsz_bits = 12;
		if (sc->nvstore.ctx == NULL) {
			EPRINTLN("nvme: Unable to allocate RAM");
			return (-1);
		}
	} else {
		snprintf(bident, sizeof(bident), "%u:%u",
		    sc->nsc_pi->pi_slot, sc->nsc_pi->pi_func);
		sc->nvstore.ctx = blockif_open(nvl, bident);
		if (sc->nvstore.ctx == NULL) {
			EPRINTLN("nvme: Could not open backing file: %s",
			    strerror(errno));
			return (-1);
		}
		sc->nvstore.type = NVME_STOR_BLOCKIF;
		sc->nvstore.size = blockif_size(sc->nvstore.ctx);
	}

	if (sectsz == 512 || sectsz == 4096 || sectsz == 8192)
		sc->nvstore.sectsz = sectsz;
	else if (sc->nvstore.type != NVME_STOR_RAM)
		sc->nvstore.sectsz = blockif_sectsz(sc->nvstore.ctx);
	for (sc->nvstore.sectsz_bits = 9;
	     (1U << sc->nvstore.sectsz_bits) < sc->nvstore.sectsz;
	     sc->nvstore.sectsz_bits++);

	if (sc->max_queues <= 0 || sc->max_queues > NVME_QUEUES)
		sc->max_queues = NVME_QUEUES;

	return (0);
}
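
/*
 * Example configurations (illustrative; the device path is a
 * placeholder):
 *
 *   -s 4,nvme,/path/to/disk.img,maxq=4,qsz=512,ser=SN123456
 *   -s 4,nvme,ram=1024,ioslots=16
 *
 * The first attaches a blockif-backed namespace with a custom serial
 * number; the second creates a 1024 MiB RAM-backed namespace using the
 * "ram" node parsed above.
 */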
static void
pci_nvme_resized(struct blockif_ctxt *bctxt __unused, void *arg,
    size_t new_size)
{
	struct pci_nvme_softc *sc;
	struct pci_nvme_blockstore *nvstore;
	struct nvme_namespace_data *nd;

	sc = arg;
	nvstore = &sc->nvstore;
	nd = &sc->nsdata;

	nvstore->size = new_size;
	pci_nvme_init_nsdata_size(nvstore, nd);

	/* Add changed NSID to list */
	sc->ns_log.ns[0] = 1;
	sc->ns_log.ns[1] = 0;

	pci_nvme_aen_post(sc, PCI_NVME_AE_TYPE_NOTICE,
	    PCI_NVME_AEI_NOTICE_NS_ATTR_CHANGED);
}
static int
pci_nvme_init(struct pci_devinst *pi, nvlist_t *nvl)
{
	struct pci_nvme_softc *sc;
	uint32_t pci_membar_sz;
	int error;

	error = 0;

	sc = calloc(1, sizeof(struct pci_nvme_softc));
	pi->pi_arg = sc;
	sc->nsc_pi = pi;

	error = pci_nvme_parse_config(sc, nvl);
	if (error < 0)
		goto done;
	else
		error = 0;

	STAILQ_INIT(&sc->ioreqs_free);
	sc->ioreqs = calloc(sc->ioslots, sizeof(struct pci_nvme_ioreq));
	for (uint32_t i = 0; i < sc->ioslots; i++) {
		STAILQ_INSERT_TAIL(&sc->ioreqs_free, &sc->ioreqs[i], link);
	}

	pci_set_cfgdata16(pi, PCIR_DEVICE, 0x0A0A);
	pci_set_cfgdata16(pi, PCIR_VENDOR, 0xFB5D);
	pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_STORAGE);
	pci_set_cfgdata8(pi, PCIR_SUBCLASS, PCIS_STORAGE_NVM);
	pci_set_cfgdata8(pi, PCIR_PROGIF,
	    PCIP_STORAGE_NVM_ENTERPRISE_NVMHCI_1_0);

	/*
	 * Allocate size of NVMe registers + doorbell space for all queues.
	 *
	 * The specification requires a minimum memory I/O window size of 16K.
	 * The Windows driver will refuse to start a device with a smaller
	 * size.
	 */
	pci_membar_sz = sizeof(struct nvme_registers) +
	    2 * sizeof(uint32_t) * (sc->max_queues + 1);
	pci_membar_sz = MAX(pci_membar_sz, NVME_MMIO_SPACE_MIN);

	DPRINTF("nvme membar size: %u", pci_membar_sz);

	error = pci_emul_alloc_bar(pi, 0, PCIBAR_MEM64, pci_membar_sz);
	if (error) {
		WPRINTF("%s pci alloc mem bar failed", __func__);
		goto done;
	}

	error = pci_emul_add_msixcap(pi, sc->max_queues + 1, NVME_MSIX_BAR);
	if (error) {
		WPRINTF("%s pci add msixcap failed", __func__);
		goto done;
	}

	error = pci_emul_add_pciecap(pi, PCIEM_TYPE_ROOT_INT_EP);
	if (error) {
		WPRINTF("%s pci add Express capability failed", __func__);
		goto done;
	}

	pthread_mutex_init(&sc->mtx, NULL);
	sem_init(&sc->iosemlock, 0, sc->ioslots);
	blockif_register_resize_callback(sc->nvstore.ctx, pci_nvme_resized, sc);

	pci_nvme_init_queues(sc, sc->max_queues, sc->max_queues);
	/*
	 * Controller data depends on Namespace data so initialize Namespace
	 * data first.
	 */
	pci_nvme_init_nsdata(sc, &sc->nsdata, 1, &sc->nvstore);
	pci_nvme_init_ctrldata(sc);
	pci_nvme_init_logpages(sc);
	pci_nvme_init_features(sc);

	pci_nvme_aer_init(sc);
	pci_nvme_aen_init(sc);

	pci_nvme_reset(sc);

done:
	return (error);
}
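
/*
 * Sizing example (illustrative): with the default maxq of 16, the
 * computed BAR size is sizeof(struct nvme_registers) plus 17 doorbell
 * pairs of 2 * 4 bytes each, which is well under NVME_MMIO_SPACE_MIN,
 * so the BAR is rounded up to the 16K minimum noted in the comment
 * above.
 */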
static int
pci_nvme_legacy_config(nvlist_t *nvl, const char *opts)
{
	char *cp, *ram;

	if (opts == NULL)
		return (0);

	if (strncmp(opts, "ram=", 4) == 0) {
		cp = strchr(opts, ',');
		if (cp == NULL) {
			set_config_value_node(nvl, "ram", opts + 4);
			return (0);
		}
		ram = strndup(opts + 4, cp - opts - 4);
		set_config_value_node(nvl, "ram", ram);
		free(ram);
		return (pci_parse_legacy_config(nvl, cp + 1));
	} else
		return (blockif_legacy_config(nvl, opts));
}
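
/*
 * Example (illustrative): a legacy option string such as "ram=1024,ser=FOO"
 * is split here into the "ram" node plus the remaining comma-separated
 * options, which pci_parse_legacy_config() turns into nvlist entries;
 * anything not starting with "ram=" is treated as a blockif
 * backing-store specification.
 */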
static const struct pci_devemu pci_de_nvme = {
	.pe_emu =	"nvme",
	.pe_init =	pci_nvme_init,
	.pe_legacy_config = pci_nvme_legacy_config,
	.pe_barwrite =	pci_nvme_write,
	.pe_barread =	pci_nvme_read
};
PCI_EMUL_SET(pci_de_nvme);