2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2017 Shunsuke Mie
5 * Copyright (c) 2018 Leon Dang
6 * Copyright (c) 2020 Chuck Tuffli
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * bhyve PCIe-NVMe device emulation.
34 * -s <n>,nvme,devpath,maxq=#,qsz=#,ioslots=#,sectsz=#,ser=A-Z,eui64=#,dsm=<opt>
41 * maxq = max number of queues
42 * qsz = max elements in each queue
43 * ioslots = max number of concurrent io requests
44 * sectsz = sector size (defaults to blockif sector size)
45 * ser = serial number (20-chars max)
46 * eui64 = IEEE Extended Unique Identifier (8 byte value)
47 * dsm = Dataset Management support. Option is one of auto, enable, disable
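 *
 * An example invocation (illustrative only; the device path and option
 * values are placeholders, not defaults defined in this file):
 *  -s 4,nvme,/dev/zvol/tank/nvme0,maxq=4,qsz=512,ioslots=16,sectsz=512,ser=NVME0001,dsm=auto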
52 - create async event for smart and log
56 #include <sys/cdefs.h>
57 #include <sys/errno.h>
58 #include <sys/types.h>
59 #include <sys/crc16.h>
60 #include <net/ieee_oui.h>
64 #include <pthread_np.h>
65 #include <semaphore.h>
73 #include <machine/atomic.h>
74 #include <machine/vmm.h>
77 #include <dev/nvme/nvme.h>
86 static int nvme_debug = 0;
87 #define DPRINTF(fmt, args...) if (nvme_debug) PRINTLN(fmt, ##args)
88 #define WPRINTF(fmt, args...) PRINTLN(fmt, ##args)
90 /* defaults; can be overridden */
91 #define NVME_MSIX_BAR 4
93 #define NVME_IOSLOTS 8
95 /* The NVMe spec defines bits 13:4 in BAR0 as reserved */
96 #define NVME_MMIO_SPACE_MIN (1 << 14)
98 #define NVME_QUEUES 16
99 #define NVME_MAX_QENTRIES 2048
100 /* Memory Page size Minimum reported in CAP register */
101 #define NVME_MPSMIN 0
102 /* MPSMIN converted to bytes */
103 #define NVME_MPSMIN_BYTES (1 << (12 + NVME_MPSMIN))
105 #define NVME_PRP2_ITEMS (PAGE_SIZE/sizeof(uint64_t))
107 /* Note the + 1 allows for the initial descriptor to not be page aligned */
108 #define NVME_MAX_IOVEC ((1 << NVME_MDTS) + 1)
109 #define NVME_MAX_DATA_SIZE ((1 << NVME_MDTS) * NVME_MPSMIN_BYTES)
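/*
 * Worked example (illustrative values): with NVME_MPSMIN of 0 the minimum
 * memory page is 4 KiB (NVME_MPSMIN_BYTES == 1 << 12), so an MDTS exponent of
 * 9 would advertise a maximum transfer of (1 << 9) * 4 KiB = 2 MiB, with
 * NVME_MAX_IOVEC allowing one extra descriptor for an unaligned first page.
 */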
111 /* This is a synthetic status code to indicate there is no status */
112 #define NVME_NO_STATUS 0xffff
113 #define NVME_COMPLETION_VALID(c) ((c).status != NVME_NO_STATUS)
115 /* Reported temperature in Kelvin (i.e. room temperature) */
116 #define NVME_TEMPERATURE 296
120 /* Convert a zero-based value into a one-based value */
121 #define ONE_BASED(zero) ((zero) + 1)
122 /* Convert a one-based value into a zero-based value */
123 #define ZERO_BASED(one) ((one) - 1)
125 /* Encode number of SQ's and CQ's for Set/Get Features */
126 #define NVME_FEATURE_NUM_QUEUES(sc) \
127 (ZERO_BASED((sc)->num_squeues) & 0xffff) | \
128 (ZERO_BASED((sc)->num_cqueues) & 0xffff) << 16
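/*
 * For example, with 4 IO Submission Queues and 4 IO Completion Queues
 * allocated, the encoding above yields (4 - 1) | ((4 - 1) << 16), i.e.
 * 0x00030003, as reported by Set/Get Features (Number of Queues).
 */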
130 #define NVME_DOORBELL_OFFSET offsetof(struct nvme_registers, doorbell)
132 enum nvme_controller_register_offsets {
133 NVME_CR_CAP_LOW = 0x00,
134 NVME_CR_CAP_HI = 0x04,
136 NVME_CR_INTMS = 0x0c,
137 NVME_CR_INTMC = 0x10,
142 NVME_CR_ASQ_LOW = 0x28,
143 NVME_CR_ASQ_HI = 0x2c,
144 NVME_CR_ACQ_LOW = 0x30,
145 NVME_CR_ACQ_HI = 0x34,
148 enum nvme_cmd_cdw11 {
149 NVME_CMD_CDW11_PC = 0x0001,
150 NVME_CMD_CDW11_IEN = 0x0002,
151 NVME_CMD_CDW11_IV = 0xFFFF0000,
159 #define NVME_CQ_INTEN 0x01
160 #define NVME_CQ_INTCOAL 0x02
162 struct nvme_completion_queue {
163 struct nvme_completion *qbase;
166 uint16_t tail; /* nvme progress */
167 uint16_t head; /* guest progress */
172 struct nvme_submission_queue {
173 struct nvme_command *qbase;
176 uint16_t head; /* nvme progress */
177 uint16_t tail; /* guest progress */
178 uint16_t cqid; /* completion queue id */
182 enum nvme_storage_type {
183 NVME_STOR_BLOCKIF = 0,
187 struct pci_nvme_blockstore {
188 enum nvme_storage_type type;
192 uint32_t sectsz_bits;
194 uint32_t deallocate:1;
198 * Calculate the number of additional page descriptors for guest IO requests
199 * based on the advertised Max Data Transfer Size (MDTS) and given the number of
200 * default iovec's in a struct blockif_req.
202 #define MDTS_PAD_SIZE \
203 ( NVME_MAX_IOVEC > BLOCKIF_IOV_MAX ? \
204 NVME_MAX_IOVEC - BLOCKIF_IOV_MAX : \
207 struct pci_nvme_ioreq {
208 struct pci_nvme_softc *sc;
209 STAILQ_ENTRY(pci_nvme_ioreq) link;
210 struct nvme_submission_queue *nvme_sq;
213 /* command information */
218 uint64_t prev_gpaddr;
222 struct blockif_req io_req;
224 struct iovec iovpadding[MDTS_PAD_SIZE];
228 /* Dataset Management bit in ONCS reflects backing storage capability */
229 NVME_DATASET_MANAGEMENT_AUTO,
230 /* Unconditionally set Dataset Management bit in ONCS */
231 NVME_DATASET_MANAGEMENT_ENABLE,
232 /* Unconditionally clear Dataset Management bit in ONCS */
233 NVME_DATASET_MANAGEMENT_DISABLE,
236 struct pci_nvme_softc;
237 struct nvme_feature_obj;
239 typedef void (*nvme_feature_cb)(struct pci_nvme_softc *,
240 struct nvme_feature_obj *,
241 struct nvme_command *,
242 struct nvme_completion *);
244 struct nvme_feature_obj {
248 bool namespace_specific;
251 #define NVME_FID_MAX (NVME_FEAT_ENDURANCE_GROUP_EVENT_CONFIGURATION + 1)
254 PCI_NVME_AE_TYPE_ERROR = 0,
255 PCI_NVME_AE_TYPE_SMART,
256 PCI_NVME_AE_TYPE_NOTICE,
257 PCI_NVME_AE_TYPE_IO_CMD = 6,
258 PCI_NVME_AE_TYPE_VENDOR = 7,
259 PCI_NVME_AE_TYPE_MAX /* Must be last */
260 } pci_nvme_async_type;
262 /* Asynchronous Event Requests */
263 struct pci_nvme_aer {
264 STAILQ_ENTRY(pci_nvme_aer) link;
265 uint16_t cid; /* Command ID of the submitted AER */
268 /** Asynchronous Event Information - Notice */
270 PCI_NVME_AEI_NOTICE_NS_ATTR_CHANGED = 0,
271 PCI_NVME_AEI_NOTICE_FW_ACTIVATION,
272 PCI_NVME_AEI_NOTICE_TELEMETRY_CHANGE,
273 PCI_NVME_AEI_NOTICE_ANA_CHANGE,
274 PCI_NVME_AEI_NOTICE_PREDICT_LATENCY_CHANGE,
275 PCI_NVME_AEI_NOTICE_LBA_STATUS_ALERT,
276 PCI_NVME_AEI_NOTICE_ENDURANCE_GROUP_CHANGE,
277 PCI_NVME_AEI_NOTICE_MAX,
278 } pci_nvme_async_event_info_notice;
280 #define PCI_NVME_AEI_NOTICE_SHIFT 8
281 #define PCI_NVME_AEI_NOTICE_MASK(event) (1 << (event + PCI_NVME_AEI_NOTICE_SHIFT))
283 /* Asynchronous Event Notifications */
284 struct pci_nvme_aen {
285 pci_nvme_async_type atype;
291 * By default, enable all Asynchronous Event Notifications:
292 * SMART / Health Critical Warnings
293 * Namespace Attribute Notices
295 #define PCI_NVME_AEN_DEFAULT_MASK 0x11f
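/*
 * For reference, 0x11f decomposes as 0x1f (the SMART / Health critical
 * warning bits in CDW11 bits 4:0) plus 0x100, which is
 * PCI_NVME_AEI_NOTICE_MASK(PCI_NVME_AEI_NOTICE_NS_ATTR_CHANGED),
 * i.e. 1 << (0 + PCI_NVME_AEI_NOTICE_SHIFT).
 */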
298 NVME_CNTRLTYPE_IO = 1,
299 NVME_CNTRLTYPE_DISCOVERY = 2,
300 NVME_CNTRLTYPE_ADMIN = 3,
301 } pci_nvme_cntrl_type;
303 struct pci_nvme_softc {
304 struct pci_devinst *nsc_pi;
308 struct nvme_registers regs;
310 struct nvme_namespace_data nsdata;
311 struct nvme_controller_data ctrldata;
312 struct nvme_error_information_entry err_log;
313 struct nvme_health_information_page health_log;
314 struct nvme_firmware_page fw_log;
315 struct nvme_ns_list ns_log;
317 struct pci_nvme_blockstore nvstore;
319 uint16_t max_qentries; /* max entries per queue */
320 uint32_t max_queues; /* max number of IO SQ's or CQ's */
321 uint32_t num_cqueues;
322 uint32_t num_squeues;
323 bool num_q_is_set; /* Has host set Number of Queues */
325 struct pci_nvme_ioreq *ioreqs;
326 STAILQ_HEAD(, pci_nvme_ioreq) ioreqs_free; /* free list of ioreqs */
327 uint32_t pending_ios;
332 * Memory mapped Submission and Completion queues
333 * Each array includes both Admin and IO queues
335 struct nvme_completion_queue *compl_queues;
336 struct nvme_submission_queue *submit_queues;
338 struct nvme_feature_obj feat[NVME_FID_MAX];
340 enum nvme_dsm_type dataset_management;
342 /* Accounting for SMART data */
343 __uint128_t read_data_units;
344 __uint128_t write_data_units;
345 __uint128_t read_commands;
346 __uint128_t write_commands;
347 uint32_t read_dunits_remainder;
348 uint32_t write_dunits_remainder;
350 STAILQ_HEAD(, pci_nvme_aer) aer_list;
351 pthread_mutex_t aer_mtx;
353 struct pci_nvme_aen aen[PCI_NVME_AE_TYPE_MAX];
355 pthread_mutex_t aen_mtx;
356 pthread_cond_t aen_cond;
360 static void pci_nvme_cq_update(struct pci_nvme_softc *sc,
361 struct nvme_completion_queue *cq,
366 static struct pci_nvme_ioreq *pci_nvme_get_ioreq(struct pci_nvme_softc *);
367 static void pci_nvme_release_ioreq(struct pci_nvme_softc *, struct pci_nvme_ioreq *);
368 static void pci_nvme_io_done(struct blockif_req *, int);
370 /* Controller Configuration utils */
371 #define NVME_CC_GET_EN(cc) \
372 ((cc) >> NVME_CC_REG_EN_SHIFT & NVME_CC_REG_EN_MASK)
373 #define NVME_CC_GET_CSS(cc) \
374 ((cc) >> NVME_CC_REG_CSS_SHIFT & NVME_CC_REG_CSS_MASK)
375 #define NVME_CC_GET_SHN(cc) \
376 ((cc) >> NVME_CC_REG_SHN_SHIFT & NVME_CC_REG_SHN_MASK)
377 #define NVME_CC_GET_IOSQES(cc) \
378 ((cc) >> NVME_CC_REG_IOSQES_SHIFT & NVME_CC_REG_IOSQES_MASK)
379 #define NVME_CC_GET_IOCQES(cc) \
380 ((cc) >> NVME_CC_REG_IOCQES_SHIFT & NVME_CC_REG_IOCQES_MASK)
382 #define NVME_CC_WRITE_MASK \
383 ((NVME_CC_REG_EN_MASK << NVME_CC_REG_EN_SHIFT) | \
384 (NVME_CC_REG_IOSQES_MASK << NVME_CC_REG_IOSQES_SHIFT) | \
385 (NVME_CC_REG_IOCQES_MASK << NVME_CC_REG_IOCQES_SHIFT))
387 #define NVME_CC_NEN_WRITE_MASK \
388 ((NVME_CC_REG_CSS_MASK << NVME_CC_REG_CSS_SHIFT) | \
389 (NVME_CC_REG_MPS_MASK << NVME_CC_REG_MPS_SHIFT) | \
390 (NVME_CC_REG_AMS_MASK << NVME_CC_REG_AMS_SHIFT))
392 /* Controller Status utils */
393 #define NVME_CSTS_GET_RDY(sts) \
394 ((sts) >> NVME_CSTS_REG_RDY_SHIFT & NVME_CSTS_REG_RDY_MASK)
396 #define NVME_CSTS_RDY (1 << NVME_CSTS_REG_RDY_SHIFT)
397 #define NVME_CSTS_CFS (1 << NVME_CSTS_REG_CFS_SHIFT)
399 /* Completion Queue status word utils */
400 #define NVME_STATUS_P (1 << NVME_STATUS_P_SHIFT)
401 #define NVME_STATUS_MASK \
402 ((NVME_STATUS_SCT_MASK << NVME_STATUS_SCT_SHIFT) |\
403 (NVME_STATUS_SC_MASK << NVME_STATUS_SC_SHIFT))
405 #define NVME_ONCS_DSM (NVME_CTRLR_DATA_ONCS_DSM_MASK << \
406 NVME_CTRLR_DATA_ONCS_DSM_SHIFT)
408 static void nvme_feature_invalid_cb(struct pci_nvme_softc *,
409 struct nvme_feature_obj *,
410 struct nvme_command *,
411 struct nvme_completion *);
412 static void nvme_feature_temperature(struct pci_nvme_softc *,
413 struct nvme_feature_obj *,
414 struct nvme_command *,
415 struct nvme_completion *);
416 static void nvme_feature_num_queues(struct pci_nvme_softc *,
417 struct nvme_feature_obj *,
418 struct nvme_command *,
419 struct nvme_completion *);
420 static void nvme_feature_iv_config(struct pci_nvme_softc *,
421 struct nvme_feature_obj *,
422 struct nvme_command *,
423 struct nvme_completion *);
424 static void nvme_feature_async_event(struct pci_nvme_softc *,
425 struct nvme_feature_obj *,
426 struct nvme_command *,
427 struct nvme_completion *);
429 static void *aen_thr(void *arg);
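/*
 * Copy 'src' into the fixed-width field 'dst', truncating if necessary and
 * padding any remaining bytes with 'pad'. NVMe identify strings are space
 * padded rather than NUL terminated, e.g.
 * cpywithpad((char *)cd->mn, sizeof(cd->mn), "bhyve-NVMe", ' ') below.
 */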
432 cpywithpad(char *dst, size_t dst_size, const char *src, char pad)
436 len = strnlen(src, dst_size);
437 memset(dst, pad, dst_size);
438 memcpy(dst, src, len);
442 pci_nvme_status_tc(uint16_t *status, uint16_t type, uint16_t code)
445 *status &= ~NVME_STATUS_MASK;
446 *status |= (type & NVME_STATUS_SCT_MASK) << NVME_STATUS_SCT_SHIFT |
447 (code & NVME_STATUS_SC_MASK) << NVME_STATUS_SC_SHIFT;
451 pci_nvme_status_genc(uint16_t *status, uint16_t code)
454 pci_nvme_status_tc(status, NVME_SCT_GENERIC, code);
458 * Initialize the requested number of IO Submission and Completion Queues.
459 * Admin queues are allocated implicitly.
462 pci_nvme_init_queues(struct pci_nvme_softc *sc, uint32_t nsq, uint32_t ncq)
467 * Allocate and initialize the Submission Queues
469 if (nsq > NVME_QUEUES) {
470 WPRINTF("%s: clamping number of SQ from %u to %u",
471 __func__, nsq, NVME_QUEUES);
475 sc->num_squeues = nsq;
477 sc->submit_queues = calloc(sc->num_squeues + 1,
478 sizeof(struct nvme_submission_queue));
479 if (sc->submit_queues == NULL) {
480 WPRINTF("%s: SQ allocation failed", __func__);
483 struct nvme_submission_queue *sq = sc->submit_queues;
485 for (i = 0; i < sc->num_squeues + 1; i++)
486 pthread_mutex_init(&sq[i].mtx, NULL);
490 * Allocate and initialize the Completion Queues
492 if (ncq > NVME_QUEUES) {
493 WPRINTF("%s: clamping number of CQ from %u to %u",
494 __func__, ncq, NVME_QUEUES);
498 sc->num_cqueues = ncq;
500 sc->compl_queues = calloc(sc->num_cqueues + 1,
501 sizeof(struct nvme_completion_queue));
502 if (sc->compl_queues == NULL) {
503 WPRINTF("%s: CQ allocation failed", __func__);
506 struct nvme_completion_queue *cq = sc->compl_queues;
508 for (i = 0; i < sc->num_cqueues + 1; i++)
509 pthread_mutex_init(&cq[i].mtx, NULL);
514 pci_nvme_init_ctrldata(struct pci_nvme_softc *sc)
516 struct nvme_controller_data *cd = &sc->ctrldata;
521 cpywithpad((char *)cd->mn, sizeof(cd->mn), "bhyve-NVMe", ' ');
522 cpywithpad((char *)cd->fr, sizeof(cd->fr), "1.0", ' ');
524 /* Num of submission commands that we can handle at a time (2^rab) */
534 cd->mdts = NVME_MDTS; /* max data transfer size (2^mdts * CAP.MPSMIN) */
536 cd->ver = NVME_REV(1,4);
538 cd->cntrltype = NVME_CNTRLTYPE_IO;
539 cd->oacs = 1 << NVME_CTRLR_DATA_OACS_FORMAT_SHIFT;
540 cd->oaes = NVMEB(NVME_CTRLR_DATA_OAES_NS_ATTR);
544 /* Advertise 1, Read-only firmware slot */
545 cd->frmw = NVMEB(NVME_CTRLR_DATA_FRMW_SLOT1_RO) |
546 (1 << NVME_CTRLR_DATA_FRMW_NUM_SLOTS_SHIFT);
547 cd->lpa = 0; /* TODO: support some simple things like SMART */
548 cd->elpe = 0; /* max error log page entries */
550 * Report a single power state (zero-based value)
551 * power_state[] values are left as zero to indicate "Not reported"
555 /* Warning Composite Temperature Threshold */
559 /* SANICAP must not be 0 for Revision 1.4 and later NVMe Controllers */
560 cd->sanicap = (NVME_CTRLR_DATA_SANICAP_NODMMAS_NO <<
561 NVME_CTRLR_DATA_SANICAP_NODMMAS_SHIFT);
563 cd->sqes = (6 << NVME_CTRLR_DATA_SQES_MAX_SHIFT) |
564 (6 << NVME_CTRLR_DATA_SQES_MIN_SHIFT);
565 cd->cqes = (4 << NVME_CTRLR_DATA_CQES_MAX_SHIFT) |
566 (4 << NVME_CTRLR_DATA_CQES_MIN_SHIFT);
567 cd->nn = 1; /* number of namespaces */
570 switch (sc->dataset_management) {
571 case NVME_DATASET_MANAGEMENT_AUTO:
572 if (sc->nvstore.deallocate)
573 cd->oncs |= NVME_ONCS_DSM;
575 case NVME_DATASET_MANAGEMENT_ENABLE:
576 cd->oncs |= NVME_ONCS_DSM;
582 cd->fna = NVME_CTRLR_DATA_FNA_FORMAT_ALL_MASK <<
583 NVME_CTRLR_DATA_FNA_FORMAT_ALL_SHIFT;
585 cd->vwc = NVME_CTRLR_DATA_VWC_ALL_NO << NVME_CTRLR_DATA_VWC_ALL_SHIFT;
589 pci_nvme_init_nsdata_size(struct pci_nvme_blockstore *nvstore,
590 struct nvme_namespace_data *nd)
593 /* Get capacity and block size information from backing store */
594 nd->nsze = nvstore->size / nvstore->sectsz;
600 pci_nvme_init_nsdata(struct pci_nvme_softc *sc,
601 struct nvme_namespace_data *nd, uint32_t nsid,
602 struct pci_nvme_blockstore *nvstore)
605 pci_nvme_init_nsdata_size(nvstore, nd);
607 if (nvstore->type == NVME_STOR_BLOCKIF)
608 nvstore->deallocate = blockif_candelete(nvstore->ctx);
610 nd->nlbaf = 0; /* NLBAF is a 0's based value (i.e. 1 LBA Format) */
613 /* Create an EUI-64 if user did not provide one */
614 if (nvstore->eui64 == 0) {
616 uint64_t eui64 = nvstore->eui64;
618 asprintf(&data, "%s%u%u%u", get_config_value("name"),
619 sc->nsc_pi->pi_bus, sc->nsc_pi->pi_slot,
620 sc->nsc_pi->pi_func);
623 eui64 = OUI_FREEBSD_NVME_LOW | crc16(0, data, strlen(data));
626 nvstore->eui64 = (eui64 << 16) | (nsid & 0xffff);
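/*
 * The generated identifier places the FreeBSD OUI-derived prefix and CRC16
 * in the upper 48 bits and the namespace id in the low 16 bits.
 */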
628 be64enc(nd->eui64, nvstore->eui64);
630 /* LBA data-sz = 2^lbads */
631 nd->lbaf[0] = nvstore->sectsz_bits << NVME_NS_DATA_LBAF_LBADS_SHIFT;
635 pci_nvme_init_logpages(struct pci_nvme_softc *sc)
637 __uint128_t power_cycles = 1;
639 memset(&sc->err_log, 0, sizeof(sc->err_log));
640 memset(&sc->health_log, 0, sizeof(sc->health_log));
641 memset(&sc->fw_log, 0, sizeof(sc->fw_log));
642 memset(&sc->ns_log, 0, sizeof(sc->ns_log));
644 /* Set read/write remainder to round up according to spec */
645 sc->read_dunits_remainder = 999;
646 sc->write_dunits_remainder = 999;
648 /* Set nominal Health values checked by implementations */
649 sc->health_log.temperature = NVME_TEMPERATURE;
650 sc->health_log.available_spare = 100;
651 sc->health_log.available_spare_threshold = 10;
653 /* Set Active Firmware Info to slot 1 */
654 sc->fw_log.afi = (1 << NVME_FIRMWARE_PAGE_AFI_SLOT_SHIFT);
655 memcpy(&sc->fw_log.revision[0], sc->ctrldata.fr,
656 sizeof(sc->fw_log.revision[0]));
658 memcpy(&sc->health_log.power_cycles, &power_cycles,
659 sizeof(sc->health_log.power_cycles));
663 pci_nvme_init_features(struct pci_nvme_softc *sc)
665 enum nvme_feature fid;
667 for (fid = 0; fid < NVME_FID_MAX; fid++) {
669 case NVME_FEAT_ARBITRATION:
670 case NVME_FEAT_POWER_MANAGEMENT:
671 case NVME_FEAT_INTERRUPT_COALESCING: //XXX
672 case NVME_FEAT_WRITE_ATOMICITY:
673 /* Mandatory but no special handling required */
674 //XXX hang - case NVME_FEAT_PREDICTABLE_LATENCY_MODE_CONFIG:
675 //XXX hang - case NVME_FEAT_HOST_BEHAVIOR_SUPPORT:
676 // this returns a data buffer
678 case NVME_FEAT_TEMPERATURE_THRESHOLD:
679 sc->feat[fid].set = nvme_feature_temperature;
681 case NVME_FEAT_ERROR_RECOVERY:
682 sc->feat[fid].namespace_specific = true;
684 case NVME_FEAT_NUMBER_OF_QUEUES:
685 sc->feat[fid].set = nvme_feature_num_queues;
687 case NVME_FEAT_INTERRUPT_VECTOR_CONFIGURATION:
688 sc->feat[fid].set = nvme_feature_iv_config;
690 case NVME_FEAT_ASYNC_EVENT_CONFIGURATION:
691 sc->feat[fid].set = nvme_feature_async_event;
692 /* Enable all AENs by default */
693 sc->feat[fid].cdw11 = PCI_NVME_AEN_DEFAULT_MASK;
696 sc->feat[fid].set = nvme_feature_invalid_cb;
697 sc->feat[fid].get = nvme_feature_invalid_cb;
703 pci_nvme_aer_reset(struct pci_nvme_softc *sc)
706 STAILQ_INIT(&sc->aer_list);
711 pci_nvme_aer_init(struct pci_nvme_softc *sc)
714 pthread_mutex_init(&sc->aer_mtx, NULL);
715 pci_nvme_aer_reset(sc);
719 pci_nvme_aer_destroy(struct pci_nvme_softc *sc)
721 struct pci_nvme_aer *aer = NULL;
723 pthread_mutex_lock(&sc->aer_mtx);
724 while (!STAILQ_EMPTY(&sc->aer_list)) {
725 aer = STAILQ_FIRST(&sc->aer_list);
726 STAILQ_REMOVE_HEAD(&sc->aer_list, link);
729 pthread_mutex_unlock(&sc->aer_mtx);
731 pci_nvme_aer_reset(sc);
735 pci_nvme_aer_available(struct pci_nvme_softc *sc)
738 return (sc->aer_count != 0);
742 pci_nvme_aer_limit_reached(struct pci_nvme_softc *sc)
744 struct nvme_controller_data *cd = &sc->ctrldata;
746 /* AERL is a zero-based value while aer_count is one-based */
747 return (sc->aer_count == (cd->aerl + 1U));
751 * Add an Async Event Request
753 * Stores an AER to be returned later if the Controller needs to notify the host of an event.
755 * Note that while the NVMe spec doesn't require Controllers to return AER's
756 * in order, this implementation does preserve the order.
759 pci_nvme_aer_add(struct pci_nvme_softc *sc, uint16_t cid)
761 struct pci_nvme_aer *aer = NULL;
763 aer = calloc(1, sizeof(struct pci_nvme_aer));
767 /* Save the Command ID for use in the completion message */
770 pthread_mutex_lock(&sc->aer_mtx);
772 STAILQ_INSERT_TAIL(&sc->aer_list, aer, link);
773 pthread_mutex_unlock(&sc->aer_mtx);
779 * Get an Async Event Request structure
781 * Returns a pointer to an AER previously submitted by the host or NULL if
782 * no AER's exist. Caller is responsible for freeing the returned struct.
784 static struct pci_nvme_aer *
785 pci_nvme_aer_get(struct pci_nvme_softc *sc)
787 struct pci_nvme_aer *aer = NULL;
789 pthread_mutex_lock(&sc->aer_mtx);
790 aer = STAILQ_FIRST(&sc->aer_list);
792 STAILQ_REMOVE_HEAD(&sc->aer_list, link);
795 pthread_mutex_unlock(&sc->aer_mtx);
801 pci_nvme_aen_reset(struct pci_nvme_softc *sc)
805 memset(sc->aen, 0, PCI_NVME_AE_TYPE_MAX * sizeof(struct pci_nvme_aen));
807 for (atype = 0; atype < PCI_NVME_AE_TYPE_MAX; atype++) {
808 sc->aen[atype].atype = atype;
813 pci_nvme_aen_init(struct pci_nvme_softc *sc)
817 pci_nvme_aen_reset(sc);
819 pthread_mutex_init(&sc->aen_mtx, NULL);
820 pthread_create(&sc->aen_tid, NULL, aen_thr, sc);
821 snprintf(nstr, sizeof(nstr), "nvme-aen-%d:%d", sc->nsc_pi->pi_slot,
822 sc->nsc_pi->pi_func);
823 pthread_set_name_np(sc->aen_tid, nstr);
827 pci_nvme_aen_destroy(struct pci_nvme_softc *sc)
830 pci_nvme_aen_reset(sc);
833 /* Notify the AEN thread of pending work */
835 pci_nvme_aen_notify(struct pci_nvme_softc *sc)
838 pthread_cond_signal(&sc->aen_cond);
842 * Post an Asynchronous Event Notification
845 pci_nvme_aen_post(struct pci_nvme_softc *sc, pci_nvme_async_type atype,
848 struct pci_nvme_aen *aen;
850 if (atype >= PCI_NVME_AE_TYPE_MAX) {
854 pthread_mutex_lock(&sc->aen_mtx);
855 aen = &sc->aen[atype];
857 /* Has the controller already posted an event of this type? */
859 pthread_mutex_unlock(&sc->aen_mtx);
863 aen->event_data = event_data;
865 pthread_mutex_unlock(&sc->aen_mtx);
867 pci_nvme_aen_notify(sc);
873 pci_nvme_aen_process(struct pci_nvme_softc *sc)
875 struct pci_nvme_aer *aer;
876 struct pci_nvme_aen *aen;
877 pci_nvme_async_type atype;
882 assert(pthread_mutex_isowned_np(&sc->aen_mtx));
883 for (atype = 0; atype < PCI_NVME_AE_TYPE_MAX; atype++) {
884 aen = &sc->aen[atype];
885 /* Previous iterations may have depleted the available AER's */
886 if (!pci_nvme_aer_available(sc)) {
887 DPRINTF("%s: no AER", __func__);
892 DPRINTF("%s: no AEN posted for atype=%#x", __func__, atype);
896 status = NVME_SC_SUCCESS;
898 /* Is the event masked? */
900 sc->feat[NVME_FEAT_ASYNC_EVENT_CONFIGURATION].cdw11;
902 DPRINTF("%s: atype=%#x mask=%#x event_data=%#x", __func__, atype, mask, aen->event_data);
904 case PCI_NVME_AE_TYPE_ERROR:
905 lid = NVME_LOG_ERROR;
907 case PCI_NVME_AE_TYPE_SMART:
909 if ((mask & aen->event_data) == 0)
911 lid = NVME_LOG_HEALTH_INFORMATION;
913 case PCI_NVME_AE_TYPE_NOTICE:
914 if (aen->event_data >= PCI_NVME_AEI_NOTICE_MAX) {
915 EPRINTLN("%s unknown AEN notice type %u",
916 __func__, aen->event_data);
917 status = NVME_SC_INTERNAL_DEVICE_ERROR;
921 if ((PCI_NVME_AEI_NOTICE_MASK(aen->event_data) & mask) == 0)
923 switch (aen->event_data) {
924 case PCI_NVME_AEI_NOTICE_NS_ATTR_CHANGED:
925 lid = NVME_LOG_CHANGED_NAMESPACE;
927 case PCI_NVME_AEI_NOTICE_FW_ACTIVATION:
928 lid = NVME_LOG_FIRMWARE_SLOT;
930 case PCI_NVME_AEI_NOTICE_TELEMETRY_CHANGE:
931 lid = NVME_LOG_TELEMETRY_CONTROLLER_INITIATED;
933 case PCI_NVME_AEI_NOTICE_ANA_CHANGE:
934 lid = NVME_LOG_ASYMMETRIC_NAMESPACE_ACCESS;
936 case PCI_NVME_AEI_NOTICE_PREDICT_LATENCY_CHANGE:
937 lid = NVME_LOG_PREDICTABLE_LATENCY_EVENT_AGGREGATE;
939 case PCI_NVME_AEI_NOTICE_LBA_STATUS_ALERT:
940 lid = NVME_LOG_LBA_STATUS_INFORMATION;
942 case PCI_NVME_AEI_NOTICE_ENDURANCE_GROUP_CHANGE:
943 lid = NVME_LOG_ENDURANCE_GROUP_EVENT_AGGREGATE;
951 EPRINTLN("%s unknown AEN type %u", __func__, atype);
952 status = NVME_SC_INTERNAL_DEVICE_ERROR;
957 aer = pci_nvme_aer_get(sc);
960 DPRINTF("%s: CID=%#x CDW0=%#x", __func__, aer->cid, (lid << 16) | (aen->event_data << 8) | atype);
961 pci_nvme_cq_update(sc, &sc->compl_queues[0],
962 (lid << 16) | (aen->event_data << 8) | atype, /* cdw0 */
970 pci_generate_msix(sc->nsc_pi, 0);
977 struct pci_nvme_softc *sc;
981 pthread_mutex_lock(&sc->aen_mtx);
983 pci_nvme_aen_process(sc);
984 pthread_cond_wait(&sc->aen_cond, &sc->aen_mtx);
986 pthread_mutex_unlock(&sc->aen_mtx);
993 pci_nvme_reset_locked(struct pci_nvme_softc *sc)
997 DPRINTF("%s", __func__);
999 sc->regs.cap_lo = (ZERO_BASED(sc->max_qentries) & NVME_CAP_LO_REG_MQES_MASK) |
1000 (1 << NVME_CAP_LO_REG_CQR_SHIFT) |
1001 (60 << NVME_CAP_LO_REG_TO_SHIFT);
1003 sc->regs.cap_hi = 1 << NVME_CAP_HI_REG_CSS_NVM_SHIFT;
1005 sc->regs.vs = NVME_REV(1,4); /* NVMe v1.4 */
1009 assert(sc->submit_queues != NULL);
1011 for (i = 0; i < sc->num_squeues + 1; i++) {
1012 sc->submit_queues[i].qbase = NULL;
1013 sc->submit_queues[i].size = 0;
1014 sc->submit_queues[i].cqid = 0;
1015 sc->submit_queues[i].tail = 0;
1016 sc->submit_queues[i].head = 0;
1019 assert(sc->compl_queues != NULL);
1021 for (i = 0; i < sc->num_cqueues + 1; i++) {
1022 sc->compl_queues[i].qbase = NULL;
1023 sc->compl_queues[i].size = 0;
1024 sc->compl_queues[i].tail = 0;
1025 sc->compl_queues[i].head = 0;
1028 sc->num_q_is_set = false;
1030 pci_nvme_aer_destroy(sc);
1031 pci_nvme_aen_destroy(sc);
1034 * Clear CSTS.RDY last to prevent the host from enabling Controller
1035 * before cleanup completes
1041 pci_nvme_reset(struct pci_nvme_softc *sc)
1043 pthread_mutex_lock(&sc->mtx);
1044 pci_nvme_reset_locked(sc);
1045 pthread_mutex_unlock(&sc->mtx);
1049 pci_nvme_init_controller(struct pci_nvme_softc *sc)
1051 uint16_t acqs, asqs;
1053 DPRINTF("%s", __func__);
1056 * NVMe 2.0 states that "enabling a controller while this field is
1057 * cleared to 0h produces undefined results" for both ACQS and
1058 * ASQS. If zero, set CFS and do not become ready.
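 *
 * For example, AQA = 0x001f001f encodes zero-based ASQS and ACQS values of
 * 0x1f, i.e. 32-entry Admin Submission and Completion Queues.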
1060 asqs = ONE_BASED(sc->regs.aqa & NVME_AQA_REG_ASQS_MASK);
1062 EPRINTLN("%s: illegal ASQS value %#x (aqa=%#x)", __func__,
1063 asqs - 1, sc->regs.aqa);
1064 sc->regs.csts |= NVME_CSTS_CFS;
1067 sc->submit_queues[0].size = asqs;
1068 sc->submit_queues[0].qbase = vm_map_gpa(sc->nsc_pi->pi_vmctx,
1069 sc->regs.asq, sizeof(struct nvme_command) * asqs);
1070 if (sc->submit_queues[0].qbase == NULL) {
1071 EPRINTLN("%s: ASQ vm_map_gpa(%lx) failed", __func__,
1073 sc->regs.csts |= NVME_CSTS_CFS;
1077 DPRINTF("%s mapping Admin-SQ guest 0x%lx, host: %p",
1078 __func__, sc->regs.asq, sc->submit_queues[0].qbase);
1080 acqs = ONE_BASED((sc->regs.aqa >> NVME_AQA_REG_ACQS_SHIFT) &
1081 NVME_AQA_REG_ACQS_MASK);
1083 EPRINTLN("%s: illegal ACQS value %#x (aqa=%#x)", __func__,
1084 acqs - 1, sc->regs.aqa);
1085 sc->regs.csts |= NVME_CSTS_CFS;
1088 sc->compl_queues[0].size = acqs;
1089 sc->compl_queues[0].qbase = vm_map_gpa(sc->nsc_pi->pi_vmctx,
1090 sc->regs.acq, sizeof(struct nvme_completion) * acqs);
1091 if (sc->compl_queues[0].qbase == NULL) {
1092 EPRINTLN("%s: ACQ vm_map_gpa(%lx) failed", __func__,
1094 sc->regs.csts |= NVME_CSTS_CFS;
1097 sc->compl_queues[0].intr_en = NVME_CQ_INTEN;
1099 DPRINTF("%s mapping Admin-CQ guest 0x%lx, host: %p",
1100 __func__, sc->regs.acq, sc->compl_queues[0].qbase);
1106 nvme_prp_memcpy(struct vmctx *ctx, uint64_t prp1, uint64_t prp2, uint8_t *b,
1107 size_t len, enum nvme_copy_dir dir)
1112 if (len > (8 * 1024)) {
1116 /* Copy from the start of prp1 to the end of the physical page */
1117 bytes = PAGE_SIZE - (prp1 & PAGE_MASK);
1118 bytes = MIN(bytes, len);
1120 p = vm_map_gpa(ctx, prp1, bytes);
1125 if (dir == NVME_COPY_TO_PRP)
1126 memcpy(p, b, bytes);
1128 memcpy(b, p, bytes);
1137 len = MIN(len, PAGE_SIZE);
1139 p = vm_map_gpa(ctx, prp2, len);
1144 if (dir == NVME_COPY_TO_PRP)
1153 * Write a Completion Queue Entry update
1155 * Write the completion and update the doorbell value
1158 pci_nvme_cq_update(struct pci_nvme_softc *sc,
1159 struct nvme_completion_queue *cq,
1165 struct nvme_submission_queue *sq = &sc->submit_queues[sqid];
1166 struct nvme_completion *cqe;
1168 assert(cq->qbase != NULL);
1170 pthread_mutex_lock(&cq->mtx);
1172 cqe = &cq->qbase[cq->tail];
1174 /* Flip the phase bit */
1175 status |= (cqe->status ^ NVME_STATUS_P) & NVME_STATUS_P_MASK;
1178 cqe->sqhd = sq->head;
1181 cqe->status = status;
1184 if (cq->tail >= cq->size) {
1188 pthread_mutex_unlock(&cq->mtx);
1192 nvme_opc_delete_io_sq(struct pci_nvme_softc* sc, struct nvme_command* command,
1193 struct nvme_completion* compl)
1195 uint16_t qid = command->cdw10 & 0xffff;
1197 DPRINTF("%s DELETE_IO_SQ %u", __func__, qid);
1198 if (qid == 0 || qid > sc->num_squeues ||
1199 (sc->submit_queues[qid].qbase == NULL)) {
1200 WPRINTF("%s NOT PERMITTED queue id %u / num_squeues %u",
1201 __func__, qid, sc->num_squeues);
1202 pci_nvme_status_tc(&compl->status, NVME_SCT_COMMAND_SPECIFIC,
1203 NVME_SC_INVALID_QUEUE_IDENTIFIER);
1207 sc->submit_queues[qid].qbase = NULL;
1208 sc->submit_queues[qid].cqid = 0;
1209 pci_nvme_status_genc(&compl->status, NVME_SC_SUCCESS);
1214 nvme_opc_create_io_sq(struct pci_nvme_softc* sc, struct nvme_command* command,
1215 struct nvme_completion* compl)
1217 if (command->cdw11 & NVME_CMD_CDW11_PC) {
1218 uint16_t qid = command->cdw10 & 0xffff;
1219 struct nvme_submission_queue *nsq;
1221 if ((qid == 0) || (qid > sc->num_squeues) ||
1222 (sc->submit_queues[qid].qbase != NULL)) {
1223 WPRINTF("%s queue index %u > num_squeues %u",
1224 __func__, qid, sc->num_squeues);
1225 pci_nvme_status_tc(&compl->status,
1226 NVME_SCT_COMMAND_SPECIFIC,
1227 NVME_SC_INVALID_QUEUE_IDENTIFIER);
1231 nsq = &sc->submit_queues[qid];
1232 nsq->size = ONE_BASED((command->cdw10 >> 16) & 0xffff);
1233 DPRINTF("%s size=%u (max=%u)", __func__, nsq->size, sc->max_qentries);
1234 if ((nsq->size < 2) || (nsq->size > sc->max_qentries)) {
1236 * Queues must specify at least two entries
1237 * NOTE: "MAXIMUM QUEUE SIZE EXCEEDED" was renamed to
1238 * "INVALID QUEUE SIZE" in the NVM Express 1.3 Spec
1240 pci_nvme_status_tc(&compl->status,
1241 NVME_SCT_COMMAND_SPECIFIC,
1242 NVME_SC_MAXIMUM_QUEUE_SIZE_EXCEEDED);
1245 nsq->head = nsq->tail = 0;
1247 nsq->cqid = (command->cdw11 >> 16) & 0xffff;
1248 if ((nsq->cqid == 0) || (nsq->cqid > sc->num_cqueues)) {
1249 pci_nvme_status_tc(&compl->status,
1250 NVME_SCT_COMMAND_SPECIFIC,
1251 NVME_SC_INVALID_QUEUE_IDENTIFIER);
1255 if (sc->compl_queues[nsq->cqid].qbase == NULL) {
1256 pci_nvme_status_tc(&compl->status,
1257 NVME_SCT_COMMAND_SPECIFIC,
1258 NVME_SC_COMPLETION_QUEUE_INVALID);
1262 nsq->qpriority = (command->cdw11 >> 1) & 0x03;
1264 nsq->qbase = vm_map_gpa(sc->nsc_pi->pi_vmctx, command->prp1,
1265 sizeof(struct nvme_command) * (size_t)nsq->size);
1267 DPRINTF("%s sq %u size %u gaddr %p cqid %u", __func__,
1268 qid, nsq->size, nsq->qbase, nsq->cqid);
1270 pci_nvme_status_genc(&compl->status, NVME_SC_SUCCESS);
1272 DPRINTF("%s completed creating IOSQ qid %u",
1276 * Guest sent non-cont submission queue request.
1277 * This setting is unsupported by this emulation.
1279 WPRINTF("%s unsupported non-contig (list-based) "
1280 "create i/o submission queue", __func__);
1282 pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD);
1288 nvme_opc_delete_io_cq(struct pci_nvme_softc* sc, struct nvme_command* command,
1289 struct nvme_completion* compl)
1291 uint16_t qid = command->cdw10 & 0xffff;
1294 DPRINTF("%s DELETE_IO_CQ %u", __func__, qid);
1295 if (qid == 0 || qid > sc->num_cqueues ||
1296 (sc->compl_queues[qid].qbase == NULL)) {
1297 WPRINTF("%s queue index %u / num_cqueues %u",
1298 __func__, qid, sc->num_cqueues);
1299 pci_nvme_status_tc(&compl->status, NVME_SCT_COMMAND_SPECIFIC,
1300 NVME_SC_INVALID_QUEUE_IDENTIFIER);
1304 /* Deleting an Active CQ is an error */
1305 for (sqid = 1; sqid < sc->num_squeues + 1; sqid++)
1306 if (sc->submit_queues[sqid].cqid == qid) {
1307 pci_nvme_status_tc(&compl->status,
1308 NVME_SCT_COMMAND_SPECIFIC,
1309 NVME_SC_INVALID_QUEUE_DELETION);
1313 sc->compl_queues[qid].qbase = NULL;
1314 pci_nvme_status_genc(&compl->status, NVME_SC_SUCCESS);
1319 nvme_opc_create_io_cq(struct pci_nvme_softc* sc, struct nvme_command* command,
1320 struct nvme_completion* compl)
1322 struct nvme_completion_queue *ncq;
1323 uint16_t qid = command->cdw10 & 0xffff;
1325 /* Only support Physically Contiguous queues */
1326 if ((command->cdw11 & NVME_CMD_CDW11_PC) == 0) {
1327 WPRINTF("%s unsupported non-contig (list-based) "
1328 "create i/o completion queue",
1331 pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD);
1335 if ((qid == 0) || (qid > sc->num_cqueues) ||
1336 (sc->compl_queues[qid].qbase != NULL)) {
1337 WPRINTF("%s queue index %u > num_cqueues %u",
1338 __func__, qid, sc->num_cqueues);
1339 pci_nvme_status_tc(&compl->status,
1340 NVME_SCT_COMMAND_SPECIFIC,
1341 NVME_SC_INVALID_QUEUE_IDENTIFIER);
1345 ncq = &sc->compl_queues[qid];
1346 ncq->intr_en = (command->cdw11 & NVME_CMD_CDW11_IEN) >> 1;
1347 ncq->intr_vec = (command->cdw11 >> 16) & 0xffff;
1348 if (ncq->intr_vec > (sc->max_queues + 1)) {
1349 pci_nvme_status_tc(&compl->status,
1350 NVME_SCT_COMMAND_SPECIFIC,
1351 NVME_SC_INVALID_INTERRUPT_VECTOR);
1355 ncq->size = ONE_BASED((command->cdw10 >> 16) & 0xffff);
1356 if ((ncq->size < 2) || (ncq->size > sc->max_qentries)) {
1358 * Queues must specify at least two entries
1359 * NOTE: "MAXIMUM QUEUE SIZE EXCEEDED" was renamed to
1360 * "INVALID QUEUE SIZE" in the NVM Express 1.3 Spec
1362 pci_nvme_status_tc(&compl->status,
1363 NVME_SCT_COMMAND_SPECIFIC,
1364 NVME_SC_MAXIMUM_QUEUE_SIZE_EXCEEDED);
1367 ncq->head = ncq->tail = 0;
1368 ncq->qbase = vm_map_gpa(sc->nsc_pi->pi_vmctx,
1370 sizeof(struct nvme_completion) * (size_t)ncq->size);
1372 pci_nvme_status_genc(&compl->status, NVME_SC_SUCCESS);
1379 nvme_opc_get_log_page(struct pci_nvme_softc* sc, struct nvme_command* command,
1380 struct nvme_completion* compl)
1386 pci_nvme_status_genc(&compl->status, NVME_SC_SUCCESS);
1389 * Command specifies the number of dwords to return in fields NUMDU
1390 * and NUMDL. This is a zero-based value.
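 * For example, NUMDU = 0 and NUMDL = 0x3f request 0x3f + 1 = 64 dwords,
 * i.e. a 256 byte transfer.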
1392 logpage = command->cdw10 & 0xFF;
1393 logsize = ((command->cdw11 << 16) | (command->cdw10 >> 16)) + 1;
1394 logsize *= sizeof(uint32_t);
1395 logoff = ((uint64_t)(command->cdw13) << 32) | command->cdw12;
1397 DPRINTF("%s log page %u len %u", __func__, logpage, logsize);
1400 case NVME_LOG_ERROR:
1401 if (logoff >= sizeof(sc->err_log)) {
1402 pci_nvme_status_genc(&compl->status,
1403 NVME_SC_INVALID_FIELD);
1407 nvme_prp_memcpy(sc->nsc_pi->pi_vmctx, command->prp1,
1408 command->prp2, (uint8_t *)&sc->err_log + logoff,
1409 MIN(logsize - logoff, sizeof(sc->err_log)),
1412 case NVME_LOG_HEALTH_INFORMATION:
1413 if (logoff >= sizeof(sc->health_log)) {
1414 pci_nvme_status_genc(&compl->status,
1415 NVME_SC_INVALID_FIELD);
1419 pthread_mutex_lock(&sc->mtx);
1420 memcpy(&sc->health_log.data_units_read, &sc->read_data_units,
1421 sizeof(sc->health_log.data_units_read));
1422 memcpy(&sc->health_log.data_units_written, &sc->write_data_units,
1423 sizeof(sc->health_log.data_units_written));
1424 memcpy(&sc->health_log.host_read_commands, &sc->read_commands,
1425 sizeof(sc->health_log.host_read_commands));
1426 memcpy(&sc->health_log.host_write_commands, &sc->write_commands,
1427 sizeof(sc->health_log.host_write_commands));
1428 pthread_mutex_unlock(&sc->mtx);
1430 nvme_prp_memcpy(sc->nsc_pi->pi_vmctx, command->prp1,
1431 command->prp2, (uint8_t *)&sc->health_log + logoff,
1432 MIN(logsize - logoff, sizeof(sc->health_log)),
1435 case NVME_LOG_FIRMWARE_SLOT:
1436 if (logoff >= sizeof(sc->fw_log)) {
1437 pci_nvme_status_genc(&compl->status,
1438 NVME_SC_INVALID_FIELD);
1442 nvme_prp_memcpy(sc->nsc_pi->pi_vmctx, command->prp1,
1443 command->prp2, (uint8_t *)&sc->fw_log + logoff,
1444 MIN(logsize - logoff, sizeof(sc->fw_log)),
1447 case NVME_LOG_CHANGED_NAMESPACE:
1448 if (logoff >= sizeof(sc->ns_log)) {
1449 pci_nvme_status_genc(&compl->status,
1450 NVME_SC_INVALID_FIELD);
1454 nvme_prp_memcpy(sc->nsc_pi->pi_vmctx, command->prp1,
1455 command->prp2, (uint8_t *)&sc->ns_log + logoff,
1456 MIN(logsize - logoff, sizeof(sc->ns_log)),
1458 memset(&sc->ns_log, 0, sizeof(sc->ns_log));
1461 DPRINTF("%s get log page %x command not supported",
1464 pci_nvme_status_tc(&compl->status, NVME_SCT_COMMAND_SPECIFIC,
1465 NVME_SC_INVALID_LOG_PAGE);
1472 nvme_opc_identify(struct pci_nvme_softc* sc, struct nvme_command* command,
1473 struct nvme_completion* compl)
1478 DPRINTF("%s identify 0x%x nsid 0x%x", __func__,
1479 command->cdw10 & 0xFF, command->nsid);
1482 pci_nvme_status_genc(&status, NVME_SC_SUCCESS);
1484 switch (command->cdw10 & 0xFF) {
1485 case 0x00: /* return Identify Namespace data structure */
1486 /* Global NS only valid with NS Management */
1487 if (command->nsid == NVME_GLOBAL_NAMESPACE_TAG) {
1488 pci_nvme_status_genc(&status,
1489 NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
1492 nvme_prp_memcpy(sc->nsc_pi->pi_vmctx, command->prp1,
1493 command->prp2, (uint8_t *)&sc->nsdata, sizeof(sc->nsdata),
1496 case 0x01: /* return Identify Controller data structure */
1497 nvme_prp_memcpy(sc->nsc_pi->pi_vmctx, command->prp1,
1498 command->prp2, (uint8_t *)&sc->ctrldata,
1499 sizeof(sc->ctrldata),
1502 case 0x02: /* list of 1024 active NSIDs > CDW1.NSID */
1503 dest = vm_map_gpa(sc->nsc_pi->pi_vmctx, command->prp1,
1504 sizeof(uint32_t) * 1024);
1505 /* All unused entries shall be zero */
1506 memset(dest, 0, sizeof(uint32_t) * 1024);
1507 ((uint32_t *)dest)[0] = 1;
1509 case 0x03: /* list of NSID structures in CDW1.NSID, 4096 bytes */
1510 if (command->nsid != 1) {
1511 pci_nvme_status_genc(&status,
1512 NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
1515 dest = vm_map_gpa(sc->nsc_pi->pi_vmctx, command->prp1,
1516 sizeof(uint32_t) * 1024);
1517 /* All bytes after the descriptor shall be zero */
1518 memset(dest, 0, sizeof(uint32_t) * 1024);
1520 /* Return NIDT=1 (i.e. EUI64) descriptor */
1521 ((uint8_t *)dest)[0] = 1;
1522 ((uint8_t *)dest)[1] = sizeof(uint64_t);
1523 memcpy(((uint8_t *)dest) + 4, sc->nsdata.eui64, sizeof(uint64_t));
1527 * Controller list is optional but used by UNH tests. Return
1528 * a valid but empty list.
1530 dest = vm_map_gpa(sc->nsc_pi->pi_vmctx, command->prp1,
1531 sizeof(uint16_t) * 2048);
1532 memset(dest, 0, sizeof(uint16_t) * 2048);
1535 DPRINTF("%s unsupported identify command requested 0x%x",
1536 __func__, command->cdw10 & 0xFF);
1537 pci_nvme_status_genc(&status, NVME_SC_INVALID_FIELD);
1541 compl->status = status;
1546 nvme_fid_to_name(uint8_t fid)
1551 case NVME_FEAT_ARBITRATION:
1552 name = "Arbitration";
1554 case NVME_FEAT_POWER_MANAGEMENT:
1555 name = "Power Management";
1557 case NVME_FEAT_LBA_RANGE_TYPE:
1558 name = "LBA Range Type";
1560 case NVME_FEAT_TEMPERATURE_THRESHOLD:
1561 name = "Temperature Threshold";
1563 case NVME_FEAT_ERROR_RECOVERY:
1564 name = "Error Recovery";
1566 case NVME_FEAT_VOLATILE_WRITE_CACHE:
1567 name = "Volatile Write Cache";
1569 case NVME_FEAT_NUMBER_OF_QUEUES:
1570 name = "Number of Queues";
1572 case NVME_FEAT_INTERRUPT_COALESCING:
1573 name = "Interrupt Coalescing";
1575 case NVME_FEAT_INTERRUPT_VECTOR_CONFIGURATION:
1576 name = "Interrupt Vector Configuration";
1578 case NVME_FEAT_WRITE_ATOMICITY:
1579 name = "Write Atomicity Normal";
1581 case NVME_FEAT_ASYNC_EVENT_CONFIGURATION:
1582 name = "Asynchronous Event Configuration";
1584 case NVME_FEAT_AUTONOMOUS_POWER_STATE_TRANSITION:
1585 name = "Autonomous Power State Transition";
1587 case NVME_FEAT_HOST_MEMORY_BUFFER:
1588 name = "Host Memory Buffer";
1590 case NVME_FEAT_TIMESTAMP:
1593 case NVME_FEAT_KEEP_ALIVE_TIMER:
1594 name = "Keep Alive Timer";
1596 case NVME_FEAT_HOST_CONTROLLED_THERMAL_MGMT:
1597 name = "Host Controlled Thermal Management";
1599 case NVME_FEAT_NON_OP_POWER_STATE_CONFIG:
1600 name = "Non-Operation Power State Config";
1602 case NVME_FEAT_READ_RECOVERY_LEVEL_CONFIG:
1603 name = "Read Recovery Level Config";
1605 case NVME_FEAT_PREDICTABLE_LATENCY_MODE_CONFIG:
1606 name = "Predictable Latency Mode Config";
1608 case NVME_FEAT_PREDICTABLE_LATENCY_MODE_WINDOW:
1609 name = "Predictable Latency Mode Window";
1611 case NVME_FEAT_LBA_STATUS_INFORMATION_ATTRIBUTES:
1612 name = "LBA Status Information Report Interval";
1614 case NVME_FEAT_HOST_BEHAVIOR_SUPPORT:
1615 name = "Host Behavior Support";
1617 case NVME_FEAT_SANITIZE_CONFIG:
1618 name = "Sanitize Config";
1620 case NVME_FEAT_ENDURANCE_GROUP_EVENT_CONFIGURATION:
1621 name = "Endurance Group Event Configuration";
1623 case NVME_FEAT_SOFTWARE_PROGRESS_MARKER:
1624 name = "Software Progress Marker";
1626 case NVME_FEAT_HOST_IDENTIFIER:
1627 name = "Host Identifier";
1629 case NVME_FEAT_RESERVATION_NOTIFICATION_MASK:
1630 name = "Reservation Notification Mask";
1632 case NVME_FEAT_RESERVATION_PERSISTENCE:
1633 name = "Reservation Persistence";
1635 case NVME_FEAT_NAMESPACE_WRITE_PROTECTION_CONFIG:
1636 name = "Namespace Write Protection Config";
1647 nvme_feature_invalid_cb(struct pci_nvme_softc *sc __unused,
1648 struct nvme_feature_obj *feat __unused,
1649 struct nvme_command *command __unused,
1650 struct nvme_completion *compl)
1652 pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD);
1656 nvme_feature_iv_config(struct pci_nvme_softc *sc,
1657 struct nvme_feature_obj *feat __unused,
1658 struct nvme_command *command,
1659 struct nvme_completion *compl)
1662 uint32_t cdw11 = command->cdw11;
1666 pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD);
1668 iv = cdw11 & 0xffff;
1669 cd = cdw11 & (1 << 16);
1671 if (iv > (sc->max_queues + 1)) {
1675 /* No Interrupt Coalescing (i.e. not Coalescing Disable) for Admin Q */
1676 if ((iv == 0) && !cd)
1679 /* Requested Interrupt Vector must be used by a CQ */
1680 for (i = 0; i < sc->num_cqueues + 1; i++) {
1681 if (sc->compl_queues[i].intr_vec == iv) {
1682 pci_nvme_status_genc(&compl->status, NVME_SC_SUCCESS);
1687 #define NVME_ASYNC_EVENT_ENDURANCE_GROUP (0x4000)
1689 nvme_feature_async_event(struct pci_nvme_softc *sc __unused,
1690 struct nvme_feature_obj *feat __unused,
1691 struct nvme_command *command,
1692 struct nvme_completion *compl)
1694 if (command->cdw11 & NVME_ASYNC_EVENT_ENDURANCE_GROUP)
1695 pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD);
1698 #define NVME_TEMP_THRESH_OVER 0
1699 #define NVME_TEMP_THRESH_UNDER 1
1701 nvme_feature_temperature(struct pci_nvme_softc *sc,
1702 struct nvme_feature_obj *feat __unused,
1703 struct nvme_command *command,
1704 struct nvme_completion *compl)
1706 uint16_t tmpth; /* Temperature Threshold */
1707 uint8_t tmpsel; /* Threshold Temperature Select */
1708 uint8_t thsel; /* Threshold Type Select */
1709 bool set_crit = false;
1712 tmpth = command->cdw11 & 0xffff;
1713 tmpsel = (command->cdw11 >> 16) & 0xf;
1714 thsel = (command->cdw11 >> 20) & 0x3;
1716 DPRINTF("%s: tmpth=%#x tmpsel=%#x thsel=%#x", __func__, tmpth, tmpsel, thsel);
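/*
 * Because the emulated temperature is fixed at NVME_TEMPERATURE (296 K), an
 * "over" threshold at or below 296, or an "under" threshold at or above it,
 * immediately raises the temperature critical warning below.
 */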
1718 /* Check for unsupported values */
1719 if (((tmpsel != 0) && (tmpsel != 0xf)) ||
1720 (thsel > NVME_TEMP_THRESH_UNDER)) {
1721 pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD);
1725 if (((thsel == NVME_TEMP_THRESH_OVER) && (NVME_TEMPERATURE >= tmpth)) ||
1726 ((thsel == NVME_TEMP_THRESH_UNDER) && (NVME_TEMPERATURE <= tmpth)))
1729 pthread_mutex_lock(&sc->mtx);
1731 sc->health_log.critical_warning |=
1732 NVME_CRIT_WARN_ST_TEMPERATURE;
1734 sc->health_log.critical_warning &=
1735 ~NVME_CRIT_WARN_ST_TEMPERATURE;
1736 pthread_mutex_unlock(&sc->mtx);
1738 report_crit = sc->feat[NVME_FEAT_ASYNC_EVENT_CONFIGURATION].cdw11 &
1739 NVME_CRIT_WARN_ST_TEMPERATURE;
1741 if (set_crit && report_crit)
1742 pci_nvme_aen_post(sc, PCI_NVME_AE_TYPE_SMART,
1743 sc->health_log.critical_warning);
1745 DPRINTF("%s: set_crit=%c critical_warning=%#x status=%#x", __func__, set_crit ? 'T':'F', sc->health_log.critical_warning, compl->status);
1749 nvme_feature_num_queues(struct pci_nvme_softc *sc,
1750 struct nvme_feature_obj *feat __unused,
1751 struct nvme_command *command,
1752 struct nvme_completion *compl)
1754 uint16_t nqr; /* Number of Queues Requested */
1756 if (sc->num_q_is_set) {
1757 WPRINTF("%s: Number of Queues already set", __func__);
1758 pci_nvme_status_genc(&compl->status,
1759 NVME_SC_COMMAND_SEQUENCE_ERROR);
1763 nqr = command->cdw11 & 0xFFFF;
1764 if (nqr == 0xffff) {
1765 WPRINTF("%s: Illegal NSQR value %#x", __func__, nqr);
1766 pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD);
1770 sc->num_squeues = ONE_BASED(nqr);
1771 if (sc->num_squeues > sc->max_queues) {
1772 DPRINTF("NSQR=%u is greater than max %u", sc->num_squeues,
1774 sc->num_squeues = sc->max_queues;
1777 nqr = (command->cdw11 >> 16) & 0xFFFF;
1778 if (nqr == 0xffff) {
1779 WPRINTF("%s: Illegal NCQR value %#x", __func__, nqr);
1780 pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD);
1784 sc->num_cqueues = ONE_BASED(nqr);
1785 if (sc->num_cqueues > sc->max_queues) {
1786 DPRINTF("NCQR=%u is greater than max %u", sc->num_cqueues,
1788 sc->num_cqueues = sc->max_queues;
1791 /* Patch the command value which will be saved on callback's return */
1792 command->cdw11 = NVME_FEATURE_NUM_QUEUES(sc);
1793 compl->cdw0 = NVME_FEATURE_NUM_QUEUES(sc);
1795 sc->num_q_is_set = true;
1799 nvme_opc_set_features(struct pci_nvme_softc *sc, struct nvme_command *command,
1800 struct nvme_completion *compl)
1802 struct nvme_feature_obj *feat;
1803 uint32_t nsid = command->nsid;
1804 uint8_t fid = NVMEV(NVME_FEAT_SET_FID, command->cdw10);
1805 bool sv = NVMEV(NVME_FEAT_SET_SV, command->cdw10);
1807 DPRINTF("%s: Feature ID 0x%x (%s)", __func__, fid, nvme_fid_to_name(fid));
1809 if (fid >= NVME_FID_MAX) {
1810 DPRINTF("%s invalid feature 0x%x", __func__, fid);
1811 pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD);
1816 pci_nvme_status_tc(&compl->status, NVME_SCT_COMMAND_SPECIFIC,
1817 NVME_SC_FEATURE_NOT_SAVEABLE);
1821 feat = &sc->feat[fid];
1823 if (feat->namespace_specific && (nsid == NVME_GLOBAL_NAMESPACE_TAG)) {
1824 pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD);
1828 if (!feat->namespace_specific &&
1829 !((nsid == 0) || (nsid == NVME_GLOBAL_NAMESPACE_TAG))) {
1830 pci_nvme_status_tc(&compl->status, NVME_SCT_COMMAND_SPECIFIC,
1831 NVME_SC_FEATURE_NOT_NS_SPECIFIC);
1836 pci_nvme_status_genc(&compl->status, NVME_SC_SUCCESS);
1839 feat->set(sc, feat, command, compl);
1841 pci_nvme_status_tc(&compl->status, NVME_SCT_COMMAND_SPECIFIC,
1842 NVME_SC_FEATURE_NOT_CHANGEABLE);
1846 DPRINTF("%s: status=%#x cdw11=%#x", __func__, compl->status, command->cdw11);
1847 if (compl->status == NVME_SC_SUCCESS) {
1848 feat->cdw11 = command->cdw11;
1849 if ((fid == NVME_FEAT_ASYNC_EVENT_CONFIGURATION) &&
1850 (command->cdw11 != 0))
1851 pci_nvme_aen_notify(sc);
1857 #define NVME_FEATURES_SEL_SUPPORTED 0x3
1858 #define NVME_FEATURES_NS_SPECIFIC (1 << 1)
1861 nvme_opc_get_features(struct pci_nvme_softc* sc, struct nvme_command* command,
1862 struct nvme_completion* compl)
1864 struct nvme_feature_obj *feat;
1865 uint8_t fid = command->cdw10 & 0xFF;
1866 uint8_t sel = (command->cdw10 >> 8) & 0x7;
1868 DPRINTF("%s: Feature ID 0x%x (%s)", __func__, fid, nvme_fid_to_name(fid));
1870 if (fid >= NVME_FID_MAX) {
1871 DPRINTF("%s invalid feature 0x%x", __func__, fid);
1872 pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD);
1877 pci_nvme_status_genc(&compl->status, NVME_SC_SUCCESS);
1879 feat = &sc->feat[fid];
1881 feat->get(sc, feat, command, compl);
1884 if (compl->status == NVME_SC_SUCCESS) {
1885 if ((sel == NVME_FEATURES_SEL_SUPPORTED) && feat->namespace_specific)
1886 compl->cdw0 = NVME_FEATURES_NS_SPECIFIC;
1888 compl->cdw0 = feat->cdw11;
1895 nvme_opc_format_nvm(struct pci_nvme_softc* sc, struct nvme_command* command,
1896 struct nvme_completion* compl)
1898 uint8_t ses, lbaf, pi;
1900 /* Only supports Secure Erase Setting - User Data Erase */
1901 ses = (command->cdw10 >> 9) & 0x7;
1903 pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD);
1907 /* Only supports a single LBA Format */
1908 lbaf = command->cdw10 & 0xf;
1910 pci_nvme_status_tc(&compl->status, NVME_SCT_COMMAND_SPECIFIC,
1911 NVME_SC_INVALID_FORMAT);
1915 /* Doesn't support Protection Information */
1916 pi = (command->cdw10 >> 5) & 0x7;
1918 pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD);
1922 if (sc->nvstore.type == NVME_STOR_RAM) {
1923 if (sc->nvstore.ctx)
1924 free(sc->nvstore.ctx);
1925 sc->nvstore.ctx = calloc(1, sc->nvstore.size);
1926 pci_nvme_status_genc(&compl->status, NVME_SC_SUCCESS);
1928 struct pci_nvme_ioreq *req;
1931 req = pci_nvme_get_ioreq(sc);
1933 pci_nvme_status_genc(&compl->status,
1934 NVME_SC_INTERNAL_DEVICE_ERROR);
1935 WPRINTF("%s: unable to allocate IO req", __func__);
1938 req->nvme_sq = &sc->submit_queues[0];
1940 req->opc = command->opc;
1941 req->cid = command->cid;
1942 req->nsid = command->nsid;
1944 req->io_req.br_offset = 0;
1945 req->io_req.br_resid = sc->nvstore.size;
1946 req->io_req.br_callback = pci_nvme_io_done;
1948 err = blockif_delete(sc->nvstore.ctx, &req->io_req);
1950 pci_nvme_status_genc(&compl->status,
1951 NVME_SC_INTERNAL_DEVICE_ERROR);
1952 pci_nvme_release_ioreq(sc, req);
1954 compl->status = NVME_NO_STATUS;
1961 nvme_opc_abort(struct pci_nvme_softc *sc __unused, struct nvme_command *command,
1962 struct nvme_completion *compl)
1964 DPRINTF("%s submission queue %u, command ID 0x%x", __func__,
1965 command->cdw10 & 0xFFFF, (command->cdw10 >> 16) & 0xFFFF);
1967 /* TODO: search for the command ID and abort it */
1970 pci_nvme_status_genc(&compl->status, NVME_SC_SUCCESS);
1975 nvme_opc_async_event_req(struct pci_nvme_softc* sc,
1976 struct nvme_command* command, struct nvme_completion* compl)
1978 DPRINTF("%s async event request count=%u aerl=%u cid=%#x", __func__,
1979 sc->aer_count, sc->ctrldata.aerl, command->cid);
1981 /* Don't exceed the Async Event Request Limit (AERL). */
1982 if (pci_nvme_aer_limit_reached(sc)) {
1983 pci_nvme_status_tc(&compl->status, NVME_SCT_COMMAND_SPECIFIC,
1984 NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED);
1988 if (pci_nvme_aer_add(sc, command->cid)) {
1989 pci_nvme_status_tc(&compl->status, NVME_SCT_GENERIC,
1990 NVME_SC_INTERNAL_DEVICE_ERROR);
1995 * Raise events when they happen, as controlled by the Set Features cmd
1996 * (Asynchronous Event Configuration). These events happen asynchronously, so
1997 * the completion is only posted once an event matching the request occurs.
1999 compl->status = NVME_NO_STATUS;
2000 pci_nvme_aen_notify(sc);
2006 pci_nvme_handle_admin_cmd(struct pci_nvme_softc* sc, uint64_t value)
2008 struct nvme_completion compl;
2009 struct nvme_command *cmd;
2010 struct nvme_submission_queue *sq;
2011 struct nvme_completion_queue *cq;
2014 DPRINTF("%s index %u", __func__, (uint32_t)value);
2016 sq = &sc->submit_queues[0];
2017 cq = &sc->compl_queues[0];
2019 pthread_mutex_lock(&sq->mtx);
2022 DPRINTF("sqhead %u, tail %u", sqhead, sq->tail);
2024 while (sqhead != atomic_load_acq_short(&sq->tail)) {
2025 cmd = &(sq->qbase)[sqhead];
2030 case NVME_OPC_DELETE_IO_SQ:
2031 DPRINTF("%s command DELETE_IO_SQ", __func__);
2032 nvme_opc_delete_io_sq(sc, cmd, &compl);
2034 case NVME_OPC_CREATE_IO_SQ:
2035 DPRINTF("%s command CREATE_IO_SQ", __func__);
2036 nvme_opc_create_io_sq(sc, cmd, &compl);
2038 case NVME_OPC_DELETE_IO_CQ:
2039 DPRINTF("%s command DELETE_IO_CQ", __func__);
2040 nvme_opc_delete_io_cq(sc, cmd, &compl);
2042 case NVME_OPC_CREATE_IO_CQ:
2043 DPRINTF("%s command CREATE_IO_CQ", __func__);
2044 nvme_opc_create_io_cq(sc, cmd, &compl);
2046 case NVME_OPC_GET_LOG_PAGE:
2047 DPRINTF("%s command GET_LOG_PAGE", __func__);
2048 nvme_opc_get_log_page(sc, cmd, &compl);
2050 case NVME_OPC_IDENTIFY:
2051 DPRINTF("%s command IDENTIFY", __func__);
2052 nvme_opc_identify(sc, cmd, &compl);
2054 case NVME_OPC_ABORT:
2055 DPRINTF("%s command ABORT", __func__);
2056 nvme_opc_abort(sc, cmd, &compl);
2058 case NVME_OPC_SET_FEATURES:
2059 DPRINTF("%s command SET_FEATURES", __func__);
2060 nvme_opc_set_features(sc, cmd, &compl);
2062 case NVME_OPC_GET_FEATURES:
2063 DPRINTF("%s command GET_FEATURES", __func__);
2064 nvme_opc_get_features(sc, cmd, &compl);
2066 case NVME_OPC_FIRMWARE_ACTIVATE:
2067 DPRINTF("%s command FIRMWARE_ACTIVATE", __func__);
2068 pci_nvme_status_tc(&compl.status,
2069 NVME_SCT_COMMAND_SPECIFIC,
2070 NVME_SC_INVALID_FIRMWARE_SLOT);
2072 case NVME_OPC_ASYNC_EVENT_REQUEST:
2073 DPRINTF("%s command ASYNC_EVENT_REQ", __func__);
2074 nvme_opc_async_event_req(sc, cmd, &compl);
2076 case NVME_OPC_FORMAT_NVM:
2077 DPRINTF("%s command FORMAT_NVM", __func__);
2078 if ((sc->ctrldata.oacs &
2079 (1 << NVME_CTRLR_DATA_OACS_FORMAT_SHIFT)) == 0) {
2080 pci_nvme_status_genc(&compl.status, NVME_SC_INVALID_OPCODE);
2083 nvme_opc_format_nvm(sc, cmd, &compl);
2085 case NVME_OPC_SECURITY_SEND:
2086 case NVME_OPC_SECURITY_RECEIVE:
2087 case NVME_OPC_SANITIZE:
2088 case NVME_OPC_GET_LBA_STATUS:
2089 DPRINTF("%s command OPC=%#x (unsupported)", __func__,
2091 /* Valid but unsupported opcodes */
2092 pci_nvme_status_genc(&compl.status, NVME_SC_INVALID_FIELD);
2095 DPRINTF("%s command OPC=%#X (not implemented)",
2098 pci_nvme_status_genc(&compl.status, NVME_SC_INVALID_OPCODE);
2100 sqhead = (sqhead + 1) % sq->size;
2102 if (NVME_COMPLETION_VALID(compl)) {
2103 pci_nvme_cq_update(sc, &sc->compl_queues[0],
2111 DPRINTF("setting sqhead %u", sqhead);
2114 if (cq->head != cq->tail)
2115 pci_generate_msix(sc->nsc_pi, 0);
2117 pthread_mutex_unlock(&sq->mtx);
2121 * Update the Write and Read statistics reported in SMART data
2123 * NVMe defines a "data unit" as thousands of 512 byte blocks, rounded up.
2124 * E.g. 1 data unit is 1 - 1,000 512 byte blocks, and 3 data units are
2125 * 2,001 - 3,000 such blocks. Rounding up is achieved by initializing the remainder to 999.
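 *
 * For example, with the remainder initialized to 999, a single successful
 * 4 KiB write adds 4096 / 512 = 8, giving 1007, so write_data_units is
 * incremented once and the remainder becomes 7.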
2128 pci_nvme_stats_write_read_update(struct pci_nvme_softc *sc, uint8_t opc,
2129 size_t bytes, uint16_t status)
2132 pthread_mutex_lock(&sc->mtx);
2134 case NVME_OPC_WRITE:
2135 sc->write_commands++;
2136 if (status != NVME_SC_SUCCESS)
2138 sc->write_dunits_remainder += (bytes / 512);
2139 while (sc->write_dunits_remainder >= 1000) {
2140 sc->write_data_units++;
2141 sc->write_dunits_remainder -= 1000;
2145 sc->read_commands++;
2146 if (status != NVME_SC_SUCCESS)
2148 sc->read_dunits_remainder += (bytes / 512);
2149 while (sc->read_dunits_remainder >= 1000) {
2150 sc->read_data_units++;
2151 sc->read_dunits_remainder -= 1000;
2155 DPRINTF("%s: Invalid OPC 0x%02x for stats", __func__, opc);
2158 pthread_mutex_unlock(&sc->mtx);
2162 * Check if the combination of Starting LBA (slba) and number of blocks
2163 * exceeds the range of the underlying storage.
2165 * Because NVMe specifies the SLBA in blocks as a uint64_t and blockif stores
2166 * the capacity in bytes as a uint64_t, care must be taken to avoid integer overflows.
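 *
 * For example, with 512 byte sectors (sectsz_bits == 9), an slba with any of
 * its upper 9 bits set would overflow the byte offset computation, which the
 * initial shift check below rejects.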
2170 pci_nvme_out_of_range(struct pci_nvme_blockstore *nvstore, uint64_t slba,
2173 size_t offset, bytes;
2175 /* Overflow check of multiplying Starting LBA by the sector size */
2176 if (slba >> (64 - nvstore->sectsz_bits))
2179 offset = slba << nvstore->sectsz_bits;
2180 bytes = nblocks << nvstore->sectsz_bits;
2182 /* Overflow check of Number of Logical Blocks */
2183 if ((nvstore->size <= offset) || ((nvstore->size - offset) < bytes))
2190 pci_nvme_append_iov_req(struct pci_nvme_softc *sc __unused,
2191 struct pci_nvme_ioreq *req, uint64_t gpaddr, size_t size, uint64_t offset)
2194 bool range_is_contiguous;
2199 if (req->io_req.br_iovcnt == NVME_MAX_IOVEC) {
2204 * Minimize the number of IOVs by concatenating contiguous address
2205 * ranges. If the IOV count is zero, there is no previous range to concatenate.
2208 if (req->io_req.br_iovcnt == 0)
2209 range_is_contiguous = false;
2211 range_is_contiguous = (req->prev_gpaddr + req->prev_size) == gpaddr;
2213 if (range_is_contiguous) {
2214 iovidx = req->io_req.br_iovcnt - 1;
2216 req->io_req.br_iov[iovidx].iov_base =
2217 paddr_guest2host(req->sc->nsc_pi->pi_vmctx,
2218 req->prev_gpaddr, size);
2219 if (req->io_req.br_iov[iovidx].iov_base == NULL)
2222 req->prev_size += size;
2223 req->io_req.br_resid += size;
2225 req->io_req.br_iov[iovidx].iov_len = req->prev_size;
2227 iovidx = req->io_req.br_iovcnt;
2229 req->io_req.br_offset = offset;
2230 req->io_req.br_resid = 0;
2231 req->io_req.br_param = req;
2234 req->io_req.br_iov[iovidx].iov_base =
2235 paddr_guest2host(req->sc->nsc_pi->pi_vmctx,
2237 if (req->io_req.br_iov[iovidx].iov_base == NULL)
2240 req->io_req.br_iov[iovidx].iov_len = size;
2242 req->prev_gpaddr = gpaddr;
2243 req->prev_size = size;
2244 req->io_req.br_resid += size;
2246 req->io_req.br_iovcnt++;
2253 pci_nvme_set_completion(struct pci_nvme_softc *sc,
2254 struct nvme_submission_queue *sq, int sqid, uint16_t cid, uint16_t status)
2256 struct nvme_completion_queue *cq = &sc->compl_queues[sq->cqid];
2258 DPRINTF("%s sqid %d cqid %u cid %u status: 0x%x 0x%x",
2259 __func__, sqid, sq->cqid, cid, NVME_STATUS_GET_SCT(status),
2260 NVME_STATUS_GET_SC(status));
2262 pci_nvme_cq_update(sc, cq, 0, cid, sqid, status);
2264 if (cq->head != cq->tail) {
2265 if (cq->intr_en & NVME_CQ_INTEN) {
2266 pci_generate_msix(sc->nsc_pi, cq->intr_vec);
2268 DPRINTF("%s: CQ%u interrupt disabled",
2269 __func__, sq->cqid);
2275 pci_nvme_release_ioreq(struct pci_nvme_softc *sc, struct pci_nvme_ioreq *req)
2278 req->nvme_sq = NULL;
2281 pthread_mutex_lock(&sc->mtx);
2283 STAILQ_INSERT_TAIL(&sc->ioreqs_free, req, link);
2286 /* When no more IO is pending, mark the controller ready if it has been reset/enabled */
2287 if (sc->pending_ios == 0 &&
2288 NVME_CC_GET_EN(sc->regs.cc) && !(NVME_CSTS_GET_RDY(sc->regs.csts)))
2289 sc->regs.csts |= NVME_CSTS_RDY;
2291 pthread_mutex_unlock(&sc->mtx);
2293 sem_post(&sc->iosemlock);
2296 static struct pci_nvme_ioreq *
2297 pci_nvme_get_ioreq(struct pci_nvme_softc *sc)
2299 struct pci_nvme_ioreq *req = NULL;
2301 sem_wait(&sc->iosemlock);
2302 pthread_mutex_lock(&sc->mtx);
2304 req = STAILQ_FIRST(&sc->ioreqs_free);
2305 assert(req != NULL);
2306 STAILQ_REMOVE_HEAD(&sc->ioreqs_free, link);
2312 pthread_mutex_unlock(&sc->mtx);
2314 req->io_req.br_iovcnt = 0;
2315 req->io_req.br_offset = 0;
2316 req->io_req.br_resid = 0;
2317 req->io_req.br_param = req;
2318 req->prev_gpaddr = 0;
2325 pci_nvme_io_done(struct blockif_req *br, int err)
2327 struct pci_nvme_ioreq *req = br->br_param;
2328 struct nvme_submission_queue *sq = req->nvme_sq;
2329 uint16_t code, status;
2331 DPRINTF("%s error %d %s", __func__, err, strerror(err));
2333 /* TODO return correct error */
2334 code = err ? NVME_SC_DATA_TRANSFER_ERROR : NVME_SC_SUCCESS;
2336 pci_nvme_status_genc(&status, code);
2338 pci_nvme_set_completion(req->sc, sq, req->sqid, req->cid, status);
2339 pci_nvme_stats_write_read_update(req->sc, req->opc,
2340 req->bytes, status);
2341 pci_nvme_release_ioreq(req->sc, req);
2345 * Implements the Flush command. The specification states:
2346 * If a volatile write cache is not present, Flush commands complete
2347 * successfully and have no effect
2348 * in the description of the Volatile Write Cache (VWC) field of the Identify
2349 * Controller data. Therefore, set status to Success if the command is
2350 * not supported (i.e. RAM or as indicated by the blockif).
2353 nvme_opc_flush(struct pci_nvme_softc *sc __unused,
2354 struct nvme_command *cmd __unused,
2355 struct pci_nvme_blockstore *nvstore,
2356 struct pci_nvme_ioreq *req,
2359 bool pending = false;
2361 if (nvstore->type == NVME_STOR_RAM) {
2362 pci_nvme_status_genc(status, NVME_SC_SUCCESS);
2366 req->io_req.br_callback = pci_nvme_io_done;
2368 err = blockif_flush(nvstore->ctx, &req->io_req);
2374 pci_nvme_status_genc(status, NVME_SC_SUCCESS);
2377 pci_nvme_status_genc(status, NVME_SC_INTERNAL_DEVICE_ERROR);
2385 nvme_write_read_ram(struct pci_nvme_softc *sc,
2386 struct pci_nvme_blockstore *nvstore,
2387 uint64_t prp1, uint64_t prp2,
2388 size_t offset, uint64_t bytes,
2391 uint8_t *buf = nvstore->ctx;
2392 enum nvme_copy_dir dir;
2396 dir = NVME_COPY_TO_PRP;
2398 dir = NVME_COPY_FROM_PRP;
2401 if (nvme_prp_memcpy(sc->nsc_pi->pi_vmctx, prp1, prp2,
2402 buf + offset, bytes, dir))
2403 pci_nvme_status_genc(&status,
2404 NVME_SC_DATA_TRANSFER_ERROR);
2406 pci_nvme_status_genc(&status, NVME_SC_SUCCESS);
2412 nvme_write_read_blockif(struct pci_nvme_softc *sc,
2413 struct pci_nvme_blockstore *nvstore,
2414 struct pci_nvme_ioreq *req,
2415 uint64_t prp1, uint64_t prp2,
2416 size_t offset, uint64_t bytes,
2421 uint16_t status = NVME_NO_STATUS;
2423 size = MIN(PAGE_SIZE - (prp1 % PAGE_SIZE), bytes);
2424 if (pci_nvme_append_iov_req(sc, req, prp1, size, offset)) {
2434 } else if (bytes <= PAGE_SIZE) {
2436 if (pci_nvme_append_iov_req(sc, req, prp2, size, offset)) {
2441 void *vmctx = sc->nsc_pi->pi_vmctx;
2442 uint64_t *prp_list = &prp2;
2443 uint64_t *last = prp_list;
2445 /* PRP2 is pointer to a physical region page list */
2447 /* Last entry in list points to the next list */
2448 if ((prp_list == last) && (bytes > PAGE_SIZE)) {
2449 uint64_t prp = *prp_list;
2451 prp_list = paddr_guest2host(vmctx, prp,
2452 PAGE_SIZE - (prp % PAGE_SIZE));
2453 if (prp_list == NULL) {
2457 last = prp_list + (NVME_PRP2_ITEMS - 1);
2460 size = MIN(bytes, PAGE_SIZE);
2462 if (pci_nvme_append_iov_req(sc, req, *prp_list, size,
2474 req->io_req.br_callback = pci_nvme_io_done;
2476 err = blockif_write(nvstore->ctx, &req->io_req);
2478 err = blockif_read(nvstore->ctx, &req->io_req);
2481 pci_nvme_status_genc(&status, NVME_SC_DATA_TRANSFER_ERROR);
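/*
 * PRP handling summary for the routine above: PRP1 maps the first,
 * possibly unaligned, chunk of the transfer; PRP2 maps the second page
 * directly when the remaining data fits in one more page; otherwise PRP2
 * is the guest physical address of a PRP list, whose last entry chains to
 * the next list page while more than PAGE_SIZE bytes remain.
 */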
2487 nvme_opc_write_read(struct pci_nvme_softc *sc,
2488 struct nvme_command *cmd,
2489 struct pci_nvme_blockstore *nvstore,
2490 struct pci_nvme_ioreq *req,
2493 uint64_t lba, nblocks, bytes;
2495 bool is_write = cmd->opc == NVME_OPC_WRITE;
2496 bool pending = false;
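/*
 * CDW10/CDW11 carry the 64-bit starting LBA and CDW12[15:0] the
 * zero-based Number of Logical Blocks, hence the "+ 1" below.
 */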
2498 lba = ((uint64_t)cmd->cdw11 << 32) | cmd->cdw10;
2499 nblocks = (cmd->cdw12 & 0xFFFF) + 1;
2500 bytes = nblocks << nvstore->sectsz_bits;
2501 if (bytes > NVME_MAX_DATA_SIZE) {
2502 WPRINTF("%s command would exceed MDTS", __func__);
2503 pci_nvme_status_genc(status, NVME_SC_INVALID_FIELD);
2507 if (pci_nvme_out_of_range(nvstore, lba, nblocks)) {
2508 WPRINTF("%s command would exceed LBA range (slba=%#lx nblocks=%#lx)",
2509 __func__, lba, nblocks);
2510 pci_nvme_status_genc(status, NVME_SC_LBA_OUT_OF_RANGE);
2514 offset = lba << nvstore->sectsz_bits;
2517 req->io_req.br_offset = lba;
2519 /* PRP bits 1:0 must be zero */
2520 cmd->prp1 &= ~0x3UL;
2521 cmd->prp2 &= ~0x3UL;
2523 if (nvstore->type == NVME_STOR_RAM) {
2524 *status = nvme_write_read_ram(sc, nvstore, cmd->prp1,
2525 cmd->prp2, offset, bytes, is_write);
2527 *status = nvme_write_read_blockif(sc, nvstore, req,
2528 cmd->prp1, cmd->prp2, offset, bytes, is_write);
2530 if (*status == NVME_NO_STATUS)
2535 pci_nvme_stats_write_read_update(sc, cmd->opc, bytes, *status);
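/*
 * pci_nvme_dealloc_sm() below is the blockif completion callback used when
 * a Dataset Management command carries multiple deallocate ranges: each
 * completion advances prev_gpaddr (the index of the current br_iov entry)
 * and issues the next blockif_delete() until prev_size entries are done.
 */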
2541 pci_nvme_dealloc_sm(struct blockif_req *br, int err)
2543 struct pci_nvme_ioreq *req = br->br_param;
2544 struct pci_nvme_softc *sc = req->sc;
2550 pci_nvme_status_genc(&status, NVME_SC_INTERNAL_DEVICE_ERROR);
2551 } else if ((req->prev_gpaddr + 1) == (req->prev_size)) {
2552 pci_nvme_status_genc(&status, NVME_SC_SUCCESS);
2554 struct iovec *iov = req->io_req.br_iov;
2557 iov += req->prev_gpaddr;
2559 /* The iov_* values already include the sector size */
2560 req->io_req.br_offset = (off_t)iov->iov_base;
2561 req->io_req.br_resid = iov->iov_len;
2562 if (blockif_delete(sc->nvstore.ctx, &req->io_req)) {
2563 pci_nvme_status_genc(&status,
2564 NVME_SC_INTERNAL_DEVICE_ERROR);
2570 pci_nvme_set_completion(sc, req->nvme_sq, req->sqid, req->cid,
2572 pci_nvme_release_ioreq(sc, req);
2577 nvme_opc_dataset_mgmt(struct pci_nvme_softc *sc,
2578 struct nvme_command *cmd,
2579 struct pci_nvme_blockstore *nvstore,
2580 struct pci_nvme_ioreq *req,
2583 struct nvme_dsm_range *range = NULL;
2584 uint32_t nr, r, non_zero, dr;
2586 bool pending = false;
2588 if ((sc->ctrldata.oncs & NVME_ONCS_DSM) == 0) {
2589 pci_nvme_status_genc(status, NVME_SC_INVALID_OPCODE);
2593 nr = cmd->cdw10 & 0xff;
2595 /* copy locally because a range entry could straddle PRPs */
2596 range = calloc(1, NVME_MAX_DSM_TRIM);
2597 if (range == NULL) {
2598 pci_nvme_status_genc(status, NVME_SC_INTERNAL_DEVICE_ERROR);
2601 nvme_prp_memcpy(sc->nsc_pi->pi_vmctx, cmd->prp1, cmd->prp2,
2602 (uint8_t *)range, NVME_MAX_DSM_TRIM, NVME_COPY_FROM_PRP);
2604 /* Check for invalid ranges and the number of non-zero lengths */
2606 for (r = 0; r <= nr; r++) {
2607 if (pci_nvme_out_of_range(nvstore,
2608 range[r].starting_lba, range[r].length)) {
2609 pci_nvme_status_genc(status, NVME_SC_LBA_OUT_OF_RANGE);
2612 if (range[r].length != 0)
2616 if (cmd->cdw11 & NVME_DSM_ATTR_DEALLOCATE) {
2617 size_t offset, bytes;
2618 int sectsz_bits = sc->nvstore.sectsz_bits;
2621 * DSM calls are advisory only, and compliant controllers
2622 * may choose to take no actions (i.e. return Success).
2624 if (!nvstore->deallocate) {
2625 pci_nvme_status_genc(status, NVME_SC_SUCCESS);
2629 /* If all ranges have a zero length, return Success */
2630 if (non_zero == 0) {
2631 pci_nvme_status_genc(status, NVME_SC_SUCCESS);
2636 pci_nvme_status_genc(status, NVME_SC_INTERNAL_DEVICE_ERROR);
2640 offset = range[0].starting_lba << sectsz_bits;
2641 bytes = range[0].length << sectsz_bits;
2644 * If the request is for more than a single range, store
2645 * the ranges in the br_iov. Optimize for the common case
2646 * of a single range.
2648 * Note that the NVMe Number of Ranges is a zero-based value
2650 req->io_req.br_iovcnt = 0;
2651 req->io_req.br_offset = offset;
2652 req->io_req.br_resid = bytes;
2655 req->io_req.br_callback = pci_nvme_io_done;
2657 struct iovec *iov = req->io_req.br_iov;
2659 for (r = 0, dr = 0; r <= nr; r++) {
2660 offset = range[r].starting_lba << sectsz_bits;
2661 bytes = range[r].length << sectsz_bits;
2665 if ((nvstore->size - offset) < bytes) {
2666 pci_nvme_status_genc(status,
2667 NVME_SC_LBA_OUT_OF_RANGE);
2670 iov[dr].iov_base = (void *)offset;
2671 iov[dr].iov_len = bytes;
2674 req->io_req.br_callback = pci_nvme_dealloc_sm;
2677 * Use prev_gpaddr to track the current entry and
2678 * prev_size to track the number of entries
2680 req->prev_gpaddr = 0;
2681 req->prev_size = dr;
2684 err = blockif_delete(nvstore->ctx, &req->io_req);
2686 pci_nvme_status_genc(status, NVME_SC_INTERNAL_DEVICE_ERROR);
2696 pci_nvme_handle_io_cmd(struct pci_nvme_softc* sc, uint16_t idx)
2698 struct nvme_submission_queue *sq;
2702 /* handle all submissions up to sq->tail index */
2703 sq = &sc->submit_queues[idx];
2705 pthread_mutex_lock(&sq->mtx);
2708 DPRINTF("nvme_handle_io qid %u head %u tail %u cmdlist %p",
2709 idx, sqhead, sq->tail, sq->qbase);
2711 while (sqhead != atomic_load_acq_short(&sq->tail)) {
2712 struct nvme_command *cmd;
2713 struct pci_nvme_ioreq *req;
2721 cmd = &sq->qbase[sqhead];
2722 sqhead = (sqhead + 1) % sq->size;
2724 nsid = le32toh(cmd->nsid);
2725 if ((nsid == 0) || (nsid > sc->ctrldata.nn)) {
2726 pci_nvme_status_genc(&status,
2727 NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
2729 NVME_STATUS_DNR_MASK << NVME_STATUS_DNR_SHIFT;
2733 req = pci_nvme_get_ioreq(sc);
2735 pci_nvme_status_genc(&status,
2736 NVME_SC_INTERNAL_DEVICE_ERROR);
2737 WPRINTF("%s: unable to allocate IO req", __func__);
2742 req->opc = cmd->opc;
2743 req->cid = cmd->cid;
2744 req->nsid = cmd->nsid;
2747 case NVME_OPC_FLUSH:
2748 pending = nvme_opc_flush(sc, cmd, &sc->nvstore,
2751 case NVME_OPC_WRITE:
2753 pending = nvme_opc_write_read(sc, cmd, &sc->nvstore,
2756 case NVME_OPC_WRITE_ZEROES:
2757 /* TODO: write zeroes
2758 WPRINTF("%s write zeroes lba 0x%lx blocks %u",
2759 __func__, lba, cmd->cdw12 & 0xFFFF); */
2760 pci_nvme_status_genc(&status, NVME_SC_SUCCESS);
2762 case NVME_OPC_DATASET_MANAGEMENT:
2763 pending = nvme_opc_dataset_mgmt(sc, cmd, &sc->nvstore,
2767 WPRINTF("%s unhandled io command 0x%x",
2768 __func__, cmd->opc);
2769 pci_nvme_status_genc(&status, NVME_SC_INVALID_OPCODE);
2773 pci_nvme_set_completion(sc, sq, idx, cmd->cid, status);
2775 pci_nvme_release_ioreq(sc, req);
2781 pthread_mutex_unlock(&sq->mtx);
2785 pci_nvme_handle_doorbell(struct pci_nvme_softc* sc,
2786 uint64_t idx, int is_sq, uint64_t value)
2788 DPRINTF("nvme doorbell %lu, %s, val 0x%lx",
2789 idx, is_sq ? "SQ" : "CQ", value & 0xFFFF);
2792 if (idx > sc->num_squeues) {
2793 WPRINTF("%s queue index %lu overflow from "
2795 __func__, idx, sc->num_squeues);
2799 atomic_store_short(&sc->submit_queues[idx].tail,
2803 pci_nvme_handle_admin_cmd(sc, value);
2805 /* submission queue; handle new entries in SQ */
2806 if (idx > sc->num_squeues) {
2807 WPRINTF("%s SQ index %lu overflow from "
2809 __func__, idx, sc->num_squeues);
2812 pci_nvme_handle_io_cmd(sc, (uint16_t)idx);
2815 if (idx > sc->num_cqueues) {
2816 WPRINTF("%s queue index %lu overflow from "
2818 __func__, idx, sc->num_cqueues);
2822 atomic_store_short(&sc->compl_queues[idx].head,
2828 pci_nvme_bar0_reg_dumps(const char *func, uint64_t offset, int iswrite)
2830 const char *s = iswrite ? "WRITE" : "READ";
2833 case NVME_CR_CAP_LOW:
2834 DPRINTF("%s %s NVME_CR_CAP_LOW", func, s);
2836 case NVME_CR_CAP_HI:
2837 DPRINTF("%s %s NVME_CR_CAP_HI", func, s);
2840 DPRINTF("%s %s NVME_CR_VS", func, s);
2843 DPRINTF("%s %s NVME_CR_INTMS", func, s);
2846 DPRINTF("%s %s NVME_CR_INTMC", func, s);
2849 DPRINTF("%s %s NVME_CR_CC", func, s);
2852 DPRINTF("%s %s NVME_CR_CSTS", func, s);
2855 DPRINTF("%s %s NVME_CR_NSSR", func, s);
2858 DPRINTF("%s %s NVME_CR_AQA", func, s);
2860 case NVME_CR_ASQ_LOW:
2861 DPRINTF("%s %s NVME_CR_ASQ_LOW", func, s);
2863 case NVME_CR_ASQ_HI:
2864 DPRINTF("%s %s NVME_CR_ASQ_HI", func, s);
2866 case NVME_CR_ACQ_LOW:
2867 DPRINTF("%s %s NVME_CR_ACQ_LOW", func, s);
2869 case NVME_CR_ACQ_HI:
2870 DPRINTF("%s %s NVME_CR_ACQ_HI", func, s);
2873 DPRINTF("unknown nvme bar-0 offset 0x%lx", offset);
2879 pci_nvme_write_bar_0(struct pci_nvme_softc *sc, uint64_t offset, int size,
2884 if (offset >= NVME_DOORBELL_OFFSET) {
2885 uint64_t belloffset = offset - NVME_DOORBELL_OFFSET;
2886 uint64_t idx = belloffset / 8; /* doorbell pair size = 2 * sizeof(uint32_t) */
2887 int is_sq = (belloffset % 8) < 4;
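/*
 * Doorbell layout, assuming the default 4-byte stride (CAP.DSTRD == 0):
 * each queue owns an 8-byte pair, SQ tail first then CQ head, e.g.
 * offset 0x1000 is the admin SQ tail doorbell, 0x1004 the admin CQ head,
 * 0x1008 SQ 1's tail, and so on.
 */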
2889 if ((sc->regs.csts & NVME_CSTS_RDY) == 0) {
2890 WPRINTF("doorbell write prior to RDY (offset=%#lx)\n",
2895 if (belloffset > ((sc->max_queues+1) * 8 - 4)) {
2896 WPRINTF("guest attempted an overflow write offset "
2897 "0x%lx, val 0x%lx in %s",
2898 offset, value, __func__);
2903 if (sc->submit_queues[idx].qbase == NULL)
2905 } else if (sc->compl_queues[idx].qbase == NULL)
2908 pci_nvme_handle_doorbell(sc, idx, is_sq, value);
2912 DPRINTF("nvme-write offset 0x%lx, size %d, value 0x%lx",
2913 offset, size, value);
2916 WPRINTF("guest wrote invalid size %d (offset 0x%lx, "
2917 "val 0x%lx) to bar0 in %s",
2918 size, offset, value, __func__);
2919 /* TODO: shutdown device */
2923 pci_nvme_bar0_reg_dumps(__func__, offset, 1);
2925 pthread_mutex_lock(&sc->mtx);
2928 case NVME_CR_CAP_LOW:
2929 case NVME_CR_CAP_HI:
2936 /* MSI-X, so ignore */
2939 /* MSI-X, so ignore */
2942 ccreg = (uint32_t)value;
2944 DPRINTF("%s NVME_CR_CC en %x css %x shn %x iosqes %u "
2947 NVME_CC_GET_EN(ccreg), NVME_CC_GET_CSS(ccreg),
2948 NVME_CC_GET_SHN(ccreg), NVME_CC_GET_IOSQES(ccreg),
2949 NVME_CC_GET_IOCQES(ccreg));
2951 if (NVME_CC_GET_SHN(ccreg)) {
2952 /* perform shutdown - flush out data to backend */
2953 sc->regs.csts &= ~(NVME_CSTS_REG_SHST_MASK <<
2954 NVME_CSTS_REG_SHST_SHIFT);
2955 sc->regs.csts |= NVME_SHST_COMPLETE <<
2956 NVME_CSTS_REG_SHST_SHIFT;
2958 if (NVME_CC_GET_EN(ccreg) != NVME_CC_GET_EN(sc->regs.cc)) {
2959 if (NVME_CC_GET_EN(ccreg) == 0)
2960 /* transition 1->0 causes controller reset */
2961 pci_nvme_reset_locked(sc);
2963 pci_nvme_init_controller(sc);
2966 /* Insert the iocqes, iosqes and en bits from the write */
2967 sc->regs.cc &= ~NVME_CC_WRITE_MASK;
2968 sc->regs.cc |= ccreg & NVME_CC_WRITE_MASK;
2969 if (NVME_CC_GET_EN(ccreg) == 0) {
2970 /* Insert the ams, mps and css bit fields */
2971 sc->regs.cc &= ~NVME_CC_NEN_WRITE_MASK;
2972 sc->regs.cc |= ccreg & NVME_CC_NEN_WRITE_MASK;
2973 sc->regs.csts &= ~NVME_CSTS_RDY;
2974 } else if ((sc->pending_ios == 0) &&
2975 !(sc->regs.csts & NVME_CSTS_CFS)) {
2976 sc->regs.csts |= NVME_CSTS_RDY;
2982 /* ignore writes; don't support subsystem reset */
2985 sc->regs.aqa = (uint32_t)value;
2987 case NVME_CR_ASQ_LOW:
2988 sc->regs.asq = (sc->regs.asq & (0xFFFFFFFF00000000)) |
2989 (0xFFFFF000 & value);
2991 case NVME_CR_ASQ_HI:
2992 sc->regs.asq = (sc->regs.asq & (0x00000000FFFFFFFF)) |
2995 case NVME_CR_ACQ_LOW:
2996 sc->regs.acq = (sc->regs.acq & (0xFFFFFFFF00000000)) |
2997 (0xFFFFF000 & value);
2999 case NVME_CR_ACQ_HI:
3000 sc->regs.acq = (sc->regs.acq & (0x00000000FFFFFFFF)) |
3004 DPRINTF("%s unknown offset 0x%lx, value 0x%lx size %d",
3005 __func__, offset, value, size);
3007 pthread_mutex_unlock(&sc->mtx);
3011 pci_nvme_write(struct pci_devinst *pi, int baridx, uint64_t offset, int size,
3014 struct pci_nvme_softc* sc = pi->pi_arg;
3016 if (baridx == pci_msix_table_bar(pi) ||
3017 baridx == pci_msix_pba_bar(pi)) {
3018 DPRINTF("nvme-write baridx %d, msix: off 0x%lx, size %d, "
3019 " value 0x%lx", baridx, offset, size, value);
3021 pci_emul_msix_twrite(pi, offset, size, value);
3027 pci_nvme_write_bar_0(sc, offset, size, value);
3031 DPRINTF("%s unknown baridx %d, val 0x%lx",
3032 __func__, baridx, value);
3036 static uint64_t pci_nvme_read_bar_0(struct pci_nvme_softc* sc,
3037 uint64_t offset, int size)
3041 pci_nvme_bar0_reg_dumps(__func__, offset, 0);
3043 if (offset < NVME_DOORBELL_OFFSET) {
3044 void *p = &(sc->regs);
3045 pthread_mutex_lock(&sc->mtx);
3046 memcpy(&value, (void *)((uintptr_t)p + offset), size);
3047 pthread_mutex_unlock(&sc->mtx);
3050 WPRINTF("pci_nvme: read invalid offset %ld", offset);
3061 value &= 0xFFFFFFFF;
3065 DPRINTF(" nvme-read offset 0x%lx, size %d -> value 0x%x",
3066 offset, size, (uint32_t)value);
3074 pci_nvme_read(struct pci_devinst *pi, int baridx, uint64_t offset, int size)
3076 struct pci_nvme_softc* sc = pi->pi_arg;
3078 if (baridx == pci_msix_table_bar(pi) ||
3079 baridx == pci_msix_pba_bar(pi)) {
3080 DPRINTF("nvme-read bar: %d, msix: regoff 0x%lx, size %d",
3081 baridx, offset, size);
3083 return pci_emul_msix_tread(pi, offset, size);
3088 return pci_nvme_read_bar_0(sc, offset, size);
3091 DPRINTF("unknown bar %d, 0x%lx", baridx, offset);
3098 pci_nvme_parse_config(struct pci_nvme_softc *sc, nvlist_t *nvl)
3100 char bident[sizeof("XXX:XXX")];
3104 sc->max_queues = NVME_QUEUES;
3105 sc->max_qentries = NVME_MAX_QENTRIES;
3106 sc->ioslots = NVME_IOSLOTS;
3107 sc->num_squeues = sc->max_queues;
3108 sc->num_cqueues = sc->max_queues;
3109 sc->dataset_management = NVME_DATASET_MANAGEMENT_AUTO;
3111 snprintf(sc->ctrldata.sn, sizeof(sc->ctrldata.sn),
3112 "NVME-%d-%d", sc->nsc_pi->pi_slot, sc->nsc_pi->pi_func);
3114 value = get_config_value_node(nvl, "maxq");
3116 sc->max_queues = atoi(value);
3117 value = get_config_value_node(nvl, "qsz");
3118 if (value != NULL) {
3119 sc->max_qentries = atoi(value);
3120 if (sc->max_qentries <= 0) {
3121 EPRINTLN("nvme: Invalid qsz option %d",
3126 value = get_config_value_node(nvl, "ioslots");
3127 if (value != NULL) {
3128 sc->ioslots = atoi(value);
3129 if (sc->ioslots <= 0) {
3130 EPRINTLN("Invalid ioslots option %d", sc->ioslots);
3134 value = get_config_value_node(nvl, "sectsz");
3136 sectsz = atoi(value);
3137 value = get_config_value_node(nvl, "ser");
3138 if (value != NULL) {
3140 * This field indicates the Product Serial Number in
3141 * 7-bit ASCII; unused bytes should be space characters.
3144 cpywithpad((char *)sc->ctrldata.sn,
3145 sizeof(sc->ctrldata.sn), value, ' ');
3147 value = get_config_value_node(nvl, "eui64");
3149 sc->nvstore.eui64 = htobe64(strtoull(value, NULL, 0));
3150 value = get_config_value_node(nvl, "dsm");
3151 if (value != NULL) {
3152 if (strcmp(value, "auto") == 0)
3153 sc->dataset_management = NVME_DATASET_MANAGEMENT_AUTO;
3154 else if (strcmp(value, "enable") == 0)
3155 sc->dataset_management = NVME_DATASET_MANAGEMENT_ENABLE;
3156 else if (strcmp(value, "disable") == 0)
3157 sc->dataset_management = NVME_DATASET_MANAGEMENT_DISABLE;
3160 value = get_config_value_node(nvl, "bootindex");
3161 if (value != NULL) {
3162 if (pci_emul_add_boot_device(sc->nsc_pi, atoi(value))) {
3163 EPRINTLN("Invalid bootindex %d", atoi(value));
3168 value = get_config_value_node(nvl, "ram");
3169 if (value != NULL) {
3170 uint64_t sz = strtoull(value, NULL, 10);
3172 sc->nvstore.type = NVME_STOR_RAM;
3173 sc->nvstore.size = sz * 1024 * 1024;
3174 sc->nvstore.ctx = calloc(1, sc->nvstore.size);
3175 sc->nvstore.sectsz = 4096;
3176 sc->nvstore.sectsz_bits = 12;
3177 if (sc->nvstore.ctx == NULL) {
3178 EPRINTLN("nvme: Unable to allocate RAM");
3182 snprintf(bident, sizeof(bident), "%u:%u",
3183 sc->nsc_pi->pi_slot, sc->nsc_pi->pi_func);
3184 sc->nvstore.ctx = blockif_open(nvl, bident);
3185 if (sc->nvstore.ctx == NULL) {
3186 EPRINTLN("nvme: Could not open backing file: %s",
3190 sc->nvstore.type = NVME_STOR_BLOCKIF;
3191 sc->nvstore.size = blockif_size(sc->nvstore.ctx);
3194 if (sectsz == 512 || sectsz == 4096 || sectsz == 8192)
3195 sc->nvstore.sectsz = sectsz;
3196 else if (sc->nvstore.type != NVME_STOR_RAM)
3197 sc->nvstore.sectsz = blockif_sectsz(sc->nvstore.ctx);
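/* The loop below computes sectsz_bits = log2(sectsz), e.g. 512 -> 9, 4096 -> 12 */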
3198 for (sc->nvstore.sectsz_bits = 9;
3199 (1U << sc->nvstore.sectsz_bits) < sc->nvstore.sectsz;
3200 sc->nvstore.sectsz_bits++);
3202 if (sc->max_queues <= 0 || sc->max_queues > NVME_QUEUES)
3203 sc->max_queues = NVME_QUEUES;
3209 pci_nvme_resized(struct blockif_ctxt *bctxt __unused, void *arg,
3212 struct pci_nvme_softc *sc;
3213 struct pci_nvme_blockstore *nvstore;
3214 struct nvme_namespace_data *nd;
3217 nvstore = &sc->nvstore;
3220 nvstore->size = new_size;
3221 pci_nvme_init_nsdata_size(nvstore, nd);
3223 /* Add changed NSID to list */
3224 sc->ns_log.ns[0] = 1;
3225 sc->ns_log.ns[1] = 0;
3227 pci_nvme_aen_post(sc, PCI_NVME_AE_TYPE_NOTICE,
3228 PCI_NVME_AEI_NOTICE_NS_ATTR_CHANGED);
3232 pci_nvme_init(struct pci_devinst *pi, nvlist_t *nvl)
3234 struct pci_nvme_softc *sc;
3235 uint32_t pci_membar_sz;
3240 sc = calloc(1, sizeof(struct pci_nvme_softc));
3244 error = pci_nvme_parse_config(sc, nvl);
3250 STAILQ_INIT(&sc->ioreqs_free);
3251 sc->ioreqs = calloc(sc->ioslots, sizeof(struct pci_nvme_ioreq));
3252 for (uint32_t i = 0; i < sc->ioslots; i++) {
3253 STAILQ_INSERT_TAIL(&sc->ioreqs_free, &sc->ioreqs[i], link);
3256 pci_set_cfgdata16(pi, PCIR_DEVICE, 0x0A0A);
3257 pci_set_cfgdata16(pi, PCIR_VENDOR, 0xFB5D);
3258 pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_STORAGE);
3259 pci_set_cfgdata8(pi, PCIR_SUBCLASS, PCIS_STORAGE_NVM);
3260 pci_set_cfgdata8(pi, PCIR_PROGIF,
3261 PCIP_STORAGE_NVM_ENTERPRISE_NVMHCI_1_0);
3264 * Allocate size of NVMe registers + doorbell space for all queues.
3266 * The specification requires a minimum memory I/O window size of 16K.
3267 * The Windows driver will refuse to start a device with a smaller window.
3270 pci_membar_sz = sizeof(struct nvme_registers) +
3271 2 * sizeof(uint32_t) * (sc->max_queues + 1);
3272 pci_membar_sz = MAX(pci_membar_sz, NVME_MMIO_SPACE_MIN);
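/*
 * For example, with the default 16 queues the register block plus 17
 * doorbell pairs comes to well under 16 KiB, so pci_membar_sz is
 * normally rounded up to NVME_MMIO_SPACE_MIN here.
 */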
3274 DPRINTF("nvme membar size: %u", pci_membar_sz);
3276 error = pci_emul_alloc_bar(pi, 0, PCIBAR_MEM64, pci_membar_sz);
3278 WPRINTF("%s pci alloc mem bar failed", __func__);
3282 error = pci_emul_add_msixcap(pi, sc->max_queues + 1, NVME_MSIX_BAR);
3284 WPRINTF("%s pci add msixcap failed", __func__);
3288 error = pci_emul_add_pciecap(pi, PCIEM_TYPE_ROOT_INT_EP);
3290 WPRINTF("%s pci add Express capability failed", __func__);
3294 pthread_mutex_init(&sc->mtx, NULL);
3295 sem_init(&sc->iosemlock, 0, sc->ioslots);
3296 blockif_register_resize_callback(sc->nvstore.ctx, pci_nvme_resized, sc);
3298 pci_nvme_init_queues(sc, sc->max_queues, sc->max_queues);
3300 * Controller data depends on Namespace data, so initialize Namespace data first.
3303 pci_nvme_init_nsdata(sc, &sc->nsdata, 1, &sc->nvstore);
3304 pci_nvme_init_ctrldata(sc);
3305 pci_nvme_init_logpages(sc);
3306 pci_nvme_init_features(sc);
3308 pci_nvme_aer_init(sc);
3309 pci_nvme_aen_init(sc);
3318 pci_nvme_legacy_config(nvlist_t *nvl, const char *opts)
3325 if (strncmp(opts, "ram=", 4) == 0) {
3326 cp = strchr(opts, ',');
3328 set_config_value_node(nvl, "ram", opts + 4);
3331 ram = strndup(opts + 4, cp - opts - 4);
3332 set_config_value_node(nvl, "ram", ram);
3334 return (pci_parse_legacy_config(nvl, cp + 1));
3336 return (blockif_legacy_config(nvl, opts));
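/*
 * Example of the legacy parsing above (hypothetical values): an option
 * string beginning with "ram=1024" becomes a 1024 MiB RAM-backed
 * namespace, with anything after the first comma handed to
 * pci_parse_legacy_config(); any other string is treated as a backing
 * store path plus options for blockif_legacy_config().
 */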
3339 static const struct pci_devemu pci_de_nvme = {
3341 .pe_init = pci_nvme_init,
3342 .pe_legacy_config = pci_nvme_legacy_config,
3343 .pe_barwrite = pci_nvme_write,
3344 .pe_barread = pci_nvme_read
3346 PCI_EMUL_SET(pci_de_nvme);