/*
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2017 Shunsuke Mie
 * Copyright (c) 2018 Leon Dang
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * bhyve PCIe-NVMe device emulation.
 *
 * options:
 *  -s <n>,nvme,devpath,maxq=#,qsz=#,ioslots=#,sectsz=#,ser=A-Z
 *
 *  maxq    = max number of queues
 *  qsz     = max elements in each queue
 *  ioslots = max number of concurrent io requests
 *  sectsz  = sector size (defaults to blockif sector size)
 *  ser     = serial number (20 chars max)
 */
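/*
 * Example invocation (a sketch; the disk image path and serial number
 * below are hypothetical):
 *  -s 4,nvme,/path/to/disk.img,maxq=4,qsz=512,ioslots=16,sectsz=512,ser=NVME0001
 */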
/* TODO:
 *  - create async event for smart and log
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>

#include <semaphore.h>

#include <machine/atomic.h>
#include <machine/vmm.h>

#include <dev/nvme/nvme.h>
static int nvme_debug = 0;
#define DPRINTF(params) if (nvme_debug) printf params
#define WPRINTF(params) printf params

/* defaults; can be overridden */
#define NVME_MSIX_BAR           4

#define NVME_IOSLOTS            8

/* The NVMe spec defines bits 13:4 in BAR0 as reserved */
#define NVME_MMIO_SPACE_MIN     (1 << 14)

#define NVME_QUEUES             16
#define NVME_MAX_QENTRIES       2048

#define NVME_PRP2_ITEMS         (PAGE_SIZE/sizeof(uint64_t))
#define NVME_MAX_BLOCKIOVS      512
/* Convert a zero-based value into a one-based value */
#define ONE_BASED(zero)         ((zero) + 1)
/* Convert a one-based value into a zero-based value */
#define ZERO_BASED(one)         ((one) - 1)

/*
 * Encode number of SQ's and CQ's for Set/Get Features. Parenthesize the
 * whole expression and omit a trailing semicolon so the macro is safe to
 * use anywhere an expression is expected.
 */
#define NVME_FEATURE_NUM_QUEUES(sc) \
    ((ZERO_BASED((sc)->num_squeues) & 0xffff) | \
     ((ZERO_BASED((sc)->num_cqueues) & 0xffff) << 16))
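/*
 * Worked example of the encoding above: with 16 submission and 16
 * completion queues, each zero-based count is 15 (0xf), so the Features
 * completion dword is 0x000f000f.
 */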
#define NVME_DOORBELL_OFFSET    offsetof(struct nvme_registers, doorbell)
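/*
 * Doorbell layout sketch implied by this offset (assuming the default
 * doorbell stride, CAP.DSTRD = 0, which this emulation uses): SQ y's tail
 * doorbell sits at NVME_DOORBELL_OFFSET + 8*y and CQ y's head doorbell at
 * NVME_DOORBELL_OFFSET + 8*y + 4, which is what the BAR0 write handler
 * below decodes with idx = belloffset / 8 and is_sq = (belloffset % 8) < 4.
 */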
enum nvme_controller_register_offsets {
    NVME_CR_CAP_LOW = 0x00,
    NVME_CR_CAP_HI  = 0x04,
    NVME_CR_VS      = 0x08,
    NVME_CR_INTMS   = 0x0c,
    NVME_CR_INTMC   = 0x10,
    NVME_CR_CC      = 0x14,
    NVME_CR_CSTS    = 0x1c,
    NVME_CR_NSSR    = 0x20,
    NVME_CR_AQA     = 0x24,
    NVME_CR_ASQ_LOW = 0x28,
    NVME_CR_ASQ_HI  = 0x2c,
    NVME_CR_ACQ_LOW = 0x30,
    NVME_CR_ACQ_HI  = 0x34,
};
enum nvme_cmd_cdw11 {
    NVME_CMD_CDW11_PC  = 0x0001,
    NVME_CMD_CDW11_IEN = 0x0002,
    NVME_CMD_CDW11_IV  = 0xFFFF0000,
};

#define NVME_CQ_INTEN   0x01
#define NVME_CQ_INTCOAL 0x02
struct nvme_completion_queue {
    struct nvme_completion *qbase;
    uint32_t size;
    uint16_t tail;      /* nvme progress */
    uint16_t head;      /* guest progress */
    uint16_t intr_vec;
    uint32_t intr_en;

    pthread_mutex_t mtx;
};
struct nvme_submission_queue {
    struct nvme_command *qbase;
    uint32_t size;
    uint16_t head;      /* nvme progress */
    uint16_t tail;      /* guest progress */
    uint16_t cqid;      /* completion queue id */
    int busy;           /* queue is being processed */
    int qpriority;
};
enum nvme_storage_type {
    NVME_STOR_BLOCKIF = 0,
    NVME_STOR_RAM = 1,
};

struct pci_nvme_blockstore {
    enum nvme_storage_type type;
    void     *ctx;
    uint64_t size;
    uint32_t sectsz;
    uint32_t sectsz_bits;
};
struct pci_nvme_ioreq {
    struct pci_nvme_softc *sc;
    struct pci_nvme_ioreq *next;
    struct nvme_submission_queue *nvme_sq;
    uint16_t sqid;

    /* command information */
    uint16_t opc;
    uint16_t cid;
    uint32_t nsid;

    uint64_t prev_gpaddr;
    size_t   prev_size;

    /*
     * lock if all iovs consumed (big IO);
     * complete transaction before continuing
     */
    pthread_mutex_t mtx;
    pthread_cond_t  cv;

    struct blockif_req io_req;

    /* pad to fit up to 512 page descriptors from guest IO request */
    struct iovec iovpadding[NVME_MAX_BLOCKIOVS - BLOCKIF_IOV_MAX];
};
struct pci_nvme_softc {
    struct pci_devinst *nsc_pi;

    pthread_mutex_t mtx;

    struct nvme_registers regs;

    struct nvme_namespace_data  nsdata;
    struct nvme_controller_data ctrldata;
    struct nvme_error_information_entry err_log;
    struct nvme_health_information_page health_log;
    struct nvme_firmware_page fw_log;

    struct pci_nvme_blockstore nvstore;

    uint16_t max_qentries;  /* max entries per queue */
    uint32_t max_queues;    /* max number of IO SQ's or CQ's */
    uint32_t num_cqueues;
    uint32_t num_squeues;

    struct pci_nvme_ioreq *ioreqs;
    struct pci_nvme_ioreq *ioreqs_free; /* free list of ioreqs */
    uint32_t pending_ios;
    uint32_t ioslots;
    sem_t    iosemlock;

    /*
     * Memory mapped Submission and Completion queues.
     * Each array includes both Admin and IO queues.
     */
    struct nvme_completion_queue *compl_queues;
    struct nvme_submission_queue *submit_queues;

    /* controller features */
    uint32_t intr_coales_aggr_time;   /* 0x08: uS to delay intr */
    uint32_t intr_coales_aggr_thresh; /* 0x08: compl-Q entries */
    uint32_t async_ev_config;         /* 0x0B: async event config */
};
static void pci_nvme_io_partial(struct blockif_req *br, int err);
/* Controller Configuration utils */
#define NVME_CC_GET_EN(cc) \
    ((cc) >> NVME_CC_REG_EN_SHIFT & NVME_CC_REG_EN_MASK)
#define NVME_CC_GET_CSS(cc) \
    ((cc) >> NVME_CC_REG_CSS_SHIFT & NVME_CC_REG_CSS_MASK)
#define NVME_CC_GET_SHN(cc) \
    ((cc) >> NVME_CC_REG_SHN_SHIFT & NVME_CC_REG_SHN_MASK)
#define NVME_CC_GET_IOSQES(cc) \
    ((cc) >> NVME_CC_REG_IOSQES_SHIFT & NVME_CC_REG_IOSQES_MASK)
#define NVME_CC_GET_IOCQES(cc) \
    ((cc) >> NVME_CC_REG_IOCQES_SHIFT & NVME_CC_REG_IOCQES_MASK)

#define NVME_CC_WRITE_MASK \
    ((NVME_CC_REG_EN_MASK << NVME_CC_REG_EN_SHIFT) | \
     (NVME_CC_REG_IOSQES_MASK << NVME_CC_REG_IOSQES_SHIFT) | \
     (NVME_CC_REG_IOCQES_MASK << NVME_CC_REG_IOCQES_SHIFT))

#define NVME_CC_NEN_WRITE_MASK \
    ((NVME_CC_REG_CSS_MASK << NVME_CC_REG_CSS_SHIFT) | \
     (NVME_CC_REG_MPS_MASK << NVME_CC_REG_MPS_SHIFT) | \
     (NVME_CC_REG_AMS_MASK << NVME_CC_REG_AMS_SHIFT))

/* Controller Status utils */
#define NVME_CSTS_GET_RDY(sts) \
    ((sts) >> NVME_CSTS_REG_RDY_SHIFT & NVME_CSTS_REG_RDY_MASK)

#define NVME_CSTS_RDY   (1 << NVME_CSTS_REG_RDY_SHIFT)

/* Completion Queue status word utils */
#define NVME_STATUS_P   (1 << NVME_STATUS_P_SHIFT)
#define NVME_STATUS_MASK \
    ((NVME_STATUS_SCT_MASK << NVME_STATUS_SCT_SHIFT) | \
     (NVME_STATUS_SC_MASK << NVME_STATUS_SC_SHIFT))
static void
cpywithpad(char *dst, size_t dst_size, const char *src, char pad)
{
    size_t len;

    len = strnlen(src, dst_size);
    memset(dst, pad, dst_size);
    memcpy(dst, src, len);
}
static void
pci_nvme_status_tc(uint16_t *status, uint16_t type, uint16_t code)
{
    *status &= ~NVME_STATUS_MASK;
    *status |= (type & NVME_STATUS_SCT_MASK) << NVME_STATUS_SCT_SHIFT |
        (code & NVME_STATUS_SC_MASK) << NVME_STATUS_SC_SHIFT;
}

static void
pci_nvme_status_genc(uint16_t *status, uint16_t code)
{
    pci_nvme_status_tc(status, NVME_SCT_GENERIC, code);
}

static void
pci_nvme_toggle_phase(uint16_t *status, int prev)
{
    if (prev)
        *status &= ~NVME_STATUS_P;
    else
        *status |= NVME_STATUS_P;
}
static void
pci_nvme_init_ctrldata(struct pci_nvme_softc *sc)
{
    struct nvme_controller_data *cd = &sc->ctrldata;

    cpywithpad((char *)cd->mn, sizeof(cd->mn), "bhyve-NVMe", ' ');
    cpywithpad((char *)cd->fr, sizeof(cd->fr), "1.0", ' ');

    /* Num of submission commands that we can handle at a time (2^rab) */

    cd->mdts = 9;   /* max data transfer size (2^mdts * CAP.MPSMIN) */

    cd->ver = 0x00010300;

    cd->oacs = 1 << NVME_CTRLR_DATA_OACS_FORMAT_SHIFT;

    cd->lpa = 0;    /* TODO: support some simple things like SMART */
    cd->elpe = 0;   /* max error log page entries */
    cd->npss = 1;   /* number of power states supported */

    /* Warning Composite Temperature Threshold */

    cd->sqes = (6 << NVME_CTRLR_DATA_SQES_MAX_SHIFT) |
        (6 << NVME_CTRLR_DATA_SQES_MIN_SHIFT);
    cd->cqes = (4 << NVME_CTRLR_DATA_CQES_MAX_SHIFT) |
        (4 << NVME_CTRLR_DATA_CQES_MIN_SHIFT);
    cd->nn = 1;     /* number of namespaces */

    cd->power_state[0].mp = 10;
}
static void
pci_nvme_init_nsdata(struct pci_nvme_softc *sc)
{
    struct nvme_namespace_data *nd;

    nd = &sc->nsdata;

    nd->nsze = sc->nvstore.size / sc->nvstore.sectsz;

    /* Get LBA and backstore information from backing store */
    nd->nlbaf = 0; /* NLBAF is a 0's based value (i.e. 1 LBA Format) */
    /* LBA data-sz = 2^lbads */
    nd->lbaf[0] = sc->nvstore.sectsz_bits << NVME_NS_DATA_LBAF_LBADS_SHIFT;
}
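/*
 * Worked example for the LBADS encoding above: a 512-byte sector gives
 * sectsz_bits = 9 (2^9 = 512) and a 4096-byte sector gives 12; the guest
 * recovers the sector size as 2^LBADS.
 */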
static void
pci_nvme_init_logpages(struct pci_nvme_softc *sc)
{
    memset(&sc->err_log, 0, sizeof(sc->err_log));
    memset(&sc->health_log, 0, sizeof(sc->health_log));
    memset(&sc->fw_log, 0, sizeof(sc->fw_log));
}
static void
pci_nvme_reset_locked(struct pci_nvme_softc *sc)
{
    DPRINTF(("%s\r\n", __func__));

    sc->regs.cap_lo = (ZERO_BASED(sc->max_qentries) & NVME_CAP_LO_REG_MQES_MASK) |
        (1 << NVME_CAP_LO_REG_CQR_SHIFT) |
        (60 << NVME_CAP_LO_REG_TO_SHIFT);

    sc->regs.cap_hi = 1 << NVME_CAP_HI_REG_CSS_NVM_SHIFT;

    sc->regs.vs = 0x00010300;   /* NVMe v1.3 */

    sc->num_cqueues = sc->num_squeues = sc->max_queues;
    if (sc->submit_queues != NULL) {
        for (int i = 0; i < sc->num_squeues + 1; i++) {
            /*
             * The Admin Submission Queue is at index 0.
             * It must not be changed at reset otherwise the
             * emulation will be out of sync with the guest.
             */
            sc->submit_queues[i].qbase = NULL;
            sc->submit_queues[i].size = 0;
            sc->submit_queues[i].cqid = 0;

            sc->submit_queues[i].tail = 0;
            sc->submit_queues[i].head = 0;
            sc->submit_queues[i].busy = 0;
        }
    } else
        sc->submit_queues = calloc(sc->num_squeues + 1,
            sizeof(struct nvme_submission_queue));

    if (sc->compl_queues != NULL) {
        for (int i = 0; i < sc->num_cqueues + 1; i++) {
            /* See Admin Submission Queue note above */
            sc->compl_queues[i].qbase = NULL;
            sc->compl_queues[i].size = 0;

            sc->compl_queues[i].tail = 0;
            sc->compl_queues[i].head = 0;
        }
    } else {
        sc->compl_queues = calloc(sc->num_cqueues + 1,
            sizeof(struct nvme_completion_queue));

        for (int i = 0; i < sc->num_cqueues + 1; i++)
            pthread_mutex_init(&sc->compl_queues[i].mtx, NULL);
    }
}
static void
pci_nvme_reset(struct pci_nvme_softc *sc)
{
    pthread_mutex_lock(&sc->mtx);
    pci_nvme_reset_locked(sc);
    pthread_mutex_unlock(&sc->mtx);
}
static void
pci_nvme_init_controller(struct vmctx *ctx, struct pci_nvme_softc *sc)
{
    uint16_t acqs, asqs;

    DPRINTF(("%s\r\n", __func__));

    asqs = (sc->regs.aqa & NVME_AQA_REG_ASQS_MASK) + 1;
    sc->submit_queues[0].size = asqs;
    sc->submit_queues[0].qbase = vm_map_gpa(ctx, sc->regs.asq,
        sizeof(struct nvme_command) * asqs);

    DPRINTF(("%s mapping Admin-SQ guest 0x%lx, host: %p\r\n",
        __func__, sc->regs.asq, sc->submit_queues[0].qbase));

    acqs = ((sc->regs.aqa >> NVME_AQA_REG_ACQS_SHIFT) &
        NVME_AQA_REG_ACQS_MASK) + 1;
    sc->compl_queues[0].size = acqs;
    sc->compl_queues[0].qbase = vm_map_gpa(ctx, sc->regs.acq,
        sizeof(struct nvme_completion) * acqs);
    DPRINTF(("%s mapping Admin-CQ guest 0x%lx, host: %p\r\n",
        __func__, sc->regs.acq, sc->compl_queues[0].qbase));
}
static int
nvme_prp_memcpy(struct vmctx *ctx, uint64_t prp1, uint64_t prp2, uint8_t *src,
    size_t len)
{
    uint8_t *dst;
    size_t bytes;

    if (len > (8 * 1024)) {
        return (-1);
    }

    /* Copy from the start of prp1 to the end of the physical page */
    bytes = PAGE_SIZE - (prp1 & PAGE_MASK);
    bytes = MIN(bytes, len);

    dst = vm_map_gpa(ctx, prp1, bytes);
    if (dst == NULL) {
        return (-1);
    }

    memcpy(dst, src, bytes);

    src += bytes;
    len -= bytes;
    if (len == 0) {
        return (0);
    }

    len = MIN(len, PAGE_SIZE);

    dst = vm_map_gpa(ctx, prp2, len);
    if (dst == NULL) {
        return (-1);
    }

    memcpy(dst, src, len);

    return (0);
}
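/*
 * Example of the split copy above (assuming 4 KiB pages): for prp1 =
 * 0x10234 and len = 4096, the first memcpy covers PAGE_SIZE - 0x234 =
 * 3532 bytes, and the remaining 564 bytes land at the start of prp2.
 */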
static int
nvme_opc_delete_io_sq(struct pci_nvme_softc* sc, struct nvme_command* command,
    struct nvme_completion* compl)
{
    uint16_t qid = command->cdw10 & 0xffff;

    DPRINTF(("%s DELETE_IO_SQ %u\r\n", __func__, qid));
    if (qid == 0 || qid > sc->num_squeues) {
        WPRINTF(("%s NOT PERMITTED queue id %u / num_squeues %u\r\n",
            __func__, qid, sc->num_squeues));
        pci_nvme_status_tc(&compl->status, NVME_SCT_COMMAND_SPECIFIC,
            NVME_SC_INVALID_QUEUE_IDENTIFIER);
        return (1);
    }

    sc->submit_queues[qid].qbase = NULL;
    pci_nvme_status_genc(&compl->status, NVME_SC_SUCCESS);
    return (1);
}
static int
nvme_opc_create_io_sq(struct pci_nvme_softc* sc, struct nvme_command* command,
    struct nvme_completion* compl)
{
    if (command->cdw11 & NVME_CMD_CDW11_PC) {
        uint16_t qid = command->cdw10 & 0xffff;
        struct nvme_submission_queue *nsq;

        if ((qid == 0) || (qid > sc->num_squeues)) {
            WPRINTF(("%s queue index %u > num_squeues %u\r\n",
                __func__, qid, sc->num_squeues));
            pci_nvme_status_tc(&compl->status,
                NVME_SCT_COMMAND_SPECIFIC,
                NVME_SC_INVALID_QUEUE_IDENTIFIER);
            return (1);
        }

        nsq = &sc->submit_queues[qid];
        nsq->size = ONE_BASED((command->cdw10 >> 16) & 0xffff);

        nsq->qbase = vm_map_gpa(sc->nsc_pi->pi_vmctx, command->prp1,
            sizeof(struct nvme_command) * (size_t)nsq->size);
        nsq->cqid = (command->cdw11 >> 16) & 0xffff;
        nsq->qpriority = (command->cdw11 >> 1) & 0x03;

        DPRINTF(("%s sq %u size %u gaddr %p cqid %u\r\n", __func__,
            qid, nsq->size, nsq->qbase, nsq->cqid));

        pci_nvme_status_genc(&compl->status, NVME_SC_SUCCESS);

        DPRINTF(("%s completed creating IOSQ qid %u\r\n",
            __func__, qid));
    } else {
        /*
         * Guest sent a non-contiguous submission queue request.
         * This setting is unsupported by this emulation.
         */
        WPRINTF(("%s unsupported non-contig (list-based) "
            "create i/o submission queue\r\n", __func__));

        pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD);
    }
    return (1);
}
static int
nvme_opc_delete_io_cq(struct pci_nvme_softc* sc, struct nvme_command* command,
    struct nvme_completion* compl)
{
    uint16_t qid = command->cdw10 & 0xffff;

    DPRINTF(("%s DELETE_IO_CQ %u\r\n", __func__, qid));
    if (qid == 0 || qid > sc->num_cqueues) {
        WPRINTF(("%s queue index %u / num_cqueues %u\r\n",
            __func__, qid, sc->num_cqueues));
        pci_nvme_status_tc(&compl->status, NVME_SCT_COMMAND_SPECIFIC,
            NVME_SC_INVALID_QUEUE_IDENTIFIER);
        return (1);
    }

    sc->compl_queues[qid].qbase = NULL;
    pci_nvme_status_genc(&compl->status, NVME_SC_SUCCESS);
    return (1);
}
static int
nvme_opc_create_io_cq(struct pci_nvme_softc* sc, struct nvme_command* command,
    struct nvme_completion* compl)
{
    if (command->cdw11 & NVME_CMD_CDW11_PC) {
        uint16_t qid = command->cdw10 & 0xffff;
        struct nvme_completion_queue *ncq;

        if ((qid == 0) || (qid > sc->num_cqueues)) {
            WPRINTF(("%s queue index %u > num_cqueues %u\r\n",
                __func__, qid, sc->num_cqueues));
            pci_nvme_status_tc(&compl->status,
                NVME_SCT_COMMAND_SPECIFIC,
                NVME_SC_INVALID_QUEUE_IDENTIFIER);
            return (1);
        }

        ncq = &sc->compl_queues[qid];
        ncq->intr_en = (command->cdw11 & NVME_CMD_CDW11_IEN) >> 1;
        ncq->intr_vec = (command->cdw11 >> 16) & 0xffff;
        ncq->size = ONE_BASED((command->cdw10 >> 16) & 0xffff);

        /* completion queue entries are nvme_completion sized */
        ncq->qbase = vm_map_gpa(sc->nsc_pi->pi_vmctx,
            command->prp1,
            sizeof(struct nvme_completion) * (size_t)ncq->size);

        pci_nvme_status_genc(&compl->status, NVME_SC_SUCCESS);
    } else {
        /*
         * Non-contiguous completion queues are unsupported.
         */
        WPRINTF(("%s unsupported non-contig (list-based) "
            "create i/o completion queue\r\n",
            __func__));

        /* 0x12 = Invalid Use of Controller Memory Buffer */
        pci_nvme_status_genc(&compl->status, 0x12);
    }

    return (1);
}
static int
nvme_opc_get_log_page(struct pci_nvme_softc* sc, struct nvme_command* command,
    struct nvme_completion* compl)
{
    uint32_t logsize;
    uint8_t logpage = command->cdw10 & 0xFF;

    /* NUMD is a zero-based count of dwords, so convert it to bytes */
    logsize = (1 + ((command->cdw10 >> 16) & 0xFFF)) * 4;

    DPRINTF(("%s log page %u len %u\r\n", __func__, logpage, logsize));

    pci_nvme_status_genc(&compl->status, NVME_SC_SUCCESS);

    switch (logpage) {
    case NVME_LOG_ERROR:
        nvme_prp_memcpy(sc->nsc_pi->pi_vmctx, command->prp1,
            command->prp2, (uint8_t *)&sc->err_log, logsize);
        break;
    case NVME_LOG_HEALTH_INFORMATION:
        /* TODO: present some smart info */
        nvme_prp_memcpy(sc->nsc_pi->pi_vmctx, command->prp1,
            command->prp2, (uint8_t *)&sc->health_log, logsize);
        break;
    case NVME_LOG_FIRMWARE_SLOT:
        nvme_prp_memcpy(sc->nsc_pi->pi_vmctx, command->prp1,
            command->prp2, (uint8_t *)&sc->fw_log, logsize);
        break;
    default:
        WPRINTF(("%s get log page %x command not supported\r\n",
            __func__, logpage));

        pci_nvme_status_tc(&compl->status, NVME_SCT_COMMAND_SPECIFIC,
            NVME_SC_INVALID_LOG_PAGE);
    }

    return (1);
}
static int
nvme_opc_identify(struct pci_nvme_softc* sc, struct nvme_command* command,
    struct nvme_completion* compl)
{
    void *dest;

    DPRINTF(("%s identify 0x%x nsid 0x%x\r\n", __func__,
        command->cdw10 & 0xFF, command->nsid));

    switch (command->cdw10 & 0xFF) {
    case 0x00: /* return Identify Namespace data structure */
        nvme_prp_memcpy(sc->nsc_pi->pi_vmctx, command->prp1,
            command->prp2, (uint8_t *)&sc->nsdata, sizeof(sc->nsdata));
        break;
    case 0x01: /* return Identify Controller data structure */
        nvme_prp_memcpy(sc->nsc_pi->pi_vmctx, command->prp1,
            command->prp2, (uint8_t *)&sc->ctrldata,
            sizeof(sc->ctrldata));
        break;
    case 0x02: /* list of 1024 active NSIDs > CDW1.NSID */
        dest = vm_map_gpa(sc->nsc_pi->pi_vmctx, command->prp1,
            sizeof(uint32_t) * 1024);
        ((uint32_t *)dest)[0] = 1;
        ((uint32_t *)dest)[1] = 0;
        break;
    case 0x11:
        pci_nvme_status_genc(&compl->status,
            NVME_SC_INVALID_NAMESPACE_OR_FORMAT);
        return (1);
    case 0x03: /* list of NSID structures in CDW1.NSID, 4096 bytes */
    default:
        DPRINTF(("%s unsupported identify command requested 0x%x\r\n",
            __func__, command->cdw10 & 0xFF));
        pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD);
        return (1);
    }

    pci_nvme_status_genc(&compl->status, NVME_SC_SUCCESS);
    return (1);
}
static int
nvme_set_feature_queues(struct pci_nvme_softc* sc, struct nvme_command* command,
    struct nvme_completion* compl)
{
    uint16_t nqr;   /* Number of Queues Requested */

    nqr = command->cdw11 & 0xFFFF;
    if (nqr == 0xffff) {
        WPRINTF(("%s: Illegal NSQR value %#x\n", __func__, nqr));
        pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD);
        return (-1);
    }

    sc->num_squeues = ONE_BASED(nqr);
    if (sc->num_squeues > sc->max_queues) {
        DPRINTF(("NSQR=%u is greater than max %u\n", sc->num_squeues,
            sc->max_queues));
        sc->num_squeues = sc->max_queues;
    }

    nqr = (command->cdw11 >> 16) & 0xFFFF;
    if (nqr == 0xffff) {
        WPRINTF(("%s: Illegal NCQR value %#x\n", __func__, nqr));
        pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD);
        return (-1);
    }

    sc->num_cqueues = ONE_BASED(nqr);
    if (sc->num_cqueues > sc->max_queues) {
        DPRINTF(("NCQR=%u is greater than max %u\n", sc->num_cqueues,
            sc->max_queues));
        sc->num_cqueues = sc->max_queues;
    }

    compl->cdw0 = NVME_FEATURE_NUM_QUEUES(sc);

    return (0);
}
static int
nvme_opc_set_features(struct pci_nvme_softc* sc, struct nvme_command* command,
    struct nvme_completion* compl)
{
    int feature = command->cdw10 & 0xFF;
    uint32_t iv;

    DPRINTF(("%s feature 0x%x\r\n", __func__, feature));
    compl->cdw0 = 0;

    switch (feature) {
    case NVME_FEAT_ARBITRATION:
        DPRINTF(("  arbitration 0x%x\r\n", command->cdw11));
        break;
    case NVME_FEAT_POWER_MANAGEMENT:
        DPRINTF(("  power management 0x%x\r\n", command->cdw11));
        break;
    case NVME_FEAT_LBA_RANGE_TYPE:
        DPRINTF(("  lba range 0x%x\r\n", command->cdw11));
        break;
    case NVME_FEAT_TEMPERATURE_THRESHOLD:
        DPRINTF(("  temperature threshold 0x%x\r\n", command->cdw11));
        break;
    case NVME_FEAT_ERROR_RECOVERY:
        DPRINTF(("  error recovery 0x%x\r\n", command->cdw11));
        break;
    case NVME_FEAT_VOLATILE_WRITE_CACHE:
        DPRINTF(("  volatile write cache 0x%x\r\n", command->cdw11));
        break;
    case NVME_FEAT_NUMBER_OF_QUEUES:
        nvme_set_feature_queues(sc, command, compl);
        break;
    case NVME_FEAT_INTERRUPT_COALESCING:
        DPRINTF(("  interrupt coalescing 0x%x\r\n", command->cdw11));
        /* aggregation time is in units of 100 microseconds */
        sc->intr_coales_aggr_time = ((command->cdw11 >> 8) & 0xFF)*100;
        sc->intr_coales_aggr_thresh = command->cdw11 & 0xFF;
        break;
    case NVME_FEAT_INTERRUPT_VECTOR_CONFIGURATION:
        iv = command->cdw11 & 0xFFFF;

        DPRINTF(("  interrupt vector configuration 0x%x\r\n",
            command->cdw11));

        for (uint32_t i = 0; i < sc->num_cqueues + 1; i++) {
            if (sc->compl_queues[i].intr_vec == iv) {
                if (command->cdw11 & (1 << 16))
                    sc->compl_queues[i].intr_en |=
                        NVME_CQ_INTCOAL;
                else
                    sc->compl_queues[i].intr_en &=
                        ~NVME_CQ_INTCOAL;
            }
        }
        break;
    case NVME_FEAT_WRITE_ATOMICITY:
        DPRINTF(("  write atomicity 0x%x\r\n", command->cdw11));
        break;
    case NVME_FEAT_ASYNC_EVENT_CONFIGURATION:
        DPRINTF(("  async event configuration 0x%x\r\n",
            command->cdw11));
        sc->async_ev_config = command->cdw11;
        break;
    case NVME_FEAT_SOFTWARE_PROGRESS_MARKER:
        DPRINTF(("  software progress marker 0x%x\r\n",
            command->cdw11));
        break;
    case 0x0C: /* Autonomous Power State Transition */
        DPRINTF(("  autonomous power state transition 0x%x\r\n",
            command->cdw11));
        break;
    default:
        WPRINTF(("%s invalid feature\r\n", __func__));
        pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD);
        return (1);
    }

    pci_nvme_status_genc(&compl->status, NVME_SC_SUCCESS);
    return (1);
}
static int
nvme_opc_get_features(struct pci_nvme_softc* sc, struct nvme_command* command,
    struct nvme_completion* compl)
{
    int feature = command->cdw10 & 0xFF;

    DPRINTF(("%s feature 0x%x\r\n", __func__, feature));

    compl->cdw0 = 0;

    switch (feature) {
    case NVME_FEAT_ARBITRATION:
        DPRINTF(("  arbitration\r\n"));
        break;
    case NVME_FEAT_POWER_MANAGEMENT:
        DPRINTF(("  power management\r\n"));
        break;
    case NVME_FEAT_LBA_RANGE_TYPE:
        DPRINTF(("  lba range\r\n"));
        break;
    case NVME_FEAT_TEMPERATURE_THRESHOLD:
        DPRINTF(("  temperature threshold\r\n"));
        switch ((command->cdw11 >> 20) & 0x3) {
        case 0:
            /* Over temp threshold */
            compl->cdw0 = 0xFFFF;
            break;
        case 1:
            /* Under temp threshold */
            compl->cdw0 = 0;
            break;
        default:
            WPRINTF(("  invalid threshold type select\r\n"));
            pci_nvme_status_genc(&compl->status,
                NVME_SC_INVALID_FIELD);
            return (1);
        }
        break;
    case NVME_FEAT_ERROR_RECOVERY:
        DPRINTF(("  error recovery\r\n"));
        break;
    case NVME_FEAT_VOLATILE_WRITE_CACHE:
        DPRINTF(("  volatile write cache\r\n"));
        break;
    case NVME_FEAT_NUMBER_OF_QUEUES:
        compl->cdw0 = NVME_FEATURE_NUM_QUEUES(sc);
        DPRINTF(("  number of queues (submit %u, completion %u)\r\n",
            compl->cdw0 & 0xFFFF,
            (compl->cdw0 >> 16) & 0xFFFF));
        break;
    case NVME_FEAT_INTERRUPT_COALESCING:
        DPRINTF(("  interrupt coalescing\r\n"));
        break;
    case NVME_FEAT_INTERRUPT_VECTOR_CONFIGURATION:
        DPRINTF(("  interrupt vector configuration\r\n"));
        break;
    case NVME_FEAT_WRITE_ATOMICITY:
        DPRINTF(("  write atomicity\r\n"));
        break;
    case NVME_FEAT_ASYNC_EVENT_CONFIGURATION:
        DPRINTF(("  async event configuration\r\n"));
        sc->async_ev_config = command->cdw11;
        break;
    case NVME_FEAT_SOFTWARE_PROGRESS_MARKER:
        DPRINTF(("  software progress marker\r\n"));
        break;
    case 0x0C: /* Autonomous Power State Transition */
        DPRINTF(("  autonomous power state transition\r\n"));
        break;
    default:
        WPRINTF(("%s invalid feature 0x%x\r\n", __func__, feature));
        pci_nvme_status_genc(&compl->status, NVME_SC_INVALID_FIELD);
        return (1);
    }

    pci_nvme_status_genc(&compl->status, NVME_SC_SUCCESS);
    return (1);
}
static int
nvme_opc_abort(struct pci_nvme_softc* sc, struct nvme_command* command,
    struct nvme_completion* compl)
{
    DPRINTF(("%s submission queue %u, command ID 0x%x\r\n", __func__,
        command->cdw10 & 0xFFFF, (command->cdw10 >> 16) & 0xFFFF));

    /* TODO: search for the command ID and abort it */

    compl->cdw0 = 1;
    pci_nvme_status_genc(&compl->status, NVME_SC_SUCCESS);
    return (1);
}
static int
nvme_opc_async_event_req(struct pci_nvme_softc* sc,
    struct nvme_command* command, struct nvme_completion* compl)
{
    DPRINTF(("%s async event request 0x%x\r\n", __func__, command->cdw11));

    /*
     * TODO: raise events when they happen based on the Set Features cmd.
     * These events happen async, so only set completion successful if
     * there is an event reflective of the request to get event.
     */
    pci_nvme_status_tc(&compl->status, NVME_SCT_COMMAND_SPECIFIC,
        NVME_SC_ASYNC_EVENT_REQUEST_LIMIT_EXCEEDED);
    return (0);
}
static void
pci_nvme_handle_admin_cmd(struct pci_nvme_softc* sc, uint64_t value)
{
    struct nvme_completion compl;
    struct nvme_command *cmd;
    struct nvme_submission_queue *sq;
    struct nvme_completion_queue *cq;
    int do_intr = 0;
    uint16_t sqhead;

    DPRINTF(("%s index %u\r\n", __func__, (uint32_t)value));

    sq = &sc->submit_queues[0];

    sqhead = atomic_load_acq_short(&sq->head);

    if (atomic_testandset_int(&sq->busy, 1)) {
        DPRINTF(("%s SQ busy, head %u, tail %u\r\n",
            __func__, sqhead, sq->tail));
        return;
    }

    DPRINTF(("sqhead %u, tail %u\r\n", sqhead, sq->tail));

    while (sqhead != atomic_load_acq_short(&sq->tail)) {
        cmd = &(sq->qbase)[sqhead];
        memset(&compl, 0, sizeof(compl));

        switch (cmd->opc) {
        case NVME_OPC_DELETE_IO_SQ:
            DPRINTF(("%s command DELETE_IO_SQ\r\n", __func__));
            do_intr |= nvme_opc_delete_io_sq(sc, cmd, &compl);
            break;
        case NVME_OPC_CREATE_IO_SQ:
            DPRINTF(("%s command CREATE_IO_SQ\r\n", __func__));
            do_intr |= nvme_opc_create_io_sq(sc, cmd, &compl);
            break;
        case NVME_OPC_DELETE_IO_CQ:
            DPRINTF(("%s command DELETE_IO_CQ\r\n", __func__));
            do_intr |= nvme_opc_delete_io_cq(sc, cmd, &compl);
            break;
        case NVME_OPC_CREATE_IO_CQ:
            DPRINTF(("%s command CREATE_IO_CQ\r\n", __func__));
            do_intr |= nvme_opc_create_io_cq(sc, cmd, &compl);
            break;
        case NVME_OPC_GET_LOG_PAGE:
            DPRINTF(("%s command GET_LOG_PAGE\r\n", __func__));
            do_intr |= nvme_opc_get_log_page(sc, cmd, &compl);
            break;
        case NVME_OPC_IDENTIFY:
            DPRINTF(("%s command IDENTIFY\r\n", __func__));
            do_intr |= nvme_opc_identify(sc, cmd, &compl);
            break;
        case NVME_OPC_ABORT:
            DPRINTF(("%s command ABORT\r\n", __func__));
            do_intr |= nvme_opc_abort(sc, cmd, &compl);
            break;
        case NVME_OPC_SET_FEATURES:
            DPRINTF(("%s command SET_FEATURES\r\n", __func__));
            do_intr |= nvme_opc_set_features(sc, cmd, &compl);
            break;
        case NVME_OPC_GET_FEATURES:
            DPRINTF(("%s command GET_FEATURES\r\n", __func__));
            do_intr |= nvme_opc_get_features(sc, cmd, &compl);
            break;
        case NVME_OPC_ASYNC_EVENT_REQUEST:
            DPRINTF(("%s command ASYNC_EVENT_REQ\r\n", __func__));
            /* XXX don't care, unhandled for now
            do_intr |= nvme_opc_async_event_req(sc, cmd, &compl);
            */
            break;
        default:
            WPRINTF(("0x%x command is not implemented\r\n",
                cmd->opc));
        }

        /* for now skip async event generation */
        if (cmd->opc != NVME_OPC_ASYNC_EVENT_REQUEST) {
            struct nvme_completion *cp;
            int phase;

            cq = &sc->compl_queues[0];

            cp = &(cq->qbase)[cq->tail];
            cp->cdw0 = compl.cdw0;
            cp->sqid = 0;
            cp->sqhd = sqhead;
            cp->cid = cmd->cid;

            phase = NVME_STATUS_GET_P(cp->status);
            cp->status = compl.status;
            pci_nvme_toggle_phase(&cp->status, phase);

            cq->tail = (cq->tail + 1) % cq->size;
        }
        sqhead = (sqhead + 1) % sq->size;
    }

    DPRINTF(("setting sqhead %u\r\n", sqhead));
    atomic_store_short(&sq->head, sqhead);
    atomic_store_int(&sq->busy, 0);

    if (do_intr)
        pci_generate_msix(sc->nsc_pi, 0);
}
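/*
 * Phase-bit example for the completion path above: the guest zeroes the
 * CQ memory, so the first pass through the ring posts entries with P = 1;
 * each time the tail wraps, the posted phase flips, letting the guest spot
 * new entries without reading a controller-side register.
 */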
static int
pci_nvme_append_iov_req(struct pci_nvme_softc *sc, struct pci_nvme_ioreq *req,
    uint64_t gpaddr, size_t size, int do_write, uint64_t lba)
{
    int iovidx;

    if (req != NULL) {
        /* concatenate contig block-iovs to minimize number of iovs */
        if ((req->prev_gpaddr + req->prev_size) == gpaddr) {
            iovidx = req->io_req.br_iovcnt - 1;

            req->io_req.br_iov[iovidx].iov_base =
                paddr_guest2host(req->sc->nsc_pi->pi_vmctx,
                    req->prev_gpaddr, size);

            req->prev_size += size;
            req->io_req.br_resid += size;

            req->io_req.br_iov[iovidx].iov_len = req->prev_size;
        } else {
            pthread_mutex_lock(&req->mtx);

            iovidx = req->io_req.br_iovcnt;
            if (iovidx == NVME_MAX_BLOCKIOVS) {
                int err = 0;

                DPRINTF(("large I/O, doing partial req\r\n"));

                iovidx = 0;
                req->io_req.br_iovcnt = 0;

                req->io_req.br_callback = pci_nvme_io_partial;

                if (!do_write)
                    err = blockif_read(sc->nvstore.ctx,
                        &req->io_req);
                else
                    err = blockif_write(sc->nvstore.ctx,
                        &req->io_req);

                /* wait until req completes before cont */
                if (err == 0)
                    pthread_cond_wait(&req->cv, &req->mtx);

                req->io_req.br_offset = lba;
                req->io_req.br_resid = 0;
                req->io_req.br_param = req;
            }

            req->io_req.br_iov[iovidx].iov_base =
                paddr_guest2host(req->sc->nsc_pi->pi_vmctx,
                    gpaddr, size);

            req->io_req.br_iov[iovidx].iov_len = size;

            req->prev_gpaddr = gpaddr;
            req->prev_size = size;
            req->io_req.br_resid += size;

            req->io_req.br_iovcnt++;

            pthread_mutex_unlock(&req->mtx);
        }
    } else {
        /* RAM buffer: read/write directly */
        void *p = sc->nvstore.ctx;
        void *gptr;

        if ((lba + size) > sc->nvstore.size) {
            WPRINTF(("%s write would overflow RAM\r\n", __func__));
            return (-1);
        }

        p = (void *)((uintptr_t)p + (uintptr_t)lba);
        gptr = paddr_guest2host(sc->nsc_pi->pi_vmctx, gpaddr, size);
        if (do_write)
            memcpy(p, gptr, size);
        else
            memcpy(gptr, p, size);
    }
    return (0);
}
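/*
 * Illustration of the iov-concatenation branch above: if a request's PRP
 * entries are guest-physically contiguous (say 0x1000 followed by 0x2000
 * with 4 KiB pages), the second entry extends the previous iovec to 8 KiB
 * instead of consuming one of the NVME_MAX_BLOCKIOVS slots.
 */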
static void
pci_nvme_set_completion(struct pci_nvme_softc *sc,
    struct nvme_submission_queue *sq, int sqid, uint16_t cid,
    uint32_t cdw0, uint16_t status, int ignore_busy)
{
    struct nvme_completion_queue *cq = &sc->compl_queues[sq->cqid];
    struct nvme_completion *compl;
    int do_intr = 0;
    int phase;

    DPRINTF(("%s sqid %d cqid %u cid %u status: 0x%x 0x%x\r\n",
        __func__, sqid, sq->cqid, cid, NVME_STATUS_GET_SCT(status),
        NVME_STATUS_GET_SC(status)));

    pthread_mutex_lock(&cq->mtx);

    assert(cq->qbase != NULL);

    compl = &cq->qbase[cq->tail];

    compl->sqhd = atomic_load_acq_short(&sq->head);
    compl->sqid = sqid;
    compl->cid = cid;

    /* toggle phase to notify guest of completion */
    phase = NVME_STATUS_GET_P(compl->status);
    compl->status = status;
    pci_nvme_toggle_phase(&compl->status, phase);

    cq->tail = (cq->tail + 1) % cq->size;

    if (cq->intr_en & NVME_CQ_INTEN)
        do_intr = 1;

    pthread_mutex_unlock(&cq->mtx);

    if (ignore_busy || !atomic_load_acq_int(&sq->busy))
        if (do_intr)
            pci_generate_msix(sc->nsc_pi, cq->intr_vec);
}
static void
pci_nvme_release_ioreq(struct pci_nvme_softc *sc, struct pci_nvme_ioreq *req)
{
    req->sc = NULL;
    req->nvme_sq = NULL;
    req->sqid = 0;

    pthread_mutex_lock(&sc->mtx);

    req->next = sc->ioreqs_free;
    sc->ioreqs_free = req;
    sc->pending_ios--;

    /* when no more IO pending, can set to ready if device reset/enabled */
    if (sc->pending_ios == 0 &&
        NVME_CC_GET_EN(sc->regs.cc) && !(NVME_CSTS_GET_RDY(sc->regs.csts)))
        sc->regs.csts |= NVME_CSTS_RDY;

    pthread_mutex_unlock(&sc->mtx);

    sem_post(&sc->iosemlock);
}
static struct pci_nvme_ioreq *
pci_nvme_get_ioreq(struct pci_nvme_softc *sc)
{
    struct pci_nvme_ioreq *req = NULL;

    sem_wait(&sc->iosemlock);
    pthread_mutex_lock(&sc->mtx);

    req = sc->ioreqs_free;
    assert(req != NULL);

    sc->ioreqs_free = req->next;
    req->next = NULL;
    req->sc = sc;

    sc->pending_ios++;

    pthread_mutex_unlock(&sc->mtx);

    req->io_req.br_iovcnt = 0;
    req->io_req.br_offset = 0;
    req->io_req.br_resid = 0;
    req->io_req.br_param = req;
    req->prev_gpaddr = 0;
    req->prev_size = 0;

    return (req);
}
static void
pci_nvme_io_done(struct blockif_req *br, int err)
{
    struct pci_nvme_ioreq *req = br->br_param;
    struct nvme_submission_queue *sq = req->nvme_sq;
    uint16_t code, status;

    DPRINTF(("%s error %d %s\r\n", __func__, err, strerror(err)));

    /* TODO return correct error */
    code = err ? NVME_SC_DATA_TRANSFER_ERROR : NVME_SC_SUCCESS;
    pci_nvme_status_genc(&status, code);

    pci_nvme_set_completion(req->sc, sq, req->sqid, req->cid, 0, status, 0);
    pci_nvme_release_ioreq(req->sc, req);
}
static void
pci_nvme_io_partial(struct blockif_req *br, int err)
{
    struct pci_nvme_ioreq *req = br->br_param;

    DPRINTF(("%s error %d %s\r\n", __func__, err, strerror(err)));

    pthread_cond_signal(&req->cv);
}
static void
pci_nvme_handle_io_cmd(struct pci_nvme_softc* sc, uint16_t idx)
{
    struct nvme_submission_queue *sq;
    uint16_t status;
    uint16_t sqhead;
    int err;

    /* handle all submissions up to sq->tail index */
    sq = &sc->submit_queues[idx];

    if (atomic_testandset_int(&sq->busy, 1)) {
        DPRINTF(("%s sqid %u busy\r\n", __func__, idx));
        return;
    }

    sqhead = atomic_load_acq_short(&sq->head);

    DPRINTF(("nvme_handle_io qid %u head %u tail %u cmdlist %p\r\n",
        idx, sqhead, sq->tail, sq->qbase));

    while (sqhead != atomic_load_acq_short(&sq->tail)) {
        struct nvme_command *cmd;
        struct pci_nvme_ioreq *req = NULL;
        uint64_t lba;
        uint64_t nblocks, bytes, size, cpsz;

        /* TODO: support scatter gather list handling */
        cmd = &sq->qbase[sqhead];
        sqhead = (sqhead + 1) % sq->size;
        err = 0;

        lba = ((uint64_t)cmd->cdw11 << 32) | cmd->cdw10;

        if (cmd->opc == NVME_OPC_FLUSH) {
            pci_nvme_status_genc(&status, NVME_SC_SUCCESS);
            pci_nvme_set_completion(sc, sq, idx, cmd->cid, 0,
                status, 1);
            continue;
        } else if (cmd->opc == 0x08) {
            /* TODO: write zeroes */
            WPRINTF(("%s write zeroes lba 0x%lx blocks %u\r\n",
                __func__, lba, cmd->cdw12 & 0xFFFF));
            pci_nvme_status_genc(&status, NVME_SC_SUCCESS);
            pci_nvme_set_completion(sc, sq, idx, cmd->cid, 0,
                status, 1);
            continue;
        }

        nblocks = (cmd->cdw12 & 0xFFFF) + 1;
        bytes = nblocks * sc->nvstore.sectsz;

        if (sc->nvstore.type == NVME_STOR_BLOCKIF) {
            req = pci_nvme_get_ioreq(sc);
            req->nvme_sq = sq;
            req->sqid = idx;
        }

        /*
         * If data starts mid-page and flows into the next page, then
         * increase page count
         */
        DPRINTF(("[h%u:t%u:n%u] %s starting LBA 0x%lx blocks %lu "
            "(%lu-bytes)\r\n",
            sqhead==0 ? sq->size-1 : sqhead-1, sq->tail, sq->size,
            cmd->opc == NVME_OPC_WRITE ? "WRITE" : "READ",
            lba, nblocks, bytes));

        cmd->prp1 &= ~(0x03UL);
        cmd->prp2 &= ~(0x03UL);

        DPRINTF((" prp1 0x%lx prp2 0x%lx\r\n", cmd->prp1, cmd->prp2));

        size = bytes;
        lba *= sc->nvstore.sectsz;

        cpsz = PAGE_SIZE - (cmd->prp1 % PAGE_SIZE);
        if (cpsz > bytes)
            cpsz = bytes;

        if (req != NULL) {
            req->io_req.br_offset = ((uint64_t)cmd->cdw11 << 32) |
                cmd->cdw10;
            req->opc = cmd->opc;
            req->cid = cmd->cid;
            req->nsid = cmd->nsid;
        }

        err = pci_nvme_append_iov_req(sc, req, cmd->prp1, cpsz,
            cmd->opc == NVME_OPC_WRITE, lba);
        lba += cpsz;
        size -= cpsz;

        if (size == 0) {
            /* the whole transfer fit within prp1's page */
        } else if (size <= PAGE_SIZE) {
            /* prp2 is second (and final) page in transfer */
            err = pci_nvme_append_iov_req(sc, req, cmd->prp2,
                size,
                cmd->opc == NVME_OPC_WRITE,
                lba);
        } else {
            int i = 0;
            uint64_t *prp_list;

            /* prp2 is pointer to a physical region page list */
            prp_list = paddr_guest2host(sc->nsc_pi->pi_vmctx,
                cmd->prp2, PAGE_SIZE);

            while (size != 0) {
                cpsz = MIN(size, PAGE_SIZE);

                /*
                 * Move to linked physical region page list
                 * in the last item
                 */
                if (i == (NVME_PRP2_ITEMS-1) &&
                    size > PAGE_SIZE) {
                    assert((prp_list[i] & (PAGE_SIZE-1)) == 0);
                    prp_list = paddr_guest2host(
                        sc->nsc_pi->pi_vmctx,
                        prp_list[i], PAGE_SIZE);
                    i = 0;
                }
                if (prp_list[i] == 0) {
                    WPRINTF(("PRP2[%d] = 0 !!!\r\n", i));
                    err = 1;
                    break;
                }

                err = pci_nvme_append_iov_req(sc, req,
                    prp_list[i], cpsz,
                    cmd->opc == NVME_OPC_WRITE, lba);
                if (err)
                    break;

                lba += cpsz;
                size -= cpsz;
                i++;
            }
        }

        if (sc->nvstore.type == NVME_STOR_RAM) {
            uint16_t code, status;

            code = err ? NVME_SC_LBA_OUT_OF_RANGE :
                NVME_SC_SUCCESS;
            pci_nvme_status_genc(&status, code);

            pci_nvme_set_completion(sc, sq, idx, cmd->cid, 0,
                status, 1);
            continue;
        }

        req->io_req.br_callback = pci_nvme_io_done;

        err = 0;
        switch (cmd->opc) {
        case NVME_OPC_READ:
            err = blockif_read(sc->nvstore.ctx, &req->io_req);
            break;
        case NVME_OPC_WRITE:
            err = blockif_write(sc->nvstore.ctx, &req->io_req);
            break;
        default:
            WPRINTF(("%s unhandled io command 0x%x\r\n",
                __func__, cmd->opc));
            err = 1;
        }

        if (err) {
            pci_nvme_status_genc(&status,
                NVME_SC_DATA_TRANSFER_ERROR);
            pci_nvme_set_completion(sc, sq, idx, cmd->cid, 0,
                status, 1);
            pci_nvme_release_ioreq(sc, req);
        }
    }

    atomic_store_short(&sq->head, sqhead);
    atomic_store_int(&sq->busy, 0);
}
static void
pci_nvme_handle_doorbell(struct vmctx *ctx, struct pci_nvme_softc* sc,
    uint64_t idx, int is_sq, uint64_t value)
{
    DPRINTF(("nvme doorbell %lu, %s, val 0x%lx\r\n",
        idx, is_sq ? "SQ" : "CQ", value & 0xFFFF));

    if (is_sq) {
        atomic_store_short(&sc->submit_queues[idx].tail,
            (uint16_t)value);

        if (idx == 0) {
            pci_nvme_handle_admin_cmd(sc, value);
        } else {
            /* submission queue; handle new entries in SQ */
            if (idx > sc->num_squeues) {
                WPRINTF(("%s SQ index %lu overflow from "
                    "guest (max %u)\r\n",
                    __func__, idx, sc->num_squeues));
                return;
            }
            pci_nvme_handle_io_cmd(sc, (uint16_t)idx);
        }
    } else {
        if (idx > sc->num_cqueues) {
            WPRINTF(("%s queue index %lu overflow from "
                "guest (max %u)\r\n",
                __func__, idx, sc->num_cqueues));
            return;
        }

        sc->compl_queues[idx].head = (uint16_t)value;
    }
}
static void
pci_nvme_bar0_reg_dumps(const char *func, uint64_t offset, int iswrite)
{
    const char *s = iswrite ? "WRITE" : "READ";

    switch (offset) {
    case NVME_CR_CAP_LOW:
        DPRINTF(("%s %s NVME_CR_CAP_LOW\r\n", func, s));
        break;
    case NVME_CR_CAP_HI:
        DPRINTF(("%s %s NVME_CR_CAP_HI\r\n", func, s));
        break;
    case NVME_CR_VS:
        DPRINTF(("%s %s NVME_CR_VS\r\n", func, s));
        break;
    case NVME_CR_INTMS:
        DPRINTF(("%s %s NVME_CR_INTMS\r\n", func, s));
        break;
    case NVME_CR_INTMC:
        DPRINTF(("%s %s NVME_CR_INTMC\r\n", func, s));
        break;
    case NVME_CR_CC:
        DPRINTF(("%s %s NVME_CR_CC\r\n", func, s));
        break;
    case NVME_CR_CSTS:
        DPRINTF(("%s %s NVME_CR_CSTS\r\n", func, s));
        break;
    case NVME_CR_NSSR:
        DPRINTF(("%s %s NVME_CR_NSSR\r\n", func, s));
        break;
    case NVME_CR_AQA:
        DPRINTF(("%s %s NVME_CR_AQA\r\n", func, s));
        break;
    case NVME_CR_ASQ_LOW:
        DPRINTF(("%s %s NVME_CR_ASQ_LOW\r\n", func, s));
        break;
    case NVME_CR_ASQ_HI:
        DPRINTF(("%s %s NVME_CR_ASQ_HI\r\n", func, s));
        break;
    case NVME_CR_ACQ_LOW:
        DPRINTF(("%s %s NVME_CR_ACQ_LOW\r\n", func, s));
        break;
    case NVME_CR_ACQ_HI:
        DPRINTF(("%s %s NVME_CR_ACQ_HI\r\n", func, s));
        break;
    default:
        DPRINTF(("unknown nvme bar-0 offset 0x%lx\r\n", offset));
    }
}
static void
pci_nvme_write_bar_0(struct vmctx *ctx, struct pci_nvme_softc* sc,
    uint64_t offset, int size, uint64_t value)
{
    uint32_t ccreg;

    if (offset >= NVME_DOORBELL_OFFSET) {
        uint64_t belloffset = offset - NVME_DOORBELL_OFFSET;
        uint64_t idx = belloffset / 8; /* door bell size = 2*int */
        int is_sq = (belloffset % 8) < 4;

        if (belloffset > ((sc->max_queues+1) * 8 - 4)) {
            WPRINTF(("guest attempted an overflow write offset "
                "0x%lx, val 0x%lx in %s",
                offset, value, __func__));
            return;
        }

        pci_nvme_handle_doorbell(ctx, sc, idx, is_sq, value);
        return;
    }

    DPRINTF(("nvme-write offset 0x%lx, size %d, value 0x%lx\r\n",
        offset, size, value));

    if (size != 4) {
        WPRINTF(("guest wrote invalid size %d (offset 0x%lx, "
            "val 0x%lx) to bar0 in %s",
            size, offset, value, __func__));
        /* TODO: shutdown device */
        return;
    }

    pci_nvme_bar0_reg_dumps(__func__, offset, 1);

    pthread_mutex_lock(&sc->mtx);

    switch (offset) {
    case NVME_CR_CAP_LOW:
    case NVME_CR_CAP_HI:
        /* readonly */
        break;
    case NVME_CR_INTMS:
        /* MSI-X, so ignore */
        break;
    case NVME_CR_INTMC:
        /* MSI-X, so ignore */
        break;
    case NVME_CR_CC:
        ccreg = (uint32_t)value;

        DPRINTF(("%s NVME_CR_CC en %x css %x shn %x iosqes %u "
            "iocqes %u\r\n",
            __func__,
            NVME_CC_GET_EN(ccreg), NVME_CC_GET_CSS(ccreg),
            NVME_CC_GET_SHN(ccreg), NVME_CC_GET_IOSQES(ccreg),
            NVME_CC_GET_IOCQES(ccreg)));

        if (NVME_CC_GET_SHN(ccreg)) {
            /* perform shutdown - flush out data to backend */
            sc->regs.csts &= ~(NVME_CSTS_REG_SHST_MASK <<
                NVME_CSTS_REG_SHST_SHIFT);
            sc->regs.csts |= NVME_SHST_COMPLETE <<
                NVME_CSTS_REG_SHST_SHIFT;
        }
        if (NVME_CC_GET_EN(ccreg) != NVME_CC_GET_EN(sc->regs.cc)) {
            if (NVME_CC_GET_EN(ccreg) == 0)
                /* transition 1->0 causes controller reset */
                pci_nvme_reset_locked(sc);
            else
                pci_nvme_init_controller(ctx, sc);
        }

        /* Insert the iocqes, iosqes and en bits from the write */
        sc->regs.cc &= ~NVME_CC_WRITE_MASK;
        sc->regs.cc |= ccreg & NVME_CC_WRITE_MASK;
        if (NVME_CC_GET_EN(ccreg) == 0) {
            /* Insert the ams, mps and css bit fields */
            sc->regs.cc &= ~NVME_CC_NEN_WRITE_MASK;
            sc->regs.cc |= ccreg & NVME_CC_NEN_WRITE_MASK;
            sc->regs.csts &= ~NVME_CSTS_RDY;
        } else if (sc->pending_ios == 0) {
            sc->regs.csts |= NVME_CSTS_RDY;
        }
        break;
    case NVME_CR_NSSR:
        /* ignore writes; don't support subsystem reset */
        break;
    case NVME_CR_AQA:
        sc->regs.aqa = (uint32_t)value;
        break;
    case NVME_CR_ASQ_LOW:
        sc->regs.asq = (sc->regs.asq & (0xFFFFFFFF00000000)) |
            (0xFFFFF000 & value);
        break;
    case NVME_CR_ASQ_HI:
        sc->regs.asq = (sc->regs.asq & (0x00000000FFFFFFFF)) |
            (value << 32);
        break;
    case NVME_CR_ACQ_LOW:
        sc->regs.acq = (sc->regs.acq & (0xFFFFFFFF00000000)) |
            (0xFFFFF000 & value);
        break;
    case NVME_CR_ACQ_HI:
        sc->regs.acq = (sc->regs.acq & (0x00000000FFFFFFFF)) |
            (value << 32);
        break;
    default:
        DPRINTF(("%s unknown offset 0x%lx, value 0x%lx size %d\r\n",
            __func__, offset, value, size));
    }

    pthread_mutex_unlock(&sc->mtx);
}
static void
pci_nvme_write(struct vmctx *ctx, int vcpu, struct pci_devinst *pi,
    int baridx, uint64_t offset, int size, uint64_t value)
{
    struct pci_nvme_softc* sc = pi->pi_arg;

    if (baridx == pci_msix_table_bar(pi) ||
        baridx == pci_msix_pba_bar(pi)) {
        DPRINTF(("nvme-write baridx %d, msix: off 0x%lx, size %d, "
            " value 0x%lx\r\n", baridx, offset, size, value));

        pci_emul_msix_twrite(pi, offset, size, value);
        return;
    }

    switch (baridx) {
    case 0:
        pci_nvme_write_bar_0(ctx, sc, offset, size, value);
        break;
    default:
        DPRINTF(("%s unknown baridx %d, val 0x%lx\r\n",
            __func__, baridx, value));
    }
}
static uint64_t
pci_nvme_read_bar_0(struct pci_nvme_softc* sc, uint64_t offset, int size)
{
    uint64_t value = 0;

    pci_nvme_bar0_reg_dumps(__func__, offset, 0);

    if (offset < NVME_DOORBELL_OFFSET) {
        void *p = &(sc->regs);
        pthread_mutex_lock(&sc->mtx);
        memcpy(&value, (void *)((uintptr_t)p + offset), size);
        pthread_mutex_unlock(&sc->mtx);
    } else {
        WPRINTF(("pci_nvme: read invalid offset %ld\r\n", offset));
    }

    if (size <= 4)
        value &= 0xFFFFFFFF;

    DPRINTF((" nvme-read offset 0x%lx, size %d -> value 0x%x\r\n",
        offset, size, (uint32_t)value));

    return (value);
}
static uint64_t
pci_nvme_read(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int baridx,
    uint64_t offset, int size)
{
    struct pci_nvme_softc* sc = pi->pi_arg;

    if (baridx == pci_msix_table_bar(pi) ||
        baridx == pci_msix_pba_bar(pi)) {
        DPRINTF(("nvme-read bar: %d, msix: regoff 0x%lx, size %d\r\n",
            baridx, offset, size));

        return pci_emul_msix_tread(pi, offset, size);
    }

    switch (baridx) {
    case 0:
        return pci_nvme_read_bar_0(sc, offset, size);
    default:
        DPRINTF(("unknown bar %d, 0x%lx\r\n", baridx, offset));
    }

    return (0);
}
static int
pci_nvme_parse_opts(struct pci_nvme_softc *sc, char *opts)
{
    char bident[sizeof("XX:X:X")];
    char *uopt, *xopts, *config;
    uint32_t sectsz;
    int optidx;

    sc->max_queues = NVME_QUEUES;
    sc->max_qentries = NVME_MAX_QENTRIES;
    sc->ioslots = NVME_IOSLOTS;
    sc->num_squeues = sc->max_queues;
    sc->num_cqueues = sc->max_queues;
    sectsz = 0;

    uopt = strdup(opts);
    optidx = 0;
    snprintf(sc->ctrldata.sn, sizeof(sc->ctrldata.sn),
        "NVME-%d-%d", sc->nsc_pi->pi_slot, sc->nsc_pi->pi_func);
    for (xopts = strtok(uopt, ",");
         xopts != NULL;
         xopts = strtok(NULL, ",")) {

        if ((config = strchr(xopts, '=')) != NULL)
            *config++ = '\0';

        if (!strcmp("maxq", xopts)) {
            sc->max_queues = atoi(config);
        } else if (!strcmp("qsz", xopts)) {
            sc->max_qentries = atoi(config);
        } else if (!strcmp("ioslots", xopts)) {
            sc->ioslots = atoi(config);
        } else if (!strcmp("sectsz", xopts)) {
            sectsz = atoi(config);
        } else if (!strcmp("ser", xopts)) {
            /*
             * This field indicates the Product Serial Number in
             * 7-bit ASCII; unused bytes should be space characters.
             */
            cpywithpad((char *)sc->ctrldata.sn,
                sizeof(sc->ctrldata.sn), config, ' ');
        } else if (!strcmp("ram", xopts)) {
            uint64_t sz = strtoull(&xopts[4], NULL, 10);

            sc->nvstore.type = NVME_STOR_RAM;
            sc->nvstore.size = sz * 1024 * 1024;
            sc->nvstore.ctx = calloc(1, sc->nvstore.size);
            sc->nvstore.sectsz = 4096;
            sc->nvstore.sectsz_bits = 12;
            if (sc->nvstore.ctx == NULL) {
                perror("Unable to allocate RAM");
                return (-1);
            }
        } else if (optidx == 0) {
            snprintf(bident, sizeof(bident), "%d:%d",
                sc->nsc_pi->pi_slot, sc->nsc_pi->pi_func);
            sc->nvstore.ctx = blockif_open(xopts, bident);
            if (sc->nvstore.ctx == NULL) {
                perror("Could not open backing file");
                return (-1);
            }
            sc->nvstore.type = NVME_STOR_BLOCKIF;
            sc->nvstore.size = blockif_size(sc->nvstore.ctx);
        } else {
            fprintf(stderr, "Invalid option %s\n", xopts);
            return (-1);
        }

        optidx++;
    }
    free(uopt);

    if (sc->nvstore.ctx == NULL || sc->nvstore.size == 0) {
        fprintf(stderr, "backing store not specified\n");
        return (-1);
    }
    if (sectsz == 512 || sectsz == 4096 || sectsz == 8192)
        sc->nvstore.sectsz = sectsz;
    else if (sc->nvstore.type != NVME_STOR_RAM)
        sc->nvstore.sectsz = blockif_sectsz(sc->nvstore.ctx);
    for (sc->nvstore.sectsz_bits = 9;
         (1 << sc->nvstore.sectsz_bits) < sc->nvstore.sectsz;
         sc->nvstore.sectsz_bits++);

    if (sc->max_queues <= 0 || sc->max_queues > NVME_QUEUES)
        sc->max_queues = NVME_QUEUES;

    if (sc->max_qentries <= 0) {
        fprintf(stderr, "Invalid qsz option\n");
        return (-1);
    }
    if (sc->ioslots <= 0) {
        fprintf(stderr, "Invalid ioslots option\n");
        return (-1);
    }

    return (0);
}
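/*
 * Example of the sectsz_bits search above: sectsz = 4096 stops the loop
 * at sectsz_bits = 12, since 1 << 12 == 4096.
 */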
static int
pci_nvme_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
{
    struct pci_nvme_softc *sc;
    uint32_t pci_membar_sz;
    int error;

    sc = calloc(1, sizeof(struct pci_nvme_softc));
    pi->pi_arg = sc;
    sc->nsc_pi = pi;

    error = pci_nvme_parse_opts(sc, opts);
    if (error < 0)
        goto done;

    sc->ioreqs = calloc(sc->ioslots, sizeof(struct pci_nvme_ioreq));
    for (int i = 0; i < sc->ioslots; i++) {
        if (i < (sc->ioslots-1))
            sc->ioreqs[i].next = &sc->ioreqs[i+1];
        pthread_mutex_init(&sc->ioreqs[i].mtx, NULL);
        pthread_cond_init(&sc->ioreqs[i].cv, NULL);
    }
    sc->ioreqs_free = sc->ioreqs;
    sc->intr_coales_aggr_thresh = 1;

    pci_set_cfgdata16(pi, PCIR_DEVICE, 0x0A0A);
    pci_set_cfgdata16(pi, PCIR_VENDOR, 0xFB5D);
    pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_STORAGE);
    pci_set_cfgdata8(pi, PCIR_SUBCLASS, PCIS_STORAGE_NVM);
    pci_set_cfgdata8(pi, PCIR_PROGIF,
        PCIP_STORAGE_NVM_ENTERPRISE_NVMHCI_1_0);

    /*
     * Allocate size of NVMe registers + doorbell space for all queues.
     *
     * The specification requires a minimum memory I/O window size of 16K.
     * The Windows driver will refuse to start a device with a smaller
     * size.
     */
    pci_membar_sz = sizeof(struct nvme_registers) +
        2 * sizeof(uint32_t) * (sc->max_queues + 1);
    pci_membar_sz = MAX(pci_membar_sz, NVME_MMIO_SPACE_MIN);

    DPRINTF(("nvme membar size: %u\r\n", pci_membar_sz));

    error = pci_emul_alloc_bar(pi, 0, PCIBAR_MEM64, pci_membar_sz);
    if (error) {
        WPRINTF(("%s pci alloc mem bar failed\r\n", __func__));
        goto done;
    }

    error = pci_emul_add_msixcap(pi, sc->max_queues + 1, NVME_MSIX_BAR);
    if (error) {
        WPRINTF(("%s pci add msixcap failed\r\n", __func__));
        goto done;
    }

    pthread_mutex_init(&sc->mtx, NULL);
    sem_init(&sc->iosemlock, 0, sc->ioslots);

    pci_nvme_reset(sc);
    pci_nvme_init_ctrldata(sc);
    pci_nvme_init_nsdata(sc);
    pci_nvme_init_logpages(sc);

    pci_lintr_request(pi);

done:
    return (error);
}
struct pci_devemu pci_de_nvme = {
    .pe_emu =       "nvme",
    .pe_init =      pci_nvme_init,
    .pe_barwrite =  pci_nvme_write,
    .pe_barread =   pci_nvme_read
};
PCI_EMUL_SET(pci_de_nvme);