2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2013 Zhixiang Yu <zcore@freebsd.org>
5 * Copyright (c) 2015-2016 Alexander Motin <mav@FreeBSD.org>
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
35 #include <sys/param.h>
36 #include <sys/linker_set.h>
39 #include <sys/ioctl.h>
42 #include <sys/endian.h>
44 #include <machine/vmm_snapshot.h>
56 #include <pthread_np.h>
65 #define DEF_PORTS 6 /* Intel ICH8 AHCI supports 6 ports */
66 #define MAX_PORTS 32 /* AHCI supports 32 ports */
68 #define PxSIG_ATA 0x00000101 /* ATA drive */
69 #define PxSIG_ATAPI 0xeb140101 /* ATAPI drive */
72 FIS_TYPE_REGH2D = 0x27, /* Register FIS - host to device */
73 FIS_TYPE_REGD2H = 0x34, /* Register FIS - device to host */
74 FIS_TYPE_DMAACT = 0x39, /* DMA activate FIS - device to host */
75 FIS_TYPE_DMASETUP = 0x41, /* DMA setup FIS - bidirectional */
76 FIS_TYPE_DATA = 0x46, /* Data FIS - bidirectional */
77 FIS_TYPE_BIST = 0x58, /* BIST activate FIS - bidirectional */
78 FIS_TYPE_PIOSETUP = 0x5F, /* PIO setup FIS - device to host */
79 FIS_TYPE_SETDEVBITS = 0xA1, /* Set dev bits FIS - device to host */
85 #define TEST_UNIT_READY 0x00
86 #define REQUEST_SENSE 0x03
88 #define START_STOP_UNIT 0x1B
89 #define PREVENT_ALLOW 0x1E
90 #define READ_CAPACITY 0x25
92 #define POSITION_TO_ELEMENT 0x2B
94 #define GET_EVENT_STATUS_NOTIFICATION 0x4A
95 #define MODE_SENSE_10 0x5A
96 #define REPORT_LUNS 0xA0
101 * SCSI mode page codes
103 #define MODEPAGE_RW_ERROR_RECOVERY 0x01
104 #define MODEPAGE_CD_CAPABILITIES 0x2A
109 #define ATA_SF_ENAB_SATA_SF 0x10
110 #define ATA_SATA_SF_AN 0x05
111 #define ATA_SF_DIS_SATA_SF 0x90
118 #define DPRINTF(format, arg...) do{fprintf(dbg, format, ##arg);fflush(dbg);}while(0)
120 #define DPRINTF(format, arg...)
122 #define WPRINTF(format, arg...) printf(format, ##arg)
124 #define AHCI_PORT_IDENT 20 + 1
/*
 * Per-request state for one in-flight blockif I/O issued on behalf of a
 * guest AHCI command slot.  (Fragment: this extract elides interior lines.)
 */
127 struct blockif_req io_req;
/* Back pointer to the owning port. */
128 struct ahci_port *io_pr;
/* Linkage on the port's free list (iofhd). */
129 STAILQ_ENTRY(ahci_ioreq) io_flist;
/* Linkage on the port's busy (in-flight) list (iobhd). */
130 TAILQ_ENTRY(ahci_ioreq) io_blist;
/*
 * Per-port state (fragment of struct ahci_port; this extract elides many
 * interior lines, including the AHCI port registers referenced elsewhere).
 */
140 struct blockif_ctxt *bctx;
141 struct pci_ahci_softc *pr_sc;
/* Model/serial string reported in IDENTIFY data. */
144 char ident[AHCI_PORT_IDENT];
/* Saved CFIS of the last failed command, returned via READ LOG page 0x10. */
151 uint8_t err_cfis[20];
178 struct ahci_ioreq *ioreq;
/* Free and busy lists of ahci_ioreq structures for this port. */
180 STAILQ_HEAD(ahci_fhead, ahci_ioreq) iofhd;
181 TAILQ_HEAD(ahci_bhead, ahci_ioreq) iobhd;
/*
 * AHCI command list entry (command header).  Most fields are elided in
 * this extract; flags, prdtl, prdbc and ctba are referenced elsewhere
 * in this file.
 */
184 struct ahci_cmd_hdr {
189 uint32_t reserved[4];
/* Physical region descriptor table entry (dba/dbc fields elided here). */
192 struct ahci_prdt_entry {
/* Low 22 bits of dbc hold the region byte count minus one. */
195 #define DBCMASK 0x3fffff
/*
 * Device softc: PCI instance plus per-port state array.
 * (Fragment: HBA registers and lock are elided in this extract.)
 */
199 struct pci_ahci_softc {
200 struct pci_devinst *asc_pi;
215 struct ahci_port port[MAX_PORTS];
217 #define ahci_ctx(sc) ((sc)->asc_pi->pi_vmctx)
219 static void ahci_handle_port(struct ahci_port *p);
/*
 * Convert a CD logical block address to minute/second fields
 * (75 frames per second, 60 seconds per minute).  The frame
 * component of the conversion is elided in this extract.
 */
221 static inline void lba_to_msf(uint8_t *buf, int lba)
224 buf[0] = (lba / 75) / 60;
225 buf[1] = (lba / 75) % 60;
230 * Generate HBA interrupts on global IS register write.
/*
 * NOTE(review): interior lines are elided in this extract.  The visible
 * logic recomputes the global IS from the per-port PxIS/PxIE state, then
 * either clears/asserts the legacy INTx line or generates per-port MSI
 * vectors depending on pci_msi_maxmsgnum().
 */
233 ahci_generate_intr(struct pci_ahci_softc *sc, uint32_t mask)
235 struct pci_devinst *pi = sc->asc_pi;
240 /* Update global IS from PxIS/PxIE. */
241 for (i = 0; i < sc->ports; i++) {
246 DPRINTF("%s(%08x) %08x", __func__, mask, sc->is);
248 /* If there is nothing enabled -- clear legacy interrupt and exit. */
249 if (sc->is == 0 || (sc->ghc & AHCI_GHC_IE) == 0) {
251 pci_lintr_deassert(pi);
257 /* If there is anything and no MSI -- assert legacy interrupt. */
258 nmsg = pci_msi_maxmsgnum(pi);
262 pci_lintr_assert(pi);
267 /* Assert respective MSIs for ports that were touched. */
268 for (i = 0; i < nmsg; i++) {
/* Ports with a dedicated MSI vector were handled by ahci_port_intr(). */
269 if (sc->ports <= nmsg || i < nmsg - 1)
272 mmask = 0xffffffff << i;
273 if (sc->is & mask && mmask & mask)
274 pci_generate_msi(pi, i);
279 * Generate HBA interrupt on specific port event.
/*
 * NOTE(review): interior lines are elided in this extract.  A port with
 * its own (non-shared) MSI vector raises that vector directly; otherwise
 * the port's bit is merged into the global IS and the shared last MSI
 * vector or legacy INTx is used.
 */
282 ahci_port_intr(struct ahci_port *p)
284 struct pci_ahci_softc *sc = p->pr_sc;
285 struct pci_devinst *pi = sc->asc_pi;
288 DPRINTF("%s(%d) %08x/%08x %08x", __func__,
289 p->port, p->is, p->ie, sc->is);
291 /* If there is nothing enabled -- we are done. */
292 if ((p->is & p->ie) == 0)
295 /* In case of non-shared MSI always generate interrupt. */
296 nmsg = pci_msi_maxmsgnum(pi);
297 if (sc->ports <= nmsg || p->port < nmsg - 1) {
298 sc->is |= (1 << p->port);
299 if ((sc->ghc & AHCI_GHC_IE) == 0)
301 pci_generate_msi(pi, p->port);
305 /* If IS for this port is already set -- do nothing. */
306 if (sc->is & (1 << p->port))
309 sc->is |= (1 << p->port);
311 /* If interrupts are enabled -- generate one. */
312 if ((sc->ghc & AHCI_GHC_IE) == 0)
315 pci_generate_msi(pi, nmsg - 1);
316 } else if (!sc->lintr) {
318 pci_lintr_assert(pi);
/*
 * Copy a device-to-host FIS into the guest-visible received-FIS area
 * (p->rfis) at the offset for its type.  Requires FIS receive to be
 * enabled (PxCMD.FRE).  The interrupt-request bit (fis[1] bit 6) selects
 * the corresponding PxIS cause; an error status adds the TFE cause.
 * (Extract elides interior lines, including where offset/len are set
 * and where the interrupt is actually delivered -- presumably via
 * ahci_port_intr(); confirm against the full source.)
 */
323 ahci_write_fis(struct ahci_port *p, enum sata_fis_type ft, uint8_t *fis)
325 int offset, len, irq;
327 if (p->rfis == NULL || !(p->cmd & AHCI_P_CMD_FRE))
331 case FIS_TYPE_REGD2H:
334 irq = (fis[1] & (1 << 6)) ? AHCI_P_IX_DHR : 0;
336 case FIS_TYPE_SETDEVBITS:
339 irq = (fis[1] & (1 << 6)) ? AHCI_P_IX_SDB : 0;
341 case FIS_TYPE_PIOSETUP:
344 irq = (fis[1] & (1 << 6)) ? AHCI_P_IX_PS : 0;
347 WPRINTF("unsupported fis type %d", ft);
350 if (fis[2] & ATA_S_ERROR) {
352 irq |= AHCI_P_IX_TFE;
354 memcpy(p->rfis + offset, fis, len);
/*
 * Post a zeroed PIO Setup FIS to the received-FIS area.
 * (Extract elides the fis[] declaration and braces.)
 */
364 ahci_write_fis_piosetup(struct ahci_port *p)
368 memset(fis, 0, sizeof(fis));
369 fis[0] = FIS_TYPE_PIOSETUP;
370 ahci_write_fis(p, FIS_TYPE_PIOSETUP, fis);
/*
 * Post a Set Device Bits FIS to complete an NCQ command in 'slot':
 * records the failing CFIS in err_cfis on error, reports the slot in
 * the SActive field (dword at fis+4) and clears the slot's PxSACT bit.
 * (Extract elides interior lines.)
 */
374 ahci_write_fis_sdb(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t tfd)
379 error = (tfd >> 8) & 0xff;
381 memset(fis, 0, sizeof(fis));
382 fis[0] = FIS_TYPE_SETDEVBITS;
386 if (fis[2] & ATA_S_ERROR) {
/* Remember which slot failed for READ LOG EXT page 0x10. */
387 p->err_cfis[0] = slot;
388 p->err_cfis[2] = tfd;
389 p->err_cfis[3] = error;
390 memcpy(&p->err_cfis[4], cfis + 4, 16);
392 *(uint32_t *)(fis + 4) = (1 << slot);
393 p->sact &= ~(1 << slot);
397 ahci_write_fis(p, FIS_TYPE_SETDEVBITS, fis);
/*
 * Post a D2H Register FIS completing the non-NCQ command in 'slot':
 * records the failing CFIS in err_cfis on error (tag 0x80 = non-queued)
 * and clears the slot's PxCI bit.  (Extract elides interior lines that
 * fill in the status/error and LBA fields of the FIS.)
 */
401 ahci_write_fis_d2h(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t tfd)
406 error = (tfd >> 8) & 0xff;
407 memset(fis, 0, sizeof(fis));
408 fis[0] = FIS_TYPE_REGD2H;
422 if (fis[2] & ATA_S_ERROR) {
/* 0x80 marks a non-NCQ error in the NCQ error log. */
423 p->err_cfis[0] = 0x80;
424 p->err_cfis[2] = tfd & 0xff;
425 p->err_cfis[3] = error;
426 memcpy(&p->err_cfis[4], cfis + 4, 16);
428 p->ci &= ~(1 << slot);
430 ahci_write_fis(p, FIS_TYPE_REGD2H, fis);
/*
 * Post the early, interrupt-free D2H Register FIS that acknowledges
 * acceptance of an NCQ command: task file shows READY|DSC with no
 * error, and the slot's PxCI bit is cleared (completion is signalled
 * later via a Set Device Bits FIS).
 */
434 ahci_write_fis_d2h_ncq(struct ahci_port *p, int slot)
438 p->tfd = ATA_S_READY | ATA_S_DSC;
439 memset(fis, 0, sizeof(fis));
440 fis[0] = FIS_TYPE_REGD2H;
441 fis[1] = 0; /* No interrupt */
442 fis[2] = p->tfd; /* Status */
443 fis[3] = 0; /* No error */
444 p->ci &= ~(1 << slot);
445 ahci_write_fis(p, FIS_TYPE_REGD2H, fis);
/*
 * Post the initial D2H Register (signature) FIS after a port reset.
 * (Extract elides the lines filling in the signature bytes.)
 */
449 ahci_write_reset_fis_d2h(struct ahci_port *p)
453 memset(fis, 0, sizeof(fis));
454 fis[0] = FIS_TYPE_REGD2H;
462 ahci_write_fis(p, FIS_TYPE_REGD2H, fis);
/*
 * Finish a port stop: once PxCMD.ST is clear and no commands remain
 * in flight, clear the command-list-running bit and current command
 * slot.  (Extract elides the lines clearing ci/sact and ccs.)
 */
466 ahci_check_stopped(struct ahci_port *p)
469 * If we are no longer processing the command list and nothing
470 * is in-flight, clear the running bit, the current command
471 * slot, the command issue and active bits.
473 if (!(p->cmd & AHCI_P_CMD_ST)) {
474 if (p->pending == 0) {
476 p->cmd &= ~(AHCI_P_CMD_CR | AHCI_P_CMD_CCS_MASK);
/*
 * Cancel all outstanding blockif requests for the port (softc mutex
 * must be held), clear the corresponding PxSACT (NCQ) or PxCI bits,
 * recycle each ahci_ioreq onto the free list, then complete the stop
 * via ahci_check_stopped().  (Extract elides interior lines, e.g. the
 * cfis/slot recovery from each request.)
 */
485 ahci_port_stop(struct ahci_port *p)
487 struct ahci_ioreq *aior;
492 assert(pthread_mutex_isowned_np(&p->pr_sc->mtx));
494 TAILQ_FOREACH(aior, &p->iobhd, io_blist) {
496 * Try to cancel the outstanding blockif request.
498 error = blockif_cancel(p->bctx, &aior->io_req);
504 if (cfis[2] == ATA_WRITE_FPDMA_QUEUED ||
505 cfis[2] == ATA_READ_FPDMA_QUEUED ||
506 cfis[2] == ATA_SEND_FPDMA_QUEUED)
507 p->sact &= ~(1 << slot); /* NCQ */
509 p->ci &= ~(1 << slot);
512 * This command is now done.
514 p->pending &= ~(1 << slot);
517 * Delete the blockif request from the busy list
519 TAILQ_REMOVE(&p->iobhd, aior, io_blist);
522 * Move the blockif request back to the free list
524 STAILQ_INSERT_TAIL(&p->iofhd, aior, io_flist);
527 ahci_check_stopped(p);
/*
 * Reset a port to power-on defaults: UDMA6 transfer mode, 128-sector
 * multi-count; no backing device yields "no device detected", otherwise
 * the PHY is reported online at the negotiated (or Gen3) speed and the
 * device signature/taskfile are set before posting the reset D2H FIS.
 * (Extract elides interior lines, including the ATA vs ATAPI branch.)
 */
531 ahci_port_reset(struct ahci_port *pr)
535 pr->xfermode = ATA_UDMA6;
536 pr->mult_sectors = 128;
539 pr->ssts = ATA_SS_DET_NO_DEVICE;
540 pr->sig = 0xFFFFFFFF;
544 pr->ssts = ATA_SS_DET_PHY_ONLINE | ATA_SS_IPM_ACTIVE;
545 if (pr->sctl & ATA_SC_SPD_MASK)
546 pr->ssts |= (pr->sctl & ATA_SC_SPD_MASK);
548 pr->ssts |= ATA_SS_SPD_GEN3;
549 pr->tfd = (1 << 8) | ATA_S_DSC | ATA_S_DMA;
552 pr->tfd |= ATA_S_READY;
554 pr->sig = PxSIG_ATAPI;
555 ahci_write_reset_fis_d2h(pr);
/*
 * HBA-wide reset: AHCI-enable in GHC, deassert legacy interrupt, then
 * reset every port with spin-up/power-on defaults (and cold-presence
 * state when a backing device exists).  (Extract elides interior lines.)
 */
559 ahci_reset(struct pci_ahci_softc *sc)
563 sc->ghc = AHCI_GHC_AE;
567 pci_lintr_deassert(sc->asc_pi);
571 for (i = 0; i < sc->ports; i++) {
574 sc->port[i].cmd = (AHCI_P_CMD_SUD | AHCI_P_CMD_POD);
575 if (sc->port[i].bctx)
576 sc->port[i].cmd |= AHCI_P_CMD_CPS;
577 sc->port[i].sctl = 0;
578 ahci_port_reset(&sc->port[i]);
/*
 * Copy a string into ATA IDENTIFY data: bytes are swapped within each
 * 16-bit word (dest[i ^ 1]) as the ATA string format requires.
 * (Extract elides the space-padding path.)
 */
583 ata_string(uint8_t *dest, const char *src, int len)
587 for (i = 0; i < len; i++) {
589 dest[i ^ 1] = *src++;
/*
 * Copy a string into SCSI/ATAPI response data.  The loop body is elided
 * in this extract -- presumably a straight byte copy with space padding;
 * confirm against the full source.
 */
596 atapi_string(uint8_t *dest, const char *src, int len)
600 for (i = 0; i < len; i++) {
609 * Build up the iovec based on the PRDT, 'done' and 'len'.
/*
 * Translate the guest PRDT into the blockif request's iovec, starting
 * at 'done' bytes into the transfer.  If all BLOCKIF_IOV_MAX entries
 * are consumed, the I/O is trimmed back to a sector-size multiple and
 * 'more' is set so the remainder is reissued on completion.
 * (Extract elides interior lines of the loop and trimming logic.)
 */
612 ahci_build_iov(struct ahci_port *p, struct ahci_ioreq *aior,
613 struct ahci_prdt_entry *prdt, uint16_t prdtl)
615 struct blockif_req *breq = &aior->io_req;
616 int i, j, skip, todo, left, extra;
619 /* Copy part of PRDT between 'done' and 'len' bytes into the iov. */
621 left = aior->len - aior->done;
623 for (i = 0, j = 0; i < prdtl && j < BLOCKIF_IOV_MAX && left > 0;
625 dbcsz = (prdt->dbc & DBCMASK) + 1;
626 /* Skip already done part of the PRDT */
634 breq->br_iov[j].iov_base = paddr_guest2host(ahci_ctx(p->pr_sc),
635 prdt->dba + skip, dbcsz);
636 breq->br_iov[j].iov_len = dbcsz;
643 /* If we got limited by IOV length, round I/O down to sector size. */
644 if (j == BLOCKIF_IOV_MAX) {
645 extra = todo % blockif_sectsz(p->bctx);
649 if (breq->br_iov[j - 1].iov_len > extra) {
650 breq->br_iov[j - 1].iov_len -= extra;
653 extra -= breq->br_iov[j - 1].iov_len;
659 breq->br_resid = todo;
/* More data remains beyond this request's iovec. */
661 aior->more = (aior->done < aior->len && i < prdtl);
/*
 * Start (or continue, when 'done' > 0) a disk read/write: decode LBA
 * and sector count from the CFIS in NCQ, 48-bit, or 28-bit form, scale
 * both by the backing sector size, build the iovec from the PRDT and
 * queue the request on blockif.  NCQ commands get their early D2H ack
 * here.  (Extract elides interior lines, e.g. zero-count handling and
 * error paths.)
 */
665 ahci_handle_rw(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t done)
667 struct ahci_ioreq *aior;
668 struct blockif_req *breq;
669 struct ahci_prdt_entry *prdt;
670 struct ahci_cmd_hdr *hdr;
673 int err, first, ncq, readop;
675 prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
676 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
/* Any write opcode flips the default read direction. */
681 if (cfis[2] == ATA_WRITE || cfis[2] == ATA_WRITE48 ||
682 cfis[2] == ATA_WRITE_MUL || cfis[2] == ATA_WRITE_MUL48 ||
683 cfis[2] == ATA_WRITE_DMA || cfis[2] == ATA_WRITE_DMA48 ||
684 cfis[2] == ATA_WRITE_FPDMA_QUEUED)
687 if (cfis[2] == ATA_WRITE_FPDMA_QUEUED ||
688 cfis[2] == ATA_READ_FPDMA_QUEUED) {
/* NCQ form: 48-bit LBA, count in fields 11/3. */
689 lba = ((uint64_t)cfis[10] << 40) |
690 ((uint64_t)cfis[9] << 32) |
691 ((uint64_t)cfis[8] << 24) |
692 ((uint64_t)cfis[6] << 16) |
693 ((uint64_t)cfis[5] << 8) |
695 len = cfis[11] << 8 | cfis[3];
699 } else if (cfis[2] == ATA_READ48 || cfis[2] == ATA_WRITE48 ||
700 cfis[2] == ATA_READ_MUL48 || cfis[2] == ATA_WRITE_MUL48 ||
701 cfis[2] == ATA_READ_DMA48 || cfis[2] == ATA_WRITE_DMA48) {
/* 48-bit form: count in fields 13/12. */
702 lba = ((uint64_t)cfis[10] << 40) |
703 ((uint64_t)cfis[9] << 32) |
704 ((uint64_t)cfis[8] << 24) |
705 ((uint64_t)cfis[6] << 16) |
706 ((uint64_t)cfis[5] << 8) |
708 len = cfis[13] << 8 | cfis[12];
/* 28-bit form: low nibble of the device field holds LBA bits 27:24. */
712 lba = ((cfis[7] & 0xf) << 24) | (cfis[6] << 16) |
713 (cfis[5] << 8) | cfis[4];
/* Convert sectors to bytes. */
718 lba *= blockif_sectsz(p->bctx);
719 len *= blockif_sectsz(p->bctx);
721 /* Pull request off free list */
722 aior = STAILQ_FIRST(&p->iofhd);
723 assert(aior != NULL);
724 STAILQ_REMOVE_HEAD(&p->iofhd, io_flist);
730 aior->readop = readop;
731 breq = &aior->io_req;
732 breq->br_offset = lba + done;
733 ahci_build_iov(p, aior, prdt, hdr->prdtl);
735 /* Mark this command in-flight. */
736 p->pending |= 1 << slot;
738 /* Stuff request onto busy list. */
739 TAILQ_INSERT_HEAD(&p->iobhd, aior, io_blist);
/* NCQ: acknowledge acceptance before the data transfer completes. */
742 ahci_write_fis_d2h_ncq(p, slot);
745 err = blockif_read(p->bctx, breq);
747 err = blockif_write(p->bctx, breq);
/*
 * Queue a FLUSH CACHE as an asynchronous blockif flush, using the same
 * free-list/busy-list ioreq bookkeeping as reads and writes.
 * (Extract elides interior lines.)
 */
752 ahci_handle_flush(struct ahci_port *p, int slot, uint8_t *cfis)
754 struct ahci_ioreq *aior;
755 struct blockif_req *breq;
759 * Pull request off free list
761 aior = STAILQ_FIRST(&p->iofhd);
762 assert(aior != NULL);
763 STAILQ_REMOVE_HEAD(&p->iofhd, io_flist);
769 breq = &aior->io_req;
772 * Mark this command in-flight.
774 p->pending |= 1 << slot;
777 * Stuff request onto busy list
779 TAILQ_INSERT_HEAD(&p->iobhd, aior, io_blist);
781 err = blockif_flush(p->bctx, breq);
/*
 * Gather up to 'len' bytes of guest data described by the slot's PRDT
 * into a host buffer, walking entries until the length is satisfied.
 * (Extract elides interior lines, including pointer advancement.)
 */
786 read_prdt(struct ahci_port *p, int slot, uint8_t *cfis,
789 struct ahci_cmd_hdr *hdr;
790 struct ahci_prdt_entry *prdt;
794 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
797 prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
798 for (i = 0; i < hdr->prdtl && len; i++) {
803 dbcsz = (prdt->dbc & DBCMASK) + 1;
804 ptr = paddr_guest2host(ahci_ctx(p->pr_sc), prdt->dba, dbcsz);
805 sublen = MIN(len, dbcsz);
806 memcpy(to, ptr, sublen);
/*
 * Handle TRIM via DATA SET MANAGEMENT or SEND FPDMA QUEUED: read the
 * range entries from the PRDT, and issue one blockif_delete() per
 * range, continuing from 'done' on each completion callback.  A zero
 * range ends the sequence with a success FIS (SDB for the NCQ form,
 * D2H otherwise).  (Extract elides interior lines, including the NCQ
 * detection and zero-length/ro error checks.)
 */
814 ahci_handle_dsm_trim(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t done)
816 struct ahci_ioreq *aior;
817 struct blockif_req *breq;
/* Range count differs between the two opcode encodings. */
825 if (cfis[2] == ATA_DATA_SET_MANAGEMENT) {
826 len = (uint16_t)cfis[13] << 8 | cfis[12];
829 } else { /* ATA_SEND_FPDMA_QUEUED */
830 len = (uint16_t)cfis[11] << 8 | cfis[3];
834 read_prdt(p, slot, cfis, buf, sizeof(buf));
/* Decode one 8-byte TRIM entry: 48-bit LBA + 16-bit sector count. */
838 elba = ((uint64_t)entry[5] << 40) |
839 ((uint64_t)entry[4] << 32) |
840 ((uint64_t)entry[3] << 24) |
841 ((uint64_t)entry[2] << 16) |
842 ((uint64_t)entry[1] << 8) |
844 elen = (uint16_t)entry[7] << 8 | entry[6];
850 ahci_write_fis_d2h_ncq(p, slot);
851 ahci_write_fis_sdb(p, slot, cfis,
852 ATA_S_READY | ATA_S_DSC);
854 ahci_write_fis_d2h(p, slot, cfis,
855 ATA_S_READY | ATA_S_DSC);
857 p->pending &= ~(1 << slot);
858 ahci_check_stopped(p);
867 * Pull request off free list
869 aior = STAILQ_FIRST(&p->iofhd);
870 assert(aior != NULL);
871 STAILQ_REMOVE_HEAD(&p->iofhd, io_flist);
/* More ranges remain after this one. */
876 aior->more = (len != done);
878 breq = &aior->io_req;
879 breq->br_offset = elba * blockif_sectsz(p->bctx);
880 breq->br_resid = elen * blockif_sectsz(p->bctx);
883 * Mark this command in-flight.
885 p->pending |= 1 << slot;
888 * Stuff request onto busy list
890 TAILQ_INSERT_HEAD(&p->iobhd, aior, io_blist);
893 ahci_write_fis_d2h_ncq(p, slot);
895 err = blockif_delete(p->bctx, breq);
/*
 * Scatter up to 'len' bytes from a host buffer into the guest regions
 * described by the slot's PRDT, then record the byte count actually
 * transferred in the command header's PRD byte count (prdbc).
 * (Extract elides interior lines, including pointer advancement.)
 */
900 write_prdt(struct ahci_port *p, int slot, uint8_t *cfis,
903 struct ahci_cmd_hdr *hdr;
904 struct ahci_prdt_entry *prdt;
908 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
911 prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
912 for (i = 0; i < hdr->prdtl && len; i++) {
917 dbcsz = (prdt->dbc & DBCMASK) + 1;
918 ptr = paddr_guest2host(ahci_ctx(p->pr_sc), prdt->dba, dbcsz);
919 sublen = MIN(len, dbcsz);
920 memcpy(ptr, from, sublen);
925 hdr->prdbc = size - len;
/*
 * Store a two's-complement checksum in the last byte so the whole
 * buffer sums to zero modulo 256 (ATA log-page checksum convention).
 * (Extract elides the accumulation into 'sum'.)
 */
929 ahci_checksum(uint8_t *buf, int size)
934 for (i = 0; i < size - 1; i++)
936 buf[size - 1] = 0x100 - sum;
/*
 * READ LOG EXT / READ LOG DMA EXT: supports the log directory (0x00),
 * the NCQ command error log (0x10, from err_cfis), and the SATA NCQ
 * Send/Receive log (0x13, advertising SFQ DSM TRIM when the backing
 * store supports delete and is writable).  Anything else, ATAPI
 * devices, or malformed parameters abort the command.
 * (Extract elides interior lines.)
 */
940 ahci_handle_read_log(struct ahci_port *p, int slot, uint8_t *cfis)
942 struct ahci_cmd_hdr *hdr;
944 uint8_t *buf8 = (uint8_t *)buf;
945 uint16_t *buf16 = (uint16_t *)buf;
947 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
/* Only single-sector reads at page offset 0 are supported. */
948 if (p->atapi || hdr->prdtl == 0 || cfis[5] != 0 ||
949 cfis[9] != 0 || cfis[12] != 1 || cfis[13] != 0) {
950 ahci_write_fis_d2h(p, slot, cfis,
951 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
955 memset(buf, 0, sizeof(buf));
956 if (cfis[4] == 0x00) { /* Log directory */
957 buf16[0x00] = 1; /* Version -- 1 */
958 buf16[0x10] = 1; /* NCQ Command Error Log -- 1 page */
959 buf16[0x13] = 1; /* SATA NCQ Send and Receive Log -- 1 page */
960 } else if (cfis[4] == 0x10) { /* NCQ Command Error Log */
961 memcpy(buf8, p->err_cfis, sizeof(p->err_cfis));
962 ahci_checksum(buf8, sizeof(buf));
963 } else if (cfis[4] == 0x13) { /* SATA NCQ Send and Receive Log */
964 if (blockif_candelete(p->bctx) && !blockif_is_ro(p->bctx)) {
965 buf[0x00] = 1; /* SFQ DSM supported */
966 buf[0x01] = 1; /* SFQ DSM TRIM supported */
969 ahci_write_fis_d2h(p, slot, cfis,
970 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
/* PIO variant gets a PIO Setup FIS first; DMA variant does not. */
974 if (cfis[2] == ATA_READ_LOG_EXT)
975 ahci_write_fis_piosetup(p);
976 write_prdt(p, slot, cfis, (void *)buf, sizeof(buf));
977 ahci_write_fis_d2h(p, slot, cfis, ATA_S_DSC | ATA_S_READY);
/*
 * ATA IDENTIFY DEVICE: aborted for ATAPI devices or when no PRDT is
 * given; otherwise builds the 512-byte identify data from the backing
 * blockif (geometry, sector sizes, capacity, supported features,
 * TRIM capability) and returns it via PIO.  Word indices below follow
 * the ATA IDENTIFY DEVICE layout.  (Extract elides interior lines.)
 */
981 handle_identify(struct ahci_port *p, int slot, uint8_t *cfis)
983 struct ahci_cmd_hdr *hdr;
985 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
986 if (p->atapi || hdr->prdtl == 0) {
987 ahci_write_fis_d2h(p, slot, cfis,
988 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
992 int sectsz, psectsz, psectoff, candelete, ro;
996 ro = blockif_is_ro(p->bctx);
997 candelete = blockif_candelete(p->bctx);
998 sectsz = blockif_sectsz(p->bctx);
999 sectors = blockif_size(p->bctx) / sectsz;
1000 blockif_chs(p->bctx, &cyl, &heads, &sech);
1001 blockif_psectsz(p->bctx, &psectsz, &psectoff);
1002 memset(buf, 0, sizeof(buf));
1007 ata_string((uint8_t *)(buf+10), p->ident, 20);
1008 ata_string((uint8_t *)(buf+23), "001", 8);
1009 ata_string((uint8_t *)(buf+27), "BHYVE SATA DISK", 40);
1010 buf[47] = (0x8000 | 128);
1012 buf[49] = (1 << 8 | 1 << 9 | 1 << 11);
1013 buf[50] = (1 << 14);
1014 buf[53] = (1 << 1 | 1 << 2);
1015 if (p->mult_sectors)
1016 buf[59] = (0x100 | p->mult_sectors);
/* 28-bit capacity, capped at the LBA28 limit. */
1017 if (sectors <= 0x0fffffff) {
1019 buf[61] = (sectors >> 16);
1025 if (p->xfermode & ATA_WDMA0)
1026 buf[63] |= (1 << ((p->xfermode & 7) + 8));
1034 buf[76] = (ATA_SATA_GEN1 | ATA_SATA_GEN2 | ATA_SATA_GEN3 |
1036 buf[77] = (ATA_SUPPORT_RCVSND_FPDMA_QUEUED |
1037 (p->ssts & ATA_SS_SPD_MASK) >> 3);
1040 buf[82] = (ATA_SUPPORT_POWERMGT | ATA_SUPPORT_WRITECACHE|
1041 ATA_SUPPORT_LOOKAHEAD | ATA_SUPPORT_NOP);
1042 buf[83] = (ATA_SUPPORT_ADDRESS48 | ATA_SUPPORT_FLUSHCACHE |
1043 ATA_SUPPORT_FLUSHCACHE48 | 1 << 14);
1044 buf[84] = (1 << 14);
1045 buf[85] = (ATA_SUPPORT_POWERMGT | ATA_SUPPORT_WRITECACHE|
1046 ATA_SUPPORT_LOOKAHEAD | ATA_SUPPORT_NOP);
1047 buf[86] = (ATA_SUPPORT_ADDRESS48 | ATA_SUPPORT_FLUSHCACHE |
1048 ATA_SUPPORT_FLUSHCACHE48 | 1 << 15);
1049 buf[87] = (1 << 14);
1051 if (p->xfermode & ATA_UDMA0)
1052 buf[88] |= (1 << ((p->xfermode & 7) + 8));
/* 48-bit capacity (words 100-103). */
1054 buf[101] = (sectors >> 16);
1055 buf[102] = (sectors >> 32);
1056 buf[103] = (sectors >> 48);
1057 if (candelete && !ro) {
1058 buf[69] |= ATA_SUPPORT_RZAT | ATA_SUPPORT_DRAT;
1060 buf[169] = ATA_SUPPORT_DSM_TRIM;
/* Physical/logical sector relationship (word 106) and alignment. */
1064 if (psectsz > sectsz) {
1066 buf[106] |= ffsl(psectsz / sectsz) - 1;
1067 buf[209] |= (psectoff / sectsz);
1071 buf[117] = sectsz / 2;
1072 buf[118] = ((sectsz / 2) >> 16);
1074 buf[119] = (ATA_SUPPORT_RWLOGDMAEXT | 1 << 14);
1075 buf[120] = (ATA_SUPPORT_RWLOGDMAEXT | 1 << 14);
1078 ahci_checksum((uint8_t *)buf, sizeof(buf));
1079 ahci_write_fis_piosetup(p);
1080 write_prdt(p, slot, cfis, (void *)buf, sizeof(buf));
1081 ahci_write_fis_d2h(p, slot, cfis, ATA_S_DSC | ATA_S_READY);
/*
 * ATA IDENTIFY PACKET DEVICE: builds the identify data for the emulated
 * SATA DVD-ROM (word 0 declares an ATAPI CD/DVD device with a 12-byte
 * packet) and returns it via PIO.  Non-ATAPI ports abort the command
 * (the guard's condition line is elided in this extract).
 */
1086 handle_atapi_identify(struct ahci_port *p, int slot, uint8_t *cfis)
1089 ahci_write_fis_d2h(p, slot, cfis,
1090 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
1094 memset(buf, 0, sizeof(buf));
1095 buf[0] = (2 << 14 | 5 << 8 | 1 << 7 | 2 << 5);
1096 ata_string((uint8_t *)(buf+10), p->ident, 20);
1097 ata_string((uint8_t *)(buf+23), "001", 8);
1098 ata_string((uint8_t *)(buf+27), "BHYVE SATA DVD ROM", 40);
1099 buf[49] = (1 << 9 | 1 << 8);
1100 buf[50] = (1 << 14 | 1);
1101 buf[53] = (1 << 2 | 1 << 1);
1104 if (p->xfermode & ATA_WDMA0)
1105 buf[63] |= (1 << ((p->xfermode & 7) + 8));
1111 buf[76] = (ATA_SATA_GEN1 | ATA_SATA_GEN2 | ATA_SATA_GEN3);
1112 buf[77] = ((p->ssts & ATA_SS_SPD_MASK) >> 3);
1115 buf[82] = (ATA_SUPPORT_POWERMGT | ATA_SUPPORT_PACKET |
1116 ATA_SUPPORT_RESET | ATA_SUPPORT_NOP);
1117 buf[83] = (1 << 14);
1118 buf[84] = (1 << 14);
1119 buf[85] = (ATA_SUPPORT_POWERMGT | ATA_SUPPORT_PACKET |
1120 ATA_SUPPORT_RESET | ATA_SUPPORT_NOP);
1121 buf[87] = (1 << 14);
1123 if (p->xfermode & ATA_UDMA0)
1124 buf[88] |= (1 << ((p->xfermode & 7) + 8));
1127 ahci_checksum((uint8_t *)buf, sizeof(buf));
1128 ahci_write_fis_piosetup(p);
1129 write_prdt(p, slot, cfis, (void *)buf, sizeof(buf));
1130 ahci_write_fis_d2h(p, slot, cfis, ATA_S_DSC | ATA_S_READY);
/*
 * SCSI INQUIRY for the emulated DVD-ROM: supports the standard inquiry
 * and (per the visible branch) the supported-VPD-pages page; other VPD
 * pages fail with ILLEGAL REQUEST sense.  (Extract elides interior
 * lines, including the standard inquiry header bytes.)
 */
1135 atapi_inquiry(struct ahci_port *p, int slot, uint8_t *cfis)
1144 if (acmd[1] & 1) { /* VPD */
1145 if (acmd[2] == 0) { /* Supported VPD pages */
1153 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1155 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1156 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1157 ahci_write_fis_d2h(p, slot, cfis, tfd);
1169 atapi_string(buf + 8, "BHYVE", 8);
1170 atapi_string(buf + 16, "BHYVE DVD-ROM", 16);
1171 atapi_string(buf + 32, "001", 4);
1177 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1178 write_prdt(p, slot, cfis, buf, len);
1179 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
/*
 * SCSI READ CAPACITY(10): reports the last LBA and a fixed 2048-byte
 * block size, big-endian, computed from the backing image size.
 * (Extract elides the declarations and braces.)
 */
1183 atapi_read_capacity(struct ahci_port *p, int slot, uint8_t *cfis)
1188 sectors = blockif_size(p->bctx) / 2048;
1189 be32enc(buf, sectors - 1);
1190 be32enc(buf + 4, 2048);
1191 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1192 write_prdt(p, slot, cfis, buf, sizeof(buf));
1193 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
/*
 * SCSI READ TOC: synthesizes a single-track data-disc TOC in the
 * requested format (format selector in acmd[9] bits 7:6), honoring the
 * MSF bit; unsupported formats and out-of-range start tracks fail with
 * ILLEGAL REQUEST sense.  (Extract elides many interior lines,
 * including the per-format descriptor construction.)
 */
1197 atapi_read_toc(struct ahci_port *p, int slot, uint8_t *cfis)
1205 len = be16dec(acmd + 7);
1206 format = acmd[9] >> 6;
1212 uint8_t start_track, buf[20], *bp;
1214 msf = (acmd[1] >> 1) & 1;
1215 start_track = acmd[6];
/* Only track 1 and the lead-out (0xaa) are valid on this disc. */
1216 if (start_track > 1 && start_track != 0xaa) {
1218 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1220 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1221 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1222 ahci_write_fis_d2h(p, slot, cfis, tfd);
1228 if (start_track <= 1) {
1248 sectors = blockif_size(p->bctx) / blockif_sectsz(p->bctx);
1252 lba_to_msf(bp, sectors);
1255 be32enc(bp, sectors);
1259 be16enc(buf, size - 2);
1262 write_prdt(p, slot, cfis, buf, len);
1263 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1264 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1271 memset(buf, 0, sizeof(buf));
1275 if (len > sizeof(buf))
1277 write_prdt(p, slot, cfis, buf, len);
1278 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1279 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1286 uint8_t *bp, buf[50];
1288 msf = (acmd[1] >> 1) & 1;
1324 sectors = blockif_size(p->bctx) / blockif_sectsz(p->bctx);
1328 lba_to_msf(bp, sectors);
1331 be32enc(bp, sectors);
1354 be16enc(buf, size - 2);
1357 write_prdt(p, slot, cfis, buf, len);
1358 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1359 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1366 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1368 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1369 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1370 ahci_write_fis_d2h(p, slot, cfis, tfd);
/*
 * SCSI REPORT LUNS: returns a mostly-zero LUN list (single LUN 0).
 * (Extract elides the buffer declaration and LUN list length lines.)
 */
1377 atapi_report_luns(struct ahci_port *p, int slot, uint8_t *cfis)
1381 memset(buf, 0, sizeof(buf));
1384 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1385 write_prdt(p, slot, cfis, buf, sizeof(buf));
1386 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
/*
 * SCSI READ(10)/READ(12) for the DVD-ROM: decode big-endian LBA and
 * transfer length from the CDB, then queue a blockif read using the
 * same ioreq bookkeeping as ATA reads; a zero-length transfer
 * completes immediately.  (Extract elides interior lines, including
 * the LBA/len scaling to byte offsets.)
 */
1390 atapi_read(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t done)
1392 struct ahci_ioreq *aior;
1393 struct ahci_cmd_hdr *hdr;
1394 struct ahci_prdt_entry *prdt;
1395 struct blockif_req *breq;
1402 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
1403 prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
1405 lba = be32dec(acmd + 2);
/* READ(10) keeps its length at byte 7; READ(12) at byte 6. */
1406 if (acmd[0] == READ_10)
1407 len = be16dec(acmd + 7);
1409 len = be32dec(acmd + 6);
1411 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1412 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1418 * Pull request off free list
1420 aior = STAILQ_FIRST(&p->iofhd);
1421 assert(aior != NULL);
1422 STAILQ_REMOVE_HEAD(&p->iofhd, io_flist);
1428 breq = &aior->io_req;
1429 breq->br_offset = lba + done;
1430 ahci_build_iov(p, aior, prdt, hdr->prdtl);
1432 /* Mark this command in-flight. */
1433 p->pending |= 1 << slot;
1435 /* Stuff request onto busy list. */
1436 TAILQ_INSERT_HEAD(&p->iobhd, aior, io_blist);
1438 err = blockif_read(p->bctx, breq);
/*
 * SCSI REQUEST SENSE: returns fixed-format sense data (0x70 with the
 * Valid bit) carrying the port's stored sense key.  (Extract elides
 * interior lines, including the additional sense code.)
 */
1443 atapi_request_sense(struct ahci_port *p, int slot, uint8_t *cfis)
1451 if (len > sizeof(buf))
1453 memset(buf, 0, len)
1454 buf[0] = 0x70 | (1 << 7);
1455 buf[2] = p->sense_key;
1458 write_prdt(p, slot, cfis, buf, len);
1459 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1460 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
/*
 * SCSI START STOP UNIT: start/stop requests succeed trivially; media
 * eject is not implemented and fails with ILLEGAL REQUEST sense.
 * (Extract elides interior lines.)
 */
1464 atapi_start_stop_unit(struct ahci_port *p, int slot, uint8_t *cfis)
1466 uint8_t *acmd = cfis + 0x40;
1469 switch (acmd[4] & 3) {
1473 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1474 tfd = ATA_S_READY | ATA_S_DSC;
1477 /* TODO eject media */
1478 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1479 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1481 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1484 ahci_write_fis_d2h(p, slot, cfis, tfd);
/*
 * SCSI MODE SENSE(10): supports the R/W error recovery page (0x01) and
 * the CD capabilities page (0x2A); other pages fail with ILLEGAL
 * REQUEST sense.  (Extract elides interior lines, including the page
 * bodies and length clamping.)
 */
1488 atapi_mode_sense(struct ahci_port *p, int slot, uint8_t *cfis)
1496 len = be16dec(acmd + 7);
1498 code = acmd[2] & 0x3f;
1503 case MODEPAGE_RW_ERROR_RECOVERY:
1507 if (len > sizeof(buf))
1510 memset(buf, 0, sizeof(buf));
/* Mode data length excludes its own 2-byte field. */
1511 be16enc(buf, 16 - 2);
1516 write_prdt(p, slot, cfis, buf, len);
1517 tfd = ATA_S_READY | ATA_S_DSC;
1520 case MODEPAGE_CD_CAPABILITIES:
1524 if (len > sizeof(buf))
1527 memset(buf, 0, sizeof(buf));
1528 be16enc(buf, 30 - 2);
1534 be16enc(&buf[18], 2);
1535 be16enc(&buf[20], 512);
1536 write_prdt(p, slot, cfis, buf, len);
1537 tfd = ATA_S_READY | ATA_S_DSC;
1546 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1548 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1553 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1555 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1558 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1559 ahci_write_fis_d2h(p, slot, cfis, tfd);
/*
 * SCSI GET EVENT STATUS NOTIFICATION: only the polled form (Immed bit
 * set) is supported; asynchronous requests fail with ILLEGAL REQUEST
 * sense.  Returns a minimal event header.  (Extract elides interior
 * lines, including the event-class bytes.)
 */
1563 atapi_get_event_status_notification(struct ahci_port *p, int slot,
1571 /* we don't support asynchronous operation */
1572 if (!(acmd[1] & 1)) {
1573 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1575 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1580 len = be16dec(acmd + 7);
1581 if (len > sizeof(buf))
1584 memset(buf, 0, sizeof(buf));
1585 be16enc(buf, 8 - 2);
1589 write_prdt(p, slot, cfis, buf, len);
1590 tfd = ATA_S_READY | ATA_S_DSC;
1592 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1593 ahci_write_fis_d2h(p, slot, cfis, tfd);
/*
 * Dispatch an ATAPI PACKET command: the SCSI CDB lives at cfis+0x40
 * and is routed to the matching atapi_* handler; unknown opcodes fail
 * with ILLEGAL REQUEST sense.  (Extract elides interior lines,
 * including several case labels.)
 */
1597 handle_packet_cmd(struct ahci_port *p, int slot, uint8_t *cfis)
1607 for (i = 0; i < 16; i++)
1608 DPRINTF("%02x ", acmd[i]);
1614 case TEST_UNIT_READY:
1615 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1616 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1619 atapi_inquiry(p, slot, cfis);
1622 atapi_read_capacity(p, slot, cfis);
1626 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1627 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1630 atapi_read_toc(p, slot, cfis);
1633 atapi_report_luns(p, slot, cfis);
1637 atapi_read(p, slot, cfis, 0);
1640 atapi_request_sense(p, slot, cfis);
1642 case START_STOP_UNIT:
1643 atapi_start_stop_unit(p, slot, cfis);
1646 atapi_mode_sense(p, slot, cfis);
1648 case GET_EVENT_STATUS_NOTIFICATION:
1649 atapi_get_event_status_notification(p, slot, cfis);
1652 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1653 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1655 ahci_write_fis_d2h(p, slot, cfis, (p->sense_key << 12) |
1656 ATA_S_READY | ATA_S_ERROR);
/*
 * Top-level ATA command dispatcher for one slot: marks the task file
 * busy, then routes on the command byte (cfis[2]) -- IDENTIFY, SET
 * FEATURES, reads/writes (DMA/NCQ/multi), flush, DSM TRIM, READ LOG,
 * power/verify no-ops, ATAPI IDENTIFY and PACKET -- aborting anything
 * unsupported.  (Extract elides interior lines, including many case
 * labels and the ATAPI-port guard before PACKET handling.)
 */
1662 ahci_handle_cmd(struct ahci_port *p, int slot, uint8_t *cfis)
1665 p->tfd |= ATA_S_BUSY;
1667 case ATA_ATA_IDENTIFY:
1668 handle_identify(p, slot, cfis);
1670 case ATA_SETFEATURES:
1673 case ATA_SF_ENAB_SATA_SF:
1675 case ATA_SATA_SF_AN:
1676 p->tfd = ATA_S_DSC | ATA_S_READY;
1679 p->tfd = ATA_S_ERROR | ATA_S_READY;
1680 p->tfd |= (ATA_ERROR_ABORT << 8);
1684 case ATA_SF_ENAB_WCACHE:
1685 case ATA_SF_DIS_WCACHE:
1686 case ATA_SF_ENAB_RCACHE:
1687 case ATA_SF_DIS_RCACHE:
1688 p->tfd = ATA_S_DSC | ATA_S_READY;
1690 case ATA_SF_SETXFER:
1692 switch (cfis[12] & 0xf8) {
1698 p->xfermode = (cfis[12] & 0x7);
1701 p->tfd = ATA_S_DSC | ATA_S_READY;
1705 p->tfd = ATA_S_ERROR | ATA_S_READY;
1706 p->tfd |= (ATA_ERROR_ABORT << 8);
1709 ahci_write_fis_d2h(p, slot, cfis, p->tfd);
/* SET MULTI: count must be a power of two no greater than 128. */
1713 if (cfis[12] != 0 &&
1714 (cfis[12] > 128 || (cfis[12] & (cfis[12] - 1)))) {
1715 p->tfd = ATA_S_ERROR | ATA_S_READY;
1716 p->tfd |= (ATA_ERROR_ABORT << 8);
1718 p->mult_sectors = cfis[12];
1719 p->tfd = ATA_S_DSC | ATA_S_READY;
1721 ahci_write_fis_d2h(p, slot, cfis, p->tfd);
1729 case ATA_READ_MUL48:
1730 case ATA_WRITE_MUL48:
1733 case ATA_READ_DMA48:
1734 case ATA_WRITE_DMA48:
1735 case ATA_READ_FPDMA_QUEUED:
1736 case ATA_WRITE_FPDMA_QUEUED:
1737 ahci_handle_rw(p, slot, cfis, 0);
1739 case ATA_FLUSHCACHE:
1740 case ATA_FLUSHCACHE48:
1741 ahci_handle_flush(p, slot, cfis);
1743 case ATA_DATA_SET_MANAGEMENT:
1744 if (cfis[11] == 0 && cfis[3] == ATA_DSM_TRIM &&
1745 cfis[13] == 0 && cfis[12] == 1) {
1746 ahci_handle_dsm_trim(p, slot, cfis, 0);
1749 ahci_write_fis_d2h(p, slot, cfis,
1750 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
1752 case ATA_SEND_FPDMA_QUEUED:
1753 if ((cfis[13] & 0x1f) == ATA_SFPDMA_DSM &&
1754 cfis[17] == 0 && cfis[16] == ATA_DSM_TRIM &&
1755 cfis[11] == 0 && cfis[3] == 1) {
1756 ahci_handle_dsm_trim(p, slot, cfis, 0);
1759 ahci_write_fis_d2h(p, slot, cfis,
1760 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
1762 case ATA_READ_LOG_EXT:
1763 case ATA_READ_LOG_DMA_EXT:
1764 ahci_handle_read_log(p, slot, cfis);
1766 case ATA_SECURITY_FREEZE_LOCK:
1769 ahci_write_fis_d2h(p, slot, cfis,
1770 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
1772 case ATA_CHECK_POWER_MODE:
1773 cfis[12] = 0xff; /* always on */
1774 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1776 case ATA_STANDBY_CMD:
1777 case ATA_STANDBY_IMMEDIATE:
1779 case ATA_IDLE_IMMEDIATE:
1781 case ATA_READ_VERIFY:
1782 case ATA_READ_VERIFY48:
1783 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1785 case ATA_ATAPI_IDENTIFY:
1786 handle_atapi_identify(p, slot, cfis);
1788 case ATA_PACKET_CMD:
1790 ahci_write_fis_d2h(p, slot, cfis,
1791 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
1793 handle_packet_cmd(p, slot, cfis);
1796 WPRINTF("Unsupported cmd:%02x", cfis[2]);
1797 ahci_write_fis_d2h(p, slot, cfis,
1798 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
/*
 * Fetch one command slot from guest memory: map the CFIS (plus PRDT)
 * via the command header's ctba, log it, validate that it is an H2D
 * Register FIS with the Command bit set, then dispatch via
 * ahci_handle_cmd(); control/reset FISes are handled separately.
 * (Extract elides interior lines.)
 */
1804 ahci_handle_slot(struct ahci_port *p, int slot)
1806 struct ahci_cmd_hdr *hdr;
1808 struct ahci_prdt_entry *prdt;
1810 struct pci_ahci_softc *sc;
1817 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
/* Command FIS length is stored in dwords in the header flags. */
1819 cfl = (hdr->flags & 0x1f) * 4;
1821 cfis = paddr_guest2host(ahci_ctx(sc), hdr->ctba,
1822 0x80 + hdr->prdtl * sizeof(struct ahci_prdt_entry));
1824 prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
1827 for (i = 0; i < cfl; i++) {
1830 DPRINTF("%02x ", cfis[i]);
1834 for (i = 0; i < hdr->prdtl; i++) {
1835 DPRINTF("%d@%08"PRIx64"", prdt->dbc & 0x3fffff, prdt->dba);
1840 if (cfis[0] != FIS_TYPE_REGH2D) {
1841 WPRINTF("Not a H2D FIS:%02x", cfis[0]);
/* Bit 7 of byte 1 distinguishes a command FIS from a control FIS. */
1845 if (cfis[1] & 0x80) {
1846 ahci_handle_cmd(p, slot, cfis);
1848 if (cfis[15] & (1 << 2))
1850 else if (p->reset) {
1854 p->ci &= ~(1 << slot);
/*
 * Scan PxCI for newly issued commands (skipping those already pending)
 * and dispatch them in round-robin CCS order, stopping while the task
 * file shows BUSY/DRQ or a clear is awaited.  Requires PxCMD.ST.
 * (Extract elides interior lines.)
 */
1859 ahci_handle_port(struct ahci_port *p)
1862 if (!(p->cmd & AHCI_P_CMD_ST))
1866 * Search for any new commands to issue ignoring those that
1867 * are already in-flight. Stop if device is busy or in error.
1869 for (; (p->ci & ~p->pending) != 0; p->ccs = ((p->ccs + 1) & 31)) {
1870 if ((p->tfd & (ATA_S_BUSY | ATA_S_DRQ)) != 0)
1872 if (p->waitforclear)
1874 if ((p->ci & ~p->pending & (1 << p->ccs)) != 0) {
/* Publish the current command slot in PxCMD.CCS. */
1875 p->cmd &= ~AHCI_P_CMD_CCS_MASK;
1876 p->cmd |= p->ccs << AHCI_P_CMD_CCS_SHIFT;
1877 ahci_handle_slot(p, p->ccs);
1883 * blockif callback routine - this runs in the context of the blockif
1884 * i/o thread, so the mutex needs to be acquired.
/*
 * Completion callback for ATA (hard-disk) block i/o.  'err' is the
 * blockif error code (0 on success).  Recycles the ahci_ioreq, reports
 * progress in the command header's PRD byte count, continues multi-part
 * transfers, and on completion posts an SDB FIS (for NCQ commands) or a
 * D2H FIS with the resulting task-file status.
 * NOTE(review): view is elided; some declarations/assignments between
 * the visible lines (e.g. slot/cfis/p/sc setup) are not shown.
 */
1887 ata_ioreq_cb(struct blockif_req *br, int err)
1889 struct ahci_cmd_hdr *hdr;
1890 struct ahci_ioreq *aior;
1891 struct ahci_port *p;
1892 struct pci_ahci_softc *sc;
1897 DPRINTF("%s %d", __func__, err);
1900 aior = br->br_param;
1905 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
/* NCQ commands complete via a Set Device Bits FIS. */
1907 if (cfis[2] == ATA_WRITE_FPDMA_QUEUED ||
1908 cfis[2] == ATA_READ_FPDMA_QUEUED ||
1909 cfis[2] == ATA_SEND_FPDMA_QUEUED)
/* DSM/TRIM (direct or via SEND FPDMA) continues differently below. */
1911 if (cfis[2] == ATA_DATA_SET_MANAGEMENT ||
1912 (cfis[2] == ATA_SEND_FPDMA_QUEUED &&
1913 (cfis[13] & 0x1f) == ATA_SFPDMA_DSM))
/* Callback runs on the blockif thread: take the softc lock. */
1916 pthread_mutex_lock(&sc->mtx);
1919 * Delete the blockif request from the busy list
1921 TAILQ_REMOVE(&p->iobhd, aior, io_blist);
1924 * Move the blockif request back to the free list
1926 STAILQ_INSERT_TAIL(&p->iofhd, aior, io_flist);
/* Report bytes transferred so far back to the guest. */
1929 hdr->prdbc = aior->done;
/* More data remains in this command: issue the next chunk. */
1931 if (!err && aior->more) {
1933 ahci_handle_dsm_trim(p, slot, cfis, aior->done);
1935 ahci_handle_rw(p, slot, cfis, aior->done);
1940 tfd = ATA_S_READY | ATA_S_DSC;
1942 tfd = (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR;
1944 ahci_write_fis_sdb(p, slot, cfis, tfd);
1946 ahci_write_fis_d2h(p, slot, cfis, tfd);
1949 * This command is now complete.
1951 p->pending &= ~(1 << slot);
/* The port may have stop pending, or queued commands to issue. */
1953 ahci_check_stopped(p);
1954 ahci_handle_port(p);
1956 pthread_mutex_unlock(&sc->mtx);
1957 DPRINTF("%s exit", __func__);
/*
 * Completion callback for ATAPI (cd) block i/o, run on the blockif
 * thread.  Mirrors ata_ioreq_cb(): recycles the request, updates the
 * header's PRD byte count, continues a partial atapi_read(), and on
 * completion writes a D2H FIS — setting ILLEGAL REQUEST sense data on
 * error (sense key is packed into bits 15:12 of the task-file value).
 * NOTE(review): elided view; some setup between visible lines is
 * not shown.
 */
1961 atapi_ioreq_cb(struct blockif_req *br, int err)
1963 struct ahci_cmd_hdr *hdr;
1964 struct ahci_ioreq *aior;
1965 struct ahci_port *p;
1966 struct pci_ahci_softc *sc;
1971 DPRINTF("%s %d", __func__, err);
1973 aior = br->br_param;
1978 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + aior->slot * AHCI_CL_SIZE);
/* Callback runs on the blockif thread: take the softc lock. */
1980 pthread_mutex_lock(&sc->mtx);
1983 * Delete the blockif request from the busy list
1985 TAILQ_REMOVE(&p->iobhd, aior, io_blist);
1988 * Move the blockif request back to the free list
1990 STAILQ_INSERT_TAIL(&p->iofhd, aior, io_flist);
/* Report bytes transferred so far back to the guest. */
1993 hdr->prdbc = aior->done;
/* More data remains: issue the next chunk of the read. */
1995 if (!err && aior->more) {
1996 atapi_read(p, slot, cfis, aior->done);
2001 tfd = ATA_S_READY | ATA_S_DSC;
2003 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
2005 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
/* Interrupt reason: command complete, device-to-host transfer. */
2007 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
2008 ahci_write_fis_d2h(p, slot, cfis, tfd);
2011 * This command is now complete.
2013 p->pending &= ~(1 << slot);
/* The port may have stop pending, or queued commands to issue. */
2015 ahci_check_stopped(p);
2016 ahci_handle_port(p);
2018 pthread_mutex_unlock(&sc->mtx);
2019 DPRINTF("%s exit", __func__);
/*
 * Allocate the port's i/o request pool, sized to the blockif queue
 * depth, and place every entry on the free list with its completion
 * callback preselected (ATA vs. ATAPI).  The busy list starts empty.
 * NOTE(review): calloc() return value is not checked on the visible
 * line — an allocation failure would be dereferenced later; confirm
 * against the full source whether this is handled.
 */
2023 pci_ahci_ioreq_init(struct ahci_port *pr)
2025 struct ahci_ioreq *vr;
2028 pr->ioqsz = blockif_queuesz(pr->bctx);
2029 pr->ioreq = calloc(pr->ioqsz, sizeof(struct ahci_ioreq));
2030 STAILQ_INIT(&pr->iofhd);
2033 * Add all i/o request entries to the free queue
2035 for (i = 0; i < pr->ioqsz; i++) {
/* Completion callback depends on the port's device type. */
2039 vr->io_req.br_callback = ata_ioreq_cb;
2041 vr->io_req.br_callback = atapi_ioreq_cb;
2042 vr->io_req.br_param = vr;
2043 STAILQ_INSERT_TAIL(&pr->iofhd, vr, io_flist);
2046 TAILQ_INIT(&pr->iobhd);
/*
 * Handle a 4-byte guest write to a per-port AHCI register.  'offset' on
 * entry is relative to BAR 5; it is reduced to the port-relative offset
 * and the port index below.  The visible cases cover PxIE, PxCMD
 * (start/stop of the command list and FIS receive, CLO, ICC), read-only
 * registers, and PxSCTL (DET reset handling while the port is stopped).
 * Caller holds sc->mtx (see pci_ahci_write()).
 * NOTE(review): elided view; the surrounding switch and several cases
 * are not visible here.
 */
2050 pci_ahci_port_write(struct pci_ahci_softc *sc, uint64_t offset, uint64_t value)
2052 int port = (offset - AHCI_OFFSET) / AHCI_STEP;
2053 offset = (offset - AHCI_OFFSET) % AHCI_STEP;
2054 struct ahci_port *p = &sc->port[port];
2056 DPRINTF("pci_ahci_port %d: write offset 0x%"PRIx64" value 0x%"PRIx64"",
2057 port, offset, value);
/* PxIE: only the architecturally writable interrupt-enable bits. */
2077 p->ie = value & 0xFDC000FF;
/* PxCMD: replace the software-writable bits with the written value,
 * preserving the hardware-owned status bits. */
2082 p->cmd &= ~(AHCI_P_CMD_ST | AHCI_P_CMD_SUD | AHCI_P_CMD_POD |
2083 AHCI_P_CMD_CLO | AHCI_P_CMD_FRE | AHCI_P_CMD_APSTE |
2084 AHCI_P_CMD_ATAPI | AHCI_P_CMD_DLAE | AHCI_P_CMD_ALPE |
2085 AHCI_P_CMD_ASP | AHCI_P_CMD_ICC_MASK);
2086 p->cmd |= (AHCI_P_CMD_ST | AHCI_P_CMD_SUD | AHCI_P_CMD_POD |
2087 AHCI_P_CMD_CLO | AHCI_P_CMD_FRE | AHCI_P_CMD_APSTE |
2088 AHCI_P_CMD_ATAPI | AHCI_P_CMD_DLAE | AHCI_P_CMD_ALPE |
2089 AHCI_P_CMD_ASP | AHCI_P_CMD_ICC_MASK) & value;
2091 if (!(value & AHCI_P_CMD_ST)) {
/* ST set: command list running; map it from guest memory. */
2096 p->cmd |= AHCI_P_CMD_CR;
2097 clb = (uint64_t)p->clbu << 32 | p->clb;
2098 p->cmd_lst = paddr_guest2host(ahci_ctx(sc), clb,
2099 AHCI_CL_SIZE * AHCI_MAX_SLOTS);
2102 if (value & AHCI_P_CMD_FRE) {
/* FRE set: FIS receive running; map the received-FIS area. */
2105 p->cmd |= AHCI_P_CMD_FR;
2106 fb = (uint64_t)p->fbu << 32 | p->fb;
2107 /* we don't support FBSCP, so rfis size is 256Bytes */
2108 p->rfis = paddr_guest2host(ahci_ctx(sc), fb, 256);
2110 p->cmd &= ~AHCI_P_CMD_FR;
/* Command-list override: clear BSY/DRQ; bit self-clears. */
2113 if (value & AHCI_P_CMD_CLO) {
2114 p->tfd &= ~(ATA_S_BUSY | ATA_S_DRQ);
2115 p->cmd &= ~AHCI_P_CMD_CLO;
/* Interface communication control transitions complete instantly. */
2118 if (value & AHCI_P_CMD_ICC_MASK) {
2119 p->cmd &= ~AHCI_P_CMD_ICC_MASK;
2122 ahci_handle_port(p);
2128 WPRINTF("pci_ahci_port: read only registers 0x%"PRIx64"", offset);
/* PxSCTL: device-detection reset only honored while port stopped. */
2132 if (!(p->cmd & AHCI_P_CMD_ST)) {
2133 if (value & ATA_SC_DET_RESET)
2145 ahci_handle_port(p);
/*
 * Handle a guest write to a global (HBA) AHCI register.  Visible cases:
 * read-only registers are logged, GHC handles controller reset (HR) and
 * the global interrupt-enable bit, and IS writes clear interrupt-status
 * bits.  Caller holds sc->mtx.
 * NOTE(review): elided view; the switch framing is not visible.
 */
2155 pci_ahci_host_write(struct pci_ahci_softc *sc, uint64_t offset, uint64_t value)
2157 DPRINTF("pci_ahci_host: write offset 0x%"PRIx64" value 0x%"PRIx64"",
2165 DPRINTF("pci_ahci_host: read only registers 0x%"PRIx64"", offset);
/* GHC.HR: full controller reset. */
2168 if (value & AHCI_GHC_HR) {
2172 if (value & AHCI_GHC_IE)
2173 sc->ghc |= AHCI_GHC_IE;
2175 sc->ghc &= ~AHCI_GHC_IE;
/* Re-evaluate interrupt delivery for all ports after IE change. */
2176 ahci_generate_intr(sc, 0xffffffff);
/* IS is write-1-to-clear; re-run interrupt logic for written bits. */
2180 ahci_generate_intr(sc, value);
/*
 * PCI BAR write entry point.  AHCI registers live in BAR 5 and are only
 * written as aligned 4-byte accesses; route the write to the global
 * (HBA) or per-port handler under the softc mutex.
 */
2188 pci_ahci_write(struct vmctx *ctx, int vcpu, struct pci_devinst *pi,
2189 int baridx, uint64_t offset, int size, uint64_t value)
2191 struct pci_ahci_softc *sc = pi->pi_arg;
2193 assert(baridx == 5);
2194 assert((offset % 4) == 0 && size == 4);
2196 pthread_mutex_lock(&sc->mtx);
/* Global registers precede the per-port register banks. */
2198 if (offset < AHCI_OFFSET)
2199 pci_ahci_host_write(sc, offset, value);
2200 else if (offset < AHCI_OFFSET + sc->ports * AHCI_STEP)
2201 pci_ahci_port_write(sc, offset, value);
2203 WPRINTF("pci_ahci: unknown i/o write offset 0x%"PRIx64"", offset);
2205 pthread_mutex_unlock(&sc->mtx);
/*
 * Read a global (HBA) AHCI register.  The visible lines index into the
 * softc's register block as an array of 32-bit words starting at CAP;
 * the surrounding switch/cases are elided from this view.
 */
2209 pci_ahci_host_read(struct pci_ahci_softc *sc, uint64_t offset)
/* Registers CAP..bohc are laid out contiguously in the softc. */
2225 uint32_t *p = &sc->cap;
2226 p += (offset - AHCI_CAP) / sizeof(uint32_t);
2234 DPRINTF("pci_ahci_host: read offset 0x%"PRIx64" value 0x%x",
/*
 * Read a per-port AHCI register.  Derives the port index and
 * port-relative offset, then indexes the port's register block as an
 * array of 32-bit words starting at PxCLB.  Switch framing is elided
 * from this view.
 */
2241 pci_ahci_port_read(struct pci_ahci_softc *sc, uint64_t offset)
2244 int port = (offset - AHCI_OFFSET) / AHCI_STEP;
2245 offset = (offset - AHCI_OFFSET) % AHCI_STEP;
/* Port registers clb..fbs are laid out contiguously in ahci_port. */
2265 uint32_t *p= &sc->port[port].clb;
2266 p += (offset - AHCI_P_CLB) / sizeof(uint32_t);
2275 DPRINTF("pci_ahci_port %d: read offset 0x%"PRIx64" value 0x%x",
2276 port, offset, value);
/*
 * PCI BAR read entry point.  Supports 1/2/4-byte naturally aligned
 * reads: the 4-byte-aligned register is read, then shifted so the
 * requested sub-word lands in the low bits of the return value.
 */
2282 pci_ahci_read(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int baridx,
2283 uint64_t regoff, int size)
2285 struct pci_ahci_softc *sc = pi->pi_arg;
2289 assert(baridx == 5);
2290 assert(size == 1 || size == 2 || size == 4);
2291 assert((regoff & (size - 1)) == 0);
2293 pthread_mutex_lock(&sc->mtx);
2295 offset = regoff & ~0x3; /* round down to a multiple of 4 bytes */
2296 if (offset < AHCI_OFFSET)
2297 value = pci_ahci_host_read(sc, offset);
2298 else if (offset < AHCI_OFFSET + sc->ports * AHCI_STEP)
2299 value = pci_ahci_port_read(sc, offset);
2302 WPRINTF("pci_ahci: unknown i/o read offset 0x%"PRIx64"",
/* Shift the addressed byte/word into the low bits. */
2305 value >>= 8 * (regoff & 0x3);
2307 pthread_mutex_unlock(&sc->mtx);
/*
 * Common device-model init for ahci-hd/ahci-cd.  Parses the option
 * string into per-port "hd:"/"cd:" backing images, opens each with
 * blockif, derives a serial-number-style ident from an MD5 of the
 * image path, sizes the command-slot count to the smallest blockif
 * queue, then programs CAP/PI and the PCI config space (Intel ICH8
 * AHCI identity) and allocates BAR 5.
 * 'atapi' selects the default type for the first port option.
 * Returns 0 on success; on blockif_open failure the visible cleanup
 * closes any already-opened ports.
 * NOTE(review): elided view — error paths, MD5Init, and several
 * assignments between the visible lines are not shown.
 * NOTE(review): leftover debug artifact — fopen("/tmp/log") for the
 * dbg stream; calloc() return is also unchecked on the visible line.
 */
2313 pci_ahci_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts, int atapi)
2315 char bident[sizeof("XX:XX:XX")];
2316 struct blockif_ctxt *bctxt;
2317 struct pci_ahci_softc *sc;
2326 dbg = fopen("/tmp/log", "w+");
2329 sc = calloc(1, sizeof(struct pci_ahci_softc));
2332 pthread_mutex_init(&sc->mtx, NULL);
2337 for (p = 0; p < MAX_PORTS && opts != NULL; p++, opts = next) {
2338 /* Identify and cut off type of present port. */
2339 if (strncmp(opts, "hd:", 3) == 0) {
2342 } else if (strncmp(opts, "cd:", 3) == 0) {
2347 /* Find and cut off the next port options. */
2348 next = strstr(opts, ",hd:");
2349 next2 = strstr(opts, ",cd:");
2350 if (next == NULL || (next2 != NULL && next2 < next))
2361 * Attempt to open the backing image. Use the PCI slot/func
2362 * and the port number for the identifier string.
2364 snprintf(bident, sizeof(bident), "%d:%d:%d", pi->pi_slot,
2366 bctxt = blockif_open(opts, bident);
2367 if (bctxt == NULL) {
2372 sc->port[p].bctx = bctxt;
2373 sc->port[p].pr_sc = sc;
2374 sc->port[p].port = p;
2375 sc->port[p].atapi = atapi;
2378 * Create an identifier for the backing file.
2379 * Use parts of the md5 sum of the filename
2382 MD5Update(&mdctx, opts, strlen(opts));
2383 MD5Final(digest, &mdctx);
2384 snprintf(sc->port[p].ident, AHCI_PORT_IDENT,
2385 "BHYVE-%02X%02X-%02X%02X-%02X%02X",
2386 digest[0], digest[1], digest[2], digest[3], digest[4],
2390 * Allocate blockif request structures and add them
2393 pci_ahci_ioreq_init(&sc->port[p]);
/* Advertise only as many slots as the shallowest queue. */
2396 if (sc->port[p].ioqsz < slots)
2397 slots = sc->port[p].ioqsz;
2401 /* Intel ICH8 AHCI */
2403 if (sc->ports < DEF_PORTS)
2404 sc->ports = DEF_PORTS;
2405 sc->cap = AHCI_CAP_64BIT | AHCI_CAP_SNCQ | AHCI_CAP_SSNTF |
2406 AHCI_CAP_SMPS | AHCI_CAP_SSS | AHCI_CAP_SALP |
2407 AHCI_CAP_SAL | AHCI_CAP_SCLO | (0x3 << AHCI_CAP_ISS_SHIFT)|
2408 AHCI_CAP_PMD | AHCI_CAP_SSC | AHCI_CAP_PSC |
2409 (slots << AHCI_CAP_NCS_SHIFT) | AHCI_CAP_SXS | (sc->ports - 1);
2412 sc->cap2 = AHCI_CAP2_APST;
/* Present as Intel ICH8 AHCI SATA controller. */
2415 pci_set_cfgdata16(pi, PCIR_DEVICE, 0x2821);
2416 pci_set_cfgdata16(pi, PCIR_VENDOR, 0x8086);
2417 pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_STORAGE);
2418 pci_set_cfgdata8(pi, PCIR_SUBCLASS, PCIS_STORAGE_SATA);
2419 pci_set_cfgdata8(pi, PCIR_PROGIF, PCIP_STORAGE_SATA_AHCI_1_0);
/* MSI vector count: smallest power of two >= min(ports, 16). */
2420 p = MIN(sc->ports, 16);
2421 p = flsl(p) - ((p & (p - 1)) ? 0 : 1);
2422 pci_emul_add_msicap(pi, 1 << p);
2423 pci_emul_alloc_bar(pi, 5, PCIBAR_MEM32,
2424 AHCI_OFFSET + sc->ports * AHCI_STEP);
2426 pci_lintr_request(pi);
/* Error path: close any backing images opened so far. */
2430 for (p = 0; p < sc->ports; p++) {
2431 if (sc->port[p].bctx != NULL)
2432 blockif_close(sc->port[p].bctx);
/* "ahci"/"ahci-hd" entry point: initialize with hard-disk default. */
2441 pci_ahci_hd_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
2444 return (pci_ahci_init(ctx, pi, opts, 0));
/* "ahci-cd" entry point: initialize with ATAPI (cd) default. */
2448 pci_ahci_atapi_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
2451 return (pci_ahci_init(ctx, pi, opts, 1));
2454 #ifdef BHYVE_SNAPSHOT
/*
 * Snapshot a port's i/o request queues.  Each queue is saved as a
 * sequence of pool indices (ioreq - port->ioreq); busy-list entries
 * additionally snapshot their in-flight blockif request.  A sentinel
 * index (written by the elided lines after each loop) presumably
 * terminates each list — confirm against the full source.
 */
2456 pci_ahci_snapshot_save_queues(struct ahci_port *port,
2457 struct vm_snapshot_meta *meta)
2461 struct ahci_ioreq *ioreq;
/* Free list: indices only; these requests carry no i/o state. */
2463 STAILQ_FOREACH(ioreq, &port->iofhd, io_flist) {
2464 idx = ((void *) ioreq - (void *) port->ioreq) / sizeof(*ioreq);
2465 SNAPSHOT_VAR_OR_LEAVE(idx, meta, ret, done);
2469 SNAPSHOT_VAR_OR_LEAVE(idx, meta, ret, done);
2471 TAILQ_FOREACH(ioreq, &port->iobhd, io_blist) {
2472 idx = ((void *) ioreq - (void *) port->ioreq) / sizeof(*ioreq);
2473 SNAPSHOT_VAR_OR_LEAVE(idx, meta, ret, done);
2476 * Snapshot only the busy requests; other requests are
2479 ret = blockif_snapshot_req(&ioreq->io_req, meta);
2481 fprintf(stderr, "%s: failed to snapshot req\r\n",
2488 SNAPSHOT_VAR_OR_LEAVE(idx, meta, ret, done);
/*
 * Restore a port's i/o request queues from a snapshot: rebuild the
 * free list from saved indices, then the busy list — restoring each
 * busy request's blockif state and re-submitting it to the block
 * interface as a read or write so the i/o completes after resume.
 * NOTE(review): the loop conditions/terminators are in elided lines.
 */
2495 pci_ahci_snapshot_restore_queues(struct ahci_port *port,
2496 struct vm_snapshot_meta *meta)
2500 struct ahci_ioreq *ioreq;
2502 /* Empty the free queue before restoring. */
2503 while (!STAILQ_EMPTY(&port->iofhd))
2504 STAILQ_REMOVE_HEAD(&port->iofhd, io_flist);
2506 /* Restore the free queue. */
2508 SNAPSHOT_VAR_OR_LEAVE(idx, meta, ret, done);
2512 STAILQ_INSERT_TAIL(&port->iofhd, &port->ioreq[idx], io_flist);
2515 /* Restore the busy queue. */
2517 SNAPSHOT_VAR_OR_LEAVE(idx, meta, ret, done);
2521 ioreq = &port->ioreq[idx];
2522 TAILQ_INSERT_TAIL(&port->iobhd, ioreq, io_blist);
2525 * Restore only the busy requests; other requests are
2528 ret = blockif_snapshot_req(&ioreq->io_req, meta);
2530 fprintf(stderr, "%s: failed to restore request\r\n",
2535 /* Re-enqueue the requests in the block interface. */
2537 ret = blockif_read(port->bctx, &ioreq->io_req);
2539 ret = blockif_write(port->bctx, &ioreq->io_req);
2543 "%s: failed to re-enqueue request\r\n",
/*
 * Save/restore the whole AHCI controller state for live snapshot.
 * Serializes the HBA-global registers, then per-port: the backing
 * context presence (used as a save/restore consistency check), the
 * mapped command-list and RFIS guest addresses, the software port
 * state, the per-port AHCI registers, the i/o request pool, and
 * finally the queue lists and blockif state.
 * NOTE(review): runs without taking sc->mtx (see TODO below) —
 * presumably safe because vCPUs are frozen during snapshot; confirm.
 * NOTE(review): elided view; early-exit/cleanup framing not visible.
 */
2554 pci_ahci_snapshot(struct vm_snapshot_meta *meta)
2558 struct pci_devinst *pi;
2559 struct pci_ahci_softc *sc;
2560 struct ahci_port *port;
2561 struct ahci_cmd_hdr *hdr;
2562 struct ahci_ioreq *ioreq;
2564 pi = meta->dev_data;
2567 /* TODO: add mtx lock/unlock */
/* HBA-global register state. */
2569 SNAPSHOT_VAR_OR_LEAVE(sc->ports, meta, ret, done);
2570 SNAPSHOT_VAR_OR_LEAVE(sc->cap, meta, ret, done);
2571 SNAPSHOT_VAR_OR_LEAVE(sc->ghc, meta, ret, done);
2572 SNAPSHOT_VAR_OR_LEAVE(sc->is, meta, ret, done);
2573 SNAPSHOT_VAR_OR_LEAVE(sc->pi, meta, ret, done);
2574 SNAPSHOT_VAR_OR_LEAVE(sc->vs, meta, ret, done);
2575 SNAPSHOT_VAR_OR_LEAVE(sc->ccc_ctl, meta, ret, done);
2576 SNAPSHOT_VAR_OR_LEAVE(sc->ccc_pts, meta, ret, done);
2577 SNAPSHOT_VAR_OR_LEAVE(sc->em_loc, meta, ret, done);
2578 SNAPSHOT_VAR_OR_LEAVE(sc->em_ctl, meta, ret, done);
2579 SNAPSHOT_VAR_OR_LEAVE(sc->cap2, meta, ret, done);
2580 SNAPSHOT_VAR_OR_LEAVE(sc->bohc, meta, ret, done);
2581 SNAPSHOT_VAR_OR_LEAVE(sc->lintr, meta, ret, done);
2583 for (i = 0; i < MAX_PORTS; i++) {
2584 port = &sc->port[i];
2586 if (meta->op == VM_SNAPSHOT_SAVE)
2589 SNAPSHOT_VAR_OR_LEAVE(bctx, meta, ret, done);
2590 SNAPSHOT_VAR_OR_LEAVE(port->port, meta, ret, done);
2592 /* Mostly for restore; save is ensured by the lines above. */
/* Snapshot and live config must agree on which ports exist. */
2593 if (((bctx == NULL) && (port->bctx != NULL)) ||
2594 ((bctx != NULL) && (port->bctx == NULL))) {
2595 fprintf(stderr, "%s: ports not matching\r\n", __func__);
2600 if (port->bctx == NULL)
2603 if (port->port != i) {
2604 fprintf(stderr, "%s: ports not matching: "
2605 "actual: %d expected: %d\r\n",
2606 __func__, port->port, i);
/* Re-translate the guest command list and RFIS mappings. */
2611 SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(port->cmd_lst,
2612 AHCI_CL_SIZE * AHCI_MAX_SLOTS, false, meta, ret, done);
2613 SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(port->rfis, 256, false, meta,
/* Software (emulation) port state. */
2616 SNAPSHOT_VAR_OR_LEAVE(port->ident, meta, ret, done);
2617 SNAPSHOT_VAR_OR_LEAVE(port->atapi, meta, ret, done);
2618 SNAPSHOT_VAR_OR_LEAVE(port->reset, meta, ret, done);
2619 SNAPSHOT_VAR_OR_LEAVE(port->waitforclear, meta, ret, done);
2620 SNAPSHOT_VAR_OR_LEAVE(port->mult_sectors, meta, ret, done);
2621 SNAPSHOT_VAR_OR_LEAVE(port->xfermode, meta, ret, done);
2622 SNAPSHOT_VAR_OR_LEAVE(port->err_cfis, meta, ret, done);
2623 SNAPSHOT_VAR_OR_LEAVE(port->sense_key, meta, ret, done);
2624 SNAPSHOT_VAR_OR_LEAVE(port->asc, meta, ret, done);
2625 SNAPSHOT_VAR_OR_LEAVE(port->ccs, meta, ret, done);
2626 SNAPSHOT_VAR_OR_LEAVE(port->pending, meta, ret, done);
/* Architectural per-port AHCI registers. */
2628 SNAPSHOT_VAR_OR_LEAVE(port->clb, meta, ret, done);
2629 SNAPSHOT_VAR_OR_LEAVE(port->clbu, meta, ret, done);
2630 SNAPSHOT_VAR_OR_LEAVE(port->fb, meta, ret, done);
2631 SNAPSHOT_VAR_OR_LEAVE(port->fbu, meta, ret, done);
2632 SNAPSHOT_VAR_OR_LEAVE(port->ie, meta, ret, done);
2633 SNAPSHOT_VAR_OR_LEAVE(port->cmd, meta, ret, done);
2634 SNAPSHOT_VAR_OR_LEAVE(port->unused0, meta, ret, done);
2635 SNAPSHOT_VAR_OR_LEAVE(port->tfd, meta, ret, done);
2636 SNAPSHOT_VAR_OR_LEAVE(port->sig, meta, ret, done);
2637 SNAPSHOT_VAR_OR_LEAVE(port->ssts, meta, ret, done);
2638 SNAPSHOT_VAR_OR_LEAVE(port->sctl, meta, ret, done);
2639 SNAPSHOT_VAR_OR_LEAVE(port->serr, meta, ret, done);
2640 SNAPSHOT_VAR_OR_LEAVE(port->sact, meta, ret, done);
2641 SNAPSHOT_VAR_OR_LEAVE(port->ci, meta, ret, done);
2642 SNAPSHOT_VAR_OR_LEAVE(port->sntf, meta, ret, done);
2643 SNAPSHOT_VAR_OR_LEAVE(port->fbs, meta, ret, done);
2644 SNAPSHOT_VAR_OR_LEAVE(port->ioqsz, meta, ret, done);
2646 for (j = 0; j < port->ioqsz; j++) {
2647 ioreq = &port->ioreq[j];
2649 /* blockif_req snapshot done only for busy requests. */
2650 hdr = (struct ahci_cmd_hdr *)(port->cmd_lst +
2651 ioreq->slot * AHCI_CL_SIZE);
2652 SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(ioreq->cfis,
2653 0x80 + hdr->prdtl * sizeof(struct ahci_prdt_entry),
2654 false, meta, ret, done);
2656 SNAPSHOT_VAR_OR_LEAVE(ioreq->len, meta, ret, done);
2657 SNAPSHOT_VAR_OR_LEAVE(ioreq->done, meta, ret, done);
2658 SNAPSHOT_VAR_OR_LEAVE(ioreq->slot, meta, ret, done);
2659 SNAPSHOT_VAR_OR_LEAVE(ioreq->more, meta, ret, done);
2660 SNAPSHOT_VAR_OR_LEAVE(ioreq->readop, meta, ret, done);
2663 /* Perform save / restore specific operations. */
2664 if (meta->op == VM_SNAPSHOT_SAVE) {
2665 ret = pci_ahci_snapshot_save_queues(port, meta);
2668 } else if (meta->op == VM_SNAPSHOT_RESTORE) {
2669 ret = pci_ahci_snapshot_restore_queues(port, meta);
2677 ret = blockif_snapshot(port->bctx, meta);
2679 fprintf(stderr, "%s: failed to restore blockif\r\n",
/*
 * Snapshot pause hook: quiesce block i/o on every configured port so
 * no requests complete while state is being saved.
 */
2690 pci_ahci_pause(struct vmctx *ctx, struct pci_devinst *pi)
2692 struct pci_ahci_softc *sc;
2693 struct blockif_ctxt *bctxt;
2698 for (i = 0; i < MAX_PORTS; i++) {
2699 bctxt = sc->port[i].bctx;
/* Ports without a backing image are skipped (elided check). */
2703 blockif_pause(bctxt);
/*
 * Snapshot resume hook: restart block i/o on every configured port,
 * the counterpart of pci_ahci_pause().
 */
2710 pci_ahci_resume(struct vmctx *ctx, struct pci_devinst *pi)
2712 struct pci_ahci_softc *sc;
2713 struct blockif_ctxt *bctxt;
2718 for (i = 0; i < MAX_PORTS; i++) {
2719 bctxt = sc->port[i].bctx;
/* Ports without a backing image are skipped (elided check). */
2723 blockif_resume(bctxt);
2731 * Use separate emulation names to distinguish drive and atapi devices
/* Legacy "ahci" emulation: hard-disk init path (pe_emu assignment is
 * in an elided line). */
2733 struct pci_devemu pci_de_ahci = {
2735 .pe_init = pci_ahci_hd_init,
2736 .pe_barwrite = pci_ahci_write,
2737 .pe_barread = pci_ahci_read,
2738 #ifdef BHYVE_SNAPSHOT
2739 .pe_snapshot = pci_ahci_snapshot,
2740 .pe_pause = pci_ahci_pause,
2741 .pe_resume = pci_ahci_resume,
2744 PCI_EMUL_SET(pci_de_ahci);
/* "ahci-hd": SATA hard-disk emulation descriptor. */
2746 struct pci_devemu pci_de_ahci_hd = {
2747 .pe_emu = "ahci-hd",
2748 .pe_init = pci_ahci_hd_init,
2749 .pe_barwrite = pci_ahci_write,
2750 .pe_barread = pci_ahci_read,
2751 #ifdef BHYVE_SNAPSHOT
2752 .pe_snapshot = pci_ahci_snapshot,
2753 .pe_pause = pci_ahci_pause,
2754 .pe_resume = pci_ahci_resume,
2757 PCI_EMUL_SET(pci_de_ahci_hd);
/* "ahci-cd": SATA ATAPI (cd-rom) emulation descriptor. */
2759 struct pci_devemu pci_de_ahci_cd = {
2760 .pe_emu = "ahci-cd",
2761 .pe_init = pci_ahci_atapi_init,
2762 .pe_barwrite = pci_ahci_write,
2763 .pe_barread = pci_ahci_read,
2764 #ifdef BHYVE_SNAPSHOT
2765 .pe_snapshot = pci_ahci_snapshot,
2766 .pe_pause = pci_ahci_pause,
2767 .pe_resume = pci_ahci_resume,
2770 PCI_EMUL_SET(pci_de_ahci_cd);