2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2013 Zhixiang Yu <zcore@freebsd.org>
5 * Copyright (c) 2015-2016 Alexander Motin <mav@FreeBSD.org>
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 #include <sys/param.h>
31 #include <sys/linker_set.h>
34 #include <sys/ioctl.h>
37 #include <sys/endian.h>
49 #include <pthread_np.h>
63 #define DEF_PORTS 6 /* Intel ICH8 AHCI supports 6 ports */
64 #define MAX_PORTS 32 /* AHCI supports 32 ports */
66 #define PxSIG_ATA 0x00000101 /* ATA drive */
67 #define PxSIG_ATAPI 0xeb140101 /* ATAPI drive */
70 FIS_TYPE_REGH2D = 0x27, /* Register FIS - host to device */
71 FIS_TYPE_REGD2H = 0x34, /* Register FIS - device to host */
72 FIS_TYPE_DMAACT = 0x39, /* DMA activate FIS - device to host */
73 FIS_TYPE_DMASETUP = 0x41, /* DMA setup FIS - bidirectional */
74 FIS_TYPE_DATA = 0x46, /* Data FIS - bidirectional */
75 FIS_TYPE_BIST = 0x58, /* BIST activate FIS - bidirectional */
76 FIS_TYPE_PIOSETUP = 0x5F, /* PIO setup FIS - device to host */
77 FIS_TYPE_SETDEVBITS = 0xA1, /* Set dev bits FIS - device to host */
83 #define TEST_UNIT_READY 0x00
84 #define REQUEST_SENSE 0x03
86 #define START_STOP_UNIT 0x1B
87 #define PREVENT_ALLOW 0x1E
88 #define READ_CAPACITY 0x25
90 #define POSITION_TO_ELEMENT 0x2B
92 #define GET_EVENT_STATUS_NOTIFICATION 0x4A
93 #define MODE_SENSE_10 0x5A
94 #define REPORT_LUNS 0xA0
99 * SCSI mode page codes
101 #define MODEPAGE_RW_ERROR_RECOVERY 0x01
102 #define MODEPAGE_CD_CAPABILITIES 0x2A
107 #define ATA_SF_ENAB_SATA_SF 0x10
108 #define ATA_SATA_SF_AN 0x05
109 #define ATA_SF_DIS_SATA_SF 0x90
116 #define DPRINTF(format, arg...) do{fprintf(dbg, format, ##arg);fflush(dbg);}while(0)
118 #define DPRINTF(format, arg...)
121 #define AHCI_PORT_IDENT 20 + 1
124 struct blockif_req io_req;
125 struct ahci_port *io_pr;
126 STAILQ_ENTRY(ahci_ioreq) io_flist;
127 TAILQ_ENTRY(ahci_ioreq) io_blist;
137 struct blockif_ctxt *bctx;
138 struct pci_ahci_softc *pr_sc;
139 struct ata_params ata_ident;
148 uint8_t err_cfis[20];
175 struct ahci_ioreq *ioreq;
177 STAILQ_HEAD(ahci_fhead, ahci_ioreq) iofhd;
178 TAILQ_HEAD(ahci_bhead, ahci_ioreq) iobhd;
181 struct ahci_cmd_hdr {
186 uint32_t reserved[4];
189 struct ahci_prdt_entry {
192 #define DBCMASK 0x3fffff
196 struct pci_ahci_softc {
197 struct pci_devinst *asc_pi;
212 struct ahci_port port[MAX_PORTS];
214 #define ahci_ctx(sc) ((sc)->asc_pi->pi_vmctx)
216 static void ahci_handle_port(struct ahci_port *p);
/*
 * Split a CD logical block address into minute/second fields
 * (75 frames per second, 60 seconds per minute).
 * NOTE(review): the frame byte (buf[2]) store is elided from this extraction.
 */
218 static inline void lba_to_msf(uint8_t *buf, int lba)
221 buf[0] = (lba / 75) / 60;
222 buf[1] = (lba / 75) % 60;
227 * Generate HBA interrupts on global IS register write.
/*
 * 'mask' selects ports whose PxIS may have changed.  Visible flow: refresh
 * the global IS from each port's PxIS/PxIE, deassert the legacy INTx when
 * nothing is enabled, assert INTx when there is no MSI, otherwise raise the
 * MSI vector for each touched port.
 * NOTE(review): several statements are elided from this extraction.
 */
230 ahci_generate_intr(struct pci_ahci_softc *sc, uint32_t mask)
232 struct pci_devinst *pi = sc->asc_pi;
237 /* Update global IS from PxIS/PxIE. */
238 for (i = 0; i < sc->ports; i++) {
243 DPRINTF("%s(%08x) %08x", __func__, mask, sc->is);
245 /* If there is nothing enabled -- clear legacy interrupt and exit. */
246 if (sc->is == 0 || (sc->ghc & AHCI_GHC_IE) == 0) {
248 pci_lintr_deassert(pi);
254 /* If there is anything and no MSI -- assert legacy interrupt. */
255 nmsg = pci_msi_maxmsgnum(pi);
259 pci_lintr_assert(pi);
264 /* Assert respective MSIs for ports that were touched. */
265 for (i = 0; i < nmsg; i++) {
/* Last vector is shared by all ports beyond the MSI message count. */
266 if (sc->ports <= nmsg || i < nmsg - 1)
269 mmask = 0xffffffff << i;
270 if (sc->is & mask && mmask & mask)
271 pci_generate_msi(pi, i);
276 * Generate HBA interrupt on specific port event.
/*
 * Raise an interrupt for one port.  With a dedicated (non-shared) MSI
 * vector the interrupt is generated unconditionally; otherwise the port
 * bit is folded into the shared global IS and either the last MSI vector
 * or the legacy INTx line is used.
 * NOTE(review): several statements are elided from this extraction.
 */
279 ahci_port_intr(struct ahci_port *p)
281 struct pci_ahci_softc *sc = p->pr_sc;
282 struct pci_devinst *pi = sc->asc_pi;
285 DPRINTF("%s(%d) %08x/%08x %08x", __func__,
286 p->port, p->is, p->ie, sc->is);
288 /* If there is nothing enabled -- we are done. */
289 if ((p->is & p->ie) == 0)
292 /* In case of non-shared MSI always generate interrupt. */
293 nmsg = pci_msi_maxmsgnum(pi);
294 if (sc->ports <= nmsg || p->port < nmsg - 1) {
295 sc->is |= (1 << p->port);
296 if ((sc->ghc & AHCI_GHC_IE) == 0)
298 pci_generate_msi(pi, p->port);
302 /* If IS for this port is already set -- do nothing. */
303 if (sc->is & (1 << p->port))
306 sc->is |= (1 << p->port);
308 /* If interrupts are enabled -- generate one. */
309 if ((sc->ghc & AHCI_GHC_IE) == 0)
312 pci_generate_msi(pi, nmsg - 1);
313 } else if (!sc->lintr) {
315 pci_lintr_assert(pi);
/*
 * Copy a FIS into the port's received-FIS area at the offset dictated by
 * its type, and record the matching PxIS interrupt bit when the FIS has
 * the interrupt flag (bit 6 of byte 1) set.  Bails out when FIS receive
 * is not enabled (PxCMD.FRE clear) or no RFIS area is mapped.
 * NOTE(review): per-type offset/len assignments are elided from this
 * extraction; only the irq selection lines are visible.
 */
320 ahci_write_fis(struct ahci_port *p, enum sata_fis_type ft, uint8_t *fis)
322 int offset, len, irq;
324 if (p->rfis == NULL || !(p->cmd & AHCI_P_CMD_FRE))
328 case FIS_TYPE_REGD2H:
331 irq = (fis[1] & (1 << 6)) ? AHCI_P_IX_DHR : 0;
333 case FIS_TYPE_SETDEVBITS:
336 irq = (fis[1] & (1 << 6)) ? AHCI_P_IX_SDB : 0;
338 case FIS_TYPE_PIOSETUP:
341 irq = (fis[1] & (1 << 6)) ? AHCI_P_IX_PS : 0;
344 EPRINTLN("unsupported fis type %d", ft);
/* A FIS carrying an error status additionally latches a task-file error. */
347 if (fis[2] & ATA_S_ERROR) {
349 irq |= AHCI_P_IX_TFE;
351 memcpy(p->rfis + offset, fis, len);
/*
 * Emit a minimal (zeroed) PIO Setup FIS to the guest.
 */
361 ahci_write_fis_piosetup(struct ahci_port *p)
365 memset(fis, 0, sizeof(fis));
366 fis[0] = FIS_TYPE_PIOSETUP;
367 ahci_write_fis(p, FIS_TYPE_PIOSETUP, fis);
/*
 * Complete an NCQ command: emit a Set Device Bits FIS for 'slot' with the
 * task-file status in 'tfd'.  On error the failing CFIS is latched in
 * p->err_cfis for later retrieval via READ LOG EXT page 0x10.  The slot's
 * SACT bit is cleared and its bit reported in the SDB FIS payload.
 * NOTE(review): some statements are elided from this extraction.
 */
371 ahci_write_fis_sdb(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t tfd)
376 error = (tfd >> 8) & 0xff;
378 memset(fis, 0, sizeof(fis));
379 fis[0] = FIS_TYPE_SETDEVBITS;
383 if (fis[2] & ATA_S_ERROR) {
384 p->err_cfis[0] = slot;
385 p->err_cfis[2] = tfd;
386 p->err_cfis[3] = error;
387 memcpy(&p->err_cfis[4], cfis + 4, 16);
389 *(uint32_t *)(fis + 4) = (1 << slot);
390 p->sact &= ~(1 << slot);
394 ahci_write_fis(p, FIS_TYPE_SETDEVBITS, fis);
/*
 * Complete a non-NCQ command: emit a Register D2H FIS for 'slot' carrying
 * the task-file value 'tfd' (status in the low byte, error in the high
 * byte).  On error the CFIS is latched in p->err_cfis (tag 0x80 = non-NCQ).
 * The slot's CI bit is cleared.
 * NOTE(review): the FIS field population lines are elided from this
 * extraction.
 */
398 ahci_write_fis_d2h(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t tfd)
403 error = (tfd >> 8) & 0xff;
404 memset(fis, 0, sizeof(fis));
405 fis[0] = FIS_TYPE_REGD2H;
419 if (fis[2] & ATA_S_ERROR) {
420 p->err_cfis[0] = 0x80;
421 p->err_cfis[2] = tfd & 0xff;
422 p->err_cfis[3] = error;
423 memcpy(&p->err_cfis[4], cfis + 4, 16);
425 p->ci &= ~(1 << slot);
427 ahci_write_fis(p, FIS_TYPE_REGD2H, fis);
/*
 * Acknowledge acceptance of an NCQ command: send a no-interrupt Register
 * D2H FIS with READY|DSC status and clear the slot's CI bit.  Final NCQ
 * completion is reported separately via a Set Device Bits FIS.
 */
431 ahci_write_fis_d2h_ncq(struct ahci_port *p, int slot)
435 p->tfd = ATA_S_READY | ATA_S_DSC;
436 memset(fis, 0, sizeof(fis));
437 fis[0] = FIS_TYPE_REGD2H;
438 fis[1] = 0; /* No interrupt */
439 fis[2] = p->tfd; /* Status */
440 fis[3] = 0; /* No error */
441 p->ci &= ~(1 << slot);
442 ahci_write_fis(p, FIS_TYPE_REGD2H, fis);
/*
 * Emit the initial Register D2H FIS after a device/port reset.
 * NOTE(review): the signature-byte assignments are elided from this
 * extraction.
 */
446 ahci_write_reset_fis_d2h(struct ahci_port *p)
450 memset(fis, 0, sizeof(fis));
451 fis[0] = FIS_TYPE_REGD2H;
459 ahci_write_fis(p, FIS_TYPE_REGD2H, fis);
/*
 * Finalize a port stop once it has quiesced.
 */
463 ahci_check_stopped(struct ahci_port *p)
466 * If we are no longer processing the command list and nothing
467 * is in-flight, clear the running bit, the current command
468 * slot, the command issue and active bits.
470 if (!(p->cmd & AHCI_P_CMD_ST)) {
471 if (p->pending == 0) {
473 p->cmd &= ~(AHCI_P_CMD_CR | AHCI_P_CMD_CCS_MASK);
/*
 * Stop a port: cancel every in-flight blockif request on the busy list,
 * clear the corresponding SACT (NCQ) or CI (non-NCQ) bit, and return each
 * request to the free list.  Caller must hold the softc mutex (asserted).
 * NOTE(review): interior statements (e.g. recovery of the request's cfis
 * and slot) are elided from this extraction.
 */
482 ahci_port_stop(struct ahci_port *p)
484 struct ahci_ioreq *aior;
489 assert(pthread_mutex_isowned_np(&p->pr_sc->mtx));
491 TAILQ_FOREACH(aior, &p->iobhd, io_blist) {
493 * Try to cancel the outstanding blockif request.
495 error = blockif_cancel(p->bctx, &aior->io_req);
/* NCQ commands were tracked in SACT; everything else in CI. */
501 if (cfis[2] == ATA_WRITE_FPDMA_QUEUED ||
502 cfis[2] == ATA_READ_FPDMA_QUEUED ||
503 cfis[2] == ATA_SEND_FPDMA_QUEUED)
504 p->sact &= ~(1 << slot); /* NCQ */
506 p->ci &= ~(1 << slot);
509 * This command is now done.
511 p->pending &= ~(1 << slot);
514 * Delete the blockif request from the busy list
516 TAILQ_REMOVE(&p->iobhd, aior, io_blist);
519 * Move the blockif request back to the free list
521 STAILQ_INSERT_TAIL(&p->iofhd, aior, io_flist);
524 ahci_check_stopped(p);
/*
 * Reset one port to its power-on defaults.  Without an attached backend
 * the port reports "no device"; otherwise PHY online, negotiated speed
 * (honoring any SControl speed limit, else Gen3), a busy task file, and
 * the ATA or ATAPI signature followed by the reset D2H FIS.
 * NOTE(review): interior statements are elided from this extraction.
 */
528 ahci_port_reset(struct ahci_port *pr)
532 pr->xfermode = ATA_UDMA6;
533 pr->mult_sectors = 128;
536 pr->ssts = ATA_SS_DET_NO_DEVICE;
537 pr->sig = 0xFFFFFFFF;
541 pr->ssts = ATA_SS_DET_PHY_ONLINE | ATA_SS_IPM_ACTIVE;
542 if (pr->sctl & ATA_SC_SPD_MASK)
543 pr->ssts |= (pr->sctl & ATA_SC_SPD_MASK);
545 pr->ssts |= ATA_SS_SPD_GEN3;
546 pr->tfd = (1 << 8) | ATA_S_DSC | ATA_S_DMA;
549 pr->tfd |= ATA_S_READY;
551 pr->sig = PxSIG_ATAPI;
552 ahci_write_reset_fis_d2h(pr);
/*
 * Controller-level (HBA) reset: restore GHC to AHCI-enable only, drop the
 * legacy interrupt, then reset every port with cold-presence state for
 * ports that have a backing device.
 */
556 ahci_reset(struct pci_ahci_softc *sc)
560 sc->ghc = AHCI_GHC_AE;
564 pci_lintr_deassert(sc->asc_pi);
568 for (i = 0; i < sc->ports; i++) {
571 sc->port[i].cmd = (AHCI_P_CMD_SUD | AHCI_P_CMD_POD);
572 if (sc->port[i].bctx)
573 sc->port[i].cmd |= AHCI_P_CMD_CPS;
574 sc->port[i].sctl = 0;
575 ahci_port_reset(&sc->port[i]);
/*
 * Copy a string into an IDENTIFY field with the byte-swapped layout ATA
 * requires (each 16-bit word holds two swapped characters, hence i ^ 1).
 * NOTE(review): the space-padding branch is elided from this extraction.
 */
580 ata_string(uint8_t *dest, const char *src, int len)
584 for (i = 0; i < len; i++) {
586 dest[i ^ 1] = *src++;
/*
 * Copy a string into a SCSI/ATAPI INQUIRY field (plain byte order).
 * NOTE(review): the loop body is elided from this extraction; presumably
 * it copies then space-pads, mirroring ata_string() -- confirm upstream.
 */
593 atapi_string(uint8_t *dest, const char *src, int len)
597 for (i = 0; i < len; i++) {
606 * Build up the iovec based on the PRDT, 'done' and 'len'.
/*
 * Translate the guest PRDT into breq->br_iov for the portion of the
 * transfer between aior->done and aior->len, skipping already-completed
 * bytes.  When BLOCKIF_IOV_MAX entries are exhausted, the request is
 * rounded down to a sector boundary and aior->more marks a continuation.
 * NOTE(review): loop-advance and accounting lines are elided from this
 * extraction.
 */
609 ahci_build_iov(struct ahci_port *p, struct ahci_ioreq *aior,
610 struct ahci_prdt_entry *prdt, uint16_t prdtl)
612 struct blockif_req *breq = &aior->io_req;
613 uint32_t dbcsz, extra, left, skip, todo;
616 assert(aior->len >= aior->done);
618 /* Copy part of PRDT between 'done' and 'len' bytes into the iov. */
620 left = aior->len - aior->done;
622 for (i = 0, j = 0; i < prdtl && j < BLOCKIF_IOV_MAX && left > 0;
624 dbcsz = (prdt->dbc & DBCMASK) + 1;
625 /* Skip already done part of the PRDT */
633 breq->br_iov[j].iov_base = paddr_guest2host(ahci_ctx(p->pr_sc),
634 prdt->dba + skip, dbcsz);
635 breq->br_iov[j].iov_len = dbcsz;
642 /* If we got limited by IOV length, round I/O down to sector size. */
643 if (j == BLOCKIF_IOV_MAX) {
644 extra = todo % blockif_sectsz(p->bctx);
/* Trim the partial sector off the tail of the iovec. */
648 if (breq->br_iov[j - 1].iov_len > extra) {
649 breq->br_iov[j - 1].iov_len -= extra;
652 extra -= breq->br_iov[j - 1].iov_len;
658 breq->br_resid = todo;
660 aior->more = (aior->done < aior->len && i < prdtl);
/*
 * Handle an ATA read/write command (PIO, DMA, multiword, 48-bit and NCQ
 * variants).  Decodes LBA and sector count from the CFIS per command
 * flavor, converts to byte units, builds the iovec from the PRDT, marks
 * the slot in-flight and submits the request to blockif.  'done' is the
 * byte count already transferred when resuming a split request.
 * NOTE(review): interior statements are elided from this extraction.
 */
664 ahci_handle_rw(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t done)
666 struct ahci_ioreq *aior;
667 struct blockif_req *breq;
668 struct ahci_prdt_entry *prdt;
669 struct ahci_cmd_hdr *hdr;
672 int err, first, ncq, readop;
674 prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
675 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
/* All write flavors clear 'readop'; everything else is a read. */
680 if (cfis[2] == ATA_WRITE || cfis[2] == ATA_WRITE48 ||
681 cfis[2] == ATA_WRITE_MUL || cfis[2] == ATA_WRITE_MUL48 ||
682 cfis[2] == ATA_WRITE_DMA || cfis[2] == ATA_WRITE_DMA48 ||
683 cfis[2] == ATA_WRITE_FPDMA_QUEUED)
/* NCQ: 48-bit LBA spread over FIS bytes; count in bytes 11/3. */
686 if (cfis[2] == ATA_WRITE_FPDMA_QUEUED ||
687 cfis[2] == ATA_READ_FPDMA_QUEUED) {
688 lba = ((uint64_t)cfis[10] << 40) |
689 ((uint64_t)cfis[9] << 32) |
690 ((uint64_t)cfis[8] << 24) |
691 ((uint64_t)cfis[6] << 16) |
692 ((uint64_t)cfis[5] << 8) |
694 len = cfis[11] << 8 | cfis[3];
/* 48-bit (EXT) commands: count in bytes 13/12. */
698 } else if (cfis[2] == ATA_READ48 || cfis[2] == ATA_WRITE48 ||
699 cfis[2] == ATA_READ_MUL48 || cfis[2] == ATA_WRITE_MUL48 ||
700 cfis[2] == ATA_READ_DMA48 || cfis[2] == ATA_WRITE_DMA48) {
701 lba = ((uint64_t)cfis[10] << 40) |
702 ((uint64_t)cfis[9] << 32) |
703 ((uint64_t)cfis[8] << 24) |
704 ((uint64_t)cfis[6] << 16) |
705 ((uint64_t)cfis[5] << 8) |
707 len = cfis[13] << 8 | cfis[12];
/* Legacy 28-bit LBA commands. */
711 lba = ((cfis[7] & 0xf) << 24) | (cfis[6] << 16) |
712 (cfis[5] << 8) | cfis[4];
/* Convert sectors to bytes for blockif. */
717 lba *= blockif_sectsz(p->bctx);
718 len *= blockif_sectsz(p->bctx);
720 /* Pull request off free list */
721 aior = STAILQ_FIRST(&p->iofhd);
722 assert(aior != NULL);
723 STAILQ_REMOVE_HEAD(&p->iofhd, io_flist);
729 aior->readop = readop;
730 breq = &aior->io_req;
731 breq->br_offset = lba + done;
732 ahci_build_iov(p, aior, prdt, hdr->prdtl);
734 /* Mark this command in-flight. */
735 p->pending |= 1 << slot;
737 /* Stuff request onto busy list. */
738 TAILQ_INSERT_HEAD(&p->iobhd, aior, io_blist);
/* NCQ commands acknowledge acceptance immediately. */
741 ahci_write_fis_d2h_ncq(p, slot);
744 err = blockif_read(p->bctx, breq);
746 err = blockif_write(p->bctx, breq);
/*
 * Handle FLUSH CACHE / FLUSH CACHE EXT: take a request from the free
 * list, mark the slot in-flight, queue it on the busy list and submit an
 * asynchronous blockif flush.
 * NOTE(review): aior field initialization lines are elided from this
 * extraction.
 */
751 ahci_handle_flush(struct ahci_port *p, int slot, uint8_t *cfis)
753 struct ahci_ioreq *aior;
754 struct blockif_req *breq;
758 * Pull request off free list
760 aior = STAILQ_FIRST(&p->iofhd);
761 assert(aior != NULL);
762 STAILQ_REMOVE_HEAD(&p->iofhd, io_flist);
768 breq = &aior->io_req;
771 * Mark this command in-flight.
773 p->pending |= 1 << slot;
776 * Stuff request onto busy list
778 TAILQ_INSERT_HEAD(&p->iobhd, aior, io_blist);
780 err = blockif_flush(p->bctx, breq);
/*
 * Copy up to 'len' bytes of guest data described by the slot's PRDT into
 * the host buffer 'buf' (guest -> host direction).
 * NOTE(review): loop-advance and remaining-length bookkeeping lines are
 * elided from this extraction.
 */
785 read_prdt(struct ahci_port *p, int slot, uint8_t *cfis, void *buf,
788 struct ahci_cmd_hdr *hdr;
789 struct ahci_prdt_entry *prdt;
794 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
797 prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
798 for (i = 0; i < hdr->prdtl && len; i++) {
803 dbcsz = (prdt->dbc & DBCMASK) + 1;
804 ptr = paddr_guest2host(ahci_ctx(p->pr_sc), prdt->dba, dbcsz);
805 sublen = MIN(len, dbcsz);
806 memcpy(to, ptr, sublen);
/*
 * Handle DATA SET MANAGEMENT (TRIM) and its NCQ form SEND FPDMA QUEUED.
 * Reads the TRIM range list from the PRDT, decodes one (LBA, length)
 * entry per invocation ('done' indexes the current entry), completes the
 * command when the list is exhausted or the entry is empty, otherwise
 * submits a blockif_delete for the range.
 * NOTE(review): interior statements are elided from this extraction.
 */
814 ahci_handle_dsm_trim(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t done)
816 struct ahci_ioreq *aior;
817 struct blockif_req *breq;
/* Range-list length in 512-byte blocks differs per command flavor. */
825 if (cfis[2] == ATA_DATA_SET_MANAGEMENT) {
826 len = (uint16_t)cfis[13] << 8 | cfis[12];
829 } else { /* ATA_SEND_FPDMA_QUEUED */
830 len = (uint16_t)cfis[11] << 8 | cfis[3];
834 read_prdt(p, slot, cfis, buf, sizeof(buf));
/* Decode one 8-byte TRIM entry: 48-bit LBA + 16-bit sector count. */
838 elba = ((uint64_t)entry[5] << 40) |
839 ((uint64_t)entry[4] << 32) |
840 ((uint64_t)entry[3] << 24) |
841 ((uint64_t)entry[2] << 16) |
842 ((uint64_t)entry[1] << 8) |
844 elen = (uint16_t)entry[7] << 8 | entry[6];
/* Nothing (more) to trim: complete via SDB (NCQ) or D2H FIS. */
850 ahci_write_fis_d2h_ncq(p, slot);
851 ahci_write_fis_sdb(p, slot, cfis,
852 ATA_S_READY | ATA_S_DSC);
854 ahci_write_fis_d2h(p, slot, cfis,
855 ATA_S_READY | ATA_S_DSC);
857 p->pending &= ~(1 << slot);
858 ahci_check_stopped(p);
867 * Pull request off free list
869 aior = STAILQ_FIRST(&p->iofhd);
870 assert(aior != NULL);
871 STAILQ_REMOVE_HEAD(&p->iofhd, io_flist);
876 aior->more = (len != done);
878 breq = &aior->io_req;
879 breq->br_offset = elba * blockif_sectsz(p->bctx);
880 breq->br_resid = elen * blockif_sectsz(p->bctx);
883 * Mark this command in-flight.
885 p->pending |= 1 << slot;
888 * Stuff request onto busy list
890 TAILQ_INSERT_HEAD(&p->iobhd, aior, io_blist);
893 ahci_write_fis_d2h_ncq(p, slot);
895 err = blockif_delete(p->bctx, breq);
/*
 * Copy up to 'len' bytes from the host buffer into the guest memory
 * described by the slot's PRDT (host -> guest direction), and record the
 * transferred byte count in the command header's PRDBC field.
 * NOTE(review): loop-advance and bookkeeping lines are elided from this
 * extraction.
 */
900 write_prdt(struct ahci_port *p, int slot, uint8_t *cfis, void *buf,
903 struct ahci_cmd_hdr *hdr;
904 struct ahci_prdt_entry *prdt;
909 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
912 prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
913 for (i = 0; i < hdr->prdtl && len; i++) {
918 dbcsz = (prdt->dbc & DBCMASK) + 1;
919 ptr = paddr_guest2host(ahci_ctx(p->pr_sc), prdt->dba, dbcsz);
920 sublen = MIN(len, dbcsz);
921 memcpy(ptr, from, sublen);
926 hdr->prdbc = size - len;
/*
 * Fill the final byte of 'buf' so the whole buffer sums to zero modulo
 * 256 -- the checksum convention used by ATA log pages and IDENTIFY data.
 */
930 ahci_checksum(uint8_t *buf, int size)
935 for (i = 0; i < size - 1; i++)
937 buf[size - 1] = 0x100 - sum;
/*
 * Handle READ LOG EXT / READ LOG DMA EXT.  Supports the log directory
 * (page 0x00), the NCQ Command Error log (0x10, returning the latched
 * err_cfis), and the SATA NCQ Send and Receive log (0x13, advertising SFQ
 * DSM TRIM when the backend can delete and is writable).  Rejects ATAPI
 * devices, zero-length PRDTs, multi-page requests and unknown pages with
 * an aborted-command error.
 * NOTE(review): some statements are elided from this extraction.
 */
941 ahci_handle_read_log(struct ahci_port *p, int slot, uint8_t *cfis)
943 struct ahci_cmd_hdr *hdr;
945 uint8_t *buf8 = (uint8_t *)buf;
946 uint16_t *buf16 = (uint16_t *)buf;
948 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
949 if (p->atapi || hdr->prdtl == 0 || cfis[5] != 0 ||
950 cfis[9] != 0 || cfis[12] != 1 || cfis[13] != 0) {
951 ahci_write_fis_d2h(p, slot, cfis,
952 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
956 memset(buf, 0, sizeof(buf));
957 if (cfis[4] == 0x00) { /* Log directory */
958 buf16[0x00] = 1; /* Version -- 1 */
959 buf16[0x10] = 1; /* NCQ Command Error Log -- 1 page */
960 buf16[0x13] = 1; /* SATA NCQ Send and Receive Log -- 1 page */
961 } else if (cfis[4] == 0x10) { /* NCQ Command Error Log */
962 memcpy(buf8, p->err_cfis, sizeof(p->err_cfis));
963 ahci_checksum(buf8, sizeof(buf));
964 } else if (cfis[4] == 0x13) { /* SATA NCQ Send and Receive Log */
965 if (blockif_candelete(p->bctx) && !blockif_is_ro(p->bctx)) {
966 buf[0x00] = 1; /* SFQ DSM supported */
967 buf[0x01] = 1; /* SFQ DSM TRIM supported */
970 ahci_write_fis_d2h(p, slot, cfis,
971 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
/* PIO flavor needs a PIO Setup FIS before the data. */
975 if (cfis[2] == ATA_READ_LOG_EXT)
976 ahci_write_fis_piosetup(p);
977 write_prdt(p, slot, cfis, (void *)buf, sizeof(buf));
978 ahci_write_fis_d2h(p, slot, cfis, ATA_S_DSC | ATA_S_READY);
/*
 * Handle IDENTIFY DEVICE: abort for ATAPI devices or empty PRDTs,
 * otherwise return the precomputed p->ata_ident page preceded by a PIO
 * Setup FIS.
 */
982 handle_identify(struct ahci_port *p, int slot, uint8_t *cfis)
984 struct ahci_cmd_hdr *hdr;
986 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
987 if (p->atapi || hdr->prdtl == 0) {
988 ahci_write_fis_d2h(p, slot, cfis,
989 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
991 ahci_write_fis_piosetup(p);
992 write_prdt(p, slot, cfis, (void*)&p->ata_ident, sizeof(struct ata_params));
993 ahci_write_fis_d2h(p, slot, cfis, ATA_S_DSC | ATA_S_READY);
/*
 * Populate the port's IDENTIFY data page once at attach time.  The ATAPI
 * branch builds an IDENTIFY PACKET DEVICE page for a removable CD-ROM;
 * the ATA branch builds a full disk page from the blockif backend
 * (geometry, LBA28/LBA48 capacity, DMA/PIO modes, SATA capabilities,
 * NCQ depth, TRIM support, logical/physical sector layout).  Finishes
 * with the mandatory word-255 integrity checksum.
 * NOTE(review): many statements (e.g. model/serial strings) are elided
 * from this extraction.
 */
998 ata_identify_init(struct ahci_port* p, int atapi)
1000 struct ata_params* ata_ident = &p->ata_ident;
1003 ata_ident->config = ATA_PROTO_ATAPI | ATA_ATAPI_TYPE_CDROM |
1004 ATA_ATAPI_REMOVABLE | ATA_DRQ_FAST;
1005 ata_ident->capabilities1 = ATA_SUPPORT_LBA |
1007 ata_ident->capabilities2 = (1 << 14 | 1);
1008 ata_ident->atavalid = ATA_FLAG_64_70 | ATA_FLAG_88;
1009 ata_ident->obsolete62 = 0x3f;
1010 ata_ident->mwdmamodes = 7;
1011 if (p->xfermode & ATA_WDMA0)
1012 ata_ident->mwdmamodes |= (1 << ((p->xfermode & 7) + 8));
1013 ata_ident->apiomodes = 3;
1014 ata_ident->mwdmamin = 0x0078;
1015 ata_ident->mwdmarec = 0x0078;
1016 ata_ident->pioblind = 0x0078;
1017 ata_ident->pioiordy = 0x0078;
1018 ata_ident->satacapabilities = (ATA_SATA_GEN1 | ATA_SATA_GEN2 | ATA_SATA_GEN3);
1019 ata_ident->satacapabilities2 = ((p->ssts & ATA_SS_SPD_MASK) >> 3);
1020 ata_ident->satasupport = ATA_SUPPORT_NCQ_STREAM;
1021 ata_ident->version_major = 0x3f0;
1022 ata_ident->support.command1 = (ATA_SUPPORT_POWERMGT | ATA_SUPPORT_PACKET |
1023 ATA_SUPPORT_RESET | ATA_SUPPORT_NOP);
1024 ata_ident->support.command2 = (1 << 14);
1025 ata_ident->support.extension = (1 << 14);
1026 ata_ident->enabled.command1 = (ATA_SUPPORT_POWERMGT | ATA_SUPPORT_PACKET |
1027 ATA_SUPPORT_RESET | ATA_SUPPORT_NOP);
1028 ata_ident->enabled.extension = (1 << 14);
1029 ata_ident->udmamodes = 0x7f;
1030 if (p->xfermode & ATA_UDMA0)
1031 ata_ident->udmamodes |= (1 << ((p->xfermode & 7) + 8));
1032 ata_ident->transport_major = 0x1020;
1033 ata_ident->integrity = 0x00a5;
/* ATA (disk) branch: derive identify data from the blockif backend. */
1036 int sectsz, psectsz, psectoff, candelete, ro;
1038 uint8_t sech, heads;
1040 ro = blockif_is_ro(p->bctx);
1041 candelete = blockif_candelete(p->bctx);
1042 sectsz = blockif_sectsz(p->bctx);
1043 sectors = blockif_size(p->bctx) / sectsz;
1044 blockif_chs(p->bctx, &cyl, &heads, &sech);
1045 blockif_psectsz(p->bctx, &psectsz, &psectoff);
1046 ata_ident->config = ATA_DRQ_FAST;
1047 ata_ident->cylinders = cyl;
1048 ata_ident->heads = heads;
1049 ata_ident->sectors = sech;
1051 ata_ident->sectors_intr = (0x8000 | 128);
1054 ata_ident->capabilities1 = ATA_SUPPORT_DMA |
1055 ATA_SUPPORT_LBA | ATA_SUPPORT_IORDY;
1056 ata_ident->capabilities2 = (1 << 14);
1057 ata_ident->atavalid = ATA_FLAG_64_70 | ATA_FLAG_88;
1058 if (p->mult_sectors)
1059 ata_ident->multi = (ATA_MULTI_VALID | p->mult_sectors);
/* LBA28 capacity saturates at 0x0fffffff sectors. */
1060 if (sectors <= 0x0fffffff) {
1061 ata_ident->lba_size_1 = sectors;
1062 ata_ident->lba_size_2 = (sectors >> 16);
1064 ata_ident->lba_size_1 = 0xffff;
1065 ata_ident->lba_size_2 = 0x0fff;
1067 ata_ident->mwdmamodes = 0x7;
1068 if (p->xfermode & ATA_WDMA0)
1069 ata_ident->mwdmamodes |= (1 << ((p->xfermode & 7) + 8));
1070 ata_ident->apiomodes = 0x3;
1071 ata_ident->mwdmamin = 0x0078;
1072 ata_ident->mwdmarec = 0x0078;
1073 ata_ident->pioblind = 0x0078;
1074 ata_ident->pioiordy = 0x0078;
1075 ata_ident->support3 = 0;
/* Word 75: queue depth - 1, i.e. 32 NCQ tags. */
1076 ata_ident->queue = 31;
1077 ata_ident->satacapabilities = (ATA_SATA_GEN1 | ATA_SATA_GEN2 | ATA_SATA_GEN3 |
1079 ata_ident->satacapabilities2 = (ATA_SUPPORT_RCVSND_FPDMA_QUEUED |
1080 (p->ssts & ATA_SS_SPD_MASK) >> 3);
1081 ata_ident->version_major = 0x3f0;
1082 ata_ident->version_minor = 0x28;
1083 ata_ident->support.command1 = (ATA_SUPPORT_POWERMGT | ATA_SUPPORT_WRITECACHE |
1084 ATA_SUPPORT_LOOKAHEAD | ATA_SUPPORT_NOP);
1085 ata_ident->support.command2 = (ATA_SUPPORT_ADDRESS48 | ATA_SUPPORT_FLUSHCACHE |
1086 ATA_SUPPORT_FLUSHCACHE48 | 1 << 14);
1087 ata_ident->support.extension = (1 << 14);
1088 ata_ident->enabled.command1 = (ATA_SUPPORT_POWERMGT | ATA_SUPPORT_WRITECACHE |
1089 ATA_SUPPORT_LOOKAHEAD | ATA_SUPPORT_NOP);
1090 ata_ident->enabled.command2 = (ATA_SUPPORT_ADDRESS48 | ATA_SUPPORT_FLUSHCACHE |
1091 ATA_SUPPORT_FLUSHCACHE48 | 1 << 15);
1092 ata_ident->enabled.extension = (1 << 14);
1093 ata_ident->udmamodes = 0x7f;
1094 if (p->xfermode & ATA_UDMA0)
1095 ata_ident->udmamodes |= (1 << ((p->xfermode & 7) + 8));
1096 ata_ident->lba_size48_1 = sectors;
1097 ata_ident->lba_size48_2 = (sectors >> 16);
1098 ata_ident->lba_size48_3 = (sectors >> 32);
1099 ata_ident->lba_size48_4 = (sectors >> 48);
/* Advertise TRIM only for deletable, writable backends. */
1101 if (candelete && !ro) {
1102 ata_ident->support3 |= ATA_SUPPORT_RZAT | ATA_SUPPORT_DRAT;
1103 ata_ident->max_dsm_blocks = 1;
1104 ata_ident->support_dsm = ATA_SUPPORT_DSM_TRIM;
1106 ata_ident->pss = ATA_PSS_VALID_VALUE;
1107 ata_ident->lsalign = 0x4000;
/* Physical sector larger than logical: report the ratio and alignment. */
1108 if (psectsz > sectsz) {
1109 ata_ident->pss |= ATA_PSS_MULTLS;
1110 ata_ident->pss |= ffsl(psectsz / sectsz) - 1;
1111 ata_ident->lsalign |= (psectoff / sectsz);
1114 ata_ident->pss |= ATA_PSS_LSSABOVE512;
1115 ata_ident->lss_1 = sectsz / 2;
1116 ata_ident->lss_2 = ((sectsz / 2) >> 16);
1118 ata_ident->support2 = (ATA_SUPPORT_RWLOGDMAEXT | 1 << 14);
1119 ata_ident->enabled2 = (ATA_SUPPORT_RWLOGDMAEXT | 1 << 14);
1120 ata_ident->transport_major = 0x1020;
1121 ata_ident->integrity = 0x00a5;
1123 ahci_checksum((uint8_t*)ata_ident, sizeof(struct ata_params));
/*
 * Handle IDENTIFY PACKET DEVICE: abort for non-ATAPI ports (the rejecting
 * condition line is elided from this extraction), otherwise return the
 * precomputed identify page after a PIO Setup FIS.
 */
1127 handle_atapi_identify(struct ahci_port *p, int slot, uint8_t *cfis)
1130 ahci_write_fis_d2h(p, slot, cfis,
1131 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
1133 ahci_write_fis_piosetup(p);
1134 write_prdt(p, slot, cfis, (void *)&p->ata_ident, sizeof(struct ata_params));
1135 ahci_write_fis_d2h(p, slot, cfis, ATA_S_DSC | ATA_S_READY);
/*
 * Handle SCSI INQUIRY.  Serves the Supported-VPD-pages VPD page, rejects
 * other VPD pages with ILLEGAL REQUEST sense, and otherwise returns a
 * standard inquiry page identifying a BHYVE DVD-ROM.
 * NOTE(review): interior statements are elided from this extraction.
 */
1140 atapi_inquiry(struct ahci_port *p, int slot, uint8_t *cfis)
1149 if (acmd[1] & 1) { /* VPD */
1150 if (acmd[2] == 0) { /* Supported VPD pages */
/* Unsupported VPD page: report ILLEGAL REQUEST. */
1158 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1160 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1161 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1162 ahci_write_fis_d2h(p, slot, cfis, tfd);
1174 atapi_string(buf + 8, "BHYVE", 8);
1175 atapi_string(buf + 16, "BHYVE DVD-ROM", 16);
1176 atapi_string(buf + 32, "001", 4);
1182 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1183 write_prdt(p, slot, cfis, buf, len);
1184 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
/*
 * Handle READ CAPACITY(10): report the last LBA and a fixed 2048-byte
 * CD-ROM block size, big-endian per SCSI.
 */
1188 atapi_read_capacity(struct ahci_port *p, int slot, uint8_t *cfis)
1193 sectors = blockif_size(p->bctx) / 2048;
1194 be32enc(buf, sectors - 1);
1195 be32enc(buf + 4, 2048);
1196 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1197 write_prdt(p, slot, cfis, buf, sizeof(buf));
1198 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
/*
 * Handle READ TOC.  Dispatches on the format field (acmd[9] >> 6):
 * visible branches build a single-track TOC (format 0), a short session
 * descriptor (elided case), and a raw/full TOC (later branch), honoring
 * the MSF bit by encoding the lead-out address either as minute/second/
 * frame or as a big-endian LBA.  Unknown formats produce ILLEGAL REQUEST.
 * NOTE(review): large portions of this function are elided from this
 * extraction.
 */
1202 atapi_read_toc(struct ahci_port *p, int slot, uint8_t *cfis)
1210 len = be16dec(acmd + 7);
1211 format = acmd[9] >> 6;
1218 uint8_t start_track, buf[20], *bp;
1220 msf = (acmd[1] >> 1) & 1;
1221 start_track = acmd[6];
/* Only track 1 or the lead-out (0xaa) may be requested. */
1222 if (start_track > 1 && start_track != 0xaa) {
1224 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1226 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1227 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1228 ahci_write_fis_d2h(p, slot, cfis, tfd);
1234 if (start_track <= 1) {
1254 sectors = blockif_size(p->bctx) / blockif_sectsz(p->bctx);
1258 lba_to_msf(bp, sectors);
1261 be32enc(bp, sectors);
1265 be16enc(buf, size - 2);
1268 write_prdt(p, slot, cfis, buf, len);
1269 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1270 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1277 memset(buf, 0, sizeof(buf));
/* Clamp the transfer to the buffer we actually built. */
1281 if (len > sizeof(buf))
1283 write_prdt(p, slot, cfis, buf, len);
1284 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1285 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1293 uint8_t *bp, buf[50];
1295 msf = (acmd[1] >> 1) & 1;
1331 sectors = blockif_size(p->bctx) / blockif_sectsz(p->bctx);
1335 lba_to_msf(bp, sectors);
1338 be32enc(bp, sectors);
1361 be16enc(buf, size - 2);
1364 write_prdt(p, slot, cfis, buf, len);
1365 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1366 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
/* Unknown TOC format: report ILLEGAL REQUEST. */
1373 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1375 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1376 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1377 ahci_write_fis_d2h(p, slot, cfis, tfd);
/*
 * Handle REPORT LUNS: return a mostly-zero LUN list (single LUN 0).
 * NOTE(review): the list-length store is elided from this extraction.
 */
1384 atapi_report_luns(struct ahci_port *p, int slot, uint8_t *cfis)
1388 memset(buf, 0, sizeof(buf));
1391 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1392 write_prdt(p, slot, cfis, buf, sizeof(buf));
1393 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
/*
 * Handle SCSI READ(10)/READ(12) on an ATAPI device.  Decodes the LBA and
 * transfer length from the packet, completes immediately for zero-length
 * reads, otherwise builds the iovec from the PRDT and submits a blockif
 * read.  'done' is the byte count already transferred when resuming.
 * NOTE(review): interior statements (LBA-to-byte conversion, aior field
 * setup) are elided from this extraction.
 */
1397 atapi_read(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t done)
1399 struct ahci_ioreq *aior;
1400 struct ahci_cmd_hdr *hdr;
1401 struct ahci_prdt_entry *prdt;
1402 struct blockif_req *breq;
1409 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
1410 prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
1412 lba = be32dec(acmd + 2);
1413 if (acmd[0] == READ_10)
1414 len = be16dec(acmd + 7);
1416 len = be32dec(acmd + 6);
/* Zero-length read: complete immediately with success. */
1418 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1419 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1425 * Pull request off free list
1427 aior = STAILQ_FIRST(&p->iofhd);
1428 assert(aior != NULL);
1429 STAILQ_REMOVE_HEAD(&p->iofhd, io_flist);
1435 breq = &aior->io_req;
1436 breq->br_offset = lba + done;
1437 ahci_build_iov(p, aior, prdt, hdr->prdtl);
1439 /* Mark this command in-flight. */
1440 p->pending |= 1 << slot;
1442 /* Stuff request onto busy list. */
1443 TAILQ_INSERT_HEAD(&p->iobhd, aior, io_blist);
1445 err = blockif_read(p->bctx, breq);
/*
 * Handle REQUEST SENSE: return fixed-format sense data (0xf0 = current
 * error, valid bit set) carrying the port's latched sense key.
 * NOTE(review): ASC population lines are elided from this extraction.
 */
1450 atapi_request_sense(struct ahci_port *p, int slot, uint8_t *cfis)
1458 if (len > sizeof(buf))
1460 memset(buf, 0, len);
1461 buf[0] = 0x70 | (1 << 7);
1462 buf[2] = p->sense_key;
1465 write_prdt(p, slot, cfis, buf, len);
1466 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1467 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
/*
 * Handle START STOP UNIT: start/stop requests succeed trivially; eject is
 * unimplemented and answered with ILLEGAL REQUEST sense.
 * NOTE(review): case labels/intervening lines are elided from this
 * extraction.
 */
1471 atapi_start_stop_unit(struct ahci_port *p, int slot, uint8_t *cfis)
1473 uint8_t *acmd = cfis + 0x40;
1476 switch (acmd[4] & 3) {
1480 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1481 tfd = ATA_S_READY | ATA_S_DSC;
1484 /* TODO eject media */
1485 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1486 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1488 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1491 ahci_write_fis_d2h(p, slot, cfis, tfd);
/*
 * Handle MODE SENSE(10).  Supports the Read/Write Error Recovery page
 * (16-byte response) and the CD Capabilities page (30-byte response,
 * advertising 2 volume levels and a 512 KB buffer); any other page code
 * yields ILLEGAL REQUEST sense.
 * NOTE(review): several payload-field assignments are elided from this
 * extraction.
 */
1495 atapi_mode_sense(struct ahci_port *p, int slot, uint8_t *cfis)
1503 len = be16dec(acmd + 7);
1505 code = acmd[2] & 0x3f;
1510 case MODEPAGE_RW_ERROR_RECOVERY:
1514 if (len > sizeof(buf))
1517 memset(buf, 0, sizeof(buf));
/* Mode data length excludes its own 2-byte field. */
1518 be16enc(buf, 16 - 2);
1523 write_prdt(p, slot, cfis, buf, len);
1524 tfd = ATA_S_READY | ATA_S_DSC;
1527 case MODEPAGE_CD_CAPABILITIES:
1531 if (len > sizeof(buf))
1534 memset(buf, 0, sizeof(buf));
1535 be16enc(buf, 30 - 2);
1541 be16enc(&buf[18], 2);
1542 be16enc(&buf[20], 512);
1543 write_prdt(p, slot, cfis, buf, len);
1544 tfd = ATA_S_READY | ATA_S_DSC;
1553 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1555 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1560 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1562 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1565 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1566 ahci_write_fis_d2h(p, slot, cfis, tfd);
/*
 * Handle GET EVENT STATUS NOTIFICATION.  Only the polled form (IMMED bit
 * set) is supported; asynchronous requests return ILLEGAL REQUEST sense.
 * The polled response is a minimal "no event" header.
 * NOTE(review): payload-field assignments are elided from this extraction.
 */
1570 atapi_get_event_status_notification(struct ahci_port *p, int slot,
1578 /* we don't support asynchronous operation */
1579 if (!(acmd[1] & 1)) {
1580 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1582 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1587 len = be16dec(acmd + 7);
1588 if (len > sizeof(buf))
1591 memset(buf, 0, sizeof(buf));
1592 be16enc(buf, 8 - 2);
1596 write_prdt(p, slot, cfis, buf, len);
1597 tfd = ATA_S_READY | ATA_S_DSC;
1599 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1600 ahci_write_fis_d2h(p, slot, cfis, tfd);
/*
 * Dispatch an ATAPI PACKET command by SCSI opcode (acmd = cfis + 0x40).
 * Unrecognized opcodes return ILLEGAL REQUEST sense.
 * NOTE(review): most case labels are elided from this extraction; only
 * the handler calls remain visible.
 */
1604 handle_packet_cmd(struct ahci_port *p, int slot, uint8_t *cfis)
1614 for (i = 0; i < 16; i++)
1615 DPRINTF("%02x ", acmd[i]);
1621 case TEST_UNIT_READY:
1622 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1623 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1626 atapi_inquiry(p, slot, cfis);
1629 atapi_read_capacity(p, slot, cfis);
1633 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1634 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1637 atapi_read_toc(p, slot, cfis);
1640 atapi_report_luns(p, slot, cfis);
1644 atapi_read(p, slot, cfis, 0);
1647 atapi_request_sense(p, slot, cfis);
1649 case START_STOP_UNIT:
1650 atapi_start_stop_unit(p, slot, cfis);
1653 atapi_mode_sense(p, slot, cfis);
1655 case GET_EVENT_STATUS_NOTIFICATION:
1656 atapi_get_event_status_notification(p, slot, cfis);
/* Unsupported opcode: ILLEGAL REQUEST. */
1659 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1660 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1662 ahci_write_fis_d2h(p, slot, cfis, (p->sense_key << 12) |
1663 ATA_S_READY | ATA_S_ERROR);
/*
 * Top-level ATA command dispatcher for one issued slot.  Sets BUSY in the
 * task file, then switches on the command byte (cfis[2]): IDENTIFY, SET
 * FEATURES, SET MULTI, all read/write flavors, cache flushes, TRIM
 * (direct and NCQ), READ LOG, power/verify no-ops, ATAPI identify and
 * PACKET.  Unknown commands complete with an aborted-command error.
 * NOTE(review): many case labels and intervening lines are elided from
 * this extraction.
 */
1669 ahci_handle_cmd(struct ahci_port *p, int slot, uint8_t *cfis)
1672 p->tfd |= ATA_S_BUSY;
1674 case ATA_ATA_IDENTIFY:
1675 handle_identify(p, slot, cfis);
1677 case ATA_SETFEATURES:
1680 case ATA_SF_ENAB_SATA_SF:
1682 case ATA_SATA_SF_AN:
1683 p->tfd = ATA_S_DSC | ATA_S_READY;
1686 p->tfd = ATA_S_ERROR | ATA_S_READY;
1687 p->tfd |= (ATA_ERROR_ABORT << 8);
/* Cache-control subfeatures are accepted as no-ops. */
1691 case ATA_SF_ENAB_WCACHE:
1692 case ATA_SF_DIS_WCACHE:
1693 case ATA_SF_ENAB_RCACHE:
1694 case ATA_SF_DIS_RCACHE:
1695 p->tfd = ATA_S_DSC | ATA_S_READY;
1697 case ATA_SF_SETXFER:
1699 switch (cfis[12] & 0xf8) {
1705 p->xfermode = (cfis[12] & 0x7);
1708 p->tfd = ATA_S_DSC | ATA_S_READY;
1712 p->tfd = ATA_S_ERROR | ATA_S_READY;
1713 p->tfd |= (ATA_ERROR_ABORT << 8);
1716 ahci_write_fis_d2h(p, slot, cfis, p->tfd);
/* SET MULTI: sector count must be 0 or a power of two <= 128. */
1720 if (cfis[12] != 0 &&
1721 (cfis[12] > 128 || (cfis[12] & (cfis[12] - 1)))) {
1722 p->tfd = ATA_S_ERROR | ATA_S_READY;
1723 p->tfd |= (ATA_ERROR_ABORT << 8);
1725 p->mult_sectors = cfis[12];
1726 p->tfd = ATA_S_DSC | ATA_S_READY;
1728 ahci_write_fis_d2h(p, slot, cfis, p->tfd);
1736 case ATA_READ_MUL48:
1737 case ATA_WRITE_MUL48:
1740 case ATA_READ_DMA48:
1741 case ATA_WRITE_DMA48:
1742 case ATA_READ_FPDMA_QUEUED:
1743 case ATA_WRITE_FPDMA_QUEUED:
1744 ahci_handle_rw(p, slot, cfis, 0);
1746 case ATA_FLUSHCACHE:
1747 case ATA_FLUSHCACHE48:
1748 ahci_handle_flush(p, slot, cfis);
1750 case ATA_DATA_SET_MANAGEMENT:
/* Only single-block TRIM range lists are supported. */
1751 if (cfis[11] == 0 && cfis[3] == ATA_DSM_TRIM &&
1752 cfis[13] == 0 && cfis[12] == 1) {
1753 ahci_handle_dsm_trim(p, slot, cfis, 0);
1756 ahci_write_fis_d2h(p, slot, cfis,
1757 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
1759 case ATA_SEND_FPDMA_QUEUED:
1760 if ((cfis[13] & 0x1f) == ATA_SFPDMA_DSM &&
1761 cfis[17] == 0 && cfis[16] == ATA_DSM_TRIM &&
1762 cfis[11] == 0 && cfis[3] == 1) {
1763 ahci_handle_dsm_trim(p, slot, cfis, 0);
1766 ahci_write_fis_d2h(p, slot, cfis,
1767 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
1769 case ATA_READ_LOG_EXT:
1770 case ATA_READ_LOG_DMA_EXT:
1771 ahci_handle_read_log(p, slot, cfis);
1773 case ATA_SECURITY_FREEZE_LOCK:
1776 ahci_write_fis_d2h(p, slot, cfis,
1777 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
1779 case ATA_CHECK_POWER_MODE:
1780 cfis[12] = 0xff; /* always on */
1781 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1783 case ATA_STANDBY_CMD:
1784 case ATA_STANDBY_IMMEDIATE:
1786 case ATA_IDLE_IMMEDIATE:
1788 case ATA_READ_VERIFY:
1789 case ATA_READ_VERIFY48:
1790 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1792 case ATA_ATAPI_IDENTIFY:
1793 handle_atapi_identify(p, slot, cfis);
1795 case ATA_PACKET_CMD:
/* PACKET on a non-ATAPI port aborts; otherwise dispatch the SCSI CDB. */
1797 ahci_write_fis_d2h(p, slot, cfis,
1798 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
1800 handle_packet_cmd(p, slot, cfis);
1803 EPRINTLN("Unsupported cmd:%02x", cfis[2]);
1804 ahci_write_fis_d2h(p, slot, cfis,
1805 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
/*
 * Parse and dispatch a single command slot: read the slot's command
 * header from the port command list, map the command FIS (CFIS) and
 * PRD table from guest memory, then either dispatch a Register H2D
 * command FIS to ahci_handle_cmd() or treat it as a control FIS and
 * retire the slot by clearing its PxCI bit.
 * NOTE(review): interior lines are elided in this excerpt; comments
 * describe only what the visible statements establish.
 */
1811 ahci_handle_slot(struct ahci_port *p, int slot)
1813 struct ahci_cmd_hdr *hdr;
1815 struct ahci_prdt_entry *prdt;
1817 struct pci_ahci_softc *sc;
/* Command headers are fixed-size entries; index by slot number. */
1824 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
/* CFL field is in dwords; convert to a byte count for the dump loop. */
1826 cfl = (hdr->flags & 0x1f) * 4;
/* Map the command table: 0x80 bytes of CFIS/ACMD area followed by PRDT. */
1828 cfis = paddr_guest2host(ahci_ctx(sc), hdr->ctba,
1829 0x80 + hdr->prdtl * sizeof(struct ahci_prdt_entry));
1831 prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
/* Debug dump of the raw command FIS bytes. */
1834 for (i = 0; i < cfl; i++) {
1837 DPRINTF("%02x ", cfis[i]);
/* Debug dump of each PRDT entry: byte count (low 22 bits) @ address. */
1841 for (i = 0; i < hdr->prdtl; i++) {
1842 DPRINTF("%d@%08"PRIx64"", prdt->dbc & 0x3fffff, prdt->dba);
/* Only Register H2D FISes are accepted as commands. */
1847 if (cfis[0] != FIS_TYPE_REGH2D) {
1848 EPRINTLN("Not a H2D FIS:%02x", cfis[0]);
/* C bit set: this is a command FIS -> execute the ATA command. */
1852 if (cfis[1] & 0x80) {
1853 ahci_handle_cmd(p, slot, cfis);
/* C bit clear: control FIS; bit 2 of the control byte is SRST. */
1855 if (cfis[15] & (1 << 2))
1857 else if (p->reset) {
/* Control FIS consumed: retire the slot. */
1861 p->ci &= ~(1 << slot);
/*
 * Scan PxCI for newly issued commands and start them. Round-robins
 * from the current command slot (p->ccs), skipping commands already
 * in flight (p->pending), and publishes the slot being started in
 * the PxCMD.CCS field. Does nothing while the port's ST bit is clear.
 */
1866 ahci_handle_port(struct ahci_port *p)
/* Port not started: nothing to process. */
1869 if (!(p->cmd & AHCI_P_CMD_ST))
1873 * Search for any new commands to issue ignoring those that
1874 * are already in-flight. Stop if device is busy or in error.
/* Advance ccs modulo 32 (max AHCI command slots) each iteration. */
1876 for (; (p->ci & ~p->pending) != 0; p->ccs = ((p->ccs + 1) & 31)) {
/* Device busy or transferring: stop issuing for now. */
1877 if ((p->tfd & (ATA_S_BUSY | ATA_S_DRQ)) != 0)
1879 if (p->waitforclear)
/* This slot has a new, not-yet-started command: issue it. */
1881 if ((p->ci & ~p->pending & (1 << p->ccs)) != 0) {
/* Report the current command slot in PxCMD.CCS. */
1882 p->cmd &= ~AHCI_P_CMD_CCS_MASK;
1883 p->cmd |= p->ccs << AHCI_P_CMD_CCS_SHIFT;
1884 ahci_handle_slot(p, p->ccs);
1890 * blockif callback routine - this runs in the context of the blockif
1891 * i/o thread, so the mutex needs to be acquired.
/*
 * Completion callback for ATA block I/O. Recycles the ahci_ioreq,
 * updates the command header's byte count, continues multi-part
 * transfers, and otherwise completes the command by writing an SDB
 * FIS (NCQ commands) or a D2H register FIS with the final status.
 * NOTE(review): interior lines are elided in this excerpt.
 */
1894 ata_ioreq_cb(struct blockif_req *br, int err)
1896 struct ahci_cmd_hdr *hdr;
1897 struct ahci_ioreq *aior;
1898 struct ahci_port *p;
1899 struct pci_ahci_softc *sc;
1904 DPRINTF("%s %d", __func__, err);
1907 aior = br->br_param;
1912 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
/* NCQ commands complete with a Set Device Bits FIS rather than D2H. */
1914 if (cfis[2] == ATA_WRITE_FPDMA_QUEUED ||
1915 cfis[2] == ATA_READ_FPDMA_QUEUED ||
1916 cfis[2] == ATA_SEND_FPDMA_QUEUED)
/* DSM/TRIM requests (direct or via SEND FPDMA QUEUED) may need more passes. */
1918 if (cfis[2] == ATA_DATA_SET_MANAGEMENT ||
1919 (cfis[2] == ATA_SEND_FPDMA_QUEUED &&
1920 (cfis[13] & 0x1f) == ATA_SFPDMA_DSM))
/* Runs on the blockif i/o thread: take the softc lock before touching state. */
1923 pthread_mutex_lock(&sc->mtx);
1926 * Delete the blockif request from the busy list
1928 TAILQ_REMOVE(&p->iobhd, aior, io_blist);
1931 * Move the blockif request back to the free list
1933 STAILQ_INSERT_TAIL(&p->iofhd, aior, io_flist);
/* Report bytes transferred so far in the command header's PRDBC. */
1936 hdr->prdbc = aior->done;
/* More data remains and no error: issue the next chunk of the transfer. */
1938 if (!err && aior->more) {
1940 ahci_handle_dsm_trim(p, slot, cfis, aior->done);
1942 ahci_handle_rw(p, slot, cfis, aior->done);
/* Final status: success, or aborted-command error in the high byte. */
1947 tfd = ATA_S_READY | ATA_S_DSC;
1949 tfd = (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR;
1951 ahci_write_fis_sdb(p, slot, cfis, tfd);
1953 ahci_write_fis_d2h(p, slot, cfis, tfd);
1956 * This command is now complete.
1958 p->pending &= ~(1 << slot);
/* Port may have been stopped mid-command; then try to start more work. */
1960 ahci_check_stopped(p);
1961 ahci_handle_port(p);
1963 pthread_mutex_unlock(&sc->mtx);
1964 DPRINTF("%s exit", __func__);
/*
 * Completion callback for ATAPI (packet) block I/O; runs on the
 * blockif i/o thread, so the softc mutex is taken before touching
 * shared state. Recycles the ahci_ioreq, continues multi-part reads,
 * and completes the command with a D2H FIS carrying either success
 * status or a sense key in the error byte.
 * NOTE(review): interior lines are elided in this excerpt.
 */
1968 atapi_ioreq_cb(struct blockif_req *br, int err)
1970 struct ahci_cmd_hdr *hdr;
1971 struct ahci_ioreq *aior;
1972 struct ahci_port *p;
1973 struct pci_ahci_softc *sc;
1978 DPRINTF("%s %d", __func__, err);
1980 aior = br->br_param;
1985 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + aior->slot * AHCI_CL_SIZE);
/* Serialize against register accesses from the vCPU threads. */
1987 pthread_mutex_lock(&sc->mtx);
1990 * Delete the blockif request from the busy list
1992 TAILQ_REMOVE(&p->iobhd, aior, io_blist);
1995 * Move the blockif request back to the free list
1997 STAILQ_INSERT_TAIL(&p->iofhd, aior, io_flist);
/* Publish bytes transferred so far in the command header. */
2000 hdr->prdbc = aior->done;
/* More data pending and no error: continue the read where it left off. */
2002 if (!err && aior->more) {
2003 atapi_read(p, slot, cfis, aior->done);
2008 tfd = ATA_S_READY | ATA_S_DSC;
/* On error, report ILLEGAL REQUEST; sense key is placed in TFD bits 15:12. */
2010 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
2012 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
/* Interrupt reason register: command complete, data to host. */
2014 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
2015 ahci_write_fis_d2h(p, slot, cfis, tfd);
2018 * This command is now complete.
2020 p->pending &= ~(1 << slot);
2022 ahci_check_stopped(p);
2023 ahci_handle_port(p);
2025 pthread_mutex_unlock(&sc->mtx);
2026 DPRINTF("%s exit", __func__);
/*
 * Allocate the port's pool of I/O request structures, sized to the
 * backing blockif's queue depth, and place every entry on the free
 * list. The busy list starts empty. Each entry's blockif callback is
 * chosen by device type (ATA vs ATAPI).
 * NOTE(review): calloc() result is not visibly checked here; the
 * elided lines may handle it — confirm against the full source.
 */
2030 pci_ahci_ioreq_init(struct ahci_port *pr)
2032 struct ahci_ioreq *vr;
2035 pr->ioqsz = blockif_queuesz(pr->bctx);
2036 pr->ioreq = calloc(pr->ioqsz, sizeof(struct ahci_ioreq));
2037 STAILQ_INIT(&pr->iofhd);
2040 * Add all i/o request entries to the free queue
2042 for (i = 0; i < pr->ioqsz; i++) {
/* ATA ports complete through ata_ioreq_cb, ATAPI through atapi_ioreq_cb. */
2046 vr->io_req.br_callback = ata_ioreq_cb;
2048 vr->io_req.br_callback = atapi_ioreq_cb;
/* Round-trip pointer so the callback can recover its ahci_ioreq. */
2049 vr->io_req.br_param = vr;
2050 STAILQ_INSERT_TAIL(&pr->iofhd, vr, io_flist);
2053 TAILQ_INIT(&pr->iobhd);
/*
 * Handle a guest write to a per-port AHCI register. The port index
 * and register offset are decoded from the BAR offset. Visible cases
 * include PxIE, PxCMD (start/stop the command list and FIS-receive
 * engines, CLO, ICC), and PxSCTL (device reset detection).
 * NOTE(review): the switch framing and several cases are elided in
 * this excerpt; comments cover only the visible register handling.
 */
2057 pci_ahci_port_write(struct pci_ahci_softc *sc, uint64_t offset, uint64_t value)
2059 int port = (offset - AHCI_OFFSET) / AHCI_STEP;
2060 offset = (offset - AHCI_OFFSET) % AHCI_STEP;
2061 struct ahci_port *p = &sc->port[port];
2063 DPRINTF("pci_ahci_port %d: write offset 0x%"PRIx64" value 0x%"PRIx64"",
2064 port, offset, value);
/* PxIE: mask off reserved bits before storing the interrupt enables. */
2084 p->ie = value & 0xFDC000FF;
/* PxCMD: replace only the software-writable bits with the new value. */
2089 p->cmd &= ~(AHCI_P_CMD_ST | AHCI_P_CMD_SUD | AHCI_P_CMD_POD |
2090 AHCI_P_CMD_CLO | AHCI_P_CMD_FRE | AHCI_P_CMD_APSTE |
2091 AHCI_P_CMD_ATAPI | AHCI_P_CMD_DLAE | AHCI_P_CMD_ALPE |
2092 AHCI_P_CMD_ASP | AHCI_P_CMD_ICC_MASK);
2093 p->cmd |= (AHCI_P_CMD_ST | AHCI_P_CMD_SUD | AHCI_P_CMD_POD |
2094 AHCI_P_CMD_CLO | AHCI_P_CMD_FRE | AHCI_P_CMD_APSTE |
2095 AHCI_P_CMD_ATAPI | AHCI_P_CMD_DLAE | AHCI_P_CMD_ALPE |
2096 AHCI_P_CMD_ASP | AHCI_P_CMD_ICC_MASK) & value;
2098 if (!(value & AHCI_P_CMD_ST)) {
/* ST set: start the command list engine and map it from guest memory. */
2103 p->cmd |= AHCI_P_CMD_CR;
2104 clb = (uint64_t)p->clbu << 32 | p->clb;
2105 p->cmd_lst = paddr_guest2host(ahci_ctx(sc), clb,
2106 AHCI_CL_SIZE * AHCI_MAX_SLOTS);
/* FRE set: start FIS receive and map the received-FIS area. */
2109 if (value & AHCI_P_CMD_FRE) {
2112 p->cmd |= AHCI_P_CMD_FR;
2113 fb = (uint64_t)p->fbu << 32 | p->fb;
2114 /* we don't support FBSCP, so rfis size is 256Bytes */
2115 p->rfis = paddr_guest2host(ahci_ctx(sc), fb, 256);
2117 p->cmd &= ~AHCI_P_CMD_FR;
/* CLO: clear BSY/DRQ in TFD; the bit self-clears per the AHCI spec. */
2120 if (value & AHCI_P_CMD_CLO) {
2121 p->tfd &= ~(ATA_S_BUSY | ATA_S_DRQ);
2122 p->cmd &= ~AHCI_P_CMD_CLO;
/* ICC: interface power-management transitions are acknowledged, not emulated. */
2125 if (value & AHCI_P_CMD_ICC_MASK) {
2126 p->cmd &= ~AHCI_P_CMD_ICC_MASK;
/* PxCMD may have enabled the port: try to issue pending commands. */
2129 ahci_handle_port(p);
2135 EPRINTLN("pci_ahci_port: read only registers 0x%"PRIx64"", offset);
/* PxSCTL: DET reset requests only take effect while the port is stopped. */
2139 if (!(p->cmd & AHCI_P_CMD_ST)) {
2140 if (value & ATA_SC_DET_RESET)
2152 ahci_handle_port(p);
/*
 * Handle a guest write to a global (HBA-level) AHCI register.
 * Visible cases: GHC (controller reset and global interrupt enable)
 * and IS (write-1-to-clear via ahci_generate_intr re-evaluation).
 * NOTE(review): the switch framing is elided in this excerpt.
 */
2162 pci_ahci_host_write(struct pci_ahci_softc *sc, uint64_t offset, uint64_t value)
2164 DPRINTF("pci_ahci_host: write offset 0x%"PRIx64" value 0x%"PRIx64"",
2172 DPRINTF("pci_ahci_host: read only registers 0x%"PRIx64"", offset);
/* GHC.HR: controller reset request. */
2175 if (value & AHCI_GHC_HR) {
/* GHC.IE: latch the global interrupt enable and re-evaluate all ports. */
2179 if (value & AHCI_GHC_IE)
2180 sc->ghc |= AHCI_GHC_IE;
2182 sc->ghc &= ~AHCI_GHC_IE;
2183 ahci_generate_intr(sc, 0xffffffff);
/* IS write: re-evaluate interrupts for the ports named in the written mask. */
2187 ahci_generate_intr(sc, value);
/*
 * BAR write entry point. Only BAR 5 (ABAR) exists and only aligned
 * 32-bit writes are accepted. Routes the access to the global or the
 * per-port register handler under the softc mutex.
 */
2195 pci_ahci_write(struct pci_devinst *pi, int baridx, uint64_t offset, int size,
2198 struct pci_ahci_softc *sc = pi->pi_arg;
2200 assert(baridx == 5);
2201 assert((offset % 4) == 0 && size == 4);
2203 pthread_mutex_lock(&sc->mtx);
/* Offsets below AHCI_OFFSET are global registers; above, per-port banks. */
2205 if (offset < AHCI_OFFSET)
2206 pci_ahci_host_write(sc, offset, value);
2207 else if (offset < (uint64_t)AHCI_OFFSET + sc->ports * AHCI_STEP)
2208 pci_ahci_port_write(sc, offset, value);
2210 EPRINTLN("pci_ahci: unknown i/o write offset 0x%"PRIx64"", offset);
2212 pthread_mutex_unlock(&sc->mtx);
/*
 * Read a global AHCI register. The visible path treats the softc's
 * register fields (starting at sc->cap) as a contiguous uint32_t
 * array indexed by register offset — relies on the struct's field
 * layout matching the AHCI register map.
 */
2216 pci_ahci_host_read(struct pci_ahci_softc *sc, uint64_t offset)
2232 uint32_t *p = &sc->cap;
2233 p += (offset - AHCI_CAP) / sizeof(uint32_t);
2241 DPRINTF("pci_ahci_host: read offset 0x%"PRIx64" value 0x%x",
/*
 * Read a per-port AHCI register. Decodes port index and register
 * offset from the BAR offset, then indexes the port's register
 * fields (starting at clb) as a contiguous uint32_t array — relies
 * on struct ahci_port's field order matching the register map.
 */
2248 pci_ahci_port_read(struct pci_ahci_softc *sc, uint64_t offset)
2251 int port = (offset - AHCI_OFFSET) / AHCI_STEP;
2252 offset = (offset - AHCI_OFFSET) % AHCI_STEP;
2272 uint32_t *p= &sc->port[port].clb;
2273 p += (offset - AHCI_P_CLB) / sizeof(uint32_t);
2282 DPRINTF("pci_ahci_port %d: read offset 0x%"PRIx64" value 0x%x",
2283 port, offset, value);
/*
 * BAR read entry point. Supports 1/2/4-byte naturally-aligned reads
 * by fetching the containing aligned dword and shifting the wanted
 * bytes down. Routes to the global or per-port read handler under
 * the softc mutex.
 */
2289 pci_ahci_read(struct pci_devinst *pi, int baridx, uint64_t regoff, int size)
2291 struct pci_ahci_softc *sc = pi->pi_arg;
2295 assert(baridx == 5);
2296 assert(size == 1 || size == 2 || size == 4);
2297 assert((regoff & (size - 1)) == 0);
2299 pthread_mutex_lock(&sc->mtx);
2301 offset = regoff & ~0x3; /* round down to a multiple of 4 bytes */
2302 if (offset < AHCI_OFFSET)
2303 value = pci_ahci_host_read(sc, offset);
2304 else if (offset < (uint64_t)AHCI_OFFSET + sc->ports * AHCI_STEP)
2305 value = pci_ahci_port_read(sc, offset);
2308 EPRINTLN("pci_ahci: unknown i/o read offset 0x%"PRIx64"",
/* Shift the addressed sub-word into the low bytes of the result. */
2311 value >>= 8 * (regoff & 0x3);
2313 pthread_mutex_unlock(&sc->mtx);
2319 * Each AHCI controller has a "port" node which contains nodes for
2320 * each port named after the decimal number of the port (no leading
2321 * zeroes). Port nodes contain a "type" ("hd" or "cd"), as well as
2322 * options for blockif. For example:
2329 * .path="/path/to/image"
/*
 * Create the config node for one port ("port.<N>"), record its device
 * type ("hd" or "cd"), and hand the remaining option string to the
 * blockif legacy-config parser. Returns blockif's status.
 */
2332 pci_ahci_legacy_config_port(nvlist_t *nvl, int port, const char *type,
/* "XX" sizes the buffer for up to two decimal digits (ports 0..31). */
2335 char node_name[sizeof("XX")];
2338 snprintf(node_name, sizeof(node_name), "%d", port);
2339 port_nvl = create_relative_config_node(nvl, node_name);
2340 set_config_value_node(port_nvl, "type", type);
2341 return (blockif_legacy_config(port_nvl, opts));
/*
 * Parse the legacy command-line option string for the generic "ahci"
 * device: a comma-separated sequence of "hd:<opts>" / "cd:<opts>"
 * entries, one per port, converted into per-port config nodes.
 * NOTE(review): cleanup of `tofree` and the error paths are elided
 * in this excerpt.
 */
2345 pci_ahci_legacy_config(nvlist_t *nvl, const char *opts)
2347 nvlist_t *ports_nvl;
2349 char *next, *next2, *str, *tofree;
2355 ports_nvl = create_relative_config_node(nvl, "port");
/* Work on a private copy; strstr-based splitting mutates positions. */
2357 tofree = str = strdup(opts);
2358 for (p = 0; p < MAX_PORTS && str != NULL; p++, str = next) {
2359 /* Identify and cut off type of present port. */
2360 if (strncmp(str, "hd:", 3) == 0) {
2363 } else if (strncmp(str, "cd:", 3) == 0) {
2369 /* Find and cut off the next port options. */
/* The next port starts at whichever of ",hd:" / ",cd:" occurs first. */
2370 next = strstr(str, ",hd:");
2371 next2 = strstr(str, ",cd:");
2372 if (next == NULL || (next2 != NULL && next2 < next))
2383 EPRINTLN("Missing or invalid type for port %d: \"%s\"",
2388 if (pci_ahci_legacy_config_port(ports_nvl, p, type, str) != 0)
/*
 * Legacy config for the "ahci-cd" alias: a single ATAPI device on
 * port 0.
 */
2398 pci_ahci_cd_legacy_config(nvlist_t *nvl, const char *opts)
2400 nvlist_t *ports_nvl;
2402 ports_nvl = create_relative_config_node(nvl, "port");
2403 return (pci_ahci_legacy_config_port(ports_nvl, 0, "cd", opts));
/*
 * Legacy config for the "ahci-hd" alias: a single ATA disk on
 * port 0.
 */
2407 pci_ahci_hd_legacy_config(nvlist_t *nvl, const char *opts)
2409 nvlist_t *ports_nvl;
2411 ports_nvl = create_relative_config_node(nvl, "port");
2412 return (pci_ahci_legacy_config_port(ports_nvl, 0, "hd", opts));
/*
 * Device initialization: allocate the softc, open a blockif backend
 * for every configured port, build each drive's ATA IDENTIFY data
 * (serial derived from an MD5 of the backing path, overridable via
 * "ser"/"rev"/"model"/"nmrr" config values), then program the HBA
 * capability registers, PCI config space, MSI capability, and BAR 5.
 * NOTE(review): error-handling paths and several statements are
 * elided in this excerpt.
 */
2416 pci_ahci_init(struct pci_devinst *pi, nvlist_t *nvl)
2418 char bident[sizeof("XXX:XXX:XXX")];
2419 char node_name[sizeof("XX")];
2420 struct blockif_ctxt *bctxt;
2421 struct pci_ahci_softc *sc;
2422 int atapi, ret, slots, p;
2425 const char *path, *type, *value;
2426 nvlist_t *ports_nvl, *port_nvl;
/* NOTE(review): leftover debug logging to a fixed /tmp path — candidate
 * for removal; confirm against the full source whether this is guarded. */
2431 dbg = fopen("/tmp/log", "w+");
2434 sc = calloc(1, sizeof(struct pci_ahci_softc));
2437 pthread_mutex_init(&sc->mtx, NULL);
/* Walk the "port.<N>" config nodes; absent nodes are skipped. */
2442 ports_nvl = find_relative_config_node(nvl, "port");
2443 for (p = 0; ports_nvl != NULL && p < MAX_PORTS; p++) {
2444 struct ata_params *ata_ident = &sc->port[p].ata_ident;
2445 char ident[AHCI_PORT_IDENT];
2447 snprintf(node_name, sizeof(node_name), "%d", p);
2448 port_nvl = find_relative_config_node(ports_nvl, node_name);
2449 if (port_nvl == NULL)
2452 type = get_config_value_node(port_nvl, "type");
2456 if (strcmp(type, "hd") == 0)
2462 * Attempt to open the backing image. Use the PCI slot/func
2463 * and the port number for the identifier string.
2465 snprintf(bident, sizeof(bident), "%u:%u:%u", pi->pi_slot,
2468 bctxt = blockif_open(port_nvl, bident);
2469 if (bctxt == NULL) {
2475 ret = blockif_add_boot_device(pi, bctxt);
2481 sc->port[p].bctx = bctxt;
2482 sc->port[p].pr_sc = sc;
2483 sc->port[p].port = p;
2484 sc->port[p].atapi = atapi;
2487 * Create an identifier for the backing file.
2488 * Use parts of the md5 sum of the filename
2490 path = get_config_value_node(port_nvl, "path");
2492 MD5Update(&mdctx, path, strlen(path));
2493 MD5Final(digest, &mdctx);
2494 snprintf(ident, AHCI_PORT_IDENT,
2495 "BHYVE-%02X%02X-%02X%02X-%02X%02X",
2496 digest[0], digest[1], digest[2], digest[3], digest[4],
/* Populate the drive's IDENTIFY data with defaults... */
2499 memset(ata_ident, 0, sizeof(struct ata_params));
2500 ata_string((uint8_t*)&ata_ident->serial, ident, 20);
2501 ata_string((uint8_t*)&ata_ident->revision, "001", 8);
2503 ata_string((uint8_t*)&ata_ident->model, "BHYVE SATA DVD ROM", 40);
2505 ata_string((uint8_t*)&ata_ident->model, "BHYVE SATA DISK", 40);
/* ...then apply any user-supplied overrides from the config node. */
2506 value = get_config_value_node(port_nvl, "nmrr");
2508 ata_ident->media_rotation_rate = atoi(value);
2509 value = get_config_value_node(port_nvl, "ser");
2511 ata_string((uint8_t*)(&ata_ident->serial), value, 20);
2512 value = get_config_value_node(port_nvl, "rev");
2514 ata_string((uint8_t*)(&ata_ident->revision), value, 8);
2515 value = get_config_value_node(port_nvl, "model");
2517 ata_string((uint8_t*)(&ata_ident->model), value, 40);
2518 ata_identify_init(&sc->port[p], atapi);
2521 * Allocate blockif request structures and add them
2524 pci_ahci_ioreq_init(&sc->port[p]);
/* Advertise the smallest queue depth among all ports as the slot count. */
2527 if (sc->port[p].ioqsz < slots)
2528 slots = sc->port[p].ioqsz;
2532 /* Intel ICH8 AHCI */
/* Advertise at least the ICH8's six ports even if fewer are configured. */
2534 if (sc->ports < DEF_PORTS)
2535 sc->ports = DEF_PORTS;
2536 sc->cap = AHCI_CAP_64BIT | AHCI_CAP_SNCQ | AHCI_CAP_SSNTF |
2537 AHCI_CAP_SMPS | AHCI_CAP_SSS | AHCI_CAP_SALP |
2538 AHCI_CAP_SAL | AHCI_CAP_SCLO | (0x3 << AHCI_CAP_ISS_SHIFT)|
2539 AHCI_CAP_PMD | AHCI_CAP_SSC | AHCI_CAP_PSC |
2540 (slots << AHCI_CAP_NCS_SHIFT) | AHCI_CAP_SXS | (sc->ports - 1);
2543 sc->cap2 = AHCI_CAP2_APST;
/* Present as an Intel ICH8 AHCI controller (8086:2821). */
2546 pci_set_cfgdata16(pi, PCIR_DEVICE, 0x2821);
2547 pci_set_cfgdata16(pi, PCIR_VENDOR, 0x8086);
2548 pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_STORAGE);
2549 pci_set_cfgdata8(pi, PCIR_SUBCLASS, PCIS_STORAGE_SATA);
2550 pci_set_cfgdata8(pi, PCIR_PROGIF, PCIP_STORAGE_SATA_AHCI_1_0);
/* MSI vector count: round the port count (capped at 16) up to a power of 2. */
2551 p = MIN(sc->ports, 16);
2552 p = flsl(p) - ((p & (p - 1)) ? 0 : 1);
2553 pci_emul_add_msicap(pi, 1 << p);
2554 pci_emul_alloc_bar(pi, 5, PCIBAR_MEM32,
2555 AHCI_OFFSET + sc->ports * AHCI_STEP);
2557 pci_lintr_request(pi);
/* Error path: close every backend that was opened before failing. */
2561 for (p = 0; p < sc->ports; p++) {
2562 if (sc->port[p].bctx != NULL)
2563 blockif_close(sc->port[p].bctx);
2571 #ifdef BHYVE_SNAPSHOT
/*
 * Save/restore the controller state for live snapshot. Serializes
 * the global HBA registers, then every port's configuration,
 * guest-memory mappings (command list and received-FIS area), and
 * register file. On restore, verifies that the snapshot's per-port
 * backend presence and port numbering match the current config.
 * NOTE(review): several statements are elided in this excerpt.
 */
2573 pci_ahci_snapshot(struct vm_snapshot_meta *meta)
2577 struct pci_devinst *pi;
2578 struct pci_ahci_softc *sc;
2579 struct ahci_port *port;
2581 pi = meta->dev_data;
2584 /* TODO: add mtx lock/unlock */
2586 SNAPSHOT_VAR_OR_LEAVE(sc->ports, meta, ret, done);
2587 SNAPSHOT_VAR_OR_LEAVE(sc->cap, meta, ret, done);
2588 SNAPSHOT_VAR_OR_LEAVE(sc->ghc, meta, ret, done);
2589 SNAPSHOT_VAR_OR_LEAVE(sc->is, meta, ret, done);
2590 SNAPSHOT_VAR_OR_LEAVE(sc->pi, meta, ret, done);
2591 SNAPSHOT_VAR_OR_LEAVE(sc->vs, meta, ret, done);
2592 SNAPSHOT_VAR_OR_LEAVE(sc->ccc_ctl, meta, ret, done);
2593 SNAPSHOT_VAR_OR_LEAVE(sc->ccc_pts, meta, ret, done);
2594 SNAPSHOT_VAR_OR_LEAVE(sc->em_loc, meta, ret, done);
2595 SNAPSHOT_VAR_OR_LEAVE(sc->em_ctl, meta, ret, done);
2596 SNAPSHOT_VAR_OR_LEAVE(sc->cap2, meta, ret, done);
2597 SNAPSHOT_VAR_OR_LEAVE(sc->bohc, meta, ret, done);
2598 SNAPSHOT_VAR_OR_LEAVE(sc->lintr, meta, ret, done);
2600 for (i = 0; i < MAX_PORTS; i++) {
2601 port = &sc->port[i];
2603 if (meta->op == VM_SNAPSHOT_SAVE)
2606 SNAPSHOT_VAR_OR_LEAVE(bctx, meta, ret, done);
2607 SNAPSHOT_VAR_OR_LEAVE(port->port, meta, ret, done);
2609 /* Mostly for restore; save is ensured by the lines above. */
/* Snapshot and live config must agree on whether this port has a backend. */
2610 if (((bctx == NULL) && (port->bctx != NULL)) ||
2611 ((bctx != NULL) && (port->bctx == NULL))) {
2612 EPRINTLN("%s: ports not matching", __func__);
/* Unbacked port: nothing further to serialize for it. */
2617 if (port->bctx == NULL)
2620 if (port->port != i) {
2621 EPRINTLN("%s: ports not matching: "
2622 "actual: %d expected: %d", __func__, port->port, i);
/* Re-establish the guest-physical mappings for CL and RFIS areas. */
2627 SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(pi->pi_vmctx, port->cmd_lst,
2628 AHCI_CL_SIZE * AHCI_MAX_SLOTS, false, meta, ret, done);
2629 SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(pi->pi_vmctx, port->rfis, 256,
2630 false, meta, ret, done);
/* Per-port emulation state. */
2632 SNAPSHOT_VAR_OR_LEAVE(port->ata_ident, meta, ret, done);
2633 SNAPSHOT_VAR_OR_LEAVE(port->atapi, meta, ret, done);
2634 SNAPSHOT_VAR_OR_LEAVE(port->reset, meta, ret, done);
2635 SNAPSHOT_VAR_OR_LEAVE(port->waitforclear, meta, ret, done);
2636 SNAPSHOT_VAR_OR_LEAVE(port->mult_sectors, meta, ret, done);
2637 SNAPSHOT_VAR_OR_LEAVE(port->xfermode, meta, ret, done);
2638 SNAPSHOT_VAR_OR_LEAVE(port->err_cfis, meta, ret, done);
2639 SNAPSHOT_VAR_OR_LEAVE(port->sense_key, meta, ret, done);
2640 SNAPSHOT_VAR_OR_LEAVE(port->asc, meta, ret, done);
2641 SNAPSHOT_VAR_OR_LEAVE(port->ccs, meta, ret, done);
2642 SNAPSHOT_VAR_OR_LEAVE(port->pending, meta, ret, done);
/* Per-port AHCI register file. */
2644 SNAPSHOT_VAR_OR_LEAVE(port->clb, meta, ret, done);
2645 SNAPSHOT_VAR_OR_LEAVE(port->clbu, meta, ret, done);
2646 SNAPSHOT_VAR_OR_LEAVE(port->fb, meta, ret, done);
2647 SNAPSHOT_VAR_OR_LEAVE(port->fbu, meta, ret, done);
2648 SNAPSHOT_VAR_OR_LEAVE(port->ie, meta, ret, done);
2649 SNAPSHOT_VAR_OR_LEAVE(port->cmd, meta, ret, done);
2650 SNAPSHOT_VAR_OR_LEAVE(port->unused0, meta, ret, done);
2651 SNAPSHOT_VAR_OR_LEAVE(port->tfd, meta, ret, done);
2652 SNAPSHOT_VAR_OR_LEAVE(port->sig, meta, ret, done);
2653 SNAPSHOT_VAR_OR_LEAVE(port->ssts, meta, ret, done);
2654 SNAPSHOT_VAR_OR_LEAVE(port->sctl, meta, ret, done);
2655 SNAPSHOT_VAR_OR_LEAVE(port->serr, meta, ret, done);
2656 SNAPSHOT_VAR_OR_LEAVE(port->sact, meta, ret, done);
2657 SNAPSHOT_VAR_OR_LEAVE(port->ci, meta, ret, done);
2658 SNAPSHOT_VAR_OR_LEAVE(port->sntf, meta, ret, done);
2659 SNAPSHOT_VAR_OR_LEAVE(port->fbs, meta, ret, done);
2660 SNAPSHOT_VAR_OR_LEAVE(port->ioqsz, meta, ret, done);
/* I/O must be quiesced before snapshot: no in-flight blockif requests. */
2662 assert(TAILQ_EMPTY(&port->iobhd));
/*
 * Snapshot support: pause the blockif backend of every backed port so
 * no I/O is in flight while state is serialized.
 */
2670 pci_ahci_pause(struct pci_devinst *pi)
2672 struct pci_ahci_softc *sc;
2673 struct blockif_ctxt *bctxt;
2678 for (i = 0; i < MAX_PORTS; i++) {
2679 bctxt = sc->port[i].bctx;
/* Ports without a backend are skipped (elided continue). */
2683 blockif_pause(bctxt);
/*
 * Snapshot support: resume the blockif backend of every backed port
 * after state has been serialized/restored.
 */
2690 pci_ahci_resume(struct pci_devinst *pi)
2692 struct pci_ahci_softc *sc;
2693 struct blockif_ctxt *bctxt;
2698 for (i = 0; i < MAX_PORTS; i++) {
2699 bctxt = sc->port[i].bctx;
/* Ports without a backend are skipped (elided continue). */
2703 blockif_resume(bctxt);
2708 #endif /* BHYVE_SNAPSHOT */
2711 * Use separate emulation names to distinguish drive and atapi devices
/* Generic "ahci" device: full init/config/BAR handlers, plus snapshot
 * hooks when built with BHYVE_SNAPSHOT. */
2713 static const struct pci_devemu pci_de_ahci = {
2715 .pe_init = pci_ahci_init,
2716 .pe_legacy_config = pci_ahci_legacy_config,
2717 .pe_barwrite = pci_ahci_write,
2718 .pe_barread = pci_ahci_read,
2719 #ifdef BHYVE_SNAPSHOT
2720 .pe_snapshot = pci_ahci_snapshot,
2721 .pe_pause = pci_ahci_pause,
2722 .pe_resume = pci_ahci_resume,
2725 PCI_EMUL_SET(pci_de_ahci);
/* "ahci-hd" alias: single-disk legacy config; other handlers are
 * elided in this excerpt (presumably shared with pci_de_ahci). */
2727 static const struct pci_devemu pci_de_ahci_hd = {
2728 .pe_emu = "ahci-hd",
2729 .pe_legacy_config = pci_ahci_hd_legacy_config,
2732 PCI_EMUL_SET(pci_de_ahci_hd);
/* "ahci-cd" alias: single-ATAPI-device legacy config. */
2734 static const struct pci_devemu pci_de_ahci_cd = {
2735 .pe_emu = "ahci-cd",
2736 .pe_legacy_config = pci_ahci_cd_legacy_config,
2739 PCI_EMUL_SET(pci_de_ahci_cd);