2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2013 Zhixiang Yu <zcore@freebsd.org>
5 * Copyright (c) 2015-2016 Alexander Motin <mav@FreeBSD.org>
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
35 #include <sys/param.h>
36 #include <sys/linker_set.h>
39 #include <sys/ioctl.h>
42 #include <sys/endian.h>
44 #include <machine/vmm_snapshot.h>
56 #include <pthread_np.h>
67 #define DEF_PORTS 6 /* Intel ICH8 AHCI supports 6 ports */
68 #define MAX_PORTS 32 /* AHCI supports 32 ports */
70 #define PxSIG_ATA 0x00000101 /* ATA drive */
71 #define PxSIG_ATAPI 0xeb140101 /* ATAPI drive */
74 FIS_TYPE_REGH2D = 0x27, /* Register FIS - host to device */
75 FIS_TYPE_REGD2H = 0x34, /* Register FIS - device to host */
76 FIS_TYPE_DMAACT = 0x39, /* DMA activate FIS - device to host */
77 FIS_TYPE_DMASETUP = 0x41, /* DMA setup FIS - bidirectional */
78 FIS_TYPE_DATA = 0x46, /* Data FIS - bidirectional */
79 FIS_TYPE_BIST = 0x58, /* BIST activate FIS - bidirectional */
80 FIS_TYPE_PIOSETUP = 0x5F, /* PIO setup FIS - device to host */
81 FIS_TYPE_SETDEVBITS = 0xA1, /* Set dev bits FIS - device to host */
87 #define TEST_UNIT_READY 0x00
88 #define REQUEST_SENSE 0x03
90 #define START_STOP_UNIT 0x1B
91 #define PREVENT_ALLOW 0x1E
92 #define READ_CAPACITY 0x25
94 #define POSITION_TO_ELEMENT 0x2B
96 #define GET_EVENT_STATUS_NOTIFICATION 0x4A
97 #define MODE_SENSE_10 0x5A
98 #define REPORT_LUNS 0xA0
103 * SCSI mode page codes
105 #define MODEPAGE_RW_ERROR_RECOVERY 0x01
106 #define MODEPAGE_CD_CAPABILITIES 0x2A
111 #define ATA_SF_ENAB_SATA_SF 0x10
112 #define ATA_SATA_SF_AN 0x05
113 #define ATA_SF_DIS_SATA_SF 0x90
120 #define DPRINTF(format, arg...) do{fprintf(dbg, format, ##arg);fflush(dbg);}while(0)
122 #define DPRINTF(format, arg...)
124 #define WPRINTF(format, arg...) printf(format, ##arg)
126 #define AHCI_PORT_IDENT 20 + 1
129 struct blockif_req io_req;
130 struct ahci_port *io_pr;
131 STAILQ_ENTRY(ahci_ioreq) io_flist;
132 TAILQ_ENTRY(ahci_ioreq) io_blist;
142 struct blockif_ctxt *bctx;
143 struct pci_ahci_softc *pr_sc;
144 struct ata_params ata_ident;
153 uint8_t err_cfis[20];
180 struct ahci_ioreq *ioreq;
182 STAILQ_HEAD(ahci_fhead, ahci_ioreq) iofhd;
183 TAILQ_HEAD(ahci_bhead, ahci_ioreq) iobhd;
186 struct ahci_cmd_hdr {
191 uint32_t reserved[4];
194 struct ahci_prdt_entry {
197 #define DBCMASK 0x3fffff
201 struct pci_ahci_softc {
202 struct pci_devinst *asc_pi;
217 struct ahci_port port[MAX_PORTS];
219 #define ahci_ctx(sc) ((sc)->asc_pi->pi_vmctx)
221 static void ahci_handle_port(struct ahci_port *p);
/*
 * lba_to_msf: convert a CD logical block address into minute/second
 * form (75 frames per second, 60 seconds per minute).
 * NOTE(review): this numbered listing has gaps; the frame byte
 * (presumably buf[2] = lba % 75) is not visible here — confirm
 * against the full source.
 */
223 static inline void lba_to_msf(uint8_t *buf, int lba)
226 buf[0] = (lba / 75) / 60;
227 buf[1] = (lba / 75) % 60;
232 * Generate HBA interrupts on global IS register write.
/*
 * ahci_generate_intr: refresh the HBA-global IS register from the
 * per-port PxIS/PxIE state and deliver interrupts for the ports in
 * 'mask' — legacy INTx when MSI is unavailable, otherwise per-port
 * MSI vectors (shared last vector handled elsewhere).
 * NOTE(review): numbered listing with gaps; comments cover only the
 * visible lines.
 */
235 ahci_generate_intr(struct pci_ahci_softc *sc, uint32_t mask)
237 struct pci_devinst *pi = sc->asc_pi;
242 /* Update global IS from PxIS/PxIE. */
243 for (i = 0; i < sc->ports; i++) {
248 DPRINTF("%s(%08x) %08x", __func__, mask, sc->is);
250 /* If there is nothing enabled -- clear legacy interrupt and exit. */
251 if (sc->is == 0 || (sc->ghc & AHCI_GHC_IE) == 0) {
253 pci_lintr_deassert(pi);
259 /* If there is anything and no MSI -- assert legacy interrupt. */
260 nmsg = pci_msi_maxmsgnum(pi);
264 pci_lintr_assert(pi);
269 /* Assert respective MSIs for ports that were touched. */
270 for (i = 0; i < nmsg; i++) {
271 if (sc->ports <= nmsg || i < nmsg - 1)
274 mmask = 0xffffffff << i;
275 if (sc->is & mask && mmask & mask)
276 pci_generate_msi(pi, i);
281 * Generate HBA interrupt on specific port event.
/*
 * ahci_port_intr: raise an HBA interrupt for a single port event.
 * A port with a dedicated MSI vector (port index below nmsg - 1, or
 * enough vectors for all ports) always fires its own vector; ports
 * sharing the last vector only fire if their IS bit was not already
 * set; falls back to legacy INTx when no MSI is configured.
 * NOTE(review): numbered listing with gaps; comments cover only the
 * visible lines.
 */
284 ahci_port_intr(struct ahci_port *p)
286 struct pci_ahci_softc *sc = p->pr_sc;
287 struct pci_devinst *pi = sc->asc_pi;
290 DPRINTF("%s(%d) %08x/%08x %08x", __func__,
291 p->port, p->is, p->ie, sc->is);
293 /* If there is nothing enabled -- we are done. */
294 if ((p->is & p->ie) == 0)
297 /* In case of non-shared MSI always generate interrupt. */
298 nmsg = pci_msi_maxmsgnum(pi);
299 if (sc->ports <= nmsg || p->port < nmsg - 1) {
300 sc->is |= (1 << p->port);
301 if ((sc->ghc & AHCI_GHC_IE) == 0)
303 pci_generate_msi(pi, p->port);
307 /* If IS for this port is already set -- do nothing. */
308 if (sc->is & (1 << p->port))
311 sc->is |= (1 << p->port);
313 /* If interrupts are enabled -- generate one. */
314 if ((sc->ghc & AHCI_GHC_IE) == 0)
317 pci_generate_msi(pi, nmsg - 1);
318 } else if (!sc->lintr) {
320 pci_lintr_assert(pi);
/*
 * ahci_write_fis: copy a completed FIS into the port's received-FIS
 * area (at the per-type offset) and compute which PxIS interrupt
 * bits to raise.  Bails out if the guest has not set up an RFIS
 * buffer or FIS receive is disabled (PxCMD.FRE clear).  The
 * interrupt bit (DHR/SDB/PS) is gated on the FIS's own "I" bit
 * (fis[1] bit 6); an error status additionally raises TFE.
 * NOTE(review): numbered listing with gaps (offset/len assignments
 * per case are not visible); comments cover only the visible lines.
 */
325 ahci_write_fis(struct ahci_port *p, enum sata_fis_type ft, uint8_t *fis)
327 int offset, len, irq;
329 if (p->rfis == NULL || !(p->cmd & AHCI_P_CMD_FRE))
333 case FIS_TYPE_REGD2H:
336 irq = (fis[1] & (1 << 6)) ? AHCI_P_IX_DHR : 0;
338 case FIS_TYPE_SETDEVBITS:
341 irq = (fis[1] & (1 << 6)) ? AHCI_P_IX_SDB : 0;
343 case FIS_TYPE_PIOSETUP:
346 irq = (fis[1] & (1 << 6)) ? AHCI_P_IX_PS : 0;
349 WPRINTF("unsupported fis type %d", ft);
352 if (fis[2] & ATA_S_ERROR) {
354 irq |= AHCI_P_IX_TFE;
356 memcpy(p->rfis + offset, fis, len);
/*
 * ahci_write_fis_piosetup: emit a minimal (zeroed) PIO Setup FIS to
 * the port's received-FIS area.
 * NOTE(review): numbered listing with gaps; the declaration of the
 * local 'fis' buffer is not visible here.
 */
366 ahci_write_fis_piosetup(struct ahci_port *p)
370 memset(fis, 0, sizeof(fis));
371 fis[0] = FIS_TYPE_PIOSETUP;
372 ahci_write_fis(p, FIS_TYPE_PIOSETUP, fis);
/*
 * ahci_write_fis_sdb: post a Set Device Bits FIS for a completed NCQ
 * command.  On error status, snapshots the failing command's taskfile
 * into err_cfis (for the NCQ Command Error log) ; otherwise records
 * the finished tag in the SActive bitmap word of the FIS and clears
 * the slot from PxSACT.
 * NOTE(review): numbered listing with gaps; comments cover only the
 * visible lines.
 */
376 ahci_write_fis_sdb(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t tfd)
381 error = (tfd >> 8) & 0xff;
383 memset(fis, 0, sizeof(fis));
384 fis[0] = FIS_TYPE_SETDEVBITS;
388 if (fis[2] & ATA_S_ERROR) {
389 p->err_cfis[0] = slot;
390 p->err_cfis[2] = tfd;
391 p->err_cfis[3] = error;
392 memcpy(&p->err_cfis[4], cfis + 4, 16);
394 *(uint32_t *)(fis + 4) = (1 << slot);
395 p->sact &= ~(1 << slot);
399 ahci_write_fis(p, FIS_TYPE_SETDEVBITS, fis);
/*
 * ahci_write_fis_d2h: post a Register Device-to-Host FIS carrying the
 * final taskfile status/error for a non-NCQ command.  On error, logs
 * the command into err_cfis (0x80 marks a non-queued command).
 * Clears the slot's bit from PxCI before writing the FIS.
 * NOTE(review): numbered listing with gaps; comments cover only the
 * visible lines.
 */
403 ahci_write_fis_d2h(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t tfd)
408 error = (tfd >> 8) & 0xff;
409 memset(fis, 0, sizeof(fis));
410 fis[0] = FIS_TYPE_REGD2H;
424 if (fis[2] & ATA_S_ERROR) {
425 p->err_cfis[0] = 0x80;
426 p->err_cfis[2] = tfd & 0xff;
427 p->err_cfis[3] = error;
428 memcpy(&p->err_cfis[4], cfis + 4, 16);
430 p->ci &= ~(1 << slot);
432 ahci_write_fis(p, FIS_TYPE_REGD2H, fis);
/*
 * ahci_write_fis_d2h_ncq: acknowledge acceptance of an NCQ command
 * with a good-status D2H FIS (no interrupt, no error) and release the
 * slot's PxCI bit; completion is reported later via Set Device Bits.
 */
436 ahci_write_fis_d2h_ncq(struct ahci_port *p, int slot)
440 p->tfd = ATA_S_READY | ATA_S_DSC;
441 memset(fis, 0, sizeof(fis));
442 fis[0] = FIS_TYPE_REGD2H;
443 fis[1] = 0; /* No interrupt */
444 fis[2] = p->tfd; /* Status */
445 fis[3] = 0; /* No error */
446 p->ci &= ~(1 << slot);
447 ahci_write_fis(p, FIS_TYPE_REGD2H, fis);
/*
 * ahci_write_reset_fis_d2h: emit the initial D2H Register FIS that a
 * device sends after reset (carries the device signature bytes).
 * NOTE(review): numbered listing with gaps; the lines filling in the
 * signature fields are not visible here.
 */
451 ahci_write_reset_fis_d2h(struct ahci_port *p)
455 memset(fis, 0, sizeof(fis));
456 fis[0] = FIS_TYPE_REGD2H;
464 ahci_write_fis(p, FIS_TYPE_REGD2H, fis);
/*
 * ahci_check_stopped: once command processing is disabled
 * (PxCMD.ST clear) and no requests remain in flight, drop the
 * command-list-running bit and the current-command-slot field.
 * NOTE(review): numbered listing with gaps; visible lines only.
 */
468 ahci_check_stopped(struct ahci_port *p)
471 * If we are no longer processing the command list and nothing
472 * is in-flight, clear the running bit, the current command
473 * slot, the command issue and active bits.
475 if (!(p->cmd & AHCI_P_CMD_ST)) {
476 if (p->pending == 0) {
478 p->cmd &= ~(AHCI_P_CMD_CR | AHCI_P_CMD_CCS_MASK);
/*
 * ahci_port_stop: cancel every outstanding blockif request on the
 * port.  For each cancelled request, clears the slot from PxSACT
 * (NCQ commands) or PxCI, clears its pending bit, and moves the
 * ioreq from the busy list back to the free list.  Caller must hold
 * the softc mutex (asserted below).
 * NOTE(review): numbered listing with gaps; where 'cfis'/'slot' come
 * from for each aior is not visible here — presumably fields of the
 * ioreq; confirm against the full source.
 */
487 ahci_port_stop(struct ahci_port *p)
489 struct ahci_ioreq *aior;
494 assert(pthread_mutex_isowned_np(&p->pr_sc->mtx));
496 TAILQ_FOREACH(aior, &p->iobhd, io_blist) {
498 * Try to cancel the outstanding blockif request.
500 error = blockif_cancel(p->bctx, &aior->io_req);
506 if (cfis[2] == ATA_WRITE_FPDMA_QUEUED ||
507 cfis[2] == ATA_READ_FPDMA_QUEUED ||
508 cfis[2] == ATA_SEND_FPDMA_QUEUED)
509 p->sact &= ~(1 << slot); /* NCQ */
511 p->ci &= ~(1 << slot);
514 * This command is now done.
516 p->pending &= ~(1 << slot);
519 * Delete the blockif request from the busy list
521 TAILQ_REMOVE(&p->iobhd, aior, io_blist);
524 * Move the blockif request back to the free list
526 STAILQ_INSERT_TAIL(&p->iofhd, aior, io_flist);
529 ahci_check_stopped(p);
/*
 * ahci_port_reset: reinitialize a port's SATA link state.  With no
 * backing device the link reports "no device" and an all-ones
 * signature; with a device attached, the link comes up at the speed
 * negotiated via PxSCTL (default Gen3), the taskfile shows the
 * post-reset value, and the signature selects ATA vs ATAPI.
 * NOTE(review): numbered listing with gaps; visible lines only.
 */
533 ahci_port_reset(struct ahci_port *pr)
537 pr->xfermode = ATA_UDMA6;
538 pr->mult_sectors = 128;
541 pr->ssts = ATA_SS_DET_NO_DEVICE;
542 pr->sig = 0xFFFFFFFF;
546 pr->ssts = ATA_SS_DET_PHY_ONLINE | ATA_SS_IPM_ACTIVE;
547 if (pr->sctl & ATA_SC_SPD_MASK)
548 pr->ssts |= (pr->sctl & ATA_SC_SPD_MASK);
550 pr->ssts |= ATA_SS_SPD_GEN3;
551 pr->tfd = (1 << 8) | ATA_S_DSC | ATA_S_DMA;
554 pr->tfd |= ATA_S_READY;
556 pr->sig = PxSIG_ATAPI;
557 ahci_write_reset_fis_d2h(pr);
/*
 * ahci_reset: full HBA reset — restore GHC to AHCI-enabled state,
 * drop any asserted legacy interrupt, then reset every port to its
 * power-on command register value (spin-up + power-on, plus
 * cold-presence if a backing device exists) and re-run the per-port
 * link reset.
 * NOTE(review): numbered listing with gaps; visible lines only.
 */
561 ahci_reset(struct pci_ahci_softc *sc)
565 sc->ghc = AHCI_GHC_AE;
569 pci_lintr_deassert(sc->asc_pi);
573 for (i = 0; i < sc->ports; i++) {
576 sc->port[i].cmd = (AHCI_P_CMD_SUD | AHCI_P_CMD_POD);
577 if (sc->port[i].bctx)
578 sc->port[i].cmd |= AHCI_P_CMD_CPS;
579 sc->port[i].sctl = 0;
580 ahci_port_reset(&sc->port[i]);
/*
 * ata_string: copy an ASCII string into an IDENTIFY-data field using
 * the byte-swapped layout ATA requires (each 16-bit word holds two
 * characters swapped, hence the i ^ 1 index).
 * NOTE(review): numbered listing with gaps; the space-padding branch
 * for short sources is not visible here.
 */
585 ata_string(uint8_t *dest, const char *src, int len)
589 for (i = 0; i < len; i++) {
591 dest[i ^ 1] = *src++;
/*
 * atapi_string: copy an ASCII string into a SCSI INQUIRY-style field
 * (plain byte order, unlike ata_string above).
 * NOTE(review): numbered listing with gaps; the loop body is not
 * visible here.
 */
598 atapi_string(uint8_t *dest, const char *src, int len)
602 for (i = 0; i < len; i++) {
611 * Build up the iovec based on the PRDT, 'done' and 'len'.
/*
 * ahci_build_iov: translate the guest's PRDT entries into the blockif
 * request's iovec, covering the bytes between aior->done and
 * aior->len.  Already-transferred bytes are skipped; if the iovec
 * fills up (BLOCKIF_IOV_MAX), the transfer is trimmed back to a
 * sector boundary and aior->more flags the continuation.
 * NOTE(review): numbered listing with gaps; the prdt++ advance and
 * several bookkeeping lines are not visible here.
 */
614 ahci_build_iov(struct ahci_port *p, struct ahci_ioreq *aior,
615 struct ahci_prdt_entry *prdt, uint16_t prdtl)
617 struct blockif_req *breq = &aior->io_req;
618 int i, j, skip, todo, left, extra;
621 /* Copy part of PRDT between 'done' and 'len' bytes into the iov. */
623 left = aior->len - aior->done;
625 for (i = 0, j = 0; i < prdtl && j < BLOCKIF_IOV_MAX && left > 0;
627 dbcsz = (prdt->dbc & DBCMASK) + 1;
628 /* Skip already done part of the PRDT */
636 breq->br_iov[j].iov_base = paddr_guest2host(ahci_ctx(p->pr_sc),
637 prdt->dba + skip, dbcsz);
638 breq->br_iov[j].iov_len = dbcsz;
645 /* If we got limited by IOV length, round I/O down to sector size. */
646 if (j == BLOCKIF_IOV_MAX) {
647 extra = todo % blockif_sectsz(p->bctx);
651 if (breq->br_iov[j - 1].iov_len > extra) {
652 breq->br_iov[j - 1].iov_len -= extra;
655 extra -= breq->br_iov[j - 1].iov_len;
661 breq->br_resid = todo;
663 aior->more = (aior->done < aior->len && i < prdtl);
/*
 * ahci_handle_rw: start (or continue, via 'done') a guest read/write
 * command.  Decodes the LBA and sector count from the command FIS
 * according to the command flavor — NCQ (FPDMA queued), 48-bit, or
 * 28-bit — scales both to bytes, builds the iovec from the PRDT, and
 * submits an asynchronous blockif read or write.  NCQ commands are
 * acknowledged immediately with a no-interrupt D2H FIS.
 * NOTE(review): numbered listing with gaps (e.g. cfis[4] terms of the
 * LBA, zero-count handling, error path); visible lines only.
 */
667 ahci_handle_rw(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t done)
669 struct ahci_ioreq *aior;
670 struct blockif_req *breq;
671 struct ahci_prdt_entry *prdt;
672 struct ahci_cmd_hdr *hdr;
675 int err, first, ncq, readop;
677 prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
678 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
683 if (cfis[2] == ATA_WRITE || cfis[2] == ATA_WRITE48 ||
684 cfis[2] == ATA_WRITE_MUL || cfis[2] == ATA_WRITE_MUL48 ||
685 cfis[2] == ATA_WRITE_DMA || cfis[2] == ATA_WRITE_DMA48 ||
686 cfis[2] == ATA_WRITE_FPDMA_QUEUED)
689 if (cfis[2] == ATA_WRITE_FPDMA_QUEUED ||
690 cfis[2] == ATA_READ_FPDMA_QUEUED) {
691 lba = ((uint64_t)cfis[10] << 40) |
692 ((uint64_t)cfis[9] << 32) |
693 ((uint64_t)cfis[8] << 24) |
694 ((uint64_t)cfis[6] << 16) |
695 ((uint64_t)cfis[5] << 8) |
697 len = cfis[11] << 8 | cfis[3];
701 } else if (cfis[2] == ATA_READ48 || cfis[2] == ATA_WRITE48 ||
702 cfis[2] == ATA_READ_MUL48 || cfis[2] == ATA_WRITE_MUL48 ||
703 cfis[2] == ATA_READ_DMA48 || cfis[2] == ATA_WRITE_DMA48) {
704 lba = ((uint64_t)cfis[10] << 40) |
705 ((uint64_t)cfis[9] << 32) |
706 ((uint64_t)cfis[8] << 24) |
707 ((uint64_t)cfis[6] << 16) |
708 ((uint64_t)cfis[5] << 8) |
710 len = cfis[13] << 8 | cfis[12];
714 lba = ((cfis[7] & 0xf) << 24) | (cfis[6] << 16) |
715 (cfis[5] << 8) | cfis[4];
720 lba *= blockif_sectsz(p->bctx);
721 len *= blockif_sectsz(p->bctx);
723 /* Pull request off free list */
724 aior = STAILQ_FIRST(&p->iofhd);
725 assert(aior != NULL);
726 STAILQ_REMOVE_HEAD(&p->iofhd, io_flist);
732 aior->readop = readop;
733 breq = &aior->io_req;
734 breq->br_offset = lba + done;
735 ahci_build_iov(p, aior, prdt, hdr->prdtl);
737 /* Mark this command in-flight. */
738 p->pending |= 1 << slot;
740 /* Stuff request onto busy list. */
741 TAILQ_INSERT_HEAD(&p->iobhd, aior, io_blist);
744 ahci_write_fis_d2h_ncq(p, slot);
747 err = blockif_read(p->bctx, breq);
749 err = blockif_write(p->bctx, breq);
/*
 * ahci_handle_flush: service FLUSH CACHE by taking an ioreq off the
 * free list, marking the slot in flight, queuing it on the busy list
 * and submitting an asynchronous blockif flush.
 * NOTE(review): numbered listing with gaps; visible lines only.
 */
754 ahci_handle_flush(struct ahci_port *p, int slot, uint8_t *cfis)
756 struct ahci_ioreq *aior;
757 struct blockif_req *breq;
761 * Pull request off free list
763 aior = STAILQ_FIRST(&p->iofhd);
764 assert(aior != NULL);
765 STAILQ_REMOVE_HEAD(&p->iofhd, io_flist);
771 breq = &aior->io_req;
774 * Mark this command in-flight.
776 p->pending |= 1 << slot;
779 * Stuff request onto busy list
781 TAILQ_INSERT_HEAD(&p->iobhd, aior, io_blist);
783 err = blockif_flush(p->bctx, breq);
/*
 * read_prdt: copy up to 'len' bytes of guest data described by the
 * command slot's PRDT into a host buffer, walking entries until the
 * PRDT or the requested length is exhausted.
 * NOTE(review): numbered listing with gaps (destination pointer
 * advance, prdt++); visible lines only.
 */
788 read_prdt(struct ahci_port *p, int slot, uint8_t *cfis,
791 struct ahci_cmd_hdr *hdr;
792 struct ahci_prdt_entry *prdt;
796 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
799 prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
800 for (i = 0; i < hdr->prdtl && len; i++) {
805 dbcsz = (prdt->dbc & DBCMASK) + 1;
806 ptr = paddr_guest2host(ahci_ctx(p->pr_sc), prdt->dba, dbcsz);
807 sublen = MIN(len, dbcsz);
808 memcpy(to, ptr, sublen);
/*
 * ahci_handle_dsm_trim: service TRIM, either as DATA SET MANAGEMENT
 * or as SEND FPDMA QUEUED (NCQ form — count fields differ).  Reads
 * the TRIM range entries from guest memory via the PRDT, decodes each
 * entry's 48-bit LBA and 16-bit length, and submits a blockif delete
 * per range; a zero-length entry terminates the list and completes
 * the command.  'done' carries the byte offset for continuation calls.
 * NOTE(review): numbered listing with gaps (entry pointer setup,
 * error handling); visible lines only.
 */
816 ahci_handle_dsm_trim(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t done)
818 struct ahci_ioreq *aior;
819 struct blockif_req *breq;
827 if (cfis[2] == ATA_DATA_SET_MANAGEMENT) {
828 len = (uint16_t)cfis[13] << 8 | cfis[12];
831 } else { /* ATA_SEND_FPDMA_QUEUED */
832 len = (uint16_t)cfis[11] << 8 | cfis[3];
836 read_prdt(p, slot, cfis, buf, sizeof(buf));
840 elba = ((uint64_t)entry[5] << 40) |
841 ((uint64_t)entry[4] << 32) |
842 ((uint64_t)entry[3] << 24) |
843 ((uint64_t)entry[2] << 16) |
844 ((uint64_t)entry[1] << 8) |
846 elen = (uint16_t)entry[7] << 8 | entry[6];
852 ahci_write_fis_d2h_ncq(p, slot);
853 ahci_write_fis_sdb(p, slot, cfis,
854 ATA_S_READY | ATA_S_DSC);
856 ahci_write_fis_d2h(p, slot, cfis,
857 ATA_S_READY | ATA_S_DSC);
859 p->pending &= ~(1 << slot);
860 ahci_check_stopped(p);
869 * Pull request off free list
871 aior = STAILQ_FIRST(&p->iofhd);
872 assert(aior != NULL);
873 STAILQ_REMOVE_HEAD(&p->iofhd, io_flist);
878 aior->more = (len != done);
880 breq = &aior->io_req;
881 breq->br_offset = elba * blockif_sectsz(p->bctx);
882 breq->br_resid = elen * blockif_sectsz(p->bctx);
885 * Mark this command in-flight.
887 p->pending |= 1 << slot;
890 * Stuff request onto busy list
892 TAILQ_INSERT_HEAD(&p->iobhd, aior, io_blist);
895 ahci_write_fis_d2h_ncq(p, slot);
897 err = blockif_delete(p->bctx, breq);
/*
 * write_prdt: copy up to 'len' bytes from a host buffer into the
 * guest memory regions described by the slot's PRDT, then record the
 * number of bytes actually transferred in the command header's PRD
 * byte count field.
 * NOTE(review): numbered listing with gaps (source pointer advance,
 * prdt++); visible lines only.
 */
902 write_prdt(struct ahci_port *p, int slot, uint8_t *cfis,
905 struct ahci_cmd_hdr *hdr;
906 struct ahci_prdt_entry *prdt;
910 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
913 prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
914 for (i = 0; i < hdr->prdtl && len; i++) {
919 dbcsz = (prdt->dbc & DBCMASK) + 1;
920 ptr = paddr_guest2host(ahci_ctx(p->pr_sc), prdt->dba, dbcsz);
921 sublen = MIN(len, dbcsz);
922 memcpy(ptr, from, sublen);
927 hdr->prdbc = size - len;
/*
 * ahci_checksum: fill the final byte of the buffer so that the
 * modulo-256 sum of the whole buffer is zero (ATA log page checksum
 * convention).
 * NOTE(review): numbered listing with gaps; the sum accumulation line
 * inside the loop is not visible here.
 */
931 ahci_checksum(uint8_t *buf, int size)
936 for (i = 0; i < size - 1; i++)
938 buf[size - 1] = 0x100 - sum;
/*
 * ahci_handle_read_log: service READ LOG EXT / READ LOG DMA EXT for
 * the three log pages this model supports: 0x00 (log directory),
 * 0x10 (NCQ Command Error — returns the saved err_cfis, checksummed)
 * and 0x13 (SATA NCQ Send/Receive — advertises SFQ DSM/TRIM when the
 * backing store supports delete and is writable).  Any other page,
 * an ATAPI port, a missing PRDT, or a count other than one page is
 * aborted.  A PIO Setup FIS precedes the data for the non-DMA opcode.
 * NOTE(review): numbered listing with gaps; visible lines only.
 */
942 ahci_handle_read_log(struct ahci_port *p, int slot, uint8_t *cfis)
944 struct ahci_cmd_hdr *hdr;
946 uint8_t *buf8 = (uint8_t *)buf;
947 uint16_t *buf16 = (uint16_t *)buf;
949 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
950 if (p->atapi || hdr->prdtl == 0 || cfis[5] != 0 ||
951 cfis[9] != 0 || cfis[12] != 1 || cfis[13] != 0) {
952 ahci_write_fis_d2h(p, slot, cfis,
953 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
957 memset(buf, 0, sizeof(buf));
958 if (cfis[4] == 0x00) { /* Log directory */
959 buf16[0x00] = 1; /* Version -- 1 */
960 buf16[0x10] = 1; /* NCQ Command Error Log -- 1 page */
961 buf16[0x13] = 1; /* SATA NCQ Send and Receive Log -- 1 page */
962 } else if (cfis[4] == 0x10) { /* NCQ Command Error Log */
963 memcpy(buf8, p->err_cfis, sizeof(p->err_cfis));
964 ahci_checksum(buf8, sizeof(buf));
965 } else if (cfis[4] == 0x13) { /* SATA NCQ Send and Receive Log */
966 if (blockif_candelete(p->bctx) && !blockif_is_ro(p->bctx)) {
967 buf[0x00] = 1; /* SFQ DSM supported */
968 buf[0x01] = 1; /* SFQ DSM TRIM supported */
971 ahci_write_fis_d2h(p, slot, cfis,
972 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
976 if (cfis[2] == ATA_READ_LOG_EXT)
977 ahci_write_fis_piosetup(p);
978 write_prdt(p, slot, cfis, (void *)buf, sizeof(buf));
979 ahci_write_fis_d2h(p, slot, cfis, ATA_S_DSC | ATA_S_READY);
/*
 * handle_identify: service IDENTIFY DEVICE.  Aborts on an ATAPI port
 * (which must use IDENTIFY PACKET DEVICE) or when the guest supplied
 * no PRDT; otherwise sends a PIO Setup FIS, transfers the
 * pre-computed ata_ident structure, and completes with good status.
 */
983 handle_identify(struct ahci_port *p, int slot, uint8_t *cfis)
985 struct ahci_cmd_hdr *hdr;
987 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
988 if (p->atapi || hdr->prdtl == 0) {
989 ahci_write_fis_d2h(p, slot, cfis,
990 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
992 ahci_write_fis_piosetup(p);
993 write_prdt(p, slot, cfis, (void*)&p->ata_ident, sizeof(struct ata_params));
994 ahci_write_fis_d2h(p, slot, cfis, ATA_S_DSC | ATA_S_READY);
/*
 * ata_identify_init: build the port's IDENTIFY data once at init.
 * The first arm fills in the ATAPI (CD-ROM) identity: packet
 * protocol, supported PIO/MWDMA/UDMA modes, SATA capabilities and
 * the command-set support/enabled words.  The second arm fills the
 * ATA disk identity from the backing blockif: CHS and 28/48-bit LBA
 * capacity, queue depth (NCQ), TRIM support when the store allows
 * delete and is writable, and logical/physical sector geometry.
 * Ends by checksumming the 512-byte structure.
 * NOTE(review): numbered listing with gaps (the if/else structure
 * separating the ATAPI and disk arms is not fully visible);
 * comments cover visible lines only.
 */
999 ata_identify_init(struct ahci_port* p, int atapi)
1001 struct ata_params* ata_ident = &p->ata_ident;
1004 ata_ident->config = ATA_PROTO_ATAPI | ATA_ATAPI_TYPE_CDROM |
1005 ATA_ATAPI_REMOVABLE | ATA_DRQ_FAST;
1006 ata_ident->capabilities1 = ATA_SUPPORT_LBA |
1008 ata_ident->capabilities2 = (1 << 14 | 1);
1009 ata_ident->atavalid = ATA_FLAG_64_70 | ATA_FLAG_88;
1010 ata_ident->obsolete62 = 0x3f;
1011 ata_ident->mwdmamodes = 7;
1012 if (p->xfermode & ATA_WDMA0)
1013 ata_ident->mwdmamodes |= (1 << ((p->xfermode & 7) + 8));
1014 ata_ident->apiomodes = 3;
1015 ata_ident->mwdmamin = 0x0078;
1016 ata_ident->mwdmarec = 0x0078;
1017 ata_ident->pioblind = 0x0078;
1018 ata_ident->pioiordy = 0x0078;
1019 ata_ident->satacapabilities = (ATA_SATA_GEN1 | ATA_SATA_GEN2 | ATA_SATA_GEN3);
1020 ata_ident->satacapabilities2 = ((p->ssts & ATA_SS_SPD_MASK) >> 3);
1021 ata_ident->satasupport = ATA_SUPPORT_NCQ_STREAM;
1022 ata_ident->version_major = 0x3f0;
1023 ata_ident->support.command1 = (ATA_SUPPORT_POWERMGT | ATA_SUPPORT_PACKET |
1024 ATA_SUPPORT_RESET | ATA_SUPPORT_NOP);
1025 ata_ident->support.command2 = (1 << 14);
1026 ata_ident->support.extension = (1 << 14);
1027 ata_ident->enabled.command1 = (ATA_SUPPORT_POWERMGT | ATA_SUPPORT_PACKET |
1028 ATA_SUPPORT_RESET | ATA_SUPPORT_NOP);
1029 ata_ident->enabled.extension = (1 << 14);
1030 ata_ident->udmamodes = 0x7f;
1031 if (p->xfermode & ATA_UDMA0)
1032 ata_ident->udmamodes |= (1 << ((p->xfermode & 7) + 8));
1033 ata_ident->transport_major = 0x1020;
1034 ata_ident->integrity = 0x00a5;
1037 int sectsz, psectsz, psectoff, candelete, ro;
1039 uint8_t sech, heads;
1041 ro = blockif_is_ro(p->bctx);
1042 candelete = blockif_candelete(p->bctx);
1043 sectsz = blockif_sectsz(p->bctx);
1044 sectors = blockif_size(p->bctx) / sectsz;
1045 blockif_chs(p->bctx, &cyl, &heads, &sech);
1046 blockif_psectsz(p->bctx, &psectsz, &psectoff);
1047 ata_ident->config = ATA_DRQ_FAST;
1048 ata_ident->cylinders = cyl;
1049 ata_ident->heads = heads;
1050 ata_ident->sectors = sech;
1052 ata_ident->sectors_intr = (0x8000 | 128);
1055 ata_ident->capabilities1 = ATA_SUPPORT_DMA |
1056 ATA_SUPPORT_LBA | ATA_SUPPORT_IORDY;
1057 ata_ident->capabilities2 = (1 << 14);
1058 ata_ident->atavalid = ATA_FLAG_64_70 | ATA_FLAG_88;
1059 if (p->mult_sectors)
1060 ata_ident->multi = (ATA_MULTI_VALID | p->mult_sectors);
1061 if (sectors <= 0x0fffffff) {
1062 ata_ident->lba_size_1 = sectors;
1063 ata_ident->lba_size_2 = (sectors >> 16);
1065 ata_ident->lba_size_1 = 0xffff;
1066 ata_ident->lba_size_2 = 0x0fff;
1068 ata_ident->mwdmamodes = 0x7;
1069 if (p->xfermode & ATA_WDMA0)
1070 ata_ident->mwdmamodes |= (1 << ((p->xfermode & 7) + 8));
1071 ata_ident->apiomodes = 0x3;
1072 ata_ident->mwdmamin = 0x0078;
1073 ata_ident->mwdmarec = 0x0078;
1074 ata_ident->pioblind = 0x0078;
1075 ata_ident->pioiordy = 0x0078;
1076 ata_ident->support3 = 0;
1077 ata_ident->queue = 31;
1078 ata_ident->satacapabilities = (ATA_SATA_GEN1 | ATA_SATA_GEN2 | ATA_SATA_GEN3 |
1080 ata_ident->satacapabilities2 = (ATA_SUPPORT_RCVSND_FPDMA_QUEUED |
1081 (p->ssts & ATA_SS_SPD_MASK) >> 3);
1082 ata_ident->version_major = 0x3f0;
1083 ata_ident->version_minor = 0x28;
1084 ata_ident->support.command1 = (ATA_SUPPORT_POWERMGT | ATA_SUPPORT_WRITECACHE |
1085 ATA_SUPPORT_LOOKAHEAD | ATA_SUPPORT_NOP);
1086 ata_ident->support.command2 = (ATA_SUPPORT_ADDRESS48 | ATA_SUPPORT_FLUSHCACHE |
1087 ATA_SUPPORT_FLUSHCACHE48 | 1 << 14);
1088 ata_ident->support.extension = (1 << 14);
1089 ata_ident->enabled.command1 = (ATA_SUPPORT_POWERMGT | ATA_SUPPORT_WRITECACHE |
1090 ATA_SUPPORT_LOOKAHEAD | ATA_SUPPORT_NOP);
1091 ata_ident->enabled.command2 = (ATA_SUPPORT_ADDRESS48 | ATA_SUPPORT_FLUSHCACHE |
1092 ATA_SUPPORT_FLUSHCACHE48 | 1 << 15);
1093 ata_ident->enabled.extension = (1 << 14);
1094 ata_ident->udmamodes = 0x7f;
1095 if (p->xfermode & ATA_UDMA0)
1096 ata_ident->udmamodes |= (1 << ((p->xfermode & 7) + 8));
1097 ata_ident->lba_size48_1 = sectors;
1098 ata_ident->lba_size48_2 = (sectors >> 16);
1099 ata_ident->lba_size48_3 = (sectors >> 32);
1100 ata_ident->lba_size48_4 = (sectors >> 48);
1102 if (candelete && !ro) {
1103 ata_ident->support3 |= ATA_SUPPORT_RZAT | ATA_SUPPORT_DRAT;
1104 ata_ident->max_dsm_blocks = 1;
1105 ata_ident->support_dsm = ATA_SUPPORT_DSM_TRIM;
1107 ata_ident->pss = ATA_PSS_VALID_VALUE;
1108 ata_ident->lsalign = 0x4000;
1109 if (psectsz > sectsz) {
1110 ata_ident->pss |= ATA_PSS_MULTLS;
1111 ata_ident->pss |= ffsl(psectsz / sectsz) - 1;
1112 ata_ident->lsalign |= (psectoff / sectsz);
1115 ata_ident->pss |= ATA_PSS_LSSABOVE512;
1116 ata_ident->lss_1 = sectsz / 2;
1117 ata_ident->lss_2 = ((sectsz / 2) >> 16);
1119 ata_ident->support2 = (ATA_SUPPORT_RWLOGDMAEXT | 1 << 14);
1120 ata_ident->enabled2 = (ATA_SUPPORT_RWLOGDMAEXT | 1 << 14);
1121 ata_ident->transport_major = 0x1020;
1122 ata_ident->integrity = 0x00a5;
1124 ahci_checksum((uint8_t*)ata_ident, sizeof(struct ata_params));
/*
 * handle_atapi_identify: service IDENTIFY PACKET DEVICE — abort on
 * non-ATAPI ports (the gating condition is in a line not visible in
 * this fragmentary listing), otherwise PIO Setup + identify data +
 * good-status D2H FIS.
 */
1128 handle_atapi_identify(struct ahci_port *p, int slot, uint8_t *cfis)
1131 ahci_write_fis_d2h(p, slot, cfis,
1132 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
1134 ahci_write_fis_piosetup(p);
1135 write_prdt(p, slot, cfis, (void *)&p->ata_ident, sizeof(struct ata_params));
1136 ahci_write_fis_d2h(p, slot, cfis, ATA_S_DSC | ATA_S_READY);
/*
 * atapi_inquiry: service the SCSI INQUIRY CDB.  A VPD request is
 * honored only for page 0 (supported pages); other VPD pages yield
 * ILLEGAL REQUEST sense.  The standard inquiry response identifies
 * the emulated unit as "BHYVE / BHYVE DVD-ROM / 001".
 * NOTE(review): numbered listing with gaps (response header bytes,
 * length clamping); visible lines only.
 */
1141 atapi_inquiry(struct ahci_port *p, int slot, uint8_t *cfis)
1150 if (acmd[1] & 1) { /* VPD */
1151 if (acmd[2] == 0) { /* Supported VPD pages */
1159 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1161 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1162 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1163 ahci_write_fis_d2h(p, slot, cfis, tfd);
1175 atapi_string(buf + 8, "BHYVE", 8);
1176 atapi_string(buf + 16, "BHYVE DVD-ROM", 16);
1177 atapi_string(buf + 32, "001", 4);
1183 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1184 write_prdt(p, slot, cfis, buf, len);
1185 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
/*
 * atapi_read_capacity: service READ CAPACITY(10) — report the last
 * LBA (sector count minus one) and a fixed 2048-byte CD block size,
 * both big-endian per SCSI.
 */
1189 atapi_read_capacity(struct ahci_port *p, int slot, uint8_t *cfis)
1194 sectors = blockif_size(p->bctx) / 2048;
1195 be32enc(buf, sectors - 1);
1196 be32enc(buf + 4, 2048);
1197 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1198 write_prdt(p, slot, cfis, buf, sizeof(buf));
1199 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
/*
 * atapi_read_toc: service READ TOC for the single-track disc this
 * model presents.  The format field (top bits of byte 9) selects the
 * response: formatted TOC (track descriptors plus lead-out at the
 * media size, in MSF or LBA form per the MSF bit), a short session
 * response, or the full/raw TOC; a start track beyond 1/0xaa draws
 * ILLEGAL REQUEST sense.
 * NOTE(review): numbered listing with heavy gaps (switch structure,
 * descriptor bytes, length clamps); comments cover visible lines only.
 */
1203 atapi_read_toc(struct ahci_port *p, int slot, uint8_t *cfis)
1211 len = be16dec(acmd + 7);
1212 format = acmd[9] >> 6;
1218 uint8_t start_track, buf[20], *bp;
1220 msf = (acmd[1] >> 1) & 1;
1221 start_track = acmd[6];
1222 if (start_track > 1 && start_track != 0xaa) {
1224 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1226 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1227 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1228 ahci_write_fis_d2h(p, slot, cfis, tfd);
1234 if (start_track <= 1) {
1254 sectors = blockif_size(p->bctx) / blockif_sectsz(p->bctx);
1258 lba_to_msf(bp, sectors);
1261 be32enc(bp, sectors);
1265 be16enc(buf, size - 2);
1268 write_prdt(p, slot, cfis, buf, len);
1269 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1270 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1277 memset(buf, 0, sizeof(buf));
1281 if (len > sizeof(buf))
1283 write_prdt(p, slot, cfis, buf, len);
1284 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1285 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1292 uint8_t *bp, buf[50];
1294 msf = (acmd[1] >> 1) & 1;
1330 sectors = blockif_size(p->bctx) / blockif_sectsz(p->bctx);
1334 lba_to_msf(bp, sectors);
1337 be32enc(bp, sectors);
1360 be16enc(buf, size - 2);
1363 write_prdt(p, slot, cfis, buf, len);
1364 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1365 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1372 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1374 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1375 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1376 ahci_write_fis_d2h(p, slot, cfis, tfd);
/*
 * atapi_report_luns: service REPORT LUNS with a minimal response
 * (zeroed list — the LUN-list-length line is not visible in this
 * fragmentary listing) and complete with good status.
 */
1383 atapi_report_luns(struct ahci_port *p, int slot, uint8_t *cfis)
1387 memset(buf, 0, sizeof(buf));
1390 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1391 write_prdt(p, slot, cfis, buf, sizeof(buf));
1392 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
/*
 * atapi_read: start (or continue, via 'done') a READ(10)/READ(12)
 * data transfer.  Decodes the big-endian LBA and transfer length from
 * the CDB (length at offset 7 for READ(10), offset 6 for READ(12)),
 * completes immediately for a zero-length read, otherwise builds the
 * iovec from the PRDT and submits an asynchronous blockif read.
 * NOTE(review): numbered listing with gaps (2048-byte scaling of
 * lba/len, aior field setup); visible lines only.
 */
1396 atapi_read(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t done)
1398 struct ahci_ioreq *aior;
1399 struct ahci_cmd_hdr *hdr;
1400 struct ahci_prdt_entry *prdt;
1401 struct blockif_req *breq;
1408 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
1409 prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
1411 lba = be32dec(acmd + 2);
1412 if (acmd[0] == READ_10)
1413 len = be16dec(acmd + 7);
1415 len = be32dec(acmd + 6);
1417 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1418 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1424 * Pull request off free list
1426 aior = STAILQ_FIRST(&p->iofhd);
1427 assert(aior != NULL);
1428 STAILQ_REMOVE_HEAD(&p->iofhd, io_flist);
1434 breq = &aior->io_req;
1435 breq->br_offset = lba + done;
1436 ahci_build_iov(p, aior, prdt, hdr->prdtl);
1438 /* Mark this command in-flight. */
1439 p->pending |= 1 << slot;
1441 /* Stuff request onto busy list. */
1442 TAILQ_INSERT_HEAD(&p->iobhd, aior, io_blist);
1444 err = blockif_read(p->bctx, breq);
/*
 * atapi_request_sense: service REQUEST SENSE — return fixed-format
 * sense data (0x70, valid bit set) carrying the port's stored sense
 * key, clamped to the caller's allocation length.
 * NOTE(review): numbered listing with gaps (ASC byte, length source);
 * visible lines only.
 */
1449 atapi_request_sense(struct ahci_port *p, int slot, uint8_t *cfis)
1457 if (len > sizeof(buf))
1459 memset(buf, 0, len);
1460 buf[0] = 0x70 | (1 << 7);
1461 buf[2] = p->sense_key;
1464 write_prdt(p, slot, cfis, buf, len);
1465 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1466 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
/*
 * atapi_start_stop_unit: service START STOP UNIT.  Stop/start
 * requests succeed as no-ops; the eject case is unimplemented (TODO
 * in the original) and returns ILLEGAL REQUEST sense.
 * NOTE(review): numbered listing with gaps (remaining case labels);
 * visible lines only.
 */
1470 atapi_start_stop_unit(struct ahci_port *p, int slot, uint8_t *cfis)
1472 uint8_t *acmd = cfis + 0x40;
1475 switch (acmd[4] & 3) {
1479 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1480 tfd = ATA_S_READY | ATA_S_DSC;
1483 /* TODO eject media */
1484 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1485 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1487 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1490 ahci_write_fis_d2h(p, slot, cfis, tfd);
/*
 * atapi_mode_sense: service MODE SENSE(10) for the two pages this
 * model supports — read/write error recovery (16-byte response) and
 * CD capabilities (30-byte response, advertising 2 volume levels and
 * a 512x nominal speed); any other page code yields ILLEGAL REQUEST
 * sense.
 * NOTE(review): numbered listing with gaps (page header bytes,
 * capability flag bytes); visible lines only.
 */
1494 atapi_mode_sense(struct ahci_port *p, int slot, uint8_t *cfis)
1502 len = be16dec(acmd + 7);
1504 code = acmd[2] & 0x3f;
1509 case MODEPAGE_RW_ERROR_RECOVERY:
1513 if (len > sizeof(buf))
1516 memset(buf, 0, sizeof(buf));
1517 be16enc(buf, 16 - 2);
1522 write_prdt(p, slot, cfis, buf, len);
1523 tfd = ATA_S_READY | ATA_S_DSC;
1526 case MODEPAGE_CD_CAPABILITIES:
1530 if (len > sizeof(buf))
1533 memset(buf, 0, sizeof(buf));
1534 be16enc(buf, 30 - 2);
1540 be16enc(&buf[18], 2);
1541 be16enc(&buf[20], 512);
1542 write_prdt(p, slot, cfis, buf, len);
1543 tfd = ATA_S_READY | ATA_S_DSC;
1552 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1554 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1559 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1561 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1564 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1565 ahci_write_fis_d2h(p, slot, cfis, tfd);
/*
 * atapi_get_event_status_notification: service GET EVENT STATUS
 * NOTIFICATION.  Only polled mode (IMMED bit set) is supported —
 * an asynchronous request draws ILLEGAL REQUEST sense; otherwise an
 * 8-byte "no event" response is returned, clamped to the allocation
 * length.
 * NOTE(review): numbered listing with gaps; visible lines only.
 */
1569 atapi_get_event_status_notification(struct ahci_port *p, int slot,
1577 /* we don't support asynchronous operation */
1578 if (!(acmd[1] & 1)) {
1579 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1581 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1586 len = be16dec(acmd + 7);
1587 if (len > sizeof(buf))
1590 memset(buf, 0, sizeof(buf));
1591 be16enc(buf, 8 - 2);
1595 write_prdt(p, slot, cfis, buf, len);
1596 tfd = ATA_S_READY | ATA_S_DSC;
1598 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1599 ahci_write_fis_d2h(p, slot, cfis, tfd);
/*
 * handle_packet_cmd: dispatch the SCSI CDB embedded in an ATAPI
 * PACKET command to the atapi_* handlers above.  TEST UNIT READY and
 * the visible no-op cases complete immediately with good status;
 * unrecognized opcodes draw ILLEGAL REQUEST sense.
 * NOTE(review): numbered listing with gaps (several case labels,
 * e.g. for INQUIRY/READ, are not visible); visible lines only.
 */
1603 handle_packet_cmd(struct ahci_port *p, int slot, uint8_t *cfis)
1613 for (i = 0; i < 16; i++)
1614 DPRINTF("%02x ", acmd[i]);
1620 case TEST_UNIT_READY:
1621 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1622 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1625 atapi_inquiry(p, slot, cfis);
1628 atapi_read_capacity(p, slot, cfis);
1632 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1633 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1636 atapi_read_toc(p, slot, cfis);
1639 atapi_report_luns(p, slot, cfis);
1643 atapi_read(p, slot, cfis, 0);
1646 atapi_request_sense(p, slot, cfis);
1648 case START_STOP_UNIT:
1649 atapi_start_stop_unit(p, slot, cfis);
1652 atapi_mode_sense(p, slot, cfis);
1654 case GET_EVENT_STATUS_NOTIFICATION:
1655 atapi_get_event_status_notification(p, slot, cfis);
1658 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1659 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1661 ahci_write_fis_d2h(p, slot, cfis, (p->sense_key << 12) |
1662 ATA_S_READY | ATA_S_ERROR);
/*
 * ahci_handle_cmd: top-level ATA command dispatcher for a command
 * slot.  Sets BUSY in the taskfile, then switches on the command
 * byte (cfis[2]): IDENTIFY, SET FEATURES (SATA feature enable/
 * disable, cache toggles, transfer-mode select), SET MULTI (count
 * must be a power of two <= 128), the read/write family, cache
 * flushes, TRIM (DSM and SEND FPDMA QUEUED forms), READ LOG,
 * power/verify no-ops, ATAPI IDENTIFY and PACKET, and aborts
 * anything unrecognized.
 * NOTE(review): numbered listing with heavy gaps (many case labels
 * and break statements are not visible); comments cover visible
 * lines only.
 */
1668 ahci_handle_cmd(struct ahci_port *p, int slot, uint8_t *cfis)
1671 p->tfd |= ATA_S_BUSY;
1673 case ATA_ATA_IDENTIFY:
1674 handle_identify(p, slot, cfis);
1676 case ATA_SETFEATURES:
1679 case ATA_SF_ENAB_SATA_SF:
1681 case ATA_SATA_SF_AN:
1682 p->tfd = ATA_S_DSC | ATA_S_READY;
1685 p->tfd = ATA_S_ERROR | ATA_S_READY;
1686 p->tfd |= (ATA_ERROR_ABORT << 8);
1690 case ATA_SF_ENAB_WCACHE:
1691 case ATA_SF_DIS_WCACHE:
1692 case ATA_SF_ENAB_RCACHE:
1693 case ATA_SF_DIS_RCACHE:
1694 p->tfd = ATA_S_DSC | ATA_S_READY;
1696 case ATA_SF_SETXFER:
1698 switch (cfis[12] & 0xf8) {
1704 p->xfermode = (cfis[12] & 0x7);
1707 p->tfd = ATA_S_DSC | ATA_S_READY;
1711 p->tfd = ATA_S_ERROR | ATA_S_READY;
1712 p->tfd |= (ATA_ERROR_ABORT << 8);
1715 ahci_write_fis_d2h(p, slot, cfis, p->tfd);
1719 if (cfis[12] != 0 &&
1720 (cfis[12] > 128 || (cfis[12] & (cfis[12] - 1)))) {
1721 p->tfd = ATA_S_ERROR | ATA_S_READY;
1722 p->tfd |= (ATA_ERROR_ABORT << 8);
1724 p->mult_sectors = cfis[12];
1725 p->tfd = ATA_S_DSC | ATA_S_READY;
1727 ahci_write_fis_d2h(p, slot, cfis, p->tfd);
1735 case ATA_READ_MUL48:
1736 case ATA_WRITE_MUL48:
1739 case ATA_READ_DMA48:
1740 case ATA_WRITE_DMA48:
1741 case ATA_READ_FPDMA_QUEUED:
1742 case ATA_WRITE_FPDMA_QUEUED:
1743 ahci_handle_rw(p, slot, cfis, 0);
1745 case ATA_FLUSHCACHE:
1746 case ATA_FLUSHCACHE48:
1747 ahci_handle_flush(p, slot, cfis);
1749 case ATA_DATA_SET_MANAGEMENT:
1750 if (cfis[11] == 0 && cfis[3] == ATA_DSM_TRIM &&
1751 cfis[13] == 0 && cfis[12] == 1) {
1752 ahci_handle_dsm_trim(p, slot, cfis, 0);
1755 ahci_write_fis_d2h(p, slot, cfis,
1756 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
1758 case ATA_SEND_FPDMA_QUEUED:
1759 if ((cfis[13] & 0x1f) == ATA_SFPDMA_DSM &&
1760 cfis[17] == 0 && cfis[16] == ATA_DSM_TRIM &&
1761 cfis[11] == 0 && cfis[3] == 1) {
1762 ahci_handle_dsm_trim(p, slot, cfis, 0);
1765 ahci_write_fis_d2h(p, slot, cfis,
1766 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
1768 case ATA_READ_LOG_EXT:
1769 case ATA_READ_LOG_DMA_EXT:
1770 ahci_handle_read_log(p, slot, cfis);
1772 case ATA_SECURITY_FREEZE_LOCK:
1775 ahci_write_fis_d2h(p, slot, cfis,
1776 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
1778 case ATA_CHECK_POWER_MODE:
1779 cfis[12] = 0xff; /* always on */
1780 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1782 case ATA_STANDBY_CMD:
1783 case ATA_STANDBY_IMMEDIATE:
1785 case ATA_IDLE_IMMEDIATE:
1787 case ATA_READ_VERIFY:
1788 case ATA_READ_VERIFY48:
1789 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1791 case ATA_ATAPI_IDENTIFY:
1792 handle_atapi_identify(p, slot, cfis);
1794 case ATA_PACKET_CMD:
1796 ahci_write_fis_d2h(p, slot, cfis,
1797 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
1799 handle_packet_cmd(p, slot, cfis);
1802 WPRINTF("Unsupported cmd:%02x", cfis[2]);
1803 ahci_write_fis_d2h(p, slot, cfis,
1804 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
/*
 * Process one command slot: read the command header from the guest's
 * command list, map the command FIS (and the PRDT that follows it at
 * offset 0x80) into host memory, and dispatch it.  Only register
 * host-to-device FISes are accepted; a FIS with the C bit (0x80 in
 * byte 1) set is an ATA command, otherwise the SRST bit in the control
 * byte may trigger a port reset.
 * NOTE(review): this excerpt elides several lines of the original body
 * (locals, braces, the reset path) — see the embedded line-number gaps.
 */
1810 ahci_handle_slot(struct ahci_port *p, int slot)
1812 struct ahci_cmd_hdr *hdr;
1814 struct ahci_prdt_entry *prdt;
1816 struct pci_ahci_softc *sc;
/* Each command-list entry is AHCI_CL_SIZE bytes; index by slot. */
1823 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
/* Command FIS length: low 5 bits of flags, in dwords -> bytes. */
1825 cfl = (hdr->flags & 0x1f) * 4;
/* Map CFIS plus trailing PRDT entries in one guest-to-host translation. */
1827 cfis = paddr_guest2host(ahci_ctx(sc), hdr->ctba,
1828 0x80 + hdr->prdtl * sizeof(struct ahci_prdt_entry));
1830 prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
/* Debug dump of the raw CFIS bytes. */
1833 for (i = 0; i < cfl; i++) {
1836 DPRINTF("%02x ", cfis[i]);
/* Debug dump of each PRDT entry: byte count (dbc, 22 bits) @ address. */
1840 for (i = 0; i < hdr->prdtl; i++) {
1841 DPRINTF("%d@%08"PRIx64"", prdt->dbc & 0x3fffff, prdt->dba);
/* Reject anything that is not a register H2D FIS. */
1846 if (cfis[0] != FIS_TYPE_REGH2D) {
1847 WPRINTF("Not a H2D FIS:%02x", cfis[0]);
/* C bit set: this is an ATA command; otherwise a control FIS. */
1851 if (cfis[1] & 0x80) {
1852 ahci_handle_cmd(p, slot, cfis);
/* Control FIS: SRST bit in the device-control byte requests reset. */
1854 if (cfis[15] & (1 << 2))
1856 else if (p->reset) {
/* Clear the command-issue bit for this slot once consumed. */
1860 p->ci &= ~(1 << slot);
/*
 * Scan the port for newly issued commands and dispatch them in
 * round-robin order starting at the current command slot (ccs).
 * Does nothing unless the port is started (AHCI_P_CMD_ST); stops
 * early while the device reports BUSY/DRQ or a clear is pending.
 * NOTE(review): excerpt elides some lines (braces/breaks) of the body.
 */
1865 ahci_handle_port(struct ahci_port *p)
1868 if (!(p->cmd & AHCI_P_CMD_ST))
1872 * Search for any new commands to issue ignoring those that
1873 * are already in-flight. Stop if device is busy or in error.
/* Walk slots, wrapping ccs modulo 32, while unissued commands remain. */
1875 for (; (p->ci & ~p->pending) != 0; p->ccs = ((p->ccs + 1) & 31)) {
1876 if ((p->tfd & (ATA_S_BUSY | ATA_S_DRQ)) != 0)
1878 if (p->waitforclear)
1880 if ((p->ci & ~p->pending & (1 << p->ccs)) != 0) {
/* Publish the slot being executed in PxCMD.CCS before dispatch. */
1881 p->cmd &= ~AHCI_P_CMD_CCS_MASK;
1882 p->cmd |= p->ccs << AHCI_P_CMD_CCS_SHIFT;
1883 ahci_handle_slot(p, p->ccs);
1889 * blockif callback routine - this runs in the context of the blockif
1890 * i/o thread, so the mutex needs to be acquired.
/*
 * Completion callback for ATA block I/O, invoked on the blockif worker
 * thread (hence the explicit softc mutex acquisition).  Moves the
 * request from the busy to the free list, continues multi-part
 * transfers, and otherwise writes the completion FIS (SDB for NCQ
 * commands, D2H for the rest) and re-kicks the port.
 * NOTE(review): excerpt elides several lines (locals, braces, the
 * NCQ/DSM flag assignments) of the original body.
 */
1893 ata_ioreq_cb(struct blockif_req *br, int err)
1895 struct ahci_cmd_hdr *hdr;
1896 struct ahci_ioreq *aior;
1897 struct ahci_port *p;
1898 struct pci_ahci_softc *sc;
1903 DPRINTF("%s %d", __func__, err);
/* Recover our per-request state from the blockif request. */
1906 aior = br->br_param;
1911 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
/* FPDMA (NCQ) commands complete via a Set Device Bits FIS. */
1913 if (cfis[2] == ATA_WRITE_FPDMA_QUEUED ||
1914 cfis[2] == ATA_READ_FPDMA_QUEUED ||
1915 cfis[2] == ATA_SEND_FPDMA_QUEUED)
/* DSM TRIM (plain or via SEND FPDMA QUEUED) continues differently. */
1917 if (cfis[2] == ATA_DATA_SET_MANAGEMENT ||
1918 (cfis[2] == ATA_SEND_FPDMA_QUEUED &&
1919 (cfis[13] & 0x1f) == ATA_SFPDMA_DSM))
/* Callback runs outside the device lock — take it now. */
1922 pthread_mutex_lock(&sc->mtx);
1925 * Delete the blockif request from the busy list
1927 TAILQ_REMOVE(&p->iobhd, aior, io_blist);
1930 * Move the blockif request back to the free list
1932 STAILQ_INSERT_TAIL(&p->iofhd, aior, io_flist);
/* Report bytes transferred so far back through the command header. */
1935 hdr->prdbc = aior->done;
/* More work outstanding and no error: issue the next chunk. */
1937 if (!err && aior->more) {
1939 ahci_handle_dsm_trim(p, slot, cfis, aior->done);
1941 ahci_handle_rw(p, slot, cfis, aior->done);
/* Final completion status: success or generic abort error. */
1946 tfd = ATA_S_READY | ATA_S_DSC;
1948 tfd = (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR;
1950 ahci_write_fis_sdb(p, slot, cfis, tfd);
1952 ahci_write_fis_d2h(p, slot, cfis, tfd);
1955 * This command is now complete.
1957 p->pending &= ~(1 << slot);
/* The port may have been stopped mid-flight; then look for new work. */
1959 ahci_check_stopped(p);
1960 ahci_handle_port(p);
1962 pthread_mutex_unlock(&sc->mtx);
1963 DPRINTF("%s exit", __func__);
/*
 * Completion callback for ATAPI (packet-command) block I/O; the ATAPI
 * counterpart of ata_ioreq_cb.  Runs on the blockif worker thread,
 * so it takes the softc mutex before touching port state.  On error it
 * records an ILLEGAL REQUEST sense key and reports CHECK CONDITION
 * through the D2H FIS.
 * NOTE(review): excerpt elides several lines of the original body.
 */
1967 atapi_ioreq_cb(struct blockif_req *br, int err)
1969 struct ahci_cmd_hdr *hdr;
1970 struct ahci_ioreq *aior;
1971 struct ahci_port *p;
1972 struct pci_ahci_softc *sc;
1977 DPRINTF("%s %d", __func__, err);
1979 aior = br->br_param;
1984 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + aior->slot * AHCI_CL_SIZE);
1986 pthread_mutex_lock(&sc->mtx);
1989 * Delete the blockif request from the busy list
1991 TAILQ_REMOVE(&p->iobhd, aior, io_blist);
1994 * Move the blockif request back to the free list
1996 STAILQ_INSERT_TAIL(&p->iofhd, aior, io_flist);
/* Report bytes transferred so far via the command header. */
1999 hdr->prdbc = aior->done;
/* Continue a multi-part ATAPI read if more data remains. */
2001 if (!err && aior->more) {
2002 atapi_read(p, slot, cfis, aior->done);
2007 tfd = ATA_S_READY | ATA_S_DSC;
/* Error path: latch sense data and signal CHECK CONDITION. */
2009 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
2011 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
/* Interrupt reason: command complete, data to host. */
2013 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
2014 ahci_write_fis_d2h(p, slot, cfis, tfd);
2017 * This command is now complete.
2019 p->pending &= ~(1 << slot);
2021 ahci_check_stopped(p);
2022 ahci_handle_port(p);
2024 pthread_mutex_unlock(&sc->mtx);
2025 DPRINTF("%s exit", __func__);
/*
 * Allocate the port's pool of I/O request structures (one per blockif
 * queue slot), wire each to the appropriate ATA/ATAPI completion
 * callback, and place them all on the free list; the busy list starts
 * empty.
 * NOTE(review): the calloc() result is used without a NULL check on the
 * visible lines — confirm the elided code handles allocation failure.
 */
2029 pci_ahci_ioreq_init(struct ahci_port *pr)
2031 struct ahci_ioreq *vr;
2034 pr->ioqsz = blockif_queuesz(pr->bctx);
2035 pr->ioreq = calloc(pr->ioqsz, sizeof(struct ahci_ioreq));
2036 STAILQ_INIT(&pr->iofhd);
2039 * Add all i/o request entries to the free queue
2041 for (i = 0; i < pr->ioqsz; i++) {
/* Callback choice depends on whether the port is ATAPI (elided test). */
2045 vr->io_req.br_callback = ata_ioreq_cb;
2047 vr->io_req.br_callback = atapi_ioreq_cb;
/* br_param lets the callback recover the ahci_ioreq wrapper. */
2048 vr->io_req.br_param = vr;
2049 STAILQ_INSERT_TAIL(&pr->iofhd, vr, io_flist);
2052 TAILQ_INIT(&pr->iobhd);
/*
 * Handle a guest write to a per-port AHCI register.  The port index and
 * register offset are decoded from the BAR offset.  Most of the work is
 * in the PxCMD handler: starting/stopping the command list and FIS
 * receive engines (mapping guest memory on start), command-list
 * override, and interface power-management bits.
 * NOTE(review): excerpt elides the switch statement and several case
 * labels; the visible lines are register-specific fragments.
 */
2056 pci_ahci_port_write(struct pci_ahci_softc *sc, uint64_t offset, uint64_t value)
2058 int port = (offset - AHCI_OFFSET) / AHCI_STEP;
2059 offset = (offset - AHCI_OFFSET) % AHCI_STEP;
2060 struct ahci_port *p = &sc->port[port];
2062 DPRINTF("pci_ahci_port %d: write offset 0x%"PRIx64" value 0x%"PRIx64"",
2063 port, offset, value);
/* PxIE: mask off reserved interrupt-enable bits. */
2083 p->ie = value & 0xFDC000FF;
/* PxCMD: clear then set the writable bits from the guest value. */
2088 p->cmd &= ~(AHCI_P_CMD_ST | AHCI_P_CMD_SUD | AHCI_P_CMD_POD |
2089 AHCI_P_CMD_CLO | AHCI_P_CMD_FRE | AHCI_P_CMD_APSTE |
2090 AHCI_P_CMD_ATAPI | AHCI_P_CMD_DLAE | AHCI_P_CMD_ALPE |
2091 AHCI_P_CMD_ASP | AHCI_P_CMD_ICC_MASK);
2092 p->cmd |= (AHCI_P_CMD_ST | AHCI_P_CMD_SUD | AHCI_P_CMD_POD |
2093 AHCI_P_CMD_CLO | AHCI_P_CMD_FRE | AHCI_P_CMD_APSTE |
2094 AHCI_P_CMD_ATAPI | AHCI_P_CMD_DLAE | AHCI_P_CMD_ALPE |
2095 AHCI_P_CMD_ASP | AHCI_P_CMD_ICC_MASK) & value;
/* ST cleared: stop the command-list engine (elided); ST set: start it. */
2097 if (!(value & AHCI_P_CMD_ST)) {
2102 p->cmd |= AHCI_P_CMD_CR;
/* Map the guest command list (CLB/CLBU) into host memory. */
2103 clb = (uint64_t)p->clbu << 32 | p->clb;
2104 p->cmd_lst = paddr_guest2host(ahci_ctx(sc), clb,
2105 AHCI_CL_SIZE * AHCI_MAX_SLOTS);
/* FRE toggles the FIS-receive engine and maps the receive area. */
2108 if (value & AHCI_P_CMD_FRE) {
2111 p->cmd |= AHCI_P_CMD_FR;
2112 fb = (uint64_t)p->fbu << 32 | p->fb;
2113 /* we don't support FBSCP, so rfis size is 256Bytes */
2114 p->rfis = paddr_guest2host(ahci_ctx(sc), fb, 256);
2116 p->cmd &= ~AHCI_P_CMD_FR;
/* CLO: clear BSY/DRQ so a new command list can be processed. */
2119 if (value & AHCI_P_CMD_CLO) {
2120 p->tfd &= ~(ATA_S_BUSY | ATA_S_DRQ);
2121 p->cmd &= ~AHCI_P_CMD_CLO;
/* ICC transitions complete immediately in this emulation. */
2124 if (value & AHCI_P_CMD_ICC_MASK) {
2125 p->cmd &= ~AHCI_P_CMD_ICC_MASK;
2128 ahci_handle_port(p);
/* Read-only registers: PxTFD/PxSIG/PxSSTS (per warning text). */
2134 WPRINTF("pci_ahci_port: read only registers 0x%"PRIx64"", offset);
/* PxSCTL: a DET reset request is honored only while ST is clear. */
2138 if (!(p->cmd & AHCI_P_CMD_ST)) {
2139 if (value & ATA_SC_DET_RESET)
2151 ahci_handle_port(p);
/*
 * Handle a guest write to a global (HBA-level) AHCI register.  GHC
 * supports controller reset (HR) and the global interrupt-enable bit;
 * writes to IS acknowledge port interrupts.
 * NOTE(review): excerpt elides the switch statement and read-only
 * register cases (CAP/PI/VS etc., per the warning text).
 */
2161 pci_ahci_host_write(struct pci_ahci_softc *sc, uint64_t offset, uint64_t value)
2163 DPRINTF("pci_ahci_host: write offset 0x%"PRIx64" value 0x%"PRIx64"",
2171 DPRINTF("pci_ahci_host: read only registers 0x%"PRIx64"", offset);
/* GHC.HR: full controller reset (handler elided). */
2174 if (value & AHCI_GHC_HR) {
2178 if (value & AHCI_GHC_IE)
2179 sc->ghc |= AHCI_GHC_IE;
2181 sc->ghc &= ~AHCI_GHC_IE;
/* Re-evaluate interrupt state for all ports after IE change. */
2182 ahci_generate_intr(sc, 0xffffffff);
/* IS write: acknowledge (and re-evaluate) the written port bits. */
2186 ahci_generate_intr(sc, value);
/*
 * PCI BAR write entry point.  Only BAR 5 (ABAR) exists and the guest is
 * expected to perform aligned 32-bit accesses.  Routes the write to the
 * global-register or per-port handler based on offset, under the softc
 * mutex.
 */
2194 pci_ahci_write(struct vmctx *ctx, int vcpu, struct pci_devinst *pi,
2195 int baridx, uint64_t offset, int size, uint64_t value)
2197 struct pci_ahci_softc *sc = pi->pi_arg;
2199 assert(baridx == 5);
2200 assert((offset % 4) == 0 && size == 4);
2202 pthread_mutex_lock(&sc->mtx);
/* Offsets below AHCI_OFFSET are global registers; then per-port ranges. */
2204 if (offset < AHCI_OFFSET)
2205 pci_ahci_host_write(sc, offset, value);
2206 else if (offset < AHCI_OFFSET + sc->ports * AHCI_STEP)
2207 pci_ahci_port_write(sc, offset, value);
2209 WPRINTF("pci_ahci: unknown i/o write offset 0x%"PRIx64"", offset);
2211 pthread_mutex_unlock(&sc->mtx);
/*
 * Read a global AHCI register.  The contiguous global registers
 * starting at CAP are read directly out of the softc by treating that
 * region as an array of 32-bit words.
 * NOTE(review): excerpt elides the switch/default handling and the
 * return path.
 */
2215 pci_ahci_host_read(struct pci_ahci_softc *sc, uint64_t offset)
/* Index into the softc's register block, assumed laid out like the HBA. */
2231 uint32_t *p = &sc->cap;
2232 p += (offset - AHCI_CAP) / sizeof(uint32_t);
2240 DPRINTF("pci_ahci_host: read offset 0x%"PRIx64" value 0x%x",
/*
 * Read a per-port AHCI register.  Decodes the port and register offset
 * from the BAR offset, then indexes the port's register block (laid out
 * starting at clb) as an array of 32-bit words.
 * NOTE(review): excerpt elides the switch/default handling and return.
 */
2247 pci_ahci_port_read(struct pci_ahci_softc *sc, uint64_t offset)
2250 int port = (offset - AHCI_OFFSET) / AHCI_STEP;
2251 offset = (offset - AHCI_OFFSET) % AHCI_STEP;
/* Port registers are stored contiguously from clb onward. */
2271 uint32_t *p= &sc->port[port].clb;
2272 p += (offset - AHCI_P_CLB) / sizeof(uint32_t);
2281 DPRINTF("pci_ahci_port %d: read offset 0x%"PRIx64" value 0x%x",
2282 port, offset, value);
/*
 * PCI BAR read entry point.  Supports 1/2/4-byte naturally aligned
 * reads of BAR 5 by performing an aligned 32-bit register read and
 * shifting out the requested bytes.
 */
2288 pci_ahci_read(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int baridx,
2289 uint64_t regoff, int size)
2291 struct pci_ahci_softc *sc = pi->pi_arg;
2295 assert(baridx == 5);
2296 assert(size == 1 || size == 2 || size == 4);
2297 assert((regoff & (size - 1)) == 0);
2299 pthread_mutex_lock(&sc->mtx);
2301 offset = regoff & ~0x3; /* round down to a multiple of 4 bytes */
2302 if (offset < AHCI_OFFSET)
2303 value = pci_ahci_host_read(sc, offset);
2304 else if (offset < AHCI_OFFSET + sc->ports * AHCI_STEP)
2305 value = pci_ahci_port_read(sc, offset);
2308 WPRINTF("pci_ahci: unknown i/o read offset 0x%"PRIx64"",
/* Shift so the addressed byte(s) land in the low bits of the result. */
2311 value >>= 8 * (regoff & 0x3);
2313 pthread_mutex_unlock(&sc->mtx);
2319 * Each AHCI controller has a "port" node which contains nodes for
2320 * each port named after the decimal number of the port (no leading
2321 * zeroes). Port nodes contain a "type" ("hd" or "cd"), as well as
2322 * options for blockif. For example:
2329 * .path="/path/to/image"
/*
 * Create the config node for a single port ("port.<N>"), record its
 * type ("hd" or "cd"), and let blockif parse the remaining legacy
 * option string into that node.  Returns blockif_legacy_config()'s
 * status (0 on success).
 */
2332 pci_ahci_legacy_config_port(nvlist_t *nvl, int port, const char *type,
/* Two digits is enough: MAX_PORTS is 32. */
2335 char node_name[sizeof("XX")];
2338 snprintf(node_name, sizeof(node_name), "%d", port);
2339 port_nvl = create_relative_config_node(nvl, node_name);
2340 set_config_value_node(port_nvl, "type", type);
2341 return (blockif_legacy_config(port_nvl, opts));
/*
 * Parse the legacy "ahci" option string, which is a comma-separated
 * sequence of per-port entries each prefixed with "hd:" or "cd:".
 * Each entry becomes a "port.<N>" config node.  The next entry is
 * located by scanning for the following ",hd:"/",cd:" marker so that
 * commas inside blockif options don't split entries.
 * NOTE(review): excerpt elides several lines (type assignment, comma
 * termination, error/cleanup paths including the strdup free).
 */
2345 pci_ahci_legacy_config(nvlist_t *nvl, const char *opts)
2347 nvlist_t *ports_nvl;
2349 char *next, *next2, *str, *tofree;
2355 ports_nvl = create_relative_config_node(nvl, "port");
/* Work on a private copy; strstr/pointer surgery mutates it. */
2357 tofree = str = strdup(opts);
2358 for (p = 0; p < MAX_PORTS && str != NULL; p++, str = next) {
2359 /* Identify and cut off type of present port. */
2360 if (strncmp(str, "hd:", 3) == 0) {
2363 } else if (strncmp(str, "cd:", 3) == 0) {
2369 /* Find and cut off the next port options. */
2370 next = strstr(str, ",hd:");
2371 next2 = strstr(str, ",cd:");
/* Use whichever marker comes first in the string. */
2372 if (next == NULL || (next2 != NULL && next2 < next))
2383 EPRINTLN("Missing or invalid type for port %d: \"%s\"",
2388 if (pci_ahci_legacy_config_port(ports_nvl, p, type, str) != 0)
/*
 * Legacy config for the single-device "ahci-cd" emulation: the whole
 * option string describes one CD device on port 0.
 */
2398 pci_ahci_cd_legacy_config(nvlist_t *nvl, const char *opts)
2400 nvlist_t *ports_nvl;
2402 ports_nvl = create_relative_config_node(nvl, "port");
2403 return (pci_ahci_legacy_config_port(ports_nvl, 0, "cd", opts));
/*
 * Legacy config for the single-device "ahci-hd" emulation: the whole
 * option string describes one disk on port 0.
 */
2407 pci_ahci_hd_legacy_config(nvlist_t *nvl, const char *opts)
2409 nvlist_t *ports_nvl;
2411 ports_nvl = create_relative_config_node(nvl, "port");
2412 return (pci_ahci_legacy_config_port(ports_nvl, 0, "hd", opts));
/*
 * Device initialization.  For each configured "port.<N>" node: open the
 * backing image via blockif, build the ATA IDENTIFY data (model/serial/
 * revision, optionally overridden by "model"/"ser"/"rev"/"nmrr" config
 * values, with the default serial derived from an MD5 of the image
 * path), and allocate the port's I/O request pool.  Then advertise an
 * Intel ICH8-like AHCI controller: capability bits, PCI IDs, MSI, and
 * BAR 5 sized to cover all ports.  On failure, closes any blockif
 * contexts already opened.
 * NOTE(review): excerpt elides many lines (error checks, MD5Init, the
 * atapi type check, cleanup labels).  The calloc() result is used
 * without a visible NULL check — confirm the elided code handles it.
 */
2416 pci_ahci_init(struct vmctx *ctx, struct pci_devinst *pi, nvlist_t *nvl)
2418 char bident[sizeof("XX:XX:XX")];
2419 char node_name[sizeof("XX")];
2420 struct blockif_ctxt *bctxt;
2421 struct pci_ahci_softc *sc;
2422 int atapi, ret, slots, p;
2425 const char *path, *type, *value;
2426 nvlist_t *ports_nvl, *port_nvl;
/* NOTE(review): debug-log artifact; presumably guarded by AHCI_DEBUG
 * on an elided line — confirm it is not unconditionally opened. */
2431 dbg = fopen("/tmp/log", "w+");
2434 sc = calloc(1, sizeof(struct pci_ahci_softc));
2437 pthread_mutex_init(&sc->mtx, NULL);
2442 ports_nvl = find_relative_config_node(nvl, "port");
2443 for (p = 0; p < MAX_PORTS; p++) {
2444 struct ata_params *ata_ident = &sc->port[p].ata_ident;
2445 char ident[AHCI_PORT_IDENT];
/* Skip ports with no config node. */
2447 snprintf(node_name, sizeof(node_name), "%d", p);
2448 port_nvl = find_relative_config_node(ports_nvl, node_name);
2449 if (port_nvl == NULL)
2452 type = get_config_value_node(port_nvl, "type");
2456 if (strcmp(type, "hd") == 0)
2462 * Attempt to open the backing image. Use the PCI slot/func
2463 * and the port number for the identifier string.
2465 snprintf(bident, sizeof(bident), "%d:%d:%d", pi->pi_slot,
2468 bctxt = blockif_open(port_nvl, bident);
2469 if (bctxt == NULL) {
2474 sc->port[p].bctx = bctxt;
2475 sc->port[p].pr_sc = sc;
2476 sc->port[p].port = p;
2477 sc->port[p].atapi = atapi;
2480 * Create an identifier for the backing file.
2481 * Use parts of the md5 sum of the filename
2483 path = get_config_value_node(port_nvl, "path");
2485 MD5Update(&mdctx, path, strlen(path));
2486 MD5Final(digest, &mdctx);
2487 snprintf(ident, AHCI_PORT_IDENT,
2488 "BHYVE-%02X%02X-%02X%02X-%02X%02X",
2489 digest[0], digest[1], digest[2], digest[3], digest[4],
/* Populate IDENTIFY strings; config values override defaults below. */
2492 memset(ata_ident, 0, sizeof(struct ata_params));
2493 ata_string((uint8_t*)&ata_ident->serial, ident, 20);
2494 ata_string((uint8_t*)&ata_ident->revision, "001", 8);
2496 ata_string((uint8_t*)&ata_ident->model, "BHYVE SATA DVD ROM", 40);
2498 ata_string((uint8_t*)&ata_ident->model, "BHYVE SATA DISK", 40);
2499 value = get_config_value_node(port_nvl, "nmrr");
2501 ata_ident->media_rotation_rate = atoi(value);
2502 value = get_config_value_node(port_nvl, "ser");
2504 ata_string((uint8_t*)(&ata_ident->serial), value, 20);
2505 value = get_config_value_node(port_nvl, "rev");
2507 ata_string((uint8_t*)(&ata_ident->revision), value, 8);
2508 value = get_config_value_node(port_nvl, "model");
2510 ata_string((uint8_t*)(&ata_ident->model), value, 40);
2511 ata_identify_init(&sc->port[p], atapi);
2514 * Allocate blockif request structures and add them
2517 pci_ahci_ioreq_init(&sc->port[p]);
/* Advertised command slots are limited by the smallest port queue. */
2520 if (sc->port[p].ioqsz < slots)
2521 slots = sc->port[p].ioqsz;
2525 /* Intel ICH8 AHCI */
2527 if (sc->ports < DEF_PORTS)
2528 sc->ports = DEF_PORTS;
2529 sc->cap = AHCI_CAP_64BIT | AHCI_CAP_SNCQ | AHCI_CAP_SSNTF |
2530 AHCI_CAP_SMPS | AHCI_CAP_SSS | AHCI_CAP_SALP |
2531 AHCI_CAP_SAL | AHCI_CAP_SCLO | (0x3 << AHCI_CAP_ISS_SHIFT)|
2532 AHCI_CAP_PMD | AHCI_CAP_SSC | AHCI_CAP_PSC |
2533 (slots << AHCI_CAP_NCS_SHIFT) | AHCI_CAP_SXS | (sc->ports - 1);
2536 sc->cap2 = AHCI_CAP2_APST;
2539 pci_set_cfgdata16(pi, PCIR_DEVICE, 0x2821);
2540 pci_set_cfgdata16(pi, PCIR_VENDOR, 0x8086);
2541 pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_STORAGE);
2542 pci_set_cfgdata8(pi, PCIR_SUBCLASS, PCIS_STORAGE_SATA);
2543 pci_set_cfgdata8(pi, PCIR_PROGIF, PCIP_STORAGE_SATA_AHCI_1_0);
/* MSI vector count: smallest power of two >= min(ports, 16). */
2544 p = MIN(sc->ports, 16);
2545 p = flsl(p) - ((p & (p - 1)) ? 0 : 1);
2546 pci_emul_add_msicap(pi, 1 << p);
2547 pci_emul_alloc_bar(pi, 5, PCIBAR_MEM32,
2548 AHCI_OFFSET + sc->ports * AHCI_STEP);
2550 pci_lintr_request(pi);
/* Error path: close every blockif context opened so far. */
2554 for (p = 0; p < sc->ports; p++) {
2555 if (sc->port[p].bctx != NULL)
2556 blockif_close(sc->port[p].bctx);
2564 #ifdef BHYVE_SNAPSHOT
/*
 * Snapshot a port's I/O request queues.  Each queue is serialized as a
 * sequence of array indices (computed from the request's position in
 * the port's ioreq array), presumably terminated by a sentinel written
 * by the elided lines — TODO confirm.  Only requests on the busy list
 * have their in-flight blockif state saved.
 */
2566 pci_ahci_snapshot_save_queues(struct ahci_port *port,
2567 struct vm_snapshot_meta *meta)
2571 struct ahci_ioreq *ioreq;
/* Free list: indices only; these requests carry no in-flight state. */
2573 STAILQ_FOREACH(ioreq, &port->iofhd, io_flist) {
2574 idx = ((void *) ioreq - (void *) port->ioreq) / sizeof(*ioreq);
2575 SNAPSHOT_VAR_OR_LEAVE(idx, meta, ret, done);
2579 SNAPSHOT_VAR_OR_LEAVE(idx, meta, ret, done);
2581 TAILQ_FOREACH(ioreq, &port->iobhd, io_blist) {
2582 idx = ((void *) ioreq - (void *) port->ioreq) / sizeof(*ioreq);
2583 SNAPSHOT_VAR_OR_LEAVE(idx, meta, ret, done);
2586 * Snapshot only the busy requests; other requests are
2589 ret = blockif_snapshot_req(&ioreq->io_req, meta);
2591 fprintf(stderr, "%s: failed to snapshot req\r\n",
2598 SNAPSHOT_VAR_OR_LEAVE(idx, meta, ret, done);
/*
 * Restore a port's I/O request queues from a snapshot: rebuild the free
 * list from saved indices, then the busy list, restoring each busy
 * request's blockif state and re-submitting it to the block backend
 * (read or write chosen by the request's direction on an elided test).
 * NOTE(review): the loop conditions/sentinel handling are elided.
 */
2605 pci_ahci_snapshot_restore_queues(struct ahci_port *port,
2606 struct vm_snapshot_meta *meta)
2610 struct ahci_ioreq *ioreq;
2612 /* Empty the free queue before restoring. */
2613 while (!STAILQ_EMPTY(&port->iofhd))
2614 STAILQ_REMOVE_HEAD(&port->iofhd, io_flist);
2616 /* Restore the free queue. */
2618 SNAPSHOT_VAR_OR_LEAVE(idx, meta, ret, done);
2622 STAILQ_INSERT_TAIL(&port->iofhd, &port->ioreq[idx], io_flist);
2625 /* Restore the busy queue. */
2627 SNAPSHOT_VAR_OR_LEAVE(idx, meta, ret, done);
2631 ioreq = &port->ioreq[idx];
2632 TAILQ_INSERT_TAIL(&port->iobhd, ioreq, io_blist);
2635 * Restore only the busy requests; other requests are
2638 ret = blockif_snapshot_req(&ioreq->io_req, meta);
2640 fprintf(stderr, "%s: failed to restore request\r\n",
2645 /* Re-enqueue the requests in the block interface. */
2647 ret = blockif_read(port->bctx, &ioreq->io_req);
2649 ret = blockif_write(port->bctx, &ioreq->io_req);
2653 "%s: failed to re-enqueue request\r\n",
/*
 * Save or restore the full controller state: global HBA registers,
 * then, for each port that has a backing device, the port registers,
 * host-mapped guest regions (command list and RFIS area), IDENTIFY
 * data, per-request bookkeeping, and finally the request queues and
 * the blockif context itself.  Sanity checks ensure the restored
 * configuration matches the running one (same ports populated, same
 * port numbering).
 * NOTE(review): excerpt elides error-handling lines, the bctx boolean
 * assignment on save, and the function's done/return path.
 */
2664 pci_ahci_snapshot(struct vm_snapshot_meta *meta)
2668 struct pci_devinst *pi;
2669 struct pci_ahci_softc *sc;
2670 struct ahci_port *port;
2671 struct ahci_cmd_hdr *hdr;
2672 struct ahci_ioreq *ioreq;
2674 pi = meta->dev_data;
2677 /* TODO: add mtx lock/unlock */
/* Global HBA register block. */
2679 SNAPSHOT_VAR_OR_LEAVE(sc->ports, meta, ret, done);
2680 SNAPSHOT_VAR_OR_LEAVE(sc->cap, meta, ret, done);
2681 SNAPSHOT_VAR_OR_LEAVE(sc->ghc, meta, ret, done);
2682 SNAPSHOT_VAR_OR_LEAVE(sc->is, meta, ret, done);
2683 SNAPSHOT_VAR_OR_LEAVE(sc->pi, meta, ret, done);
2684 SNAPSHOT_VAR_OR_LEAVE(sc->vs, meta, ret, done);
2685 SNAPSHOT_VAR_OR_LEAVE(sc->ccc_ctl, meta, ret, done);
2686 SNAPSHOT_VAR_OR_LEAVE(sc->ccc_pts, meta, ret, done);
2687 SNAPSHOT_VAR_OR_LEAVE(sc->em_loc, meta, ret, done);
2688 SNAPSHOT_VAR_OR_LEAVE(sc->em_ctl, meta, ret, done);
2689 SNAPSHOT_VAR_OR_LEAVE(sc->cap2, meta, ret, done);
2690 SNAPSHOT_VAR_OR_LEAVE(sc->bohc, meta, ret, done);
2691 SNAPSHOT_VAR_OR_LEAVE(sc->lintr, meta, ret, done);
2693 for (i = 0; i < MAX_PORTS; i++) {
2694 port = &sc->port[i];
/* On save, record whether this port has a backing device (elided). */
2696 if (meta->op == VM_SNAPSHOT_SAVE)
2699 SNAPSHOT_VAR_OR_LEAVE(bctx, meta, ret, done);
2700 SNAPSHOT_VAR_OR_LEAVE(port->port, meta, ret, done);
2702 /* Mostly for restore; save is ensured by the lines above. */
2703 if (((bctx == NULL) && (port->bctx != NULL)) ||
2704 ((bctx != NULL) && (port->bctx == NULL))) {
2705 fprintf(stderr, "%s: ports not matching\r\n", __func__);
2710 if (port->bctx == NULL)
2713 if (port->port != i) {
2714 fprintf(stderr, "%s: ports not matching: "
2715 "actual: %d expected: %d\r\n",
2716 __func__, port->port, i);
/* Re-translate guest addresses for the mapped regions on restore. */
2721 SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(port->cmd_lst,
2722 AHCI_CL_SIZE * AHCI_MAX_SLOTS, false, meta, ret, done);
2723 SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(port->rfis, 256, false, meta,
/* Device identity and per-port emulation state. */
2726 SNAPSHOT_VAR_OR_LEAVE(port->ata_ident, meta, ret, done);
2727 SNAPSHOT_VAR_OR_LEAVE(port->atapi, meta, ret, done);
2728 SNAPSHOT_VAR_OR_LEAVE(port->reset, meta, ret, done);
2729 SNAPSHOT_VAR_OR_LEAVE(port->waitforclear, meta, ret, done);
2730 SNAPSHOT_VAR_OR_LEAVE(port->mult_sectors, meta, ret, done);
2731 SNAPSHOT_VAR_OR_LEAVE(port->xfermode, meta, ret, done);
2732 SNAPSHOT_VAR_OR_LEAVE(port->err_cfis, meta, ret, done);
2733 SNAPSHOT_VAR_OR_LEAVE(port->sense_key, meta, ret, done);
2734 SNAPSHOT_VAR_OR_LEAVE(port->asc, meta, ret, done);
2735 SNAPSHOT_VAR_OR_LEAVE(port->ccs, meta, ret, done);
2736 SNAPSHOT_VAR_OR_LEAVE(port->pending, meta, ret, done);
/* Architectural per-port registers. */
2738 SNAPSHOT_VAR_OR_LEAVE(port->clb, meta, ret, done);
2739 SNAPSHOT_VAR_OR_LEAVE(port->clbu, meta, ret, done);
2740 SNAPSHOT_VAR_OR_LEAVE(port->fb, meta, ret, done);
2741 SNAPSHOT_VAR_OR_LEAVE(port->fbu, meta, ret, done);
2742 SNAPSHOT_VAR_OR_LEAVE(port->ie, meta, ret, done);
2743 SNAPSHOT_VAR_OR_LEAVE(port->cmd, meta, ret, done);
2744 SNAPSHOT_VAR_OR_LEAVE(port->unused0, meta, ret, done);
2745 SNAPSHOT_VAR_OR_LEAVE(port->tfd, meta, ret, done);
2746 SNAPSHOT_VAR_OR_LEAVE(port->sig, meta, ret, done);
2747 SNAPSHOT_VAR_OR_LEAVE(port->ssts, meta, ret, done);
2748 SNAPSHOT_VAR_OR_LEAVE(port->sctl, meta, ret, done);
2749 SNAPSHOT_VAR_OR_LEAVE(port->serr, meta, ret, done);
2750 SNAPSHOT_VAR_OR_LEAVE(port->sact, meta, ret, done);
2751 SNAPSHOT_VAR_OR_LEAVE(port->ci, meta, ret, done);
2752 SNAPSHOT_VAR_OR_LEAVE(port->sntf, meta, ret, done);
2753 SNAPSHOT_VAR_OR_LEAVE(port->fbs, meta, ret, done);
2754 SNAPSHOT_VAR_OR_LEAVE(port->ioqsz, meta, ret, done);
2756 for (j = 0; j < port->ioqsz; j++) {
2757 ioreq = &port->ioreq[j];
2759 /* blockif_req snapshot done only for busy requests. */
/* Re-derive the request's CFIS pointer from its slot's header. */
2760 hdr = (struct ahci_cmd_hdr *)(port->cmd_lst +
2761 ioreq->slot * AHCI_CL_SIZE);
2762 SNAPSHOT_GUEST2HOST_ADDR_OR_LEAVE(ioreq->cfis,
2763 0x80 + hdr->prdtl * sizeof(struct ahci_prdt_entry),
2764 false, meta, ret, done);
2766 SNAPSHOT_VAR_OR_LEAVE(ioreq->len, meta, ret, done);
2767 SNAPSHOT_VAR_OR_LEAVE(ioreq->done, meta, ret, done);
2768 SNAPSHOT_VAR_OR_LEAVE(ioreq->slot, meta, ret, done);
2769 SNAPSHOT_VAR_OR_LEAVE(ioreq->more, meta, ret, done);
2770 SNAPSHOT_VAR_OR_LEAVE(ioreq->readop, meta, ret, done);
2773 /* Perform save / restore specific operations. */
2774 if (meta->op == VM_SNAPSHOT_SAVE) {
2775 ret = pci_ahci_snapshot_save_queues(port, meta);
2778 } else if (meta->op == VM_SNAPSHOT_RESTORE) {
2779 ret = pci_ahci_snapshot_restore_queues(port, meta);
/* Finally snapshot the block backend itself. */
2787 ret = blockif_snapshot(port->bctx, meta);
2789 fprintf(stderr, "%s: failed to restore blockif\r\n",
/*
 * Snapshot support: quiesce I/O by pausing the blockif context of every
 * port that has a backing device (ports without one are skipped by the
 * elided NULL test).
 */
2800 pci_ahci_pause(struct vmctx *ctx, struct pci_devinst *pi)
2802 struct pci_ahci_softc *sc;
2803 struct blockif_ctxt *bctxt;
2808 for (i = 0; i < MAX_PORTS; i++) {
2809 bctxt = sc->port[i].bctx;
2813 blockif_pause(bctxt);
/*
 * Snapshot support: resume I/O on every port that has a backing device,
 * undoing pci_ahci_pause().
 */
2820 pci_ahci_resume(struct vmctx *ctx, struct pci_devinst *pi)
2822 struct pci_ahci_softc *sc;
2823 struct blockif_ctxt *bctxt;
2828 for (i = 0; i < MAX_PORTS; i++) {
2829 bctxt = sc->port[i].bctx;
2833 blockif_resume(bctxt);
/*
 * Device-emulation registration tables.  "ahci" is the full-featured
 * multi-port controller; "ahci-hd" and "ahci-cd" are legacy aliases
 * that differ only in how they parse their option string (single disk
 * or CD on port 0).  The aliases presumably share the remaining
 * callbacks with pci_de_ahci via elided initializer lines — confirm.
 */
2841 * Use separate emulation names to distinguish drive and atapi devices
2843 struct pci_devemu pci_de_ahci = {
2845 .pe_init = pci_ahci_init,
2846 .pe_legacy_config = pci_ahci_legacy_config,
2847 .pe_barwrite = pci_ahci_write,
2848 .pe_barread = pci_ahci_read,
2849 #ifdef BHYVE_SNAPSHOT
2850 .pe_snapshot = pci_ahci_snapshot,
2851 .pe_pause = pci_ahci_pause,
2852 .pe_resume = pci_ahci_resume,
2855 PCI_EMUL_SET(pci_de_ahci);
2857 struct pci_devemu pci_de_ahci_hd = {
2858 .pe_emu = "ahci-hd",
2859 .pe_legacy_config = pci_ahci_hd_legacy_config,
2862 PCI_EMUL_SET(pci_de_ahci_hd);
2864 struct pci_devemu pci_de_ahci_cd = {
2865 .pe_emu = "ahci-cd",
2866 .pe_legacy_config = pci_ahci_cd_legacy_config,
2869 PCI_EMUL_SET(pci_de_ahci_cd);