2 * Copyright (c) 2013 Zhixiang Yu <zcore@freebsd.org>
3 * Copyright (c) 2015-2016 Alexander Motin <mav@FreeBSD.org>
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
33 #include <sys/param.h>
34 #include <sys/linker_set.h>
37 #include <sys/ioctl.h>
40 #include <sys/endian.h>
52 #include <pthread_np.h>
/*
 * Device-model constants: port counts, AHCI PxSIG signatures, SATA FIS type
 * codes, ATAPI/SCSI opcodes, mode-page codes, and SET FEATURES subcommands.
 * NOTE(review): this view is gap-sampled; the enum header for the FIS type
 * values and some surrounding definitions are not visible here.
 */
61 #define DEF_PORTS 6 /* Intel ICH8 AHCI supports 6 ports */
62 #define MAX_PORTS 32 /* AHCI supports 32 ports */
64 #define PxSIG_ATA 0x00000101 /* ATA drive */
65 #define PxSIG_ATAPI 0xeb140101 /* ATAPI drive */
68 FIS_TYPE_REGH2D = 0x27, /* Register FIS - host to device */
69 FIS_TYPE_REGD2H = 0x34, /* Register FIS - device to host */
70 FIS_TYPE_DMAACT = 0x39, /* DMA activate FIS - device to host */
71 FIS_TYPE_DMASETUP = 0x41, /* DMA setup FIS - bidirectional */
72 FIS_TYPE_DATA = 0x46, /* Data FIS - bidirectional */
73 FIS_TYPE_BIST = 0x58, /* BIST activate FIS - bidirectional */
74 FIS_TYPE_PIOSETUP = 0x5F, /* PIO setup FIS - device to host */
75 FIS_TYPE_SETDEVBITS = 0xA1, /* Set dev bits FIS - device to host */
81 #define TEST_UNIT_READY 0x00
82 #define REQUEST_SENSE 0x03
84 #define START_STOP_UNIT 0x1B
85 #define PREVENT_ALLOW 0x1E
86 #define READ_CAPACITY 0x25
88 #define POSITION_TO_ELEMENT 0x2B
90 #define GET_EVENT_STATUS_NOTIFICATION 0x4A
91 #define MODE_SENSE_10 0x5A
92 #define REPORT_LUNS 0xA0
97 * SCSI mode page codes
99 #define MODEPAGE_RW_ERROR_RECOVERY 0x01
100 #define MODEPAGE_CD_CAPABILITIES 0x2A
/* SET FEATURES subcommands: enable/disable SATA features (AN = async notify). */
105 #define ATA_SF_ENAB_SATA_SF 0x10
106 #define ATA_SATA_SF_AN 0x05
107 #define ATA_SF_DIS_SATA_SF 0x90
/* Debug printf goes to 'dbg' when enabled; otherwise compiled out. */
114 #define DPRINTF(format, arg...) do{fprintf(dbg, format, ##arg);fflush(dbg);}while(0)
116 #define DPRINTF(format, arg...)
118 #define WPRINTF(format, arg...) printf(format, ##arg)
/*
 * Core data structures (fragmentary in this view):
 *  - ahci_ioreq: one in-flight blockif request, linked on either the
 *    per-port free list (io_flist) or busy list (io_blist).
 *  - ahci_port: per-port register state, backing blockif context, and
 *    the NCQ error log snapshot (err_cfis).
 *  - ahci_cmd_hdr / ahci_prdt_entry: guest-visible command list and PRDT
 *    layouts; DBCMASK masks the 22-bit byte-count field of a PRDT entry.
 *  - pci_ahci_softc: whole-controller state with up to MAX_PORTS ports.
 * NOTE(review): many member declarations are missing from this sampled view.
 */
121 struct blockif_req io_req;
122 struct ahci_port *io_pr;
123 STAILQ_ENTRY(ahci_ioreq) io_flist;
124 TAILQ_ENTRY(ahci_ioreq) io_blist;
133 struct blockif_ctxt *bctx;
134 struct pci_ahci_softc *pr_sc;
144 uint8_t err_cfis[20];
171 struct ahci_ioreq *ioreq;
173 STAILQ_HEAD(ahci_fhead, ahci_ioreq) iofhd;
174 TAILQ_HEAD(ahci_bhead, ahci_ioreq) iobhd;
177 struct ahci_cmd_hdr {
182 uint32_t reserved[4];
185 struct ahci_prdt_entry {
188 #define DBCMASK 0x3fffff
192 struct pci_ahci_softc {
193 struct pci_devinst *asc_pi;
208 struct ahci_port port[MAX_PORTS];
210 #define ahci_ctx(sc) ((sc)->asc_pi->pi_vmctx)
212 static void ahci_handle_port(struct ahci_port *p);
/*
 * Convert an LBA into CD minute/second(/frame) form (75 frames per second)
 * for TOC responses.  buf[0] = minutes, buf[1] = seconds; the frame byte
 * is presumably written by a line not visible in this sampled view.
 */
214 static inline void lba_to_msf(uint8_t *buf, int lba)
217 buf[0] = (lba / 75) / 60;
218 buf[1] = (lba / 75) % 60;
223 * Generate HBA interrupts on global IS register write.
/*
 * Recompute the global IS register from every port's PxIS/PxIE, then
 * deliver either the legacy INTx (deasserted when nothing is pending or
 * GHC.IE is clear) or per-port MSI vectors for the ports named in 'mask'.
 * Ports beyond the MSI message count share the last vector (mmask below).
 */
226 ahci_generate_intr(struct pci_ahci_softc *sc, uint32_t mask)
228 struct pci_devinst *pi = sc->asc_pi;
233 /* Update global IS from PxIS/PxIE. */
234 for (i = 0; i < sc->ports; i++) {
239 DPRINTF("%s(%08x) %08x\n", __func__, mask, sc->is);
241 /* If there is nothing enabled -- clear legacy interrupt and exit. */
242 if (sc->is == 0 || (sc->ghc & AHCI_GHC_IE) == 0) {
244 pci_lintr_deassert(pi);
250 /* If there is anything and no MSI -- assert legacy interrupt. */
251 nmsg = pci_msi_maxmsgnum(pi);
255 pci_lintr_assert(pi);
260 /* Assert respective MSIs for ports that were touched. */
261 for (i = 0; i < nmsg; i++) {
262 if (sc->ports <= nmsg || i < nmsg - 1)
/* Last message covers all remaining ports when ports > messages. */
265 mmask = 0xffffffff << i;
266 if (sc->is & mask && mmask & mask)
267 pci_generate_msi(pi, i);
272 * Generate HBA interrupt on specific port event.
/*
 * Fast path for a single port raising an interrupt: if the port has a
 * non-shared MSI vector, fire it directly; otherwise fall back to the
 * shared last vector or legacy INTx.  Skips work when PxIS & PxIE is 0,
 * when GHC.IE is off, or when the port's global IS bit is already set.
 */
275 ahci_port_intr(struct ahci_port *p)
277 struct pci_ahci_softc *sc = p->pr_sc;
278 struct pci_devinst *pi = sc->asc_pi;
281 DPRINTF("%s(%d) %08x/%08x %08x\n", __func__,
282 p->port, p->is, p->ie, sc->is);
284 /* If there is nothing enabled -- we are done. */
285 if ((p->is & p->ie) == 0)
288 /* In case of non-shared MSI always generate interrupt. */
289 nmsg = pci_msi_maxmsgnum(pi);
290 if (sc->ports <= nmsg || p->port < nmsg - 1) {
291 sc->is |= (1 << p->port);
292 if ((sc->ghc & AHCI_GHC_IE) == 0)
294 pci_generate_msi(pi, p->port);
298 /* If IS for this port is already set -- do nothing. */
299 if (sc->is & (1 << p->port))
302 sc->is |= (1 << p->port);
304 /* If interrupts are enabled -- generate one. */
305 if ((sc->ghc & AHCI_GHC_IE) == 0)
/* Shared case: last MSI vector, or legacy INTx if not already asserted. */
308 pci_generate_msi(pi, nmsg - 1);
309 } else if (!sc->lintr) {
311 pci_lintr_assert(pi);
/*
 * Copy a completed FIS into the port's received-FIS area at the offset
 * appropriate for its type (D2H / Set Device Bits / PIO Setup), and set
 * the matching PxIS bit when the FIS's interrupt flag (bit 6 of byte 1)
 * is set.  A FIS carrying ATA_S_ERROR additionally raises TFE.
 * No-op unless FIS receive is enabled (PxCMD.FRE) and p->rfis is mapped.
 */
316 ahci_write_fis(struct ahci_port *p, enum sata_fis_type ft, uint8_t *fis)
318 int offset, len, irq;
320 if (p->rfis == NULL || !(p->cmd & AHCI_P_CMD_FRE))
324 case FIS_TYPE_REGD2H:
327 irq = (fis[1] & (1 << 6)) ? AHCI_P_IX_DHR : 0;
329 case FIS_TYPE_SETDEVBITS:
332 irq = (fis[1] & (1 << 6)) ? AHCI_P_IX_SDB : 0;
334 case FIS_TYPE_PIOSETUP:
337 irq = (fis[1] & (1 << 6)) ? AHCI_P_IX_PS : 0;
340 WPRINTF("unsupported fis type %d\n", ft);
343 if (fis[2] & ATA_S_ERROR) {
345 irq |= AHCI_P_IX_TFE;
347 memcpy(p->rfis + offset, fis, len);
/* Post a (mostly zeroed) PIO Setup FIS to the received-FIS area. */
357 ahci_write_fis_piosetup(struct ahci_port *p)
361 memset(fis, 0, sizeof(fis));
362 fis[0] = FIS_TYPE_PIOSETUP;
363 ahci_write_fis(p, FIS_TYPE_PIOSETUP, fis);
/*
 * Post a Set Device Bits FIS completing NCQ slot 'slot' with taskfile
 * status 'tfd' (error byte in bits 15:8).  On error the command FIS is
 * snapshotted into err_cfis for the NCQ Command Error log; otherwise the
 * slot's SActive bit is cleared and reported in the FIS's SActive field.
 */
367 ahci_write_fis_sdb(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t tfd)
372 error = (tfd >> 8) & 0xff;
374 memset(fis, 0, sizeof(fis));
375 fis[0] = FIS_TYPE_SETDEVBITS;
379 if (fis[2] & ATA_S_ERROR) {
380 p->err_cfis[0] = slot;
381 p->err_cfis[2] = tfd;
382 p->err_cfis[3] = error;
383 memcpy(&p->err_cfis[4], cfis + 4, 16);
385 *(uint32_t *)(fis + 4) = (1 << slot);
386 p->sact &= ~(1 << slot);
390 ahci_write_fis(p, FIS_TYPE_SETDEVBITS, fis);
/*
 * Post a Register D2H FIS completing non-NCQ slot 'slot' with taskfile
 * value 'tfd'.  On error, record the command FIS in err_cfis (tag 0x80 =
 * non-queued command) for the error log; always clears the slot's PxCI bit.
 */
394 ahci_write_fis_d2h(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t tfd)
399 error = (tfd >> 8) & 0xff;
400 memset(fis, 0, sizeof(fis));
401 fis[0] = FIS_TYPE_REGD2H;
415 if (fis[2] & ATA_S_ERROR) {
416 p->err_cfis[0] = 0x80;
417 p->err_cfis[2] = tfd & 0xff;
418 p->err_cfis[3] = error;
419 memcpy(&p->err_cfis[4], cfis + 4, 16);
421 p->ci &= ~(1 << slot);
423 ahci_write_fis(p, FIS_TYPE_REGD2H, fis);
/*
 * Post the initial, non-interrupting D2H FIS that acknowledges an NCQ
 * command: taskfile goes READY|DSC, the slot's PxCI bit is cleared
 * (completion is later signalled via a Set Device Bits FIS).
 */
427 ahci_write_fis_d2h_ncq(struct ahci_port *p, int slot)
431 p->tfd = ATA_S_READY | ATA_S_DSC;
432 memset(fis, 0, sizeof(fis));
433 fis[0] = FIS_TYPE_REGD2H;
434 fis[1] = 0; /* No interrupt */
435 fis[2] = p->tfd; /* Status */
436 fis[3] = 0; /* No error */
437 p->ci &= ~(1 << slot);
438 ahci_write_fis(p, FIS_TYPE_REGD2H, fis);
/*
 * Post the D2H signature FIS sent after a device reset; the signature
 * bytes (ATA vs ATAPI) are filled in by lines not visible in this view.
 */
442 ahci_write_reset_fis_d2h(struct ahci_port *p)
446 memset(fis, 0, sizeof(fis));
447 fis[0] = FIS_TYPE_REGD2H;
455 ahci_write_fis(p, FIS_TYPE_REGD2H, fis);
/*
 * Finish a PxCMD.ST=0 transition: once no commands remain in flight,
 * clear the command-list-running bit and current command slot.
 */
459 ahci_check_stopped(struct ahci_port *p)
462 * If we are no longer processing the command list and nothing
463 * is in-flight, clear the running bit, the current command
464 * slot, the command issue and active bits.
466 if (!(p->cmd & AHCI_P_CMD_ST)) {
467 if (p->pending == 0) {
469 p->cmd &= ~(AHCI_P_CMD_CR | AHCI_P_CMD_CCS_MASK);
/*
 * Stop a port: walk the busy list trying to cancel each outstanding
 * blockif request; for each cancelled request clear its SActive (NCQ) or
 * CI (non-NCQ) bit, mark the slot no longer pending, and return the
 * ioreq to the free list.  Caller must hold the softc mutex (asserted).
 */
478 ahci_port_stop(struct ahci_port *p)
480 struct ahci_ioreq *aior;
486 assert(pthread_mutex_isowned_np(&p->pr_sc->mtx));
488 TAILQ_FOREACH(aior, &p->iobhd, io_blist) {
490 * Try to cancel the outstanding blockif request.
492 error = blockif_cancel(p->bctx, &aior->io_req);
/* NCQ commands tracked via PxSACT; others via PxCI. */
498 if (cfis[2] == ATA_WRITE_FPDMA_QUEUED ||
499 cfis[2] == ATA_READ_FPDMA_QUEUED ||
500 cfis[2] == ATA_SEND_FPDMA_QUEUED)
504 p->sact &= ~(1 << slot);
506 p->ci &= ~(1 << slot);
509 * This command is now done.
511 p->pending &= ~(1 << slot);
514 * Delete the blockif request from the busy list
516 TAILQ_REMOVE(&p->iobhd, aior, io_blist);
519 * Move the blockif request back to the free list
521 STAILQ_INSERT_TAIL(&p->iofhd, aior, io_flist);
524 ahci_check_stopped(p);
/*
 * Reset one port's SATA link state: with no backing device, report
 * DET=no-device and an all-ones signature; with a device, report PHY
 * online, a link speed honoring any PxSCTL speed cap (else Gen3), the
 * post-reset taskfile, and the ATA/ATAPI signature D2H FIS.
 */
528 ahci_port_reset(struct ahci_port *pr)
532 pr->xfermode = ATA_UDMA6;
533 pr->mult_sectors = 128;
536 pr->ssts = ATA_SS_DET_NO_DEVICE;
537 pr->sig = 0xFFFFFFFF;
541 pr->ssts = ATA_SS_DET_PHY_ONLINE | ATA_SS_IPM_ACTIVE;
542 if (pr->sctl & ATA_SC_SPD_MASK)
543 pr->ssts |= (pr->sctl & ATA_SC_SPD_MASK);
545 pr->ssts |= ATA_SS_SPD_GEN3;
546 pr->tfd = (1 << 8) | ATA_S_DSC | ATA_S_DMA;
549 pr->tfd |= ATA_S_READY;
551 pr->sig = PxSIG_ATAPI;
552 ahci_write_reset_fis_d2h(pr);
/*
 * Whole-controller (HBA) reset: re-arm GHC.AE, drop the legacy interrupt,
 * and reinitialize every port (spin-up/power-on, cold-presence when a
 * backing device exists, PxSCTL cleared, then a per-port link reset).
 */
556 ahci_reset(struct pci_ahci_softc *sc)
560 sc->ghc = AHCI_GHC_AE;
564 pci_lintr_deassert(sc->asc_pi);
568 for (i = 0; i < sc->ports; i++) {
571 sc->port[i].cmd = (AHCI_P_CMD_SUD | AHCI_P_CMD_POD);
572 if (sc->port[i].bctx)
573 sc->port[i].cmd |= AHCI_P_CMD_CPS;
574 sc->port[i].sctl = 0;
575 ahci_port_reset(&sc->port[i]);
/*
 * Copy a string into an IDENTIFY data field using ATA's byte-swapped
 * (big-endian within each 16-bit word) layout — hence the 'i ^ 1' index.
 */
580 ata_string(uint8_t *dest, const char *src, int len)
584 for (i = 0; i < len; i++) {
586 dest[i ^ 1] = *src++;
/* Copy a string into a SCSI/ATAPI field in plain byte order (space padding
 * presumably handled by lines not visible in this sampled view). */
593 atapi_string(uint8_t *dest, const char *src, int len)
597 for (i = 0; i < len; i++) {
606 * Build up the iovec based on the PRDT, 'done' and 'len'.
/*
 * Translate the guest PRDT into the blockif request's iovec, skipping the
 * 'done' bytes already transferred.  If BLOCKIF_IOV_MAX entries are not
 * enough, the request is truncated to a sector multiple and aior->more is
 * set so the remainder is reissued from the completion callback.
 */
609 ahci_build_iov(struct ahci_port *p, struct ahci_ioreq *aior,
610 struct ahci_prdt_entry *prdt, uint16_t prdtl)
612 struct blockif_req *breq = &aior->io_req;
613 int i, j, skip, todo, left, extra;
616 /* Copy part of PRDT between 'done' and 'len' bytes into the iov. */
618 left = aior->len - aior->done;
620 for (i = 0, j = 0; i < prdtl && j < BLOCKIF_IOV_MAX && left > 0;
622 dbcsz = (prdt->dbc & DBCMASK) + 1;
623 /* Skip already done part of the PRDT */
631 breq->br_iov[j].iov_base = paddr_guest2host(ahci_ctx(p->pr_sc),
632 prdt->dba + skip, dbcsz);
633 breq->br_iov[j].iov_len = dbcsz;
640 /* If we got limited by IOV length, round I/O down to sector size. */
641 if (j == BLOCKIF_IOV_MAX) {
642 extra = todo % blockif_sectsz(p->bctx);
/* Trim the tail iovec entries until 'extra' bytes are removed. */
646 if (breq->br_iov[j - 1].iov_len > extra) {
647 breq->br_iov[j - 1].iov_len -= extra;
650 extra -= breq->br_iov[j - 1].iov_len;
656 breq->br_resid = todo;
658 aior->more = (aior->done < aior->len && i < prdtl);
/*
 * Issue an ATA read/write (PIO, multi, DMA, 48-bit, or NCQ FPDMA) to the
 * backing blockif device.  Decodes LBA and sector count from the command
 * FIS per command family, converts to byte offsets, pulls an ioreq off
 * the free list, builds the iovec from the PRDT starting at byte 'done'
 * (non-zero on continuation from the completion callback), and submits
 * blockif_read/blockif_write.  NCQ commands get their early D2H ack here.
 */
662 ahci_handle_rw(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t done)
664 struct ahci_ioreq *aior;
665 struct blockif_req *breq;
666 struct ahci_prdt_entry *prdt;
667 struct ahci_cmd_hdr *hdr;
670 int err, first, ncq, readop;
672 prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
673 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
/* Any write-family opcode flips the direction to a blockif write. */
678 if (cfis[2] == ATA_WRITE || cfis[2] == ATA_WRITE48 ||
679 cfis[2] == ATA_WRITE_MUL || cfis[2] == ATA_WRITE_MUL48 ||
680 cfis[2] == ATA_WRITE_DMA || cfis[2] == ATA_WRITE_DMA48 ||
681 cfis[2] == ATA_WRITE_FPDMA_QUEUED)
/* NCQ: 48-bit LBA in the FIS, count split across bytes 11 and 3. */
684 if (cfis[2] == ATA_WRITE_FPDMA_QUEUED ||
685 cfis[2] == ATA_READ_FPDMA_QUEUED) {
686 lba = ((uint64_t)cfis[10] << 40) |
687 ((uint64_t)cfis[9] << 32) |
688 ((uint64_t)cfis[8] << 24) |
689 ((uint64_t)cfis[6] << 16) |
690 ((uint64_t)cfis[5] << 8) |
692 len = cfis[11] << 8 | cfis[3];
/* 48-bit commands: count in bytes 13:12. */
696 } else if (cfis[2] == ATA_READ48 || cfis[2] == ATA_WRITE48 ||
697 cfis[2] == ATA_READ_MUL48 || cfis[2] == ATA_WRITE_MUL48 ||
698 cfis[2] == ATA_READ_DMA48 || cfis[2] == ATA_WRITE_DMA48) {
699 lba = ((uint64_t)cfis[10] << 40) |
700 ((uint64_t)cfis[9] << 32) |
701 ((uint64_t)cfis[8] << 24) |
702 ((uint64_t)cfis[6] << 16) |
703 ((uint64_t)cfis[5] << 8) |
705 len = cfis[13] << 8 | cfis[12];
/* 28-bit LBA commands: low nibble of byte 7 holds LBA bits 27:24. */
709 lba = ((cfis[7] & 0xf) << 24) | (cfis[6] << 16) |
710 (cfis[5] << 8) | cfis[4];
/* Convert sectors to bytes for blockif. */
715 lba *= blockif_sectsz(p->bctx);
716 len *= blockif_sectsz(p->bctx);
718 /* Pull request off free list */
719 aior = STAILQ_FIRST(&p->iofhd);
720 assert(aior != NULL);
721 STAILQ_REMOVE_HEAD(&p->iofhd, io_flist);
727 breq = &aior->io_req;
728 breq->br_offset = lba + done;
729 ahci_build_iov(p, aior, prdt, hdr->prdtl);
731 /* Mark this command in-flight. */
732 p->pending |= 1 << slot;
734 /* Stuff request onto busy list. */
735 TAILQ_INSERT_HEAD(&p->iobhd, aior, io_blist);
/* NCQ commands acknowledge immediately; data completion comes via SDB. */
738 ahci_write_fis_d2h_ncq(p, slot);
741 err = blockif_read(p->bctx, breq);
743 err = blockif_write(p->bctx, breq);
/*
 * Issue FLUSH CACHE (and 48-bit variant) as an asynchronous blockif
 * flush: take an ioreq from the free list, mark the slot in-flight,
 * queue it on the busy list, and submit blockif_flush.
 */
748 ahci_handle_flush(struct ahci_port *p, int slot, uint8_t *cfis)
750 struct ahci_ioreq *aior;
751 struct blockif_req *breq;
755 * Pull request off free list
757 aior = STAILQ_FIRST(&p->iofhd);
758 assert(aior != NULL);
759 STAILQ_REMOVE_HEAD(&p->iofhd, io_flist);
765 breq = &aior->io_req;
768 * Mark this command in-flight.
770 p->pending |= 1 << slot;
773 * Stuff request onto busy list
775 TAILQ_INSERT_HEAD(&p->iobhd, aior, io_blist);
777 err = blockif_flush(p->bctx, breq);
/*
 * Copy up to 'len' bytes of guest data described by the slot's PRDT into
 * a host buffer ('to'), walking PRDT entries until the length is consumed.
 */
782 read_prdt(struct ahci_port *p, int slot, uint8_t *cfis,
785 struct ahci_cmd_hdr *hdr;
786 struct ahci_prdt_entry *prdt;
790 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
793 prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
794 for (i = 0; i < hdr->prdtl && len; i++) {
799 dbcsz = (prdt->dbc & DBCMASK) + 1;
800 ptr = paddr_guest2host(ahci_ctx(p->pr_sc), prdt->dba, dbcsz);
801 sublen = len < dbcsz ? len : dbcsz;
802 memcpy(to, ptr, sublen);
/*
 * Handle DATA SET MANAGEMENT / SEND FPDMA QUEUED TRIM: read the TRIM
 * range payload from the PRDT, decode one 48-bit LBA + 16-bit length
 * entry at offset 'done', and submit a blockif_delete for it.  Each
 * completion re-enters here (via the callback) to process the next
 * entry; a zero-length entry ends the command with a success FIS.
 */
810 ahci_handle_dsm_trim(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t done)
812 struct ahci_ioreq *aior;
813 struct blockif_req *breq;
/* Payload length (in 512-byte blocks) comes from different FIS bytes
 * for DSM vs. SEND FPDMA QUEUED. */
821 if (cfis[2] == ATA_DATA_SET_MANAGEMENT) {
822 len = (uint16_t)cfis[13] << 8 | cfis[12];
825 } else { /* ATA_SEND_FPDMA_QUEUED */
826 len = (uint16_t)cfis[11] << 8 | cfis[3];
830 read_prdt(p, slot, cfis, buf, sizeof(buf));
/* Each 8-byte entry: 48-bit starting LBA + 16-bit sector count. */
834 elba = ((uint64_t)entry[5] << 40) |
835 ((uint64_t)entry[4] << 32) |
836 ((uint64_t)entry[3] << 24) |
837 ((uint64_t)entry[2] << 16) |
838 ((uint64_t)entry[1] << 8) |
840 elen = (uint16_t)entry[7] << 8 | entry[6];
/* Zero-length entry: command is finished, report success. */
846 ahci_write_fis_d2h_ncq(p, slot);
847 ahci_write_fis_sdb(p, slot, cfis,
848 ATA_S_READY | ATA_S_DSC);
850 ahci_write_fis_d2h(p, slot, cfis,
851 ATA_S_READY | ATA_S_DSC);
853 p->pending &= ~(1 << slot);
854 ahci_check_stopped(p);
863 * Pull request off free list
865 aior = STAILQ_FIRST(&p->iofhd);
866 assert(aior != NULL);
867 STAILQ_REMOVE_HEAD(&p->iofhd, io_flist);
872 aior->more = (len != done);
874 breq = &aior->io_req;
875 breq->br_offset = elba * blockif_sectsz(p->bctx);
876 breq->br_resid = elen * blockif_sectsz(p->bctx);
879 * Mark this command in-flight.
881 p->pending |= 1 << slot;
884 * Stuff request onto busy list
886 TAILQ_INSERT_HEAD(&p->iobhd, aior, io_blist);
889 ahci_write_fis_d2h_ncq(p, slot);
891 err = blockif_delete(p->bctx, breq);
/*
 * Copy up to 'len' bytes from a host buffer ('from') into the guest
 * memory described by the slot's PRDT; records the byte count actually
 * transferred in the command header's PRD byte count (prdbc).
 */
896 write_prdt(struct ahci_port *p, int slot, uint8_t *cfis,
899 struct ahci_cmd_hdr *hdr;
900 struct ahci_prdt_entry *prdt;
904 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
907 prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
908 for (i = 0; i < hdr->prdtl && len; i++) {
913 dbcsz = (prdt->dbc & DBCMASK) + 1;
914 ptr = paddr_guest2host(ahci_ctx(p->pr_sc), prdt->dba, dbcsz);
915 sublen = len < dbcsz ? len : dbcsz;
916 memcpy(ptr, from, sublen);
921 hdr->prdbc = size - len;
/*
 * Fill in the trailing checksum byte of an ATA data structure so that
 * all 'size' bytes sum to zero modulo 256.
 */
925 ahci_checksum(uint8_t *buf, int size)
930 for (i = 0; i < size - 1; i++)
932 buf[size - 1] = 0x100 - sum;
/*
 * Handle READ LOG EXT / READ LOG DMA EXT for the logs this model exposes:
 * 0x00 (log directory), 0x10 (NCQ Command Error, from err_cfis), and
 * 0x13 (NCQ Send/Receive, advertising SFQ DSM TRIM when the backing
 * device supports delete and is writable).  Rejects ATAPI ports,
 * zero-PRDT commands, multi-page reads, and unknown log addresses
 * with an aborted-command D2H FIS.
 */
936 ahci_handle_read_log(struct ahci_port *p, int slot, uint8_t *cfis)
938 struct ahci_cmd_hdr *hdr;
940 uint8_t *buf8 = (uint8_t *)buf;
941 uint16_t *buf16 = (uint16_t *)buf;
943 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
944 if (p->atapi || hdr->prdtl == 0 || cfis[5] != 0 ||
945 cfis[9] != 0 || cfis[12] != 1 || cfis[13] != 0) {
946 ahci_write_fis_d2h(p, slot, cfis,
947 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
951 memset(buf, 0, sizeof(buf));
952 if (cfis[4] == 0x00) { /* Log directory */
953 buf16[0x00] = 1; /* Version -- 1 */
954 buf16[0x10] = 1; /* NCQ Command Error Log -- 1 page */
955 buf16[0x13] = 1; /* SATA NCQ Send and Receive Log -- 1 page */
956 } else if (cfis[4] == 0x10) { /* NCQ Command Error Log */
957 memcpy(buf8, p->err_cfis, sizeof(p->err_cfis));
958 ahci_checksum(buf8, sizeof(buf));
959 } else if (cfis[4] == 0x13) { /* SATA NCQ Send and Receive Log */
960 if (blockif_candelete(p->bctx) && !blockif_is_ro(p->bctx)) {
961 buf[0x00] = 1; /* SFQ DSM supported */
962 buf[0x01] = 1; /* SFQ DSM TRIM supported */
965 ahci_write_fis_d2h(p, slot, cfis,
966 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
/* PIO variant gets a PIO Setup FIS before the data. */
970 if (cfis[2] == ATA_READ_LOG_EXT)
971 ahci_write_fis_piosetup(p);
972 write_prdt(p, slot, cfis, (void *)buf, sizeof(buf));
973 ahci_write_fis_d2h(p, slot, cfis, ATA_S_DSC | ATA_S_READY);
/*
 * Handle IDENTIFY DEVICE for an ATA disk port: build the 512-byte (256
 * word) identify page from blockif geometry (size, sector size, CHS,
 * physical-sector info), advertise supported features (48-bit LBA, NCQ,
 * flush, DSM TRIM when deletable and writable, DMA/UDMA modes), then
 * return it via PIO Setup + data + D2H status.  ATAPI ports and
 * zero-PRDT commands are rejected with an aborted-command FIS.
 * Word meanings follow the ATA/ACS IDENTIFY DEVICE layout.
 */
977 handle_identify(struct ahci_port *p, int slot, uint8_t *cfis)
979 struct ahci_cmd_hdr *hdr;
981 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
982 if (p->atapi || hdr->prdtl == 0) {
983 ahci_write_fis_d2h(p, slot, cfis,
984 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
988 int sectsz, psectsz, psectoff, candelete, ro;
992 ro = blockif_is_ro(p->bctx);
993 candelete = blockif_candelete(p->bctx);
994 sectsz = blockif_sectsz(p->bctx);
995 sectors = blockif_size(p->bctx) / sectsz;
996 blockif_chs(p->bctx, &cyl, &heads, &sech);
997 blockif_psectsz(p->bctx, &psectsz, &psectoff);
998 memset(buf, 0, sizeof(buf));
1003 ata_string((uint8_t *)(buf+10), p->ident, 20);
1004 ata_string((uint8_t *)(buf+23), "001", 8);
1005 ata_string((uint8_t *)(buf+27), "BHYVE SATA DISK", 40);
1006 buf[47] = (0x8000 | 128);
1008 buf[49] = (1 << 8 | 1 << 9 | 1 << 11);
1009 buf[50] = (1 << 14);
1010 buf[53] = (1 << 1 | 1 << 2);
1011 if (p->mult_sectors)
1012 buf[59] = (0x100 | p->mult_sectors);
/* Words 60-61: 28-bit capacity, clamped at 0x0fffffff sectors. */
1013 if (sectors <= 0x0fffffff) {
1015 buf[61] = (sectors >> 16);
1021 if (p->xfermode & ATA_WDMA0)
1022 buf[63] |= (1 << ((p->xfermode & 7) + 8));
1030 buf[76] = (ATA_SATA_GEN1 | ATA_SATA_GEN2 | ATA_SATA_GEN3 |
1032 buf[77] = (ATA_SUPPORT_RCVSND_FPDMA_QUEUED |
1033 (p->ssts & ATA_SS_SPD_MASK) >> 3);
1036 buf[82] = (ATA_SUPPORT_POWERMGT | ATA_SUPPORT_WRITECACHE|
1037 ATA_SUPPORT_LOOKAHEAD | ATA_SUPPORT_NOP);
1038 buf[83] = (ATA_SUPPORT_ADDRESS48 | ATA_SUPPORT_FLUSHCACHE |
1039 ATA_SUPPORT_FLUSHCACHE48 | 1 << 14);
1040 buf[84] = (1 << 14);
1041 buf[85] = (ATA_SUPPORT_POWERMGT | ATA_SUPPORT_WRITECACHE|
1042 ATA_SUPPORT_LOOKAHEAD | ATA_SUPPORT_NOP);
1043 buf[86] = (ATA_SUPPORT_ADDRESS48 | ATA_SUPPORT_FLUSHCACHE |
1044 ATA_SUPPORT_FLUSHCACHE48 | 1 << 15);
1045 buf[87] = (1 << 14);
1047 if (p->xfermode & ATA_UDMA0)
1048 buf[88] |= (1 << ((p->xfermode & 7) + 8));
/* Words 100-103: full 48-bit capacity. */
1050 buf[101] = (sectors >> 16);
1051 buf[102] = (sectors >> 32);
1052 buf[103] = (sectors >> 48);
1053 if (candelete && !ro) {
1054 buf[69] |= ATA_SUPPORT_RZAT | ATA_SUPPORT_DRAT;
1056 buf[169] = ATA_SUPPORT_DSM_TRIM;
/* Word 106/209: logical-per-physical sector exponent and alignment. */
1060 if (psectsz > sectsz) {
1062 buf[106] |= ffsl(psectsz / sectsz) - 1;
1063 buf[209] |= (psectoff / sectsz);
1067 buf[117] = sectsz / 2;
1068 buf[118] = ((sectsz / 2) >> 16);
1070 buf[119] = (ATA_SUPPORT_RWLOGDMAEXT | 1 << 14);
1071 buf[120] = (ATA_SUPPORT_RWLOGDMAEXT | 1 << 14);
1074 ahci_checksum((uint8_t *)buf, sizeof(buf));
1075 ahci_write_fis_piosetup(p);
1076 write_prdt(p, slot, cfis, (void *)buf, sizeof(buf));
1077 ahci_write_fis_d2h(p, slot, cfis, ATA_S_DSC | ATA_S_READY);
/*
 * Handle IDENTIFY PACKET DEVICE for an ATAPI (DVD-ROM) port: build the
 * identify page (removable CD/DVD device type, PACKET support, DMA/UDMA
 * modes, SATA speeds) and return it via PIO Setup + data + D2H status.
 * Non-ATAPI ports are rejected with an aborted-command FIS.
 */
1082 handle_atapi_identify(struct ahci_port *p, int slot, uint8_t *cfis)
1085 ahci_write_fis_d2h(p, slot, cfis,
1086 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
1090 memset(buf, 0, sizeof(buf));
1091 buf[0] = (2 << 14 | 5 << 8 | 1 << 7 | 2 << 5);
1092 ata_string((uint8_t *)(buf+10), p->ident, 20);
1093 ata_string((uint8_t *)(buf+23), "001", 8);
1094 ata_string((uint8_t *)(buf+27), "BHYVE SATA DVD ROM", 40);
1095 buf[49] = (1 << 9 | 1 << 8);
1096 buf[50] = (1 << 14 | 1);
1097 buf[53] = (1 << 2 | 1 << 1);
1100 if (p->xfermode & ATA_WDMA0)
1101 buf[63] |= (1 << ((p->xfermode & 7) + 8));
1107 buf[76] = (ATA_SATA_GEN1 | ATA_SATA_GEN2 | ATA_SATA_GEN3);
1108 buf[77] = ((p->ssts & ATA_SS_SPD_MASK) >> 3);
1111 buf[82] = (ATA_SUPPORT_POWERMGT | ATA_SUPPORT_PACKET |
1112 ATA_SUPPORT_RESET | ATA_SUPPORT_NOP);
1113 buf[83] = (1 << 14);
1114 buf[84] = (1 << 14);
1115 buf[85] = (ATA_SUPPORT_POWERMGT | ATA_SUPPORT_PACKET |
1116 ATA_SUPPORT_RESET | ATA_SUPPORT_NOP);
1117 buf[87] = (1 << 14);
1119 if (p->xfermode & ATA_UDMA0)
1120 buf[88] |= (1 << ((p->xfermode & 7) + 8));
1123 ahci_checksum((uint8_t *)buf, sizeof(buf));
1124 ahci_write_fis_piosetup(p);
1125 write_prdt(p, slot, cfis, (void *)buf, sizeof(buf));
1126 ahci_write_fis_d2h(p, slot, cfis, ATA_S_DSC | ATA_S_READY);
/*
 * Handle SCSI INQUIRY: supports the standard page (vendor "BHYVE",
 * product "BHYVE DVD-ROM") and, for EVPD requests, only the supported-
 * VPD-pages page; other VPD pages fail with ILLEGAL REQUEST sense.
 */
1131 atapi_inquiry(struct ahci_port *p, int slot, uint8_t *cfis)
1140 if (acmd[1] & 1) { /* VPD */
1141 if (acmd[2] == 0) { /* Supported VPD pages */
1149 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1151 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1152 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1153 ahci_write_fis_d2h(p, slot, cfis, tfd);
1165 atapi_string(buf + 8, "BHYVE", 8);
1166 atapi_string(buf + 16, "BHYVE DVD-ROM", 16);
1167 atapi_string(buf + 32, "001", 4);
1173 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1174 write_prdt(p, slot, cfis, buf, len);
1175 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
/*
 * Handle READ CAPACITY(10): report the last LBA (2048-byte CD sectors)
 * and the 2048-byte block length, both big-endian.
 */
1179 atapi_read_capacity(struct ahci_port *p, int slot, uint8_t *cfis)
1184 sectors = blockif_size(p->bctx) / 2048;
1185 be32enc(buf, sectors - 1);
1186 be32enc(buf + 4, 2048);
1187 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1188 write_prdt(p, slot, cfis, buf, sizeof(buf));
1189 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
/*
 * Handle READ TOC/PMA/ATIP for a single-track disc.  The format field
 * (acmd[9] high bits) selects formatted TOC, session info, or full TOC;
 * entries are emitted in MSF or LBA form per the MSF bit in acmd[1].
 * Only track 1 (and the 0xaa lead-out) are valid start tracks; others
 * fail with ILLEGAL REQUEST sense.
 * NOTE(review): heavily gap-sampled -- branch structure between the
 * three formats is inferred; confirm against the full source.
 */
1193 atapi_read_toc(struct ahci_port *p, int slot, uint8_t *cfis)
1201 len = be16dec(acmd + 7);
1202 format = acmd[9] >> 6;
1208 uint8_t start_track, buf[20], *bp;
1210 msf = (acmd[1] >> 1) & 1;
1211 start_track = acmd[6];
1212 if (start_track > 1 && start_track != 0xaa) {
1214 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1216 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1217 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1218 ahci_write_fis_d2h(p, slot, cfis, tfd);
1224 if (start_track <= 1) {
/* Lead-out entry: disc size in MSF or LBA form. */
1244 sectors = blockif_size(p->bctx) / blockif_sectsz(p->bctx);
1248 lba_to_msf(bp, sectors);
1251 be32enc(bp, sectors);
1255 be16enc(buf, size - 2);
1258 write_prdt(p, slot, cfis, buf, len);
1259 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1260 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1267 memset(buf, 0, sizeof(buf));
1271 if (len > sizeof(buf))
1273 write_prdt(p, slot, cfis, buf, len);
1274 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1275 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1282 uint8_t start_track, *bp, buf[50];
1284 msf = (acmd[1] >> 1) & 1;
1285 start_track = acmd[6];
1321 sectors = blockif_size(p->bctx) / blockif_sectsz(p->bctx);
1325 lba_to_msf(bp, sectors);
1328 be32enc(bp, sectors);
1351 be16enc(buf, size - 2);
1354 write_prdt(p, slot, cfis, buf, len);
1355 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1356 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
/* Unknown format: ILLEGAL REQUEST sense. */
1363 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1365 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1366 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1367 ahci_write_fis_d2h(p, slot, cfis, tfd);
/* Handle REPORT LUNS: return a zeroed LUN list (single LUN 0 device). */
1374 atapi_report_luns(struct ahci_port *p, int slot, uint8_t *cfis)
1378 memset(buf, 0, sizeof(buf));
1381 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1382 write_prdt(p, slot, cfis, buf, sizeof(buf));
1383 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
/*
 * Handle READ(10)/READ(12): decode big-endian LBA and transfer length
 * from the CDB, short-circuit zero-length reads with immediate success,
 * otherwise queue an asynchronous blockif_read via the free-list ioreq
 * mechanism (same pattern as ahci_handle_rw; 'done' supports restart
 * from the completion callback).
 */
1387 atapi_read(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t done)
1389 struct ahci_ioreq *aior;
1390 struct ahci_cmd_hdr *hdr;
1391 struct ahci_prdt_entry *prdt;
1392 struct blockif_req *breq;
1393 struct pci_ahci_softc *sc;
1401 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
1402 prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
1404 lba = be32dec(acmd + 2);
1405 if (acmd[0] == READ_10)
1406 len = be16dec(acmd + 7);
1408 len = be32dec(acmd + 6);
/* Zero-length transfer: complete immediately. */
1410 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1411 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1417 * Pull request off free list
1419 aior = STAILQ_FIRST(&p->iofhd);
1420 assert(aior != NULL);
1421 STAILQ_REMOVE_HEAD(&p->iofhd, io_flist);
1426 breq = &aior->io_req;
1427 breq->br_offset = lba + done;
1428 ahci_build_iov(p, aior, prdt, hdr->prdtl);
1430 /* Mark this command in-flight. */
1431 p->pending |= 1 << slot;
1433 /* Stuff request onto busy list. */
1434 TAILQ_INSERT_HEAD(&p->iobhd, aior, io_blist);
1436 err = blockif_read(p->bctx, breq);
/*
 * Handle REQUEST SENSE: return fixed-format sense data (response code
 * 0x70 with the valid bit) carrying the port's current sense key.
 */
1441 atapi_request_sense(struct ahci_port *p, int slot, uint8_t *cfis)
1449 if (len > sizeof(buf))
1451 memset(buf, 0, len);
1452 buf[0] = 0x70 | (1 << 7);
1453 buf[2] = p->sense_key;
1456 write_prdt(p, slot, cfis, buf, len);
1457 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1458 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
/*
 * Handle START STOP UNIT: start/stop requests succeed trivially; the
 * eject case (LoEj) is unimplemented and fails with ILLEGAL REQUEST.
 */
1462 atapi_start_stop_unit(struct ahci_port *p, int slot, uint8_t *cfis)
1464 uint8_t *acmd = cfis + 0x40;
1467 switch (acmd[4] & 3) {
1471 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1472 tfd = ATA_S_READY | ATA_S_DSC;
1475 /* TODO eject media */
1476 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1477 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1479 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1482 ahci_write_fis_d2h(p, slot, cfis, tfd);
/*
 * Handle MODE SENSE(10): supports the R/W error recovery page (0x01)
 * and CD capabilities page (0x2A, advertising 2 volume levels and a
 * 512 KB buffer); any other page fails with ILLEGAL REQUEST sense.
 */
1486 atapi_mode_sense(struct ahci_port *p, int slot, uint8_t *cfis)
1494 len = be16dec(acmd + 7);
1496 code = acmd[2] & 0x3f;
1501 case MODEPAGE_RW_ERROR_RECOVERY:
1505 if (len > sizeof(buf))
1508 memset(buf, 0, sizeof(buf));
/* Mode data length excludes its own 2-byte field. */
1509 be16enc(buf, 16 - 2);
1514 write_prdt(p, slot, cfis, buf, len);
1515 tfd = ATA_S_READY | ATA_S_DSC;
1518 case MODEPAGE_CD_CAPABILITIES:
1522 if (len > sizeof(buf))
1525 memset(buf, 0, sizeof(buf));
1526 be16enc(buf, 30 - 2);
1532 be16enc(&buf[18], 2);
1533 be16enc(&buf[20], 512);
1534 write_prdt(p, slot, cfis, buf, len);
1535 tfd = ATA_S_READY | ATA_S_DSC;
1544 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1546 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1551 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1553 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1556 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1557 ahci_write_fis_d2h(p, slot, cfis, tfd);
/*
 * Handle GET EVENT STATUS NOTIFICATION: only the polled (immediate)
 * form is supported -- asynchronous requests fail with ILLEGAL REQUEST;
 * the polled reply is a minimal empty event header.
 */
1561 atapi_get_event_status_notification(struct ahci_port *p, int slot,
1569 /* we don't support asynchronous operation */
1570 if (!(acmd[1] & 1)) {
1571 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1573 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1578 len = be16dec(acmd + 7);
1579 if (len > sizeof(buf))
1582 memset(buf, 0, sizeof(buf));
1583 be16enc(buf, 8 - 2);
1587 write_prdt(p, slot, cfis, buf, len);
1588 tfd = ATA_S_READY | ATA_S_DSC;
1590 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1591 ahci_write_fis_d2h(p, slot, cfis, tfd);
/*
 * Dispatch an ATAPI PACKET command: the 16-byte SCSI CDB lives at
 * cfis+0x40; route each supported opcode to its handler and fail
 * everything else with ILLEGAL REQUEST sense in an aborted D2H FIS.
 */
1595 handle_packet_cmd(struct ahci_port *p, int slot, uint8_t *cfis)
1605 for (i = 0; i < 16; i++)
1606 DPRINTF("%02x ", acmd[i]);
1612 case TEST_UNIT_READY:
1613 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1614 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1617 atapi_inquiry(p, slot, cfis);
1620 atapi_read_capacity(p, slot, cfis);
/* PREVENT ALLOW and similar no-op commands just succeed. */
1624 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1625 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1628 atapi_read_toc(p, slot, cfis);
1631 atapi_report_luns(p, slot, cfis);
1635 atapi_read(p, slot, cfis, 0);
1638 atapi_request_sense(p, slot, cfis);
1640 case START_STOP_UNIT:
1641 atapi_start_stop_unit(p, slot, cfis);
1644 atapi_mode_sense(p, slot, cfis);
1646 case GET_EVENT_STATUS_NOTIFICATION:
1647 atapi_get_event_status_notification(p, slot, cfis);
1650 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1651 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1653 ahci_write_fis_d2h(p, slot, cfis, (p->sense_key << 12) |
1654 ATA_S_READY | ATA_S_ERROR);
/*
 * Top-level ATA command dispatcher for one command slot.  Sets BSY in
 * the taskfile, decodes cfis[2], and routes to the identify / set
 * features / read-write / flush / TRIM / read-log / packet handlers.
 * Unsupported commands complete with an aborted-command D2H FIS.
 */
1660 ahci_handle_cmd(struct ahci_port *p, int slot, uint8_t *cfis)
1663 p->tfd |= ATA_S_BUSY;
1665 case ATA_ATA_IDENTIFY:
1666 handle_identify(p, slot, cfis);
1668 case ATA_SETFEATURES:
/* Enable-SATA-feature: only async notification is accepted. */
1671 case ATA_SF_ENAB_SATA_SF:
1673 case ATA_SATA_SF_AN:
1674 p->tfd = ATA_S_DSC | ATA_S_READY;
1677 p->tfd = ATA_S_ERROR | ATA_S_READY;
1678 p->tfd |= (ATA_ERROR_ABORT << 8);
/* Cache-control subcommands accepted as no-ops. */
1682 case ATA_SF_ENAB_WCACHE:
1683 case ATA_SF_DIS_WCACHE:
1684 case ATA_SF_ENAB_RCACHE:
1685 case ATA_SF_DIS_RCACHE:
1686 p->tfd = ATA_S_DSC | ATA_S_READY;
1688 case ATA_SF_SETXFER:
1690 switch (cfis[12] & 0xf8) {
1696 p->xfermode = (cfis[12] & 0x7);
1699 p->tfd = ATA_S_DSC | ATA_S_READY;
1703 p->tfd = ATA_S_ERROR | ATA_S_READY;
1704 p->tfd |= (ATA_ERROR_ABORT << 8);
1707 ahci_write_fis_d2h(p, slot, cfis, p->tfd);
/* SET MULTIPLE: count must be 0 or a power of two <= 128. */
1711 if (cfis[12] != 0 &&
1712 (cfis[12] > 128 || (cfis[12] & (cfis[12] - 1)))) {
1713 p->tfd = ATA_S_ERROR | ATA_S_READY;
1714 p->tfd |= (ATA_ERROR_ABORT << 8);
1716 p->mult_sectors = cfis[12];
1717 p->tfd = ATA_S_DSC | ATA_S_READY;
1719 ahci_write_fis_d2h(p, slot, cfis, p->tfd);
1727 case ATA_READ_MUL48:
1728 case ATA_WRITE_MUL48:
1731 case ATA_READ_DMA48:
1732 case ATA_WRITE_DMA48:
1733 case ATA_READ_FPDMA_QUEUED:
1734 case ATA_WRITE_FPDMA_QUEUED:
1735 ahci_handle_rw(p, slot, cfis, 0);
1737 case ATA_FLUSHCACHE:
1738 case ATA_FLUSHCACHE48:
1739 ahci_handle_flush(p, slot, cfis);
1741 case ATA_DATA_SET_MANAGEMENT:
/* Only single-block TRIM payloads are supported. */
1742 if (cfis[11] == 0 && cfis[3] == ATA_DSM_TRIM &&
1743 cfis[13] == 0 && cfis[12] == 1) {
1744 ahci_handle_dsm_trim(p, slot, cfis, 0);
1747 ahci_write_fis_d2h(p, slot, cfis,
1748 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
1750 case ATA_SEND_FPDMA_QUEUED:
1751 if ((cfis[13] & 0x1f) == ATA_SFPDMA_DSM &&
1752 cfis[17] == 0 && cfis[16] == ATA_DSM_TRIM &&
1753 cfis[11] == 0 && cfis[3] == 1) {
1754 ahci_handle_dsm_trim(p, slot, cfis, 0);
1757 ahci_write_fis_d2h(p, slot, cfis,
1758 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
1760 case ATA_READ_LOG_EXT:
1761 case ATA_READ_LOG_DMA_EXT:
1762 ahci_handle_read_log(p, slot, cfis);
1764 case ATA_SECURITY_FREEZE_LOCK:
1767 ahci_write_fis_d2h(p, slot, cfis,
1768 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
1770 case ATA_CHECK_POWER_MODE:
1771 cfis[12] = 0xff; /* always on */
1772 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1774 case ATA_STANDBY_CMD:
1775 case ATA_STANDBY_IMMEDIATE:
1777 case ATA_IDLE_IMMEDIATE:
1779 case ATA_READ_VERIFY:
1780 case ATA_READ_VERIFY48:
1781 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1783 case ATA_ATAPI_IDENTIFY:
1784 handle_atapi_identify(p, slot, cfis);
1786 case ATA_PACKET_CMD:
/* PACKET on a non-ATAPI port is rejected. */
1788 ahci_write_fis_d2h(p, slot, cfis,
1789 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
1791 handle_packet_cmd(p, slot, cfis);
1794 WPRINTF("Unsupported cmd:%02x\n", cfis[2]);
1795 ahci_write_fis_d2h(p, slot, cfis,
1796 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
/*
 * Process one issued command slot: map the command table (CFIS + PRDT)
 * from guest memory, validate that it is a Register H2D FIS, and either
 * execute it as a command (C bit set in cfis[1]) or treat it as a
 * control FIS (SRST handling via cfis[15]); completed control FISes
 * clear the slot's PxCI bit here.
 */
1802 ahci_handle_slot(struct ahci_port *p, int slot)
1804 struct ahci_cmd_hdr *hdr;
1806 struct ahci_prdt_entry *prdt;
1808 struct pci_ahci_softc *sc;
1815 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
/* Command FIS length is given in dwords in the header flags. */
1817 cfl = (hdr->flags & 0x1f) * 4;
1819 cfis = paddr_guest2host(ahci_ctx(sc), hdr->ctba,
1820 0x80 + hdr->prdtl * sizeof(struct ahci_prdt_entry));
1822 prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
1825 for (i = 0; i < cfl; i++) {
1828 DPRINTF("%02x ", cfis[i]);
1832 for (i = 0; i < hdr->prdtl; i++) {
1833 DPRINTF("%d@%08"PRIx64"\n", prdt->dbc & 0x3fffff, prdt->dba);
1838 if (cfis[0] != FIS_TYPE_REGH2D) {
1839 WPRINTF("Not a H2D FIS:%02x\n", cfis[0]);
1843 if (cfis[1] & 0x80) {
1844 ahci_handle_cmd(p, slot, cfis);
/* Control FIS: bit 2 of byte 15 is SRST. */
1846 if (cfis[15] & (1 << 2))
1848 else if (p->reset) {
1852 p->ci &= ~(1 << slot);
/*
 * Scan PxCI for newly issued commands (skipping in-flight slots),
 * starting from the current command slot (ccs) and wrapping mod 32.
 * Stops while the device reports BSY/DRQ or a clear-wait is pending;
 * updates PxCMD's CCS field as each slot is dispatched.
 */
1857 ahci_handle_port(struct ahci_port *p)
1860 if (!(p->cmd & AHCI_P_CMD_ST))
1864 * Search for any new commands to issue ignoring those that
1865 * are already in-flight. Stop if device is busy or in error.
1867 for (; (p->ci & ~p->pending) != 0; p->ccs = ((p->ccs + 1) & 31)) {
1868 if ((p->tfd & (ATA_S_BUSY | ATA_S_DRQ)) != 0)
1870 if (p->waitforclear)
1872 if ((p->ci & ~p->pending & (1 << p->ccs)) != 0) {
1873 p->cmd &= ~AHCI_P_CMD_CCS_MASK;
1874 p->cmd |= p->ccs << AHCI_P_CMD_CCS_SHIFT;
1875 ahci_handle_slot(p, p->ccs);
1881  * blockif callback routine - this runs in the context of the blockif
1882  * i/o thread, so the mutex needs to be acquired.
/*
 * Completion callback for ATA (hard-disk) block I/O. Classifies the original
 * command (NCQ vs. legacy, DSM trim), recycles the ahci_ioreq, continues
 * multi-part transfers, and otherwise reports status to the guest via an
 * SDB FIS (NCQ) or D2H register FIS.
 * NOTE(review): sampled view — declarations/assignments of aior fields into
 * p/slot/cfis and the ncq/dsm/tfd locals are on lines not visible here.
 */
1885 ata_ioreq_cb(struct blockif_req *br, int err)
1887 	struct ahci_cmd_hdr *hdr;
1888 	struct ahci_ioreq *aior;
1889 	struct ahci_port *p;
1890 	struct pci_ahci_softc *sc;
1895 	DPRINTF("%s %d\n", __func__, err);
1898 	aior = br->br_param;
1903 	hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
/* NCQ commands complete with a Set Device Bits FIS rather than D2H. */
1905 	if (cfis[2] == ATA_WRITE_FPDMA_QUEUED ||
1906 	    cfis[2] == ATA_READ_FPDMA_QUEUED ||
1907 	    cfis[2] == ATA_SEND_FPDMA_QUEUED)
/* DSM TRIM (legacy or via SEND FPDMA QUEUED) resumes differently below. */
1909 	if (cfis[2] == ATA_DATA_SET_MANAGEMENT ||
1910 	    (cfis[2] == ATA_SEND_FPDMA_QUEUED &&
1911 	     (cfis[13] & 0x1f) == ATA_SFPDMA_DSM))
/* Runs on the blockif thread: take the softc lock before touching port state. */
1914 	pthread_mutex_lock(&sc->mtx);
1917 	 * Delete the blockif request from the busy list
1919 	TAILQ_REMOVE(&p->iobhd, aior, io_blist);
1922 	 * Move the blockif request back to the free list
1924 	STAILQ_INSERT_TAIL(&p->iofhd, aior, io_flist);
/* Report bytes transferred so far back through the command header. */
1927 	hdr->prdbc = aior->done;
/* Multi-part request still in progress: issue the next chunk and return. */
1929 	if (!err && aior->more) {
1931 		ahci_handle_dsm_trim(p, slot, cfis, aior->done);
1933 		ahci_handle_rw(p, slot, cfis, aior->done);
/* Build the task-file result: success, or ABRT error on failure. */
1938 		tfd = ATA_S_READY | ATA_S_DSC;
1940 		tfd = (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR;
1942 		ahci_write_fis_sdb(p, slot, cfis, tfd);
1944 		ahci_write_fis_d2h(p, slot, cfis, tfd);
1947 	 * This command is now complete.
1949 	p->pending &= ~(1 << slot);
/* Port may have been asked to stop while I/O was in flight; then resume issue. */
1951 	ahci_check_stopped(p);
1952 	ahci_handle_port(p);
1954 	pthread_mutex_unlock(&sc->mtx);
1955 	DPRINTF("%s exit\n", __func__);
/*
 * Completion callback for ATAPI (CD) block I/O — runs on the blockif thread,
 * so the softc mutex is taken before touching port state. Recycles the
 * ioreq, continues partial reads, and reports status to the guest via a
 * D2H register FIS carrying the sense key in the error field.
 * NOTE(review): sampled view — the lines assigning p/slot/cfis from aior are
 * not visible; L801 uses `slot` while L793 uses `aior->slot`, presumably
 * because `slot = aior->slot` occurs on a hidden line — confirm in full file.
 */
1959 atapi_ioreq_cb(struct blockif_req *br, int err)
1961 	struct ahci_cmd_hdr *hdr;
1962 	struct ahci_ioreq *aior;
1963 	struct ahci_port *p;
1964 	struct pci_ahci_softc *sc;
1969 	DPRINTF("%s %d\n", __func__, err);
1971 	aior = br->br_param;
1976 	hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + aior->slot * AHCI_CL_SIZE);
1978 	pthread_mutex_lock(&sc->mtx);
1981 	 * Delete the blockif request from the busy list
1983 	TAILQ_REMOVE(&p->iobhd, aior, io_blist);
1986 	 * Move the blockif request back to the free list
1988 	STAILQ_INSERT_TAIL(&p->iofhd, aior, io_flist);
/* Report bytes transferred so far back through the command header. */
1991 	hdr->prdbc = aior->done;
/* Multi-part read still in progress: issue the next chunk and return. */
1993 	if (!err && aior->more) {
1994 		atapi_read(p, slot, cfis, aior->done);
1999 		tfd = ATA_S_READY | ATA_S_DSC;
/* On error, report ILLEGAL REQUEST; sense key is packed into tfd bits 15:12. */
2001 		p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
2003 		tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
/* Interrupt field of the FIS: I/O + command-complete indication. */
2005 	cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
2006 	ahci_write_fis_d2h(p, slot, cfis, tfd);
2009 	 * This command is now complete.
2011 	p->pending &= ~(1 << slot);
/* Port may have been asked to stop while I/O was in flight; then resume issue. */
2013 	ahci_check_stopped(p);
2014 	ahci_handle_port(p);
2016 	pthread_mutex_unlock(&sc->mtx);
2017 	DPRINTF("%s exit\n", __func__);
/*
 * Allocate the per-port pool of I/O request structures, sized to the
 * blockif queue depth, wire each one to the appropriate completion
 * callback (ATA vs. ATAPI — the selecting condition is on a line not
 * visible in this sampled view), and place them all on the free list.
 * NOTE(review): the calloc() result is not checked on any visible line —
 * confirm the full file handles allocation failure.
 */
2021 pci_ahci_ioreq_init(struct ahci_port *pr)
2023 	struct ahci_ioreq *vr;
2026 	pr->ioqsz = blockif_queuesz(pr->bctx);
2027 	pr->ioreq = calloc(pr->ioqsz, sizeof(struct ahci_ioreq));
2028 	STAILQ_INIT(&pr->iofhd);
2031 	 * Add all i/o request entries to the free queue
2033 	for (i = 0; i < pr->ioqsz; i++) {
2037 			vr->io_req.br_callback = ata_ioreq_cb;
2039 			vr->io_req.br_callback = atapi_ioreq_cb;
/* Callback gets the ioreq back via br_param. */
2040 		vr->io_req.br_param = vr;
2041 		STAILQ_INSERT_TAIL(&pr->iofhd, vr, io_flist);
/* Busy list starts empty; requests migrate free->busy->free. */
2044 	TAILQ_INIT(&pr->iobhd);
/*
 * Handle a 4-byte guest write to a per-port AHCI register. The port index
 * and register offset are derived from the BAR offset; dispatch is by a
 * switch whose case labels (PxIE, PxCMD, PxSCTL, ...) sit on lines not
 * visible in this sampled view — the bodies below are annotated by the
 * register they evidently serve.
 */
2048 pci_ahci_port_write(struct pci_ahci_softc *sc, uint64_t offset, uint64_t value)
2050 	int port = (offset - AHCI_OFFSET) / AHCI_STEP;
2051 	offset = (offset - AHCI_OFFSET) % AHCI_STEP;
2052 	struct ahci_port *p = &sc->port[port];
2054 	DPRINTF("pci_ahci_port %d: write offset 0x%"PRIx64" value 0x%"PRIx64"\n",
2055 	    port, offset, value);
/* PxIE: mask write to the architecturally writable interrupt-enable bits. */
2075 		p->ie = value & 0xFDC000FF;
/* PxCMD: clear then merge in only the guest-writable command/status bits. */
2080 		p->cmd &= ~(AHCI_P_CMD_ST | AHCI_P_CMD_SUD | AHCI_P_CMD_POD |
2081 		    AHCI_P_CMD_CLO | AHCI_P_CMD_FRE | AHCI_P_CMD_APSTE |
2082 		    AHCI_P_CMD_ATAPI | AHCI_P_CMD_DLAE | AHCI_P_CMD_ALPE |
2083 		    AHCI_P_CMD_ASP | AHCI_P_CMD_ICC_MASK);
2084 		p->cmd |= (AHCI_P_CMD_ST | AHCI_P_CMD_SUD | AHCI_P_CMD_POD |
2085 		    AHCI_P_CMD_CLO | AHCI_P_CMD_FRE | AHCI_P_CMD_APSTE |
2086 		    AHCI_P_CMD_ATAPI | AHCI_P_CMD_DLAE | AHCI_P_CMD_ALPE |
2087 		    AHCI_P_CMD_ASP | AHCI_P_CMD_ICC_MASK) & value;
2089 		if (!(value & AHCI_P_CMD_ST)) {
/* ST set: reflect "command list running" and map the command list from guest memory. */
2094 			p->cmd |= AHCI_P_CMD_CR;
2095 			clb = (uint64_t)p->clbu << 32 | p->clb;
2096 			p->cmd_lst = paddr_guest2host(ahci_ctx(sc), clb,
2097 			    AHCI_CL_SIZE * AHCI_MAX_SLOTS);
2100 		if (value & AHCI_P_CMD_FRE) {
/* FRE set: reflect "FIS receive running" and map the received-FIS area. */
2103 			p->cmd |= AHCI_P_CMD_FR;
2104 			fb = (uint64_t)p->fbu << 32 | p->fb;
2105 			/* we don't support FBSCP, so rfis size is 256Bytes */
2106 			p->rfis = paddr_guest2host(ahci_ctx(sc), fb, 256);
2108 			p->cmd &= ~AHCI_P_CMD_FR;
/* CLO: clear BSY/DRQ in the task file; bit self-clears per the AHCI spec. */
2111 		if (value & AHCI_P_CMD_CLO) {
2112 			p->tfd &= ~(ATA_S_BUSY | ATA_S_DRQ);
2113 			p->cmd &= ~AHCI_P_CMD_CLO;
/* ICC: interface power transitions are not emulated; just clear the field. */
2116 		if (value & AHCI_P_CMD_ICC_MASK) {
2117 			p->cmd &= ~AHCI_P_CMD_ICC_MASK;
2120 		ahci_handle_port(p);
/* Read-only per-port registers: log and ignore the write. */
2126 		WPRINTF("pci_ahci_port: read only registers 0x%"PRIx64"\n", offset);
/* PxSCTL (presumably): DET-based reset only honored while the port is stopped. */
2130 		if (!(p->cmd & AHCI_P_CMD_ST)) {
2131 			if (value & ATA_SC_DET_RESET)
2143 		ahci_handle_port(p);
/*
 * Handle a guest write to a global (HBA-level) AHCI register. Case labels
 * for the switch (GHC, IS, ...) are on lines not visible in this sampled
 * view; bodies below are annotated by the register they evidently serve.
 */
2153 pci_ahci_host_write(struct pci_ahci_softc *sc, uint64_t offset, uint64_t value)
2155 	DPRINTF("pci_ahci_host: write offset 0x%"PRIx64" value 0x%"PRIx64"\n",
/* CAP/PI and friends are read-only: log and ignore. */
2163 		WPRINTF("pci_ahci_host: read only registers 0x%"PRIx64"\n", offset);
/* GHC: HR requests a full HBA reset; IE gates global interrupt delivery. */
2166 		if (value & AHCI_GHC_HR) {
2170 		if (value & AHCI_GHC_IE)
2171 			sc->ghc |= AHCI_GHC_IE;
2173 			sc->ghc &= ~AHCI_GHC_IE;
/* Re-evaluate interrupt state for all ports after an IE change. */
2174 		ahci_generate_intr(sc, 0xffffffff);
/* IS (presumably): write-1-to-clear; re-evaluate the written ports. */
2178 		ahci_generate_intr(sc, value);
/*
 * PCI BAR write entry point for the AHCI MMIO region (BAR 5). Only aligned
 * 4-byte accesses are accepted; the write is routed to the global (HBA)
 * registers or to the per-port register file under the softc lock.
 */
2186 pci_ahci_write(struct vmctx *ctx, int vcpu, struct pci_devinst *pi,
2187 	       int baridx, uint64_t offset, int size, uint64_t value)
2189 	struct pci_ahci_softc *sc = pi->pi_arg;
2191 	assert(baridx == 5);
2192 	assert((offset % 4) == 0 && size == 4);
2194 	pthread_mutex_lock(&sc->mtx);
/* Layout: [0, AHCI_OFFSET) = global regs, then AHCI_STEP bytes per port. */
2196 	if (offset < AHCI_OFFSET)
2197 		pci_ahci_host_write(sc, offset, value);
2198 	else if (offset < AHCI_OFFSET + sc->ports * AHCI_STEP)
2199 		pci_ahci_port_write(sc, offset, value);
2201 		WPRINTF("pci_ahci: unknown i/o write offset 0x%"PRIx64"\n", offset);
2203 	pthread_mutex_unlock(&sc->mtx);
/*
 * Read a global AHCI register. The softc lays out cap..vs (and following
 * registers) contiguously, so the value is fetched by indexing from &sc->cap
 * — this relies on the softc field order matching the register map; the
 * offset-validation cases sit on lines not visible in this sampled view.
 */
2207 pci_ahci_host_read(struct pci_ahci_softc *sc, uint64_t offset)
2223 		uint32_t *p = &sc->cap;
2224 		p += (offset - AHCI_CAP) / sizeof(uint32_t);
2232 	DPRINTF("pci_ahci_host: read offset 0x%"PRIx64" value 0x%x\n",
/*
 * Read a per-port AHCI register. Port index and register offset are derived
 * from the BAR offset; the register value is fetched by indexing from the
 * port's clb field, relying on struct ahci_port laying out the register
 * block contiguously. Validation cases are on lines not visible here.
 */
2239 pci_ahci_port_read(struct pci_ahci_softc *sc, uint64_t offset)
2242 	int port = (offset - AHCI_OFFSET) / AHCI_STEP;
2243 	offset = (offset - AHCI_OFFSET) % AHCI_STEP;
2263 		uint32_t *p= &sc->port[port].clb;
2264 		p += (offset - AHCI_P_CLB) / sizeof(uint32_t);
2273 	DPRINTF("pci_ahci_port %d: read offset 0x%"PRIx64" value 0x%x\n",
2274 	    port, offset, value);
/*
 * PCI BAR read entry point for the AHCI MMIO region (BAR 5). Sub-dword
 * reads are supported by reading the containing aligned dword and shifting
 * the requested bytes into the low end of the result.
 */
2280 pci_ahci_read(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int baridx,
2281 	      uint64_t regoff, int size)
2283 	struct pci_ahci_softc *sc = pi->pi_arg;
2287 	assert(baridx == 5);
2288 	assert(size == 1 || size == 2 || size == 4);
2289 	assert((regoff & (size - 1)) == 0);
2291 	pthread_mutex_lock(&sc->mtx);
2293 	offset = regoff & ~0x3;	    /* round down to a multiple of 4 bytes */
2294 	if (offset < AHCI_OFFSET)
2295 		value = pci_ahci_host_read(sc, offset);
2296 	else if (offset < AHCI_OFFSET + sc->ports * AHCI_STEP)
2297 		value = pci_ahci_port_read(sc, offset);
2300 		WPRINTF("pci_ahci: unknown i/o read offset 0x%"PRIx64"\n",
/* Shift so the requested byte/word lands in the low bits of the return. */
2303 	value >>= 8 * (regoff & 0x3);
2305 	pthread_mutex_unlock(&sc->mtx);
/*
 * Common device-init path for "ahci-hd" / "ahci-cd": parse the comma-
 * separated "hd:<image>" / "cd:<image>" option list, open a blockif backend
 * per port, derive a drive identity string from the MD5 of the image path,
 * then program the emulated HBA capabilities and PCI config space (emulating
 * an Intel ICH8 AHCI controller). `atapi` selects the per-port device type.
 * NOTE(review): sampled view — error-handling branches, some declarations,
 * and the final return paths are on lines not visible here.
 */
2311 pci_ahci_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts, int atapi)
2313 	char bident[sizeof("XX:XX:XX")];
2314 	struct blockif_ctxt *bctxt;
2315 	struct pci_ahci_softc *sc;
/* NOTE(review): debug log opened unconditionally at a fixed path; the
 * FILE* result does not appear to be checked on any visible line. */
2324 	dbg = fopen("/tmp/log", "w+");
/* NOTE(review): calloc() result is not checked on any visible line — confirm. */
2327 	sc = calloc(1, sizeof(struct pci_ahci_softc));
2330 	pthread_mutex_init(&sc->mtx, NULL);
2335 	for (p = 0; p < MAX_PORTS && opts != NULL; p++, opts = next) {
2336 		/* Identify and cut off type of present port. */
2337 		if (strncmp(opts, "hd:", 3) == 0) {
2340 		} else if (strncmp(opts, "cd:", 3) == 0) {
2345 		/* Find and cut off the next port options. */
2346 		next = strstr(opts, ",hd:");
2347 		next2 = strstr(opts, ",cd:");
/* Pick whichever of ",hd:"/",cd:" occurs first as the next-port separator. */
2348 		if (next == NULL || (next2 != NULL && next2 < next))
2359 		 * Attempt to open the backing image. Use the PCI slot/func
2360 		 * and the port number for the identifier string.
2362 		snprintf(bident, sizeof(bident), "%d:%d:%d", pi->pi_slot,
2364 		bctxt = blockif_open(opts, bident);
2365 		if (bctxt == NULL) {
2370 		sc->port[p].bctx = bctxt;
2371 		sc->port[p].pr_sc = sc;
2372 		sc->port[p].port = p;
2373 		sc->port[p].atapi = atapi;
2376 		 * Create an identifier for the backing file.
2377 		 * Use parts of the md5 sum of the filename
2380 		MD5Update(&mdctx, opts, strlen(opts));
2381 		MD5Final(digest, &mdctx);
/* NOTE(review): sprintf output is fixed-length ("BHYVE-" + 14 hex/dash
 * chars), so this is bounded, but snprintf would make that explicit. */
2382 		sprintf(sc->port[p].ident, "BHYVE-%02X%02X-%02X%02X-%02X%02X",
2383 		    digest[0], digest[1], digest[2], digest[3], digest[4],
2387 		 * Allocate blockif request structures and add them
2390 		pci_ahci_ioreq_init(&sc->port[p]);
/* Advertised NCQ depth is capped by the shallowest per-port queue. */
2393 		if (sc->port[p].ioqsz < slots)
2394 			slots = sc->port[p].ioqsz;
2398 	/* Intel ICH8 AHCI */
/* Always expose at least the ICH8's six ports. */
2400 	if (sc->ports < DEF_PORTS)
2401 		sc->ports = DEF_PORTS;
/* HBA capabilities: 64-bit DMA, NCQ, SNotification, staggered spin-up,
 * Gen3 speed (ISS=3), command-slot count, and port count (0-based). */
2402 	sc->cap = AHCI_CAP_64BIT | AHCI_CAP_SNCQ | AHCI_CAP_SSNTF |
2403 	    AHCI_CAP_SMPS | AHCI_CAP_SSS | AHCI_CAP_SALP |
2404 	    AHCI_CAP_SAL | AHCI_CAP_SCLO | (0x3 << AHCI_CAP_ISS_SHIFT)|
2405 	    AHCI_CAP_PMD | AHCI_CAP_SSC | AHCI_CAP_PSC |
2406 	    (slots << AHCI_CAP_NCS_SHIFT) | AHCI_CAP_SXS | (sc->ports - 1);
2409 	sc->cap2 = AHCI_CAP2_APST;
/* PCI identity: Intel ICH8 AHCI SATA controller. */
2412 	pci_set_cfgdata16(pi, PCIR_DEVICE, 0x2821);
2413 	pci_set_cfgdata16(pi, PCIR_VENDOR, 0x8086);
2414 	pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_STORAGE);
2415 	pci_set_cfgdata8(pi, PCIR_SUBCLASS, PCIS_STORAGE_SATA);
2416 	pci_set_cfgdata8(pi, PCIR_PROGIF, PCIP_STORAGE_SATA_AHCI_1_0);
/* MSI vector count: min(ports,16) rounded up to a power of two. */
2417 	p = MIN(sc->ports, 16);
2418 	p = flsl(p) - ((p & (p - 1)) ? 0 : 1);
2419 	pci_emul_add_msicap(pi, 1 << p);
2420 	pci_emul_alloc_bar(pi, 5, PCIBAR_MEM32,
2421 	    AHCI_OFFSET + sc->ports * AHCI_STEP);
2423 	pci_lintr_request(pi);
/* Error path (presumably): close any backends opened so far. */
2427 	for (p = 0; p < sc->ports; p++) {
2428 		if (sc->port[p].bctx != NULL)
2429 			blockif_close(sc->port[p].bctx);
/* "ahci"/"ahci-hd" entry point: initialize with atapi = 0 (hard disks). */
2438 pci_ahci_hd_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
2441 	return (pci_ahci_init(ctx, pi, opts, 0));
/* "ahci-cd" entry point: initialize with atapi = 1 (ATAPI/CD devices). */
2445 pci_ahci_atapi_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
2448 	return (pci_ahci_init(ctx, pi, opts, 1));
2452  * Use separate emulation names to distinguish drive and atapi devices
/* Legacy emulation entry (the .pe_emu name is on a line not visible in
 * this sampled view); registered with the PCI emulation linker set. */
2454 struct pci_devemu pci_de_ahci = {
2456 	.pe_init = pci_ahci_hd_init,
2457 	.pe_barwrite = pci_ahci_write,
2458 	.pe_barread = pci_ahci_read
2460 PCI_EMUL_SET(pci_de_ahci);
/* "ahci-hd" device model: AHCI port(s) backed by hard-disk images. */
2462 struct pci_devemu pci_de_ahci_hd = {
2463 	.pe_emu = "ahci-hd",
2464 	.pe_init = pci_ahci_hd_init,
2465 	.pe_barwrite = pci_ahci_write,
2466 	.pe_barread = pci_ahci_read
2468 PCI_EMUL_SET(pci_de_ahci_hd);
/* "ahci-cd" device model: AHCI port(s) backed by ATAPI CD images. */
2470 struct pci_devemu pci_de_ahci_cd = {
2471 	.pe_emu = "ahci-cd",
2472 	.pe_init = pci_ahci_atapi_init,
2473 	.pe_barwrite = pci_ahci_write,
2474 	.pe_barread = pci_ahci_read
2476 PCI_EMUL_SET(pci_de_ahci_cd);