2 * Copyright (c) 2013 Zhixiang Yu <zcore@freebsd.org>
3 * Copyright (c) 2015-2016 Alexander Motin <mav@FreeBSD.org>
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
33 #include <sys/param.h>
34 #include <sys/linker_set.h>
37 #include <sys/ioctl.h>
40 #include <sys/endian.h>
52 #include <pthread_np.h>
61 #define DEF_PORTS 6 /* Intel ICH8 AHCI supports 6 ports */
62 #define MAX_PORTS 32 /* AHCI supports 32 ports */
64 #define PxSIG_ATA 0x00000101 /* ATA drive */
65 #define PxSIG_ATAPI 0xeb140101 /* ATAPI drive */
68 FIS_TYPE_REGH2D = 0x27, /* Register FIS - host to device */
69 FIS_TYPE_REGD2H = 0x34, /* Register FIS - device to host */
70 FIS_TYPE_DMAACT = 0x39, /* DMA activate FIS - device to host */
71 FIS_TYPE_DMASETUP = 0x41, /* DMA setup FIS - bidirectional */
72 FIS_TYPE_DATA = 0x46, /* Data FIS - bidirectional */
73 FIS_TYPE_BIST = 0x58, /* BIST activate FIS - bidirectional */
74 FIS_TYPE_PIOSETUP = 0x5F, /* PIO setup FIS - device to host */
75 FIS_TYPE_SETDEVBITS = 0xA1, /* Set dev bits FIS - device to host */
81 #define TEST_UNIT_READY 0x00
82 #define REQUEST_SENSE 0x03
84 #define START_STOP_UNIT 0x1B
85 #define PREVENT_ALLOW 0x1E
86 #define READ_CAPACITY 0x25
88 #define POSITION_TO_ELEMENT 0x2B
90 #define GET_EVENT_STATUS_NOTIFICATION 0x4A
91 #define MODE_SENSE_10 0x5A
92 #define REPORT_LUNS 0xA0
97 * SCSI mode page codes
99 #define MODEPAGE_RW_ERROR_RECOVERY 0x01
100 #define MODEPAGE_CD_CAPABILITIES 0x2A
105 #define ATA_SF_ENAB_SATA_SF 0x10
106 #define ATA_SATA_SF_AN 0x05
107 #define ATA_SF_DIS_SATA_SF 0x90
114 #define DPRINTF(format, arg...) do{fprintf(dbg, format, ##arg);fflush(dbg);}while(0)
116 #define DPRINTF(format, arg...)
118 #define WPRINTF(format, arg...) printf(format, ##arg)
121 struct blockif_req io_req;
122 struct ahci_port *io_pr;
123 STAILQ_ENTRY(ahci_ioreq) io_flist;
124 TAILQ_ENTRY(ahci_ioreq) io_blist;
133 struct blockif_ctxt *bctx;
134 struct pci_ahci_softc *pr_sc;
144 uint8_t err_cfis[20];
171 struct ahci_ioreq *ioreq;
173 STAILQ_HEAD(ahci_fhead, ahci_ioreq) iofhd;
174 TAILQ_HEAD(ahci_bhead, ahci_ioreq) iobhd;
177 struct ahci_cmd_hdr {
182 uint32_t reserved[4];
185 struct ahci_prdt_entry {
188 #define DBCMASK 0x3fffff
192 struct pci_ahci_softc {
193 struct pci_devinst *asc_pi;
208 struct ahci_port port[MAX_PORTS];
210 #define ahci_ctx(sc) ((sc)->asc_pi->pi_vmctx)
212 static void ahci_handle_port(struct ahci_port *p);
/*
 * Convert a CD-ROM logical block address to MSF (minute/second/frame)
 * form at 75 frames per second.
 * NOTE(review): this file is a sampled extract -- interior lines of every
 * function (braces, declarations, some statements) are missing from view,
 * so code must not be restructured here.
 */
214 static inline void lba_to_msf(uint8_t *buf, int lba)
217 buf[0] = (lba / 75) / 60;
218 buf[1] = (lba / 75) % 60;
223 * Generate HBA interrupts on global IS register write.
/*
 * Recomputes the global IS from each port's PxIS/PxIE, then delivers either
 * the legacy INTx or per-port MSI messages for the ports named in 'mask'.
 */
226 ahci_generate_intr(struct pci_ahci_softc *sc, uint32_t mask)
228 struct pci_devinst *pi = sc->asc_pi;
233 /* Update global IS from PxIS/PxIE. */
234 for (i = 0; i < sc->ports; i++) {
239 DPRINTF("%s(%08x) %08x\n", __func__, mask, sc->is);
241 /* If there is nothing enabled -- clear legacy interrupt and exit. */
242 if (sc->is == 0 || (sc->ghc & AHCI_GHC_IE) == 0) {
244 pci_lintr_deassert(pi);
250 /* If there is anything and no MSI -- assert legacy interrupt. */
251 nmsg = pci_msi_maxmsgnum(pi);
255 pci_lintr_assert(pi);
260 /* Assert respective MSIs for ports that were touched. */
261 for (i = 0; i < nmsg; i++) {
/* Ports beyond the MSI message count share the last message (mmask). */
262 if (sc->ports <= nmsg || i < nmsg - 1)
265 mmask = 0xffffffff << i;
266 if (sc->is & mask && mmask & mask)
267 pci_generate_msi(pi, i);
272 * Generate HBA interrupt on specific port event.
/*
 * Fast path for a single port's PxIS change: if the port maps to its own
 * (non-shared) MSI vector, fire it directly; otherwise fold into the shared
 * last MSI vector or the legacy INTx line.
 */
275 ahci_port_intr(struct ahci_port *p)
277 struct pci_ahci_softc *sc = p->pr_sc;
278 struct pci_devinst *pi = sc->asc_pi;
281 DPRINTF("%s(%d) %08x/%08x %08x\n", __func__,
282 p->port, p->is, p->ie, sc->is);
284 /* If there is nothing enabled -- we are done. */
285 if ((p->is & p->ie) == 0)
288 /* In case of non-shared MSI always generate interrupt. */
289 nmsg = pci_msi_maxmsgnum(pi);
290 if (sc->ports <= nmsg || p->port < nmsg - 1) {
291 sc->is |= (1 << p->port);
292 if ((sc->ghc & AHCI_GHC_IE) == 0)
294 pci_generate_msi(pi, p->port);
298 /* If IS for this port is already set -- do nothing. */
299 if (sc->is & (1 << p->port))
302 sc->is |= (1 << p->port);
304 /* If interrupts are enabled -- generate one. */
305 if ((sc->ghc & AHCI_GHC_IE) == 0)
/* Shared case: all high ports funnel into the last MSI message. */
308 pci_generate_msi(pi, nmsg - 1);
309 } else if (!sc->lintr) {
311 pci_lintr_assert(pi);
/*
 * Copy a received FIS into the port's guest-visible receive area (p->rfis)
 * at the offset dictated by its type, and raise the matching PxIS bit(s).
 * Bit 6 of fis[1] is the FIS's Interrupt bit; an ATA error status also
 * raises the task-file-error interrupt.  Drops the FIS if FIS receive is
 * not enabled (PxCMD.FRE clear) or no receive area is programmed.
 */
316 ahci_write_fis(struct ahci_port *p, enum sata_fis_type ft, uint8_t *fis)
318 int offset, len, irq;
320 if (p->rfis == NULL || !(p->cmd & AHCI_P_CMD_FRE))
324 case FIS_TYPE_REGD2H:
327 irq = (fis[1] & (1 << 6)) ? AHCI_P_IX_DHR : 0;
329 case FIS_TYPE_SETDEVBITS:
332 irq = (fis[1] & (1 << 6)) ? AHCI_P_IX_SDB : 0;
334 case FIS_TYPE_PIOSETUP:
337 irq = (fis[1] & (1 << 6)) ? AHCI_P_IX_PS : 0;
340 WPRINTF("unsupported fis type %d\n", ft);
343 if (fis[2] & ATA_S_ERROR) {
345 irq |= AHCI_P_IX_TFE;
347 memcpy(p->rfis + offset, fis, len);
/* Post a (mostly empty) PIO Setup FIS to the port's receive area. */
357 ahci_write_fis_piosetup(struct ahci_port *p)
361 memset(fis, 0, sizeof(fis));
362 fis[0] = FIS_TYPE_PIOSETUP;
363 ahci_write_fis(p, FIS_TYPE_PIOSETUP, fis);
/*
 * Post a Set Device Bits FIS for completion of NCQ slot 'slot' with task
 * file 'tfd' (status in the low byte, error in the high byte).  On error,
 * the failing command's CFIS is latched into p->err_cfis for later
 * READ LOG EXT page 0x10 retrieval; on success the slot's SActive bit is
 * cleared via the FIS's SActive field.
 */
367 ahci_write_fis_sdb(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t tfd)
372 error = (tfd >> 8) & 0xff;
374 memset(fis, 0, sizeof(fis));
375 fis[0] = FIS_TYPE_SETDEVBITS;
379 if (fis[2] & ATA_S_ERROR) {
380 p->err_cfis[0] = slot;
381 p->err_cfis[2] = tfd;
382 p->err_cfis[3] = error;
383 memcpy(&p->err_cfis[4], cfis + 4, 16);
/* Word at offset 4 is the SActive mask acknowledging this slot. */
385 *(uint32_t *)(fis + 4) = (1 << slot);
386 p->sact &= ~(1 << slot);
390 ahci_write_fis(p, FIS_TYPE_SETDEVBITS, fis);
/*
 * Post a Register D2H FIS completing non-NCQ slot 'slot' with task file
 * 'tfd'.  Error completions latch the CFIS into p->err_cfis (tag 0x80 =
 * non-queued command).  Clears the slot's CI bit.
 */
394 ahci_write_fis_d2h(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t tfd)
399 error = (tfd >> 8) & 0xff;
400 memset(fis, 0, sizeof(fis));
401 fis[0] = FIS_TYPE_REGD2H;
415 if (fis[2] & ATA_S_ERROR) {
416 p->err_cfis[0] = 0x80;
417 p->err_cfis[2] = tfd & 0xff;
418 p->err_cfis[3] = error;
419 memcpy(&p->err_cfis[4], cfis + 4, 16);
421 p->ci &= ~(1 << slot);
423 ahci_write_fis(p, FIS_TYPE_REGD2H, fis);
/*
 * Post the initial, interrupt-free D2H FIS acknowledging acceptance of an
 * NCQ command in 'slot' (actual completion arrives later via an SDB FIS).
 */
427 ahci_write_fis_d2h_ncq(struct ahci_port *p, int slot)
431 p->tfd = ATA_S_READY | ATA_S_DSC;
432 memset(fis, 0, sizeof(fis));
433 fis[0] = FIS_TYPE_REGD2H;
434 fis[1] = 0; /* No interrupt */
435 fis[2] = p->tfd; /* Status */
436 fis[3] = 0; /* No error */
437 p->ci &= ~(1 << slot);
438 ahci_write_fis(p, FIS_TYPE_REGD2H, fis);
/* Post the initial D2H register FIS presented after a device reset. */
442 ahci_write_reset_fis_d2h(struct ahci_port *p)
446 memset(fis, 0, sizeof(fis));
447 fis[0] = FIS_TYPE_REGD2H;
455 ahci_write_fis(p, FIS_TYPE_REGD2H, fis);
459 ahci_check_stopped(struct ahci_port *p)
462 * If we are no longer processing the command list and nothing
463 * is in-flight, clear the running bit, the current command
464 * slot, the command issue and active bits.
/* Only act once PxCMD.ST is clear and all slots have drained. */
466 if (!(p->cmd & AHCI_P_CMD_ST)) {
467 if (p->pending == 0) {
469 p->cmd &= ~(AHCI_P_CMD_CR | AHCI_P_CMD_CCS_MASK);
/*
 * Stop command processing on a port: cancel every in-flight blockif
 * request, retire its slot from CI (and SActive for NCQ commands), and
 * recycle the ioreq back onto the free list.  Caller must hold sc->mtx
 * (asserted below).
 */
478 ahci_port_stop(struct ahci_port *p)
480 struct ahci_ioreq *aior;
485 assert(pthread_mutex_isowned_np(&p->pr_sc->mtx));
487 TAILQ_FOREACH(aior, &p->iobhd, io_blist) {
489 * Try to cancel the outstanding blockif request.
491 error = blockif_cancel(p->bctx, &aior->io_req);
497 if (cfis[2] == ATA_WRITE_FPDMA_QUEUED ||
498 cfis[2] == ATA_READ_FPDMA_QUEUED ||
499 cfis[2] == ATA_SEND_FPDMA_QUEUED)
500 p->sact &= ~(1 << slot); /* NCQ */
502 p->ci &= ~(1 << slot);
505 * This command is now done.
507 p->pending &= ~(1 << slot);
510 * Delete the blockif request from the busy list
512 TAILQ_REMOVE(&p->iobhd, aior, io_blist);
515 * Move the blockif request back to the free list
517 STAILQ_INSERT_TAIL(&p->iofhd, aior, io_flist);
520 ahci_check_stopped(p);
/*
 * Reset a single port to power-on defaults: UDMA6 transfer mode, 128-sector
 * multiple I/O.  With no backing device, SStatus reports no device and the
 * signature is all-ones; with a device, the PHY is reported online at the
 * negotiated (or SCTL-capped) speed and a post-reset D2H FIS is posted
 * carrying the ATA or ATAPI signature.
 */
524 ahci_port_reset(struct ahci_port *pr)
528 pr->xfermode = ATA_UDMA6;
529 pr->mult_sectors = 128;
532 pr->ssts = ATA_SS_DET_NO_DEVICE;
533 pr->sig = 0xFFFFFFFF;
537 pr->ssts = ATA_SS_DET_PHY_ONLINE | ATA_SS_IPM_ACTIVE;
538 if (pr->sctl & ATA_SC_SPD_MASK)
539 pr->ssts |= (pr->sctl & ATA_SC_SPD_MASK)
541 pr->ssts |= ATA_SS_SPD_GEN3;
542 pr->tfd = (1 << 8) | ATA_S_DSC | ATA_S_DMA;
545 pr->tfd |= ATA_S_READY;
547 pr->sig = PxSIG_ATAPI;
548 ahci_write_reset_fis_d2h(pr);
/*
 * Full HBA reset: AHCI-enable bit only in GHC, legacy interrupt deasserted,
 * then every port is reset with spin-up/power-on set (and cold-presence for
 * ports that have a backing blockif).
 */
552 ahci_reset(struct pci_ahci_softc *sc)
556 sc->ghc = AHCI_GHC_AE;
560 pci_lintr_deassert(sc->asc_pi);
564 for (i = 0; i < sc->ports; i++) {
567 sc->port[i].cmd = (AHCI_P_CMD_SUD | AHCI_P_CMD_POD);
568 if (sc->port[i].bctx)
569 sc->port[i].cmd |= AHCI_P_CMD_CPS;
570 sc->port[i].sctl = 0;
571 ahci_port_reset(&sc->port[i]);
/*
 * Copy a string into an ATA IDENTIFY string field: ATA strings are stored
 * as big-endian 16-bit words, hence the byte swap via the 'i ^ 1' index.
 */
576 ata_string(uint8_t *dest, const char *src, int len)
580 for (i = 0; i < len; i++) {
582 dest[i ^ 1] = *src++;
/*
 * Copy a string into a SCSI/ATAPI inquiry field (plain byte order).
 * NOTE(review): loop body not visible in this extract -- presumably a
 * straight copy with space padding; confirm against full source.
 */
589 atapi_string(uint8_t *dest, const char *src, int len)
593 for (i = 0; i < len; i++) {
602 * Build up the iovec based on the PRDT, 'done' and 'len'.
/*
 * Translate the guest PRDT into breq->br_iov entries covering the
 * not-yet-transferred byte range [aior->done, aior->len).  Entries already
 * completed are skipped; if BLOCKIF_IOV_MAX entries are exhausted the I/O
 * is rounded down to a whole sector and aior->more flags a continuation.
 */
605 ahci_build_iov(struct ahci_port *p, struct ahci_ioreq *aior,
606 struct ahci_prdt_entry *prdt, uint16_t prdtl)
608 struct blockif_req *breq = &aior->io_req;
609 int i, j, skip, todo, left, extra;
612 /* Copy part of PRDT between 'done' and 'len' bytes into the iov. */
614 left = aior->len - aior->done;
616 for (i = 0, j = 0; i < prdtl && j < BLOCKIF_IOV_MAX && left > 0;
/* PRDT byte count field is zero-based, hence the +1. */
618 dbcsz = (prdt->dbc & DBCMASK) + 1;
619 /* Skip already done part of the PRDT */
627 breq->br_iov[j].iov_base = paddr_guest2host(ahci_ctx(p->pr_sc),
628 prdt->dba + skip, dbcsz);
629 breq->br_iov[j].iov_len = dbcsz;
636 /* If we got limited by IOV length, round I/O down to sector size. */
637 if (j == BLOCKIF_IOV_MAX) {
638 extra = todo % blockif_sectsz(p->bctx);
/* Trim the partial-sector remainder off the tail iov entries. */
642 if (breq->br_iov[j - 1].iov_len > extra) {
643 breq->br_iov[j - 1].iov_len -= extra;
646 extra -= breq->br_iov[j - 1].iov_len;
652 breq->br_resid = todo;
654 aior->more = (aior->done < aior->len && i < prdtl);
/*
 * Start (or continue, when 'done' > 0) a disk read/write command.  Decodes
 * LBA and sector count from the CFIS per command family (NCQ / 48-bit /
 * 28-bit), builds the iovec from the PRDT, and issues the request to the
 * blockif backend; completion arrives in ata_ioreq_cb().
 */
658 ahci_handle_rw(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t done)
660 struct ahci_ioreq *aior;
661 struct blockif_req *breq;
662 struct ahci_prdt_entry *prdt;
663 struct ahci_cmd_hdr *hdr;
666 int err, first, ncq, readop;
668 prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
669 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
/* Everything not in this list is treated as a read. */
674 if (cfis[2] == ATA_WRITE || cfis[2] == ATA_WRITE48 ||
675 cfis[2] == ATA_WRITE_MUL || cfis[2] == ATA_WRITE_MUL48 ||
676 cfis[2] == ATA_WRITE_DMA || cfis[2] == ATA_WRITE_DMA48 ||
677 cfis[2] == ATA_WRITE_FPDMA_QUEUED)
/* NCQ: 48-bit LBA; count lives in FEATURES (cfis[11]:cfis[3]). */
680 if (cfis[2] == ATA_WRITE_FPDMA_QUEUED ||
681 cfis[2] == ATA_READ_FPDMA_QUEUED) {
682 lba = ((uint64_t)cfis[10] << 40) |
683 ((uint64_t)cfis[9] << 32) |
684 ((uint64_t)cfis[8] << 24) |
685 ((uint64_t)cfis[6] << 16) |
686 ((uint64_t)cfis[5] << 8) |
688 len = cfis[11] << 8 | cfis[3];
/* 48-bit commands: count in cfis[13]:cfis[12]. */
692 } else if (cfis[2] == ATA_READ48 || cfis[2] == ATA_WRITE48 ||
693 cfis[2] == ATA_READ_MUL48 || cfis[2] == ATA_WRITE_MUL48 ||
694 cfis[2] == ATA_READ_DMA48 || cfis[2] == ATA_WRITE_DMA48) {
695 lba = ((uint64_t)cfis[10] << 40) |
696 ((uint64_t)cfis[9] << 32) |
697 ((uint64_t)cfis[8] << 24) |
698 ((uint64_t)cfis[6] << 16) |
699 ((uint64_t)cfis[5] << 8) |
701 len = cfis[13] << 8 | cfis[12];
/* 28-bit commands: LBA spread over cfis[7..4]. */
705 lba = ((cfis[7] & 0xf) << 24) | (cfis[6] << 16) |
706 (cfis[5] << 8) | cfis[4];
/* Convert sectors to bytes for blockif. */
711 lba *= blockif_sectsz(p->bctx);
712 len *= blockif_sectsz(p->bctx);
714 /* Pull request off free list */
715 aior = STAILQ_FIRST(&p->iofhd);
716 assert(aior != NULL);
717 STAILQ_REMOVE_HEAD(&p->iofhd, io_flist);
723 breq = &aior->io_req;
724 breq->br_offset = lba + done;
725 ahci_build_iov(p, aior, prdt, hdr->prdtl);
727 /* Mark this command in-flight. */
728 p->pending |= 1 << slot;
730 /* Stuff request onto busy list. */
731 TAILQ_INSERT_HEAD(&p->iobhd, aior, io_blist);
/* NCQ commands are acknowledged immediately; data completes later. */
734 ahci_write_fis_d2h_ncq(p, slot);
737 err = blockif_read(p->bctx, breq);
739 err = blockif_write(p->bctx, breq);
/*
 * Issue a FLUSH CACHE command asynchronously through blockif_flush();
 * the slot is marked pending until the callback completes it.
 */
744 ahci_handle_flush(struct ahci_port *p, int slot, uint8_t *cfis)
746 struct ahci_ioreq *aior;
747 struct blockif_req *breq;
751 * Pull request off free list
753 aior = STAILQ_FIRST(&p->iofhd);
754 assert(aior != NULL);
755 STAILQ_REMOVE_HEAD(&p->iofhd, io_flist);
761 breq = &aior->io_req;
764 * Mark this command in-flight.
766 p->pending |= 1 << slot;
769 * Stuff request onto busy list
771 TAILQ_INSERT_HEAD(&p->iobhd, aior, io_blist);
773 err = blockif_flush(p->bctx, breq);
/*
 * Copy up to 'len' bytes FROM the guest buffers described by the slot's
 * PRDT into a host buffer (guest -> host direction; cf. write_prdt).
 */
778 read_prdt(struct ahci_port *p, int slot, uint8_t *cfis,
781 struct ahci_cmd_hdr *hdr;
782 struct ahci_prdt_entry *prdt;
786 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
789 prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
790 for (i = 0; i < hdr->prdtl && len; i++) {
795 dbcsz = (prdt->dbc & DBCMASK) + 1;
796 ptr = paddr_guest2host(ahci_ctx(p->pr_sc), prdt->dba, dbcsz);
797 sublen = MIN(len, dbcsz);
798 memcpy(to, ptr, sublen);
/*
 * Handle DATA SET MANAGEMENT / SEND FPDMA QUEUED TRIM.  Reads the LBA-range
 * entries from the PRDT and deletes them one at a time via blockif_delete();
 * 'done' counts ranges already processed on re-entry from the callback.
 * A zero-length entry terminates the list with a success completion.
 */
806 ahci_handle_dsm_trim(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t done)
808 struct ahci_ioreq *aior;
809 struct blockif_req *breq;
/* Range-block count: DSM uses count regs, NCQ variant uses FEATURES. */
817 if (cfis[2] == ATA_DATA_SET_MANAGEMENT) {
818 len = (uint16_t)cfis[13] << 8 | cfis[12];
821 } else { /* ATA_SEND_FPDMA_QUEUED */
822 len = (uint16_t)cfis[11] << 8 | cfis[3];
826 read_prdt(p, slot, cfis, buf, sizeof(buf));
/* Each 8-byte entry: 48-bit LBA + 16-bit range length. */
830 elba = ((uint64_t)entry[5] << 40) |
831 ((uint64_t)entry[4] << 32) |
832 ((uint64_t)entry[3] << 24) |
833 ((uint64_t)entry[2] << 16) |
834 ((uint64_t)entry[1] << 8) |
836 elen = (uint16_t)entry[7] << 8 | entry[6];
/* End of list: complete the command (SDB FIS for NCQ, D2H otherwise). */
842 ahci_write_fis_d2h_ncq(p, slot);
843 ahci_write_fis_sdb(p, slot, cfis,
844 ATA_S_READY | ATA_S_DSC);
846 ahci_write_fis_d2h(p, slot, cfis,
847 ATA_S_READY | ATA_S_DSC);
849 p->pending &= ~(1 << slot);
850 ahci_check_stopped(p);
859 * Pull request off free list
861 aior = STAILQ_FIRST(&p->iofhd);
862 assert(aior != NULL);
863 STAILQ_REMOVE_HEAD(&p->iofhd, io_flist);
868 aior->more = (len != done);
870 breq = &aior->io_req;
871 breq->br_offset = elba * blockif_sectsz(p->bctx);
872 breq->br_resid = elen * blockif_sectsz(p->bctx);
875 * Mark this command in-flight.
877 p->pending |= 1 << slot;
880 * Stuff request onto busy list
882 TAILQ_INSERT_HEAD(&p->iobhd, aior, io_blist);
885 ahci_write_fis_d2h_ncq(p, slot);
887 err = blockif_delete(p->bctx, breq);
/*
 * Copy up to 'len' bytes from a host buffer INTO the guest buffers
 * described by the slot's PRDT, and record the transferred byte count in
 * the command header's PRD byte-count field (host -> guest; cf. read_prdt).
 */
892 write_prdt(struct ahci_port *p, int slot, uint8_t *cfis,
895 struct ahci_cmd_hdr *hdr;
896 struct ahci_prdt_entry *prdt;
900 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
903 prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
904 for (i = 0; i < hdr->prdtl && len; i++) {
909 dbcsz = (prdt->dbc & DBCMASK) + 1;
910 ptr = paddr_guest2host(ahci_ctx(p->pr_sc), prdt->dba, dbcsz);
911 sublen = MIN(len, dbcsz);
912 memcpy(ptr, from, sublen);
917 hdr->prdbc = size - len;
/*
 * Set the final byte of 'buf' so the byte-wise sum of the whole buffer
 * is zero (mod 256) -- the ATA log/IDENTIFY checksum convention.
 */
921 ahci_checksum(uint8_t *buf, int size)
926 for (i = 0; i < size - 1; i++)
928 buf[size - 1] = 0x100 - sum;
/*
 * READ LOG (DMA) EXT: only log page 0x10 (NCQ error log), single page,
 * offset 0, is supported on non-ATAPI ports; anything else aborts.
 * Returns the latched err_cfis, checksummed per the ATA log format.
 */
932 ahci_handle_read_log(struct ahci_port *p, int slot, uint8_t *cfis)
934 struct ahci_cmd_hdr *hdr;
937 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
938 if (p->atapi || hdr->prdtl == 0 || cfis[4] != 0x10 ||
939 cfis[5] != 0 || cfis[9] != 0 || cfis[12] != 1 || cfis[13] != 0) {
940 ahci_write_fis_d2h(p, slot, cfis,
941 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
945 memset(buf, 0, sizeof(buf));
946 memcpy(buf, p->err_cfis, sizeof(p->err_cfis));
947 ahci_checksum(buf, sizeof(buf));
/* PIO variant needs a PIO Setup FIS before the data; DMA variant does not. */
949 if (cfis[2] == ATA_READ_LOG_EXT)
950 ahci_write_fis_piosetup(p);
951 write_prdt(p, slot, cfis, (void *)buf, sizeof(buf));
952 ahci_write_fis_d2h(p, slot, cfis, ATA_S_DSC | ATA_S_READY);
/*
 * IDENTIFY DEVICE: build a 512-byte identify page from the blockif
 * geometry and capabilities and return it via PIO.  Aborted on ATAPI
 * ports or when no PRDT entries were supplied.  Word indices below follow
 * the ATA/ACS IDENTIFY DEVICE data layout.
 */
956 handle_identify(struct ahci_port *p, int slot, uint8_t *cfis)
958 struct ahci_cmd_hdr *hdr;
960 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
961 if (p->atapi || hdr->prdtl == 0) {
962 ahci_write_fis_d2h(p, slot, cfis,
963 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
967 int sectsz, psectsz, psectoff, candelete, ro;
/* Query backing-store properties. */
971 ro = blockif_is_ro(p->bctx);
972 candelete = blockif_candelete(p->bctx);
973 sectsz = blockif_sectsz(p->bctx);
974 sectors = blockif_size(p->bctx) / sectsz;
975 blockif_chs(p->bctx, &cyl, &heads, &sech);
976 blockif_psectsz(p->bctx, &psectsz, &psectoff);
977 memset(buf, 0, sizeof(buf));
/* Serial (from port ident), firmware rev, model strings. */
982 ata_string((uint8_t *)(buf+10), p->ident, 20);
983 ata_string((uint8_t *)(buf+23), "001", 8);
984 ata_string((uint8_t *)(buf+27), "BHYVE SATA DISK", 40);
985 buf[47] = (0x8000 | 128);
987 buf[49] = (1 << 8 | 1 << 9 | 1 << 11);
989 buf[53] = (1 << 1 | 1 << 2);
991 buf[59] = (0x100 | p->mult_sectors);
/* Words 60-61: 28-bit capacity, clamped at 0x0fffffff sectors. */
992 if (sectors <= 0x0fffffff) {
994 buf[61] = (sectors >> 16);
1000 if (p->xfermode & ATA_WDMA0)
1001 buf[63] |= (1 << ((p->xfermode & 7) + 8));
/* SATA capabilities / current negotiated speed. */
1009 buf[76] = (ATA_SATA_GEN1 | ATA_SATA_GEN2 | ATA_SATA_GEN3 |
1011 buf[77] = (ATA_SUPPORT_RCVSND_FPDMA_QUEUED |
1012 (p->ssts & ATA_SS_SPD_MASK) >> 3);
/* Words 82-87: supported / enabled feature sets. */
1015 buf[82] = (ATA_SUPPORT_POWERMGT | ATA_SUPPORT_WRITECACHE|
1016 ATA_SUPPORT_LOOKAHEAD | ATA_SUPPORT_NOP);
1017 buf[83] = (ATA_SUPPORT_ADDRESS48 | ATA_SUPPORT_FLUSHCACHE |
1018 ATA_SUPPORT_FLUSHCACHE48 | 1 << 14);
1019 buf[84] = (1 << 14);
1020 buf[85] = (ATA_SUPPORT_POWERMGT | ATA_SUPPORT_WRITECACHE|
1021 ATA_SUPPORT_LOOKAHEAD | ATA_SUPPORT_NOP);
1022 buf[86] = (ATA_SUPPORT_ADDRESS48 | ATA_SUPPORT_FLUSHCACHE |
1023 ATA_SUPPORT_FLUSHCACHE48 | 1 << 15);
1024 buf[87] = (1 << 14);
1026 if (p->xfermode & ATA_UDMA0)
1027 buf[88] |= (1 << ((p->xfermode & 7) + 8));
/* Words 100-103: 48-bit capacity. */
1029 buf[101] = (sectors >> 16);
1030 buf[102] = (sectors >> 32);
1031 buf[103] = (sectors >> 48);
/* Advertise TRIM only for writable backends that support deletion. */
1032 if (candelete && !ro) {
1033 buf[69] |= ATA_SUPPORT_RZAT | ATA_SUPPORT_DRAT;
1035 buf[169] = ATA_SUPPORT_DSM_TRIM;
/* Physical/logical sector size relationship (word 106, 209). */
1039 if (psectsz > sectsz) {
1041 buf[106] |= ffsl(psectsz / sectsz) - 1;
1042 buf[209] |= (psectoff / sectsz);
1046 buf[117] = sectsz / 2;
1047 buf[118] = ((sectsz / 2) >> 16);
1049 buf[119] = (ATA_SUPPORT_RWLOGDMAEXT | 1 << 14);
1050 buf[120] = (ATA_SUPPORT_RWLOGDMAEXT | 1 << 14);
1053 ahci_checksum((uint8_t *)buf, sizeof(buf));
1054 ahci_write_fis_piosetup(p);
1055 write_prdt(p, slot, cfis, (void *)buf, sizeof(buf));
1056 ahci_write_fis_d2h(p, slot, cfis, ATA_S_DSC | ATA_S_READY);
/*
 * IDENTIFY PACKET DEVICE: build the 512-byte ATAPI identify page (DVD-ROM,
 * 12-byte packets per word 0) and return it via PIO.  The leading abort
 * path presumably fires for non-ATAPI ports -- the guarding condition is
 * not visible in this extract.
 */
1061 handle_atapi_identify(struct ahci_port *p, int slot, uint8_t *cfis)
1064 ahci_write_fis_d2h(p, slot, cfis,
1065 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
1069 memset(buf, 0, sizeof(buf));
/* Word 0: ATAPI device, CD/DVD device type, removable, 12-byte packets. */
1070 buf[0] = (2 << 14 | 5 << 8 | 1 << 7 | 2 << 5);
1071 ata_string((uint8_t *)(buf+10), p->ident, 20);
1072 ata_string((uint8_t *)(buf+23), "001", 8);
1073 ata_string((uint8_t *)(buf+27), "BHYVE SATA DVD ROM", 40);
1074 buf[49] = (1 << 9 | 1 << 8);
1075 buf[50] = (1 << 14 | 1);
1076 buf[53] = (1 << 2 | 1 << 1);
1079 if (p->xfermode & ATA_WDMA0)
1080 buf[63] |= (1 << ((p->xfermode & 7) + 8));
/* SATA generation support / current speed. */
1086 buf[76] = (ATA_SATA_GEN1 | ATA_SATA_GEN2 | ATA_SATA_GEN3);
1087 buf[77] = ((p->ssts & ATA_SS_SPD_MASK) >> 3);
1090 buf[82] = (ATA_SUPPORT_POWERMGT | ATA_SUPPORT_PACKET |
1091 ATA_SUPPORT_RESET | ATA_SUPPORT_NOP);
1092 buf[83] = (1 << 14);
1093 buf[84] = (1 << 14);
1094 buf[85] = (ATA_SUPPORT_POWERMGT | ATA_SUPPORT_PACKET |
1095 ATA_SUPPORT_RESET | ATA_SUPPORT_NOP);
1096 buf[87] = (1 << 14);
1098 if (p->xfermode & ATA_UDMA0)
1099 buf[88] |= (1 << ((p->xfermode & 7) + 8));
1102 ahci_checksum((uint8_t *)buf, sizeof(buf));
1103 ahci_write_fis_piosetup(p);
1104 write_prdt(p, slot, cfis, (void *)buf, sizeof(buf));
1105 ahci_write_fis_d2h(p, slot, cfis, ATA_S_DSC | ATA_S_READY);
/*
 * SCSI INQUIRY: standard inquiry data identifying a BHYVE DVD-ROM.  VPD
 * page 0 (supported pages) is handled; other VPD pages fail with
 * ILLEGAL REQUEST sense.
 */
1110 atapi_inquiry(struct ahci_port *p, int slot, uint8_t *cfis)
1119 if (acmd[1] & 1) { /* VPD */
1120 if (acmd[2] == 0) { /* Supported VPD pages */
1128 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1130 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1131 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1132 ahci_write_fis_d2h(p, slot, cfis, tfd);
/* Vendor / product / revision identification strings. */
1144 atapi_string(buf + 8, "BHYVE", 8);
1145 atapi_string(buf + 16, "BHYVE DVD-ROM", 16);
1146 atapi_string(buf + 32, "001", 4);
1152 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1153 write_prdt(p, slot, cfis, buf, len);
1154 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
/*
 * SCSI READ CAPACITY(10): report last-LBA and a fixed 2048-byte
 * CD/DVD block size, both big-endian.
 */
1158 atapi_read_capacity(struct ahci_port *p, int slot, uint8_t *cfis)
1163 sectors = blockif_size(p->bctx) / 2048;
1164 be32enc(buf, sectors - 1);
1165 be32enc(buf + 4, 2048);
1166 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1167 write_prdt(p, slot, cfis, buf, sizeof(buf));
1168 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
/*
 * SCSI READ TOC: synthesize a single-track table of contents for the
 * backing image.  Handles the formatted-TOC and raw-TOC formats; track
 * addresses are emitted in MSF or LBA form per the command's MSF bit.
 * Unsupported formats fail with ILLEGAL REQUEST sense.
 */
1172 atapi_read_toc(struct ahci_port *p, int slot, uint8_t *cfis)
1180 len = be16dec(acmd + 7);
1181 format = acmd[9] >> 6;
1187 uint8_t start_track, buf[20], *bp;
1189 msf = (acmd[1] >> 1) & 1;
1190 start_track = acmd[6];
/* Only track 1 or the lead-out (0xaa) may be requested. */
1191 if (start_track > 1 && start_track != 0xaa) {
1193 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1195 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1196 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1197 ahci_write_fis_d2h(p, slot, cfis, tfd);
1203 if (start_track <= 1) {
/* Lead-out address = total image size in device sectors. */
1223 sectors = blockif_size(p->bctx) / blockif_sectsz(p->bctx);
1227 lba_to_msf(bp, sectors);
1230 be32enc(bp, sectors);
1234 be16enc(buf, size - 2);
1237 write_prdt(p, slot, cfis, buf, len);
1238 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1239 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
/* Session-info format: mostly-zero response, truncated to request len. */
1246 memset(buf, 0, sizeof(buf));
1250 if (len > sizeof(buf))
1252 write_prdt(p, slot, cfis, buf, len);
1253 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1254 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
/* Raw TOC format. */
1261 uint8_t *bp, buf[50];
1263 msf = (acmd[1] >> 1) & 1;
1299 sectors = blockif_size(p->bctx) / blockif_sectsz(p->bctx);
1303 lba_to_msf(bp, sectors);
1306 be32enc(bp, sectors);
1329 be16enc(buf, size - 2);
1332 write_prdt(p, slot, cfis, buf, len);
1333 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1334 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
/* Unknown format: ILLEGAL REQUEST. */
1341 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1343 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1344 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1345 ahci_write_fis_d2h(p, slot, cfis, tfd);
/* SCSI REPORT LUNS: return a zeroed (single LUN 0) LUN list. */
1352 atapi_report_luns(struct ahci_port *p, int slot, uint8_t *cfis)
1356 memset(buf, 0, sizeof(buf));
1359 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1360 write_prdt(p, slot, cfis, buf, sizeof(buf));
1361 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
/*
 * SCSI READ(10)/READ(12): start (or continue, 'done' > 0) an asynchronous
 * read of 2048-byte logical blocks through blockif; a zero-length read
 * completes immediately.  Completion is handled in atapi_ioreq_cb().
 */
1365 atapi_read(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t done)
1367 struct ahci_ioreq *aior;
1368 struct ahci_cmd_hdr *hdr;
1369 struct ahci_prdt_entry *prdt;
1370 struct blockif_req *breq;
1377 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
1378 prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
1380 lba = be32dec(acmd + 2);
/* READ(10) length is 16-bit at offset 7; READ(12) is 32-bit at offset 6. */
1381 if (acmd[0] == READ_10)
1382 len = be16dec(acmd + 7);
1384 len = be32dec(acmd + 6);
1386 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1387 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1393 * Pull request off free list
1395 aior = STAILQ_FIRST(&p->iofhd);
1396 assert(aior != NULL);
1397 STAILQ_REMOVE_HEAD(&p->iofhd, io_flist);
1402 breq = &aior->io_req;
1403 breq->br_offset = lba + done;
1404 ahci_build_iov(p, aior, prdt, hdr->prdtl);
1406 /* Mark this command in-flight. */
1407 p->pending |= 1 << slot;
1409 /* Stuff request onto busy list. */
1410 TAILQ_INSERT_HEAD(&p->iobhd, aior, io_blist);
1412 err = blockif_read(p->bctx, breq);
/*
 * SCSI REQUEST SENSE: return fixed-format sense data carrying the port's
 * latched sense key (byte 0: response code 0x70 with VALID bit set).
 */
1417 atapi_request_sense(struct ahci_port *p, int slot, uint8_t *cfis)
1425 if (len > sizeof(buf))
1427 memset(buf, 0, len);
1428 buf[0] = 0x70 | (1 << 7);
1429 buf[2] = p->sense_key;
1432 write_prdt(p, slot, cfis, buf, len);
1433 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1434 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
/*
 * SCSI START STOP UNIT: start/stop variants succeed trivially; the eject
 * request path is unimplemented and reports ILLEGAL REQUEST sense.
 */
1438 atapi_start_stop_unit(struct ahci_port *p, int slot, uint8_t *cfis)
1440 uint8_t *acmd = cfis + 0x40;
1443 switch (acmd[4] & 3) {
1447 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1448 tfd = ATA_S_READY | ATA_S_DSC;
1451 /* TODO eject media */
1452 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1453 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1455 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1458 ahci_write_fis_d2h(p, slot, cfis, tfd);
/*
 * SCSI MODE SENSE(10): supports only the R/W error-recovery page (0x01)
 * and CD capabilities page (0x2A); any other page code returns ILLEGAL
 * REQUEST sense.  Responses are truncated to the allocation length.
 */
1462 atapi_mode_sense(struct ahci_port *p, int slot, uint8_t *cfis)
1470 len = be16dec(acmd + 7);
1472 code = acmd[2] & 0x3f;
1477 case MODEPAGE_RW_ERROR_RECOVERY:
1481 if (len > sizeof(buf))
1484 memset(buf, 0, sizeof(buf));
1485 be16enc(buf, 16 - 2);
1490 write_prdt(p, slot, cfis, buf, len);
1491 tfd = ATA_S_READY | ATA_S_DSC;
1494 case MODEPAGE_CD_CAPABILITIES:
1498 if (len > sizeof(buf))
1501 memset(buf, 0, sizeof(buf));
1502 be16enc(buf, 30 - 2);
/* Buffer size and current speed fields of the capabilities page. */
1508 be16enc(&buf[18], 2);
1509 be16enc(&buf[20], 512);
1510 write_prdt(p, slot, cfis, buf, len);
1511 tfd = ATA_S_READY | ATA_S_DSC;
1520 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1522 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1527 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1529 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1532 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1533 ahci_write_fis_d2h(p, slot, cfis, tfd);
/*
 * SCSI GET EVENT STATUS NOTIFICATION: polled mode only -- the IMMED bit
 * (acmd[1] bit 0) must be set, otherwise ILLEGAL REQUEST sense is raised.
 * Returns a minimal "no event" response.
 */
1537 atapi_get_event_status_notification(struct ahci_port *p, int slot,
1545 /* we don't support asynchronous operation */
1546 if (!(acmd[1] & 1)) {
1547 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1549 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1554 len = be16dec(acmd + 7);
1555 if (len > sizeof(buf))
1558 memset(buf, 0, sizeof(buf));
1559 be16enc(buf, 8 - 2);
1563 write_prdt(p, slot, cfis, buf, len);
1564 tfd = ATA_S_READY | ATA_S_DSC;
1566 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1567 ahci_write_fis_d2h(p, slot, cfis, tfd);
/*
 * ATAPI PACKET command dispatcher: the 16-byte SCSI CDB lives at
 * cfis + 0x40; route it to the matching atapi_* handler.  Unknown opcodes
 * complete with ILLEGAL REQUEST sense.
 */
1571 handle_packet_cmd(struct ahci_port *p, int slot, uint8_t *cfis)
1581 for (i = 0; i < 16; i++)
1582 DPRINTF("%02x ", acmd[i]);
1588 case TEST_UNIT_READY:
1589 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1590 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1593 atapi_inquiry(p, slot, cfis);
1596 atapi_read_capacity(p, slot, cfis);
/* PREVENT ALLOW / similar no-ops complete immediately. */
1600 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1601 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1604 atapi_read_toc(p, slot, cfis);
1607 atapi_report_luns(p, slot, cfis);
1611 atapi_read(p, slot, cfis, 0);
1614 atapi_request_sense(p, slot, cfis);
1616 case START_STOP_UNIT:
1617 atapi_start_stop_unit(p, slot, cfis);
1620 atapi_mode_sense(p, slot, cfis);
1622 case GET_EVENT_STATUS_NOTIFICATION:
1623 atapi_get_event_status_notification(p, slot, cfis);
1626 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1627 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1629 ahci_write_fis_d2h(p, slot, cfis, (p->sense_key << 12) |
1630 ATA_S_READY | ATA_S_ERROR);
/*
 * Top-level ATA command dispatcher for a slot's CFIS (command byte in
 * cfis[2]).  Sets BSY while the command is decoded; each branch either
 * completes synchronously with a D2H FIS or kicks off asynchronous blockif
 * I/O.  Unsupported commands abort with ATA_E_ABORT.
 */
1636 ahci_handle_cmd(struct ahci_port *p, int slot, uint8_t *cfis)
1639 p->tfd |= ATA_S_BUSY;
1641 case ATA_ATA_IDENTIFY:
1642 handle_identify(p, slot, cfis);
/* SET FEATURES: subcommand in the FEATURES register (cfis[3]). */
1644 case ATA_SETFEATURES:
1647 case ATA_SF_ENAB_SATA_SF:
1649 case ATA_SATA_SF_AN:
1650 p->tfd = ATA_S_DSC | ATA_S_READY;
1653 p->tfd = ATA_S_ERROR | ATA_S_READY;
1654 p->tfd |= (ATA_ERROR_ABORT << 8);
1658 case ATA_SF_ENAB_WCACHE:
1659 case ATA_SF_DIS_WCACHE:
1660 case ATA_SF_ENAB_RCACHE:
1661 case ATA_SF_DIS_RCACHE:
1662 p->tfd = ATA_S_DSC | ATA_S_READY;
1664 case ATA_SF_SETXFER:
1666 switch (cfis[12] & 0xf8) {
1672 p->xfermode = (cfis[12] & 0x7);
1675 p->tfd = ATA_S_DSC | ATA_S_READY;
1679 p->tfd = ATA_S_ERROR | ATA_S_READY;
1680 p->tfd |= (ATA_ERROR_ABORT << 8);
1683 ahci_write_fis_d2h(p, slot, cfis, p->tfd);
/* SET MULTI: count must be a power of two <= 128 (or 0). */
1687 if (cfis[12] != 0 &&
1688 (cfis[12] > 128 || (cfis[12] & (cfis[12] - 1)))) {
1689 p->tfd = ATA_S_ERROR | ATA_S_READY;
1690 p->tfd |= (ATA_ERROR_ABORT << 8);
1692 p->mult_sectors = cfis[12];
1693 p->tfd = ATA_S_DSC | ATA_S_READY;
1695 ahci_write_fis_d2h(p, slot, cfis, p->tfd);
/* All read/write families funnel into ahci_handle_rw(). */
1703 case ATA_READ_MUL48:
1704 case ATA_WRITE_MUL48:
1707 case ATA_READ_DMA48:
1708 case ATA_WRITE_DMA48:
1709 case ATA_READ_FPDMA_QUEUED:
1710 case ATA_WRITE_FPDMA_QUEUED:
1711 ahci_handle_rw(p, slot, cfis, 0);
1713 case ATA_FLUSHCACHE:
1714 case ATA_FLUSHCACHE48:
1715 ahci_handle_flush(p, slot, cfis);
/* DSM: only the TRIM variant, single range block, is accepted. */
1717 case ATA_DATA_SET_MANAGEMENT:
1718 if (cfis[11] == 0 && cfis[3] == ATA_DSM_TRIM &&
1719 cfis[13] == 0 && cfis[12] == 1) {
1720 ahci_handle_dsm_trim(p, slot, cfis, 0);
1723 ahci_write_fis_d2h(p, slot, cfis,
1724 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
/* NCQ SEND: likewise only the DSM/TRIM subcommand. */
1726 case ATA_SEND_FPDMA_QUEUED:
1727 if ((cfis[13] & 0x1f) == ATA_SFPDMA_DSM &&
1728 cfis[17] == 0 && cfis[16] == ATA_DSM_TRIM &&
1729 cfis[11] == 0 && cfis[3] == 1) {
1730 ahci_handle_dsm_trim(p, slot, cfis, 0);
1733 ahci_write_fis_d2h(p, slot, cfis,
1734 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
1736 case ATA_READ_LOG_EXT:
1737 case ATA_READ_LOG_DMA_EXT:
1738 ahci_handle_read_log(p, slot, cfis);
1740 case ATA_SECURITY_FREEZE_LOCK:
1743 ahci_write_fis_d2h(p, slot, cfis,
1744 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
1746 case ATA_CHECK_POWER_MODE:
1747 cfis[12] = 0xff; /* always on */
1748 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
/* Power-management and verify commands are accepted as no-ops. */
1750 case ATA_STANDBY_CMD:
1751 case ATA_STANDBY_IMMEDIATE:
1753 case ATA_IDLE_IMMEDIATE:
1755 case ATA_READ_VERIFY:
1756 case ATA_READ_VERIFY48:
1757 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1759 case ATA_ATAPI_IDENTIFY:
1760 handle_atapi_identify(p, slot, cfis);
/* PACKET is only valid on ATAPI ports; otherwise abort. */
1762 case ATA_PACKET_CMD:
1764 ahci_write_fis_d2h(p, slot, cfis,
1765 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
1767 handle_packet_cmd(p, slot, cfis);
1770 WPRINTF("Unsupported cmd:%02x\n", cfis[2]);
1771 ahci_write_fis_d2h(p, slot, cfis,
1772 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
/*
 * Process one issued command slot: map the command table (CFIS + PRDT)
 * from guest memory, validate that it is a Register H2D FIS, and either
 * execute the command (C bit set in fis[1]) or handle the soft-reset
 * control path (SRST bit in the control register, cfis[15]).
 */
1778 ahci_handle_slot(struct ahci_port *p, int slot)
1780 struct ahci_cmd_hdr *hdr;
1782 struct ahci_prdt_entry *prdt;
1784 struct pci_ahci_softc *sc;
1791 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
/* Command FIS length field is in dwords. */
1793 cfl = (hdr->flags & 0x1f) * 4;
1795 cfis = paddr_guest2host(ahci_ctx(sc), hdr->ctba,
1796 0x80 + hdr->prdtl * sizeof(struct ahci_prdt_entry));
1798 prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
1801 for (i = 0; i < cfl; i++) {
1804 DPRINTF("%02x ", cfis[i]);
1808 for (i = 0; i < hdr->prdtl; i++) {
1809 DPRINTF("%d@%08"PRIx64"\n", prdt->dbc & 0x3fffff, prdt->dba);
1814 if (cfis[0] != FIS_TYPE_REGH2D) {
1815 WPRINTF("Not a H2D FIS:%02x\n", cfis[0]);
/* Bit 7 of fis[1] is the C (command) bit. */
1819 if (cfis[1] & 0x80) {
1820 ahci_handle_cmd(p, slot, cfis);
1822 if (cfis[15] & (1 << 2))
1824 else if (p->reset) {
1828 p->ci &= ~(1 << slot);
/*
 * Scan PxCI for newly-issued commands (skipping those already pending)
 * and dispatch them in circular order starting at the current command
 * slot (p->ccs).  Stops while the device reports BSY/DRQ or while waiting
 * for the guest to observe an error condition (p->waitforclear).
 */
1833 ahci_handle_port(struct ahci_port *p)
1836 if (!(p->cmd & AHCI_P_CMD_ST))
1840 * Search for any new commands to issue ignoring those that
1841 * are already in-flight. Stop if device is busy or in error.
1843 for (; (p->ci & ~p->pending) != 0; p->ccs = ((p->ccs + 1) & 31)) {
1844 if ((p->tfd & (ATA_S_BUSY | ATA_S_DRQ)) != 0)
1846 if (p->waitforclear)
1848 if ((p->ci & ~p->pending & (1 << p->ccs)) != 0) {
/* Publish the slot being executed in PxCMD.CCS. */
1849 p->cmd &= ~AHCI_P_CMD_CCS_MASK;
1850 p->cmd |= p->ccs << AHCI_P_CMD_CCS_SHIFT;
1851 ahci_handle_slot(p, p->ccs);
1857 * blockif callback routine - this runs in the context of the blockif
1858 * i/o thread, so the mutex needs to be acquired.
/*
 * Completion path for ATA disk I/O: recycle the ioreq, update the header's
 * transferred byte count, continue multi-part transfers (aior->more), and
 * finally post the completion FIS -- SDB for NCQ commands, D2H otherwise.
 */
1861 ata_ioreq_cb(struct blockif_req *br, int err)
1863 struct ahci_cmd_hdr *hdr;
1864 struct ahci_ioreq *aior;
1865 struct ahci_port *p;
1866 struct pci_ahci_softc *sc;
1871 DPRINTF("%s %d\n", __func__, err);
1874 aior = br->br_param;
1879 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
/* Classify: NCQ determines completion FIS type; DSM continues trims. */
1881 if (cfis[2] == ATA_WRITE_FPDMA_QUEUED ||
1882 cfis[2] == ATA_READ_FPDMA_QUEUED ||
1883 cfis[2] == ATA_SEND_FPDMA_QUEUED)
1885 if (cfis[2] == ATA_DATA_SET_MANAGEMENT ||
1886 (cfis[2] == ATA_SEND_FPDMA_QUEUED &&
1887 (cfis[13] & 0x1f) == ATA_SFPDMA_DSM))
1890 pthread_mutex_lock(&sc->mtx);
1893 * Delete the blockif request from the busy list
1895 TAILQ_REMOVE(&p->iobhd, aior, io_blist);
1898 * Move the blockif request back to the free list
1900 STAILQ_INSERT_TAIL(&p->iofhd, aior, io_flist);
1903 hdr->prdbc = aior->done;
/* More data to move and no error: issue the next chunk. */
1905 if (!err && aior->more) {
1907 ahci_handle_dsm_trim(p, slot, cfis, aior->done);
1909 ahci_handle_rw(p, slot, cfis, aior->done);
1914 tfd = ATA_S_READY | ATA_S_DSC;
1916 tfd = (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR;
1918 ahci_write_fis_sdb(p, slot, cfis, tfd);
1920 ahci_write_fis_d2h(p, slot, cfis, tfd);
1923 * This command is now complete.
1925 p->pending &= ~(1 << slot);
1927 ahci_check_stopped(p);
1928 ahci_handle_port(p);
1930 pthread_mutex_unlock(&sc->mtx);
1931 DPRINTF("%s exit\n", __func__);
/*
 * atapi_ioreq_cb(): completion callback for ATAPI (packet) block
 * requests; runs on the blockif i/o thread.  Mirrors ata_ioreq_cb():
 * recycle the request, continue partial reads, then post a D2H FIS
 * with either success status or CHECK CONDITION sense information.
 */
1935 atapi_ioreq_cb(struct blockif_req *br, int err)
1937 struct ahci_cmd_hdr *hdr;
1938 struct ahci_ioreq *aior;
1939 struct ahci_port *p;
1940 struct pci_ahci_softc *sc;
1945 DPRINTF("%s %d\n", __func__, err);
1947 aior = br->br_param;
1952 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + aior->slot * AHCI_CL_SIZE);
/* Callback runs on the blockif thread: serialize on the softc lock. */
1954 pthread_mutex_lock(&sc->mtx);
1957 * Delete the blockif request from the busy list
1959 TAILQ_REMOVE(&p->iobhd, aior, io_blist);
1962 * Move the blockif request back to the free list
1964 STAILQ_INSERT_TAIL(&p->iofhd, aior, io_flist);
/* Publish transferred byte count in the command header. */
1967 hdr->prdbc = aior->done;
/* Continue a multi-part read if no error and data remains. */
1969 if (!err && aior->more) {
1970 atapi_read(p, slot, cfis, aior->done);
1975 tfd = ATA_S_READY | ATA_S_DSC;
/* On error, report ILLEGAL REQUEST sense; sense key goes in TFD 15:12. */
1977 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1979 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
/* Interrupt byte: I/O direction bits for the completion FIS. */
1981 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1982 ahci_write_fis_d2h(p, slot, cfis, tfd);
1985 * This command is now complete.
1987 p->pending &= ~(1 << slot);
1989 ahci_check_stopped(p);
1990 ahci_handle_port(p);
1992 pthread_mutex_unlock(&sc->mtx);
1993 DPRINTF("%s exit\n", __func__);
/*
 * pci_ahci_ioreq_init(): allocate the port's pool of i/o request
 * structures (one per blockif queue slot), wire each one's callback to
 * the ATA or ATAPI completion routine, and place them all on the free
 * list.  The busy list starts empty.
 * NOTE(review): the calloc() result is not visibly checked in this
 * view — confirm against the full file.
 */
1997 pci_ahci_ioreq_init(struct ahci_port *pr)
1999 struct ahci_ioreq *vr;
/* Size the pool to the backing blockif's queue depth. */
2002 pr->ioqsz = blockif_queuesz(pr->bctx);
2003 pr->ioreq = calloc(pr->ioqsz, sizeof(struct ahci_ioreq));
2004 STAILQ_INIT(&pr->iofhd);
2007 * Add all i/o request entries to the free queue
2009 for (i = 0; i < pr->ioqsz; i++) {
/* Callback choice depends on whether the port emulates ATA or ATAPI. */
2013 vr->io_req.br_callback = ata_ioreq_cb;
2015 vr->io_req.br_callback = atapi_ioreq_cb;
/* br_param lets the callback recover this ahci_ioreq. */
2016 vr->io_req.br_param = vr;
2017 STAILQ_INSERT_TAIL(&pr->iofhd, vr, io_flist);
2020 TAILQ_INIT(&pr->iobhd);
/*
 * pci_ahci_port_write(): handle a guest write to a per-port AHCI
 * register.  The port index and register offset are decoded from the
 * BAR offset; the visible cases cover PxIE, PxCMD (with its many
 * side effects), and PxSCTL.
 * NOTE(review): the switch labels are not visible in this fragmented
 * view; comments below identify the register each cluster belongs to.
 */
2024 pci_ahci_port_write(struct pci_ahci_softc *sc, uint64_t offset, uint64_t value)
2026 int port = (offset - AHCI_OFFSET) / AHCI_STEP;
2027 offset = (offset - AHCI_OFFSET) % AHCI_STEP;
2028 struct ahci_port *p = &sc->port[port];
2030 DPRINTF("pci_ahci_port %d: write offset 0x%"PRIx64" value 0x%"PRIx64"\n",
2031 port, offset, value);
/* PxIE: mask to the interrupt-enable bits defined by AHCI. */
2051 p->ie = value & 0xFDC000FF;
/* PxCMD: replace the guest-writable bits with the written value. */
2056 p->cmd &= ~(AHCI_P_CMD_ST | AHCI_P_CMD_SUD | AHCI_P_CMD_POD |
2057 AHCI_P_CMD_CLO | AHCI_P_CMD_FRE | AHCI_P_CMD_APSTE |
2058 AHCI_P_CMD_ATAPI | AHCI_P_CMD_DLAE | AHCI_P_CMD_ALPE |
2059 AHCI_P_CMD_ASP | AHCI_P_CMD_ICC_MASK);
2060 p->cmd |= (AHCI_P_CMD_ST | AHCI_P_CMD_SUD | AHCI_P_CMD_POD |
2061 AHCI_P_CMD_CLO | AHCI_P_CMD_FRE | AHCI_P_CMD_APSTE |
2062 AHCI_P_CMD_ATAPI | AHCI_P_CMD_DLAE | AHCI_P_CMD_ALPE |
2063 AHCI_P_CMD_ASP | AHCI_P_CMD_ICC_MASK) & value;
/* ST cleared: stop command processing; ST set: start the command
 * list engine (CR) and map the guest command list into host memory. */
2065 if (!(value & AHCI_P_CMD_ST)) {
2070 p->cmd |= AHCI_P_CMD_CR;
2071 clb = (uint64_t)p->clbu << 32 | p->clb;
2072 p->cmd_lst = paddr_guest2host(ahci_ctx(sc), clb,
2073 AHCI_CL_SIZE * AHCI_MAX_SLOTS);
/* FRE set: enable FIS receive (FR) and map the received-FIS area. */
2076 if (value & AHCI_P_CMD_FRE) {
2079 p->cmd |= AHCI_P_CMD_FR;
2080 fb = (uint64_t)p->fbu << 32 | p->fb;
2081 /* we don't support FBSCP, so rfis size is 256Bytes */
2082 p->rfis = paddr_guest2host(ahci_ctx(sc), fb, 256);
2084 p->cmd &= ~AHCI_P_CMD_FR;
/* CLO (command-list override): clear BSY/DRQ and self-clear the bit. */
2087 if (value & AHCI_P_CMD_CLO) {
2088 p->tfd &= ~(ATA_S_BUSY | ATA_S_DRQ);
2089 p->cmd &= ~AHCI_P_CMD_CLO;
/* ICC is write-only from our perspective: acknowledge by clearing. */
2092 if (value & AHCI_P_CMD_ICC_MASK) {
2093 p->cmd &= ~AHCI_P_CMD_ICC_MASK;
/* PxCMD changes may allow queued commands to run now. */
2096 ahci_handle_port(p);
/* Writes to read-only port registers are logged and ignored. */
2102 WPRINTF("pci_ahci_port: read only registers 0x%"PRIx64"\n", offset);
/* PxSCTL: a DET reset request only takes effect while ST is clear. */
2106 if (!(p->cmd & AHCI_P_CMD_ST)) {
2107 if (value & ATA_SC_DET_RESET)
2119 ahci_handle_port(p);
/*
 * pci_ahci_host_write(): handle a guest write to a global (HBA-level)
 * AHCI register.  Visible cases: read-only registers (logged and
 * ignored), GHC (controller reset / global interrupt enable), and the
 * interrupt status register.
 */
2129 pci_ahci_host_write(struct pci_ahci_softc *sc, uint64_t offset, uint64_t value)
2131 DPRINTF("pci_ahci_host: write offset 0x%"PRIx64" value 0x%"PRIx64"\n",
2139 DPRINTF("pci_ahci_host: read only registers 0x%"PRIx64"\n", offset);
/* GHC.HR: HBA reset request. */
2142 if (value & AHCI_GHC_HR) {
/* GHC.IE: track the global interrupt-enable bit... */
2146 if (value & AHCI_GHC_IE)
2147 sc->ghc |= AHCI_GHC_IE;
2149 sc->ghc &= ~AHCI_GHC_IE;
/* ...and re-evaluate interrupt state for all ports. */
2150 ahci_generate_intr(sc, 0xffffffff);
/* IS write: re-evaluate interrupts for the acknowledged port bits. */
2154 ahci_generate_intr(sc, value);
/*
 * pci_ahci_write(): BAR 5 (ABAR) write entry point.  Requires aligned
 * 32-bit accesses, then routes the write to the global-register or
 * per-port handler based on offset, under the softc mutex.
 */
2162 pci_ahci_write(struct vmctx *ctx, int vcpu, struct pci_devinst *pi,
2163 int baridx, uint64_t offset, int size, uint64_t value)
2165 struct pci_ahci_softc *sc = pi->pi_arg;
/* AHCI registers live only in BAR 5 and are dword-sized. */
2167 assert(baridx == 5);
2168 assert((offset % 4) == 0 && size == 4);
2170 pthread_mutex_lock(&sc->mtx);
/* Global registers precede AHCI_OFFSET; port registers follow. */
2172 if (offset < AHCI_OFFSET)
2173 pci_ahci_host_write(sc, offset, value);
2174 else if (offset < AHCI_OFFSET + sc->ports * AHCI_STEP)
2175 pci_ahci_port_write(sc, offset, value);
2177 WPRINTF("pci_ahci: unknown i/o write offset 0x%"PRIx64"\n", offset);
2179 pthread_mutex_unlock(&sc->mtx);
/*
 * pci_ahci_host_read(): read a global AHCI register.  The global
 * registers are laid out contiguously starting at sc->cap, so the
 * value is fetched by indexing from that base.
 */
2183 pci_ahci_host_read(struct pci_ahci_softc *sc, uint64_t offset)
/* Treat the register block as an array of dwords rooted at CAP. */
2199 uint32_t *p = &sc->cap;
2200 p += (offset - AHCI_CAP) / sizeof(uint32_t);
2208 DPRINTF("pci_ahci_host: read offset 0x%"PRIx64" value 0x%x\n",
/*
 * pci_ahci_port_read(): read a per-port AHCI register.  Decodes port
 * index and register offset from the BAR offset, then indexes the
 * port's contiguous register block rooted at PxCLB.
 */
2215 pci_ahci_port_read(struct pci_ahci_softc *sc, uint64_t offset)
2218 int port = (offset - AHCI_OFFSET) / AHCI_STEP;
2219 offset = (offset - AHCI_OFFSET) % AHCI_STEP;
/* Port registers are laid out as an array of dwords starting at clb. */
2239 uint32_t *p= &sc->port[port].clb;
2240 p += (offset - AHCI_P_CLB) / sizeof(uint32_t);
2249 DPRINTF("pci_ahci_port %d: read offset 0x%"PRIx64" value 0x%x\n",
2250 port, offset, value);
2256 pci_ahci_read(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int baridx,
2257 uint64_t regoff, int size)
2259 struct pci_ahci_softc *sc = pi->pi_arg;
2263 assert(baridx == 5);
2264 assert(size == 1 || size == 2 || size == 4);
2265 assert((regoff & (size - 1)) == 0);
2267 pthread_mutex_lock(&sc->mtx);
2269 offset = regoff & ~0x3; /* round down to a multiple of 4 bytes */
2270 if (offset < AHCI_OFFSET)
2271 value = pci_ahci_host_read(sc, offset);
2272 else if (offset < AHCI_OFFSET + sc->ports * AHCI_STEP)
2273 value = pci_ahci_port_read(sc, offset);
2276 WPRINTF("pci_ahci: unknown i/o read offset 0x%"PRIx64"\n",
2279 value >>= 8 * (regoff & 0x3);
2281 pthread_mutex_unlock(&sc->mtx);
/*
 * pci_ahci_init(): common device initialization for ahci-hd/ahci-cd.
 * Parses the comma-separated "hd:"/"cd:" port option list, opens a
 * blockif backend per port, derives a stable drive ident from an MD5
 * of the backing path, sizes the command-slot count to the smallest
 * backend queue, and programs PCI config space, MSI, and BAR 5 to
 * emulate an Intel ICH8 AHCI controller.
 * 'atapi' selects the device type for ports named by bare 'opts'
 * (0 = disk, 1 = ATAPI).  Returns 0 on success (visible error paths
 * close already-opened backends).
 * NOTE(review): fragmented view — several declarations, option-parsing
 * lines and error branches are not visible here.
 */
2287 pci_ahci_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts, int atapi)
2289 char bident[sizeof("XX:XX:XX")];
2290 struct blockif_ctxt *bctxt;
2291 struct pci_ahci_softc *sc;
/* Debug log sink; path is fixed.  NOTE(review): result not visibly
 * checked — confirm against the full file. */
2300 dbg = fopen("/tmp/log", "w+");
2303 sc = calloc(1, sizeof(struct pci_ahci_softc));
2306 pthread_mutex_init(&sc->mtx, NULL);
/* One loop pass per configured port; 'next' points at the rest of the
 * option string for the following port. */
2311 for (p = 0; p < MAX_PORTS && opts != NULL; p++, opts = next) {
2312 /* Identify and cut off type of present port. */
2313 if (strncmp(opts, "hd:", 3) == 0) {
2316 } else if (strncmp(opts, "cd:", 3) == 0) {
2321 /* Find and cut off the next port options. */
2322 next = strstr(opts, ",hd:");
2323 next2 = strstr(opts, ",cd:");
/* Whichever marker appears first delimits this port's options. */
2324 if (next == NULL || (next2 != NULL && next2 < next))
2335 * Attempt to open the backing image. Use the PCI slot/func
2336 * and the port number for the identifier string.
2338 snprintf(bident, sizeof(bident), "%d:%d:%d", pi->pi_slot,
2340 bctxt = blockif_open(opts, bident);
2341 if (bctxt == NULL) {
2346 sc->port[p].bctx = bctxt;
2347 sc->port[p].pr_sc = sc;
2348 sc->port[p].port = p;
2349 sc->port[p].atapi = atapi;
2352 * Create an identifier for the backing file.
2353 * Use parts of the md5 sum of the filename
2356 MD5Update(&mdctx, opts, strlen(opts));
2357 MD5Final(digest, &mdctx);
/* Fixed-format ident: fits the port's ident buffer by construction. */
2358 sprintf(sc->port[p].ident, "BHYVE-%02X%02X-%02X%02X-%02X%02X",
2359 digest[0], digest[1], digest[2], digest[3], digest[4],
2363 * Allocate blockif request structures and add them
2366 pci_ahci_ioreq_init(&sc->port[p]);
/* Advertise only as many command slots as every backend can queue. */
2369 if (sc->port[p].ioqsz < slots)
2370 slots = sc->port[p].ioqsz;
2374 /* Intel ICH8 AHCI */
/* Always report at least the ICH8's six ports. */
2376 if (sc->ports < DEF_PORTS)
2377 sc->ports = DEF_PORTS;
/* HBA capabilities: 64-bit, NCQ, SNotification, staggered spin-up,
 * ALPM, Gen3 speed (ISS=3), slot count, and port count minus one. */
2378 sc->cap = AHCI_CAP_64BIT | AHCI_CAP_SNCQ | AHCI_CAP_SSNTF |
2379 AHCI_CAP_SMPS | AHCI_CAP_SSS | AHCI_CAP_SALP |
2380 AHCI_CAP_SAL | AHCI_CAP_SCLO | (0x3 << AHCI_CAP_ISS_SHIFT)|
2381 AHCI_CAP_PMD | AHCI_CAP_SSC | AHCI_CAP_PSC |
2382 (slots << AHCI_CAP_NCS_SHIFT) | AHCI_CAP_SXS | (sc->ports - 1);
2385 sc->cap2 = AHCI_CAP2_APST;
/* Present as an Intel ICH8 AHCI SATA controller. */
2388 pci_set_cfgdata16(pi, PCIR_DEVICE, 0x2821);
2389 pci_set_cfgdata16(pi, PCIR_VENDOR, 0x8086);
2390 pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_STORAGE);
2391 pci_set_cfgdata8(pi, PCIR_SUBCLASS, PCIS_STORAGE_SATA);
2392 pci_set_cfgdata8(pi, PCIR_PROGIF, PCIP_STORAGE_SATA_AHCI_1_0);
/* MSI vector count: smallest power of two >= min(ports, 16). */
2393 p = MIN(sc->ports, 16);
2394 p = flsl(p) - ((p & (p - 1)) ? 0 : 1);
2395 pci_emul_add_msicap(pi, 1 << p);
/* BAR 5 covers global registers plus one register block per port. */
2396 pci_emul_alloc_bar(pi, 5, PCIBAR_MEM32,
2397 AHCI_OFFSET + sc->ports * AHCI_STEP);
2399 pci_lintr_request(pi);
/* Error path: close any backends that were successfully opened. */
2403 for (p = 0; p < sc->ports; p++) {
2404 if (sc->port[p].bctx != NULL)
2405 blockif_close(sc->port[p].bctx);
/* Init entry point for "ahci"/"ahci-hd": disk (non-ATAPI) ports. */
2414 pci_ahci_hd_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
2417 return (pci_ahci_init(ctx, pi, opts, 0));
/* Init entry point for "ahci-cd": ATAPI (CD-ROM) ports. */
2421 pci_ahci_atapi_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
2424 return (pci_ahci_init(ctx, pi, opts, 1));
2428 * Use separate emulation names to distinguish drive and atapi devices
/* Generic "ahci" emulation: defaults to hard-disk semantics. */
2430 struct pci_devemu pci_de_ahci = {
2432 .pe_init = pci_ahci_hd_init,
2433 .pe_barwrite = pci_ahci_write,
2434 .pe_barread = pci_ahci_read
2436 PCI_EMUL_SET(pci_de_ahci);
/* "ahci-hd" emulation: explicit hard-disk variant. */
2438 struct pci_devemu pci_de_ahci_hd = {
2439 .pe_emu = "ahci-hd",
2440 .pe_init = pci_ahci_hd_init,
2441 .pe_barwrite = pci_ahci_write,
2442 .pe_barread = pci_ahci_read
2444 PCI_EMUL_SET(pci_de_ahci_hd);
/* "ahci-cd" emulation: ATAPI CD-ROM variant. */
2446 struct pci_devemu pci_de_ahci_cd = {
2447 .pe_emu = "ahci-cd",
2448 .pe_init = pci_ahci_atapi_init,
2449 .pe_barwrite = pci_ahci_write,
2450 .pe_barread = pci_ahci_read
2452 PCI_EMUL_SET(pci_de_ahci_cd);