2 * Copyright (c) 2013 Zhixiang Yu <zcore@freebsd.org>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
32 #include <sys/param.h>
33 #include <sys/linker_set.h>
36 #include <sys/ioctl.h>
39 #include <sys/endian.h>
51 #include <pthread_np.h>
60 #define MAX_PORTS 6 /* Intel ICH8 AHCI supports 6 ports */
62 #define PxSIG_ATA 0x00000101 /* ATA drive */
63 #define PxSIG_ATAPI 0xeb140101 /* ATAPI drive */
66 FIS_TYPE_REGH2D = 0x27, /* Register FIS - host to device */
67 FIS_TYPE_REGD2H = 0x34, /* Register FIS - device to host */
68 FIS_TYPE_DMAACT = 0x39, /* DMA activate FIS - device to host */
69 FIS_TYPE_DMASETUP = 0x41, /* DMA setup FIS - bidirectional */
70 FIS_TYPE_DATA = 0x46, /* Data FIS - bidirectional */
71 FIS_TYPE_BIST = 0x58, /* BIST activate FIS - bidirectional */
72 FIS_TYPE_PIOSETUP = 0x5F, /* PIO setup FIS - device to host */
73 FIS_TYPE_SETDEVBITS = 0xA1, /* Set dev bits FIS - device to host */
79 #define TEST_UNIT_READY 0x00
80 #define REQUEST_SENSE 0x03
82 #define START_STOP_UNIT 0x1B
83 #define PREVENT_ALLOW 0x1E
84 #define READ_CAPACITY 0x25
86 #define POSITION_TO_ELEMENT 0x2B
88 #define GET_EVENT_STATUS_NOTIFICATION 0x4A
89 #define MODE_SENSE_10 0x5A
90 #define REPORT_LUNS 0xA0
95 * SCSI mode page codes
97 #define MODEPAGE_RW_ERROR_RECOVERY 0x01
98 #define MODEPAGE_CD_CAPABILITIES 0x2A
103 #define ATA_SF_ENAB_SATA_SF 0x10
104 #define ATA_SATA_SF_AN 0x05
105 #define ATA_SF_DIS_SATA_SF 0x90
112 #define DPRINTF(format, arg...) do{fprintf(dbg, format, ##arg);fflush(dbg);}while(0)
114 #define DPRINTF(format, arg...)
116 #define WPRINTF(format, arg...) printf(format, ##arg)
119 struct blockif_req io_req;
120 struct ahci_port *io_pr;
121 STAILQ_ENTRY(ahci_ioreq) io_flist;
122 TAILQ_ENTRY(ahci_ioreq) io_blist;
131 struct blockif_ctxt *bctx;
132 struct pci_ahci_softc *pr_sc;
140 uint8_t err_cfis[20];
166 struct ahci_ioreq *ioreq;
168 STAILQ_HEAD(ahci_fhead, ahci_ioreq) iofhd;
169 TAILQ_HEAD(ahci_bhead, ahci_ioreq) iobhd;
172 struct ahci_cmd_hdr {
177 uint32_t reserved[4];
180 struct ahci_prdt_entry {
183 #define DBCMASK 0x3fffff
187 struct pci_ahci_softc {
188 struct pci_devinst *asc_pi;
203 struct ahci_port port[MAX_PORTS];
205 #define ahci_ctx(sc) ((sc)->asc_pi->pi_vmctx)
207 static inline void lba_to_msf(uint8_t *buf, int lba)
210 buf[0] = (lba / 75) / 60;
211 buf[1] = (lba / 75) % 60;
216 * generate HBA intr depending on whether or not ports within
217 * the controller have an interrupt pending.
220 ahci_generate_intr(struct pci_ahci_softc *sc)
222 struct pci_devinst *pi;
227 for (i = 0; i < sc->ports; i++) {
228 struct ahci_port *pr;
234 DPRINTF("%s %x\n", __func__, sc->is);
236 if (sc->is && (sc->ghc & AHCI_GHC_IE)) {
237 if (pci_msi_enabled(pi)) {
239 * Generate an MSI interrupt on every edge
241 pci_generate_msi(pi, 0);
242 } else if (!sc->lintr) {
244 * Only generate a pin-based interrupt if one wasn't
248 pci_lintr_assert(pi);
250 } else if (sc->lintr) {
252 * No interrupts: deassert pin-based signal if it had
255 pci_lintr_deassert(pi);
261 ahci_write_fis(struct ahci_port *p, enum sata_fis_type ft, uint8_t *fis)
263 int offset, len, irq;
265 if (p->rfis == NULL || !(p->cmd & AHCI_P_CMD_FRE))
269 case FIS_TYPE_REGD2H:
274 case FIS_TYPE_SETDEVBITS:
279 case FIS_TYPE_PIOSETUP:
285 WPRINTF("unsupported fis type %d\n", ft);
288 memcpy(p->rfis + offset, fis, len);
291 ahci_generate_intr(p->pr_sc);
296 ahci_write_fis_piosetup(struct ahci_port *p)
300 memset(fis, 0, sizeof(fis));
301 fis[0] = FIS_TYPE_PIOSETUP;
302 ahci_write_fis(p, FIS_TYPE_PIOSETUP, fis);
306 ahci_write_fis_sdb(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t tfd)
311 error = (tfd >> 8) & 0xff;
312 memset(fis, 0, sizeof(fis));
313 fis[0] = FIS_TYPE_SETDEVBITS;
317 if (fis[2] & ATA_S_ERROR) {
318 p->is |= AHCI_P_IX_TFE;
319 p->err_cfis[0] = slot;
320 p->err_cfis[2] = tfd & 0x77;
321 p->err_cfis[3] = error;
322 memcpy(&p->err_cfis[4], cfis + 4, 16);
324 *(uint32_t *)(fis + 4) = (1 << slot);
325 p->sact &= ~(1 << slot);
328 ahci_write_fis(p, FIS_TYPE_SETDEVBITS, fis);
332 ahci_write_fis_d2h(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t tfd)
337 error = (tfd >> 8) & 0xff;
338 memset(fis, 0, sizeof(fis));
339 fis[0] = FIS_TYPE_REGD2H;
353 if (fis[2] & ATA_S_ERROR) {
354 p->is |= AHCI_P_IX_TFE;
355 p->err_cfis[0] = 0x80;
356 p->err_cfis[2] = tfd & 0xff;
357 p->err_cfis[3] = error;
358 memcpy(&p->err_cfis[4], cfis + 4, 16);
360 p->ci &= ~(1 << slot);
362 ahci_write_fis(p, FIS_TYPE_REGD2H, fis);
366 ahci_write_reset_fis_d2h(struct ahci_port *p)
370 memset(fis, 0, sizeof(fis));
371 fis[0] = FIS_TYPE_REGD2H;
379 ahci_write_fis(p, FIS_TYPE_REGD2H, fis);
383 ahci_check_stopped(struct ahci_port *p)
386 * If we are no longer processing the command list and nothing
387 * is in-flight, clear the running bit, the current command
388 * slot, the command issue and active bits.
390 if (!(p->cmd & AHCI_P_CMD_ST)) {
391 if (p->pending == 0) {
392 p->cmd &= ~(AHCI_P_CMD_CR | AHCI_P_CMD_CCS_MASK);
400 ahci_port_stop(struct ahci_port *p)
402 struct ahci_ioreq *aior;
408 assert(pthread_mutex_isowned_np(&p->pr_sc->mtx));
410 TAILQ_FOREACH(aior, &p->iobhd, io_blist) {
412 * Try to cancel the outstanding blockif request.
414 error = blockif_cancel(p->bctx, &aior->io_req);
420 if (cfis[2] == ATA_WRITE_FPDMA_QUEUED ||
421 cfis[2] == ATA_READ_FPDMA_QUEUED)
425 p->sact &= ~(1 << slot);
427 p->ci &= ~(1 << slot);
430 * This command is now done.
432 p->pending &= ~(1 << slot);
435 * Delete the blockif request from the busy list
437 TAILQ_REMOVE(&p->iobhd, aior, io_blist);
440 * Move the blockif request back to the free list
442 STAILQ_INSERT_TAIL(&p->iofhd, aior, io_flist);
445 ahci_check_stopped(p);
449 ahci_port_reset(struct ahci_port *pr)
453 pr->xfermode = ATA_UDMA6;
454 pr->mult_sectors = 128;
457 pr->ssts = ATA_SS_DET_NO_DEVICE;
458 pr->sig = 0xFFFFFFFF;
462 pr->ssts = ATA_SS_DET_PHY_ONLINE | ATA_SS_IPM_ACTIVE;
463 if (pr->sctl & ATA_SC_SPD_MASK)
464 pr->ssts |= (pr->sctl & ATA_SC_SPD_MASK);
466 pr->ssts |= ATA_SS_SPD_GEN3;
467 pr->tfd = (1 << 8) | ATA_S_DSC | ATA_S_DMA;
470 pr->tfd |= ATA_S_READY;
472 pr->sig = PxSIG_ATAPI;
473 ahci_write_reset_fis_d2h(pr);
477 ahci_reset(struct pci_ahci_softc *sc)
481 sc->ghc = AHCI_GHC_AE;
485 pci_lintr_deassert(sc->asc_pi);
489 for (i = 0; i < sc->ports; i++) {
492 sc->port[i].sctl = 0;
493 ahci_port_reset(&sc->port[i]);
498 ata_string(uint8_t *dest, const char *src, int len)
502 for (i = 0; i < len; i++) {
504 dest[i ^ 1] = *src++;
511 atapi_string(uint8_t *dest, const char *src, int len)
515 for (i = 0; i < len; i++) {
524 ahci_handle_dma(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t done,
527 struct ahci_ioreq *aior;
528 struct blockif_req *breq;
529 struct pci_ahci_softc *sc;
530 struct ahci_prdt_entry *prdt;
531 struct ahci_cmd_hdr *hdr;
534 int i, err, iovcnt, ncq, readop;
537 prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
538 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
543 if (cfis[2] == ATA_WRITE || cfis[2] == ATA_WRITE48 ||
544 cfis[2] == ATA_WRITE_MUL || cfis[2] == ATA_WRITE_MUL48 ||
545 cfis[2] == ATA_WRITE_DMA || cfis[2] == ATA_WRITE_DMA48 ||
546 cfis[2] == ATA_WRITE_FPDMA_QUEUED)
549 if (cfis[2] == ATA_WRITE_FPDMA_QUEUED ||
550 cfis[2] == ATA_READ_FPDMA_QUEUED) {
551 lba = ((uint64_t)cfis[10] << 40) |
552 ((uint64_t)cfis[9] << 32) |
553 ((uint64_t)cfis[8] << 24) |
554 ((uint64_t)cfis[6] << 16) |
555 ((uint64_t)cfis[5] << 8) |
557 len = cfis[11] << 8 | cfis[3];
561 } else if (cfis[2] == ATA_READ48 || cfis[2] == ATA_WRITE48 ||
562 cfis[2] == ATA_READ_MUL48 || cfis[2] == ATA_WRITE_MUL48 ||
563 cfis[2] == ATA_READ_DMA48 || cfis[2] == ATA_WRITE_DMA48) {
564 lba = ((uint64_t)cfis[10] << 40) |
565 ((uint64_t)cfis[9] << 32) |
566 ((uint64_t)cfis[8] << 24) |
567 ((uint64_t)cfis[6] << 16) |
568 ((uint64_t)cfis[5] << 8) |
570 len = cfis[13] << 8 | cfis[12];
574 lba = ((cfis[7] & 0xf) << 24) | (cfis[6] << 16) |
575 (cfis[5] << 8) | cfis[4];
580 lba *= blockif_sectsz(p->bctx);
581 len *= blockif_sectsz(p->bctx);
584 * Pull request off free list
586 aior = STAILQ_FIRST(&p->iofhd);
587 assert(aior != NULL);
588 STAILQ_REMOVE_HEAD(&p->iofhd, io_flist);
593 breq = &aior->io_req;
594 breq->br_offset = lba + done;
595 iovcnt = hdr->prdtl - seek;
596 if (iovcnt > BLOCKIF_IOV_MAX) {
597 aior->prdtl = iovcnt - BLOCKIF_IOV_MAX;
598 iovcnt = BLOCKIF_IOV_MAX;
601 breq->br_iovcnt = iovcnt;
604 * Mark this command in-flight.
606 p->pending |= 1 << slot;
609 * Stuff request onto busy list
611 TAILQ_INSERT_HEAD(&p->iobhd, aior, io_blist);
614 * Build up the iovec based on the prdt
616 for (i = 0; i < iovcnt; i++) {
619 dbcsz = (prdt->dbc & DBCMASK) + 1;
620 breq->br_iov[i].iov_base = paddr_guest2host(ahci_ctx(sc),
622 breq->br_iov[i].iov_len = dbcsz;
627 err = blockif_read(p->bctx, breq);
629 err = blockif_write(p->bctx, breq);
633 p->ci &= ~(1 << slot);
637 ahci_handle_flush(struct ahci_port *p, int slot, uint8_t *cfis)
639 struct ahci_ioreq *aior;
640 struct blockif_req *breq;
644 * Pull request off free list
646 aior = STAILQ_FIRST(&p->iofhd);
647 assert(aior != NULL);
648 STAILQ_REMOVE_HEAD(&p->iofhd, io_flist);
654 breq = &aior->io_req;
657 * Mark this command in-flight.
659 p->pending |= 1 << slot;
662 * Stuff request onto busy list
664 TAILQ_INSERT_HEAD(&p->iobhd, aior, io_blist);
666 err = blockif_flush(p->bctx, breq);
671 read_prdt(struct ahci_port *p, int slot, uint8_t *cfis,
674 struct ahci_cmd_hdr *hdr;
675 struct ahci_prdt_entry *prdt;
679 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
682 prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
683 for (i = 0; i < hdr->prdtl && len; i++) {
688 dbcsz = (prdt->dbc & DBCMASK) + 1;
689 ptr = paddr_guest2host(ahci_ctx(p->pr_sc), prdt->dba, dbcsz);
690 sublen = len < dbcsz ? len : dbcsz;
691 memcpy(to, ptr, sublen);
699 ahci_handle_dsm_trim(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t done)
701 struct ahci_ioreq *aior;
702 struct blockif_req *breq;
709 if (cfis[2] == ATA_DATA_SET_MANAGEMENT) {
710 len = (uint16_t)cfis[13] << 8 | cfis[12];
712 } else { /* ATA_SEND_FPDMA_QUEUED */
713 len = (uint16_t)cfis[11] << 8 | cfis[3];
716 read_prdt(p, slot, cfis, buf, sizeof(buf));
720 elba = ((uint64_t)entry[5] << 40) |
721 ((uint64_t)entry[4] << 32) |
722 ((uint64_t)entry[3] << 24) |
723 ((uint64_t)entry[2] << 16) |
724 ((uint64_t)entry[1] << 8) |
726 elen = (uint16_t)entry[7] << 8 | entry[6];
730 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
731 p->pending &= ~(1 << slot);
732 ahci_check_stopped(p);
739 * Pull request off free list
741 aior = STAILQ_FIRST(&p->iofhd);
742 assert(aior != NULL);
743 STAILQ_REMOVE_HEAD(&p->iofhd, io_flist);
750 breq = &aior->io_req;
751 breq->br_offset = elba * blockif_sectsz(p->bctx);
753 breq->br_iov[0].iov_len = elen * blockif_sectsz(p->bctx);
756 * Mark this command in-flight.
758 p->pending |= 1 << slot;
761 * Stuff request onto busy list
763 TAILQ_INSERT_HEAD(&p->iobhd, aior, io_blist);
765 err = blockif_delete(p->bctx, breq);
770 write_prdt(struct ahci_port *p, int slot, uint8_t *cfis,
773 struct ahci_cmd_hdr *hdr;
774 struct ahci_prdt_entry *prdt;
778 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
781 prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
782 for (i = 0; i < hdr->prdtl && len; i++) {
787 dbcsz = (prdt->dbc & DBCMASK) + 1;
788 ptr = paddr_guest2host(ahci_ctx(p->pr_sc), prdt->dba, dbcsz);
789 sublen = len < dbcsz ? len : dbcsz;
790 memcpy(ptr, from, sublen);
795 hdr->prdbc = size - len;
799 ahci_checksum(uint8_t *buf, int size)
804 for (i = 0; i < size - 1; i++)
806 buf[size - 1] = 0x100 - sum;
810 ahci_handle_read_log(struct ahci_port *p, int slot, uint8_t *cfis)
812 struct ahci_cmd_hdr *hdr;
815 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
816 if (p->atapi || hdr->prdtl == 0 || cfis[4] != 0x10 ||
817 cfis[5] != 0 || cfis[9] != 0 || cfis[12] != 1 || cfis[13] != 0) {
818 ahci_write_fis_d2h(p, slot, cfis,
819 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
823 memset(buf, 0, sizeof(buf));
824 memcpy(buf, p->err_cfis, sizeof(p->err_cfis));
825 ahci_checksum(buf, sizeof(buf));
827 if (cfis[2] == ATA_READ_LOG_EXT)
828 ahci_write_fis_piosetup(p);
829 write_prdt(p, slot, cfis, (void *)buf, sizeof(buf));
830 ahci_write_fis_d2h(p, slot, cfis, ATA_S_DSC | ATA_S_READY);
834 handle_identify(struct ahci_port *p, int slot, uint8_t *cfis)
836 struct ahci_cmd_hdr *hdr;
838 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
839 if (p->atapi || hdr->prdtl == 0) {
840 ahci_write_fis_d2h(p, slot, cfis,
841 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
845 int sectsz, psectsz, psectoff, candelete, ro;
849 ro = blockif_is_ro(p->bctx);
850 candelete = blockif_candelete(p->bctx);
851 sectsz = blockif_sectsz(p->bctx);
852 sectors = blockif_size(p->bctx) / sectsz;
853 blockif_chs(p->bctx, &cyl, &heads, &sech);
854 blockif_psectsz(p->bctx, &psectsz, &psectoff);
855 memset(buf, 0, sizeof(buf));
860 ata_string((uint8_t *)(buf+10), p->ident, 20);
861 ata_string((uint8_t *)(buf+23), "001", 8);
862 ata_string((uint8_t *)(buf+27), "BHYVE SATA DISK", 40);
863 buf[47] = (0x8000 | 128);
865 buf[49] = (1 << 8 | 1 << 9 | 1 << 11);
867 buf[53] = (1 << 1 | 1 << 2);
869 buf[59] = (0x100 | p->mult_sectors);
870 if (sectors <= 0x0fffffff) {
872 buf[61] = (sectors >> 16);
878 if (p->xfermode & ATA_WDMA0)
879 buf[63] |= (1 << ((p->xfermode & 7) + 8));
887 buf[76] = (ATA_SATA_GEN1 | ATA_SATA_GEN2 | ATA_SATA_GEN3 |
889 buf[77] = (ATA_SUPPORT_RCVSND_FPDMA_QUEUED |
890 (p->ssts & ATA_SS_SPD_MASK) >> 3);
893 buf[82] = (ATA_SUPPORT_POWERMGT | ATA_SUPPORT_WRITECACHE|
894 ATA_SUPPORT_LOOKAHEAD | ATA_SUPPORT_NOP);
895 buf[83] = (ATA_SUPPORT_ADDRESS48 | ATA_SUPPORT_FLUSHCACHE |
896 ATA_SUPPORT_FLUSHCACHE48 | 1 << 14);
898 buf[85] = (ATA_SUPPORT_POWERMGT | ATA_SUPPORT_WRITECACHE|
899 ATA_SUPPORT_LOOKAHEAD | ATA_SUPPORT_NOP);
900 buf[86] = (ATA_SUPPORT_ADDRESS48 | ATA_SUPPORT_FLUSHCACHE |
901 ATA_SUPPORT_FLUSHCACHE48 | 1 << 15);
904 if (p->xfermode & ATA_UDMA0)
905 buf[88] |= (1 << ((p->xfermode & 7) + 8));
906 buf[93] = (1 | 1 <<14);
908 buf[101] = (sectors >> 16);
909 buf[102] = (sectors >> 32);
910 buf[103] = (sectors >> 48);
911 if (candelete && !ro) {
912 buf[69] |= ATA_SUPPORT_RZAT | ATA_SUPPORT_DRAT;
914 buf[169] = ATA_SUPPORT_DSM_TRIM;
918 if (psectsz > sectsz) {
920 buf[106] |= ffsl(psectsz / sectsz) - 1;
921 buf[209] |= (psectoff / sectsz);
925 buf[117] = sectsz / 2;
926 buf[118] = ((sectsz / 2) >> 16);
928 buf[119] = (ATA_SUPPORT_RWLOGDMAEXT | 1 << 14);
929 buf[120] = (ATA_SUPPORT_RWLOGDMAEXT | 1 << 14);
932 ahci_checksum((uint8_t *)buf, sizeof(buf));
933 ahci_write_fis_piosetup(p);
934 write_prdt(p, slot, cfis, (void *)buf, sizeof(buf));
935 ahci_write_fis_d2h(p, slot, cfis, ATA_S_DSC | ATA_S_READY);
940 handle_atapi_identify(struct ahci_port *p, int slot, uint8_t *cfis)
943 ahci_write_fis_d2h(p, slot, cfis,
944 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
948 memset(buf, 0, sizeof(buf));
949 buf[0] = (2 << 14 | 5 << 8 | 1 << 7 | 2 << 5);
950 ata_string((uint8_t *)(buf+10), p->ident, 20);
951 ata_string((uint8_t *)(buf+23), "001", 8);
952 ata_string((uint8_t *)(buf+27), "BHYVE SATA DVD ROM", 40);
953 buf[49] = (1 << 9 | 1 << 8);
954 buf[50] = (1 << 14 | 1);
955 buf[53] = (1 << 2 | 1 << 1);
958 if (p->xfermode & ATA_WDMA0)
959 buf[63] |= (1 << ((p->xfermode & 7) + 8));
965 buf[76] = (ATA_SATA_GEN1 | ATA_SATA_GEN2 | ATA_SATA_GEN3);
966 buf[77] = ((p->ssts & ATA_SS_SPD_MASK) >> 3);
969 buf[82] = (ATA_SUPPORT_POWERMGT | ATA_SUPPORT_PACKET |
970 ATA_SUPPORT_RESET | ATA_SUPPORT_NOP);
973 buf[85] = (ATA_SUPPORT_POWERMGT | ATA_SUPPORT_PACKET |
974 ATA_SUPPORT_RESET | ATA_SUPPORT_NOP);
977 if (p->xfermode & ATA_UDMA0)
978 buf[88] |= (1 << ((p->xfermode & 7) + 8));
981 ahci_checksum((uint8_t *)buf, sizeof(buf));
982 ahci_write_fis_piosetup(p);
983 write_prdt(p, slot, cfis, (void *)buf, sizeof(buf));
984 ahci_write_fis_d2h(p, slot, cfis, ATA_S_DSC | ATA_S_READY);
989 atapi_inquiry(struct ahci_port *p, int slot, uint8_t *cfis)
998 if (acmd[1] & 1) { /* VPD */
999 if (acmd[2] == 0) { /* Supported VPD pages */
1007 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1009 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1010 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1011 ahci_write_fis_d2h(p, slot, cfis, tfd);
1023 atapi_string(buf + 8, "BHYVE", 8);
1024 atapi_string(buf + 16, "BHYVE DVD-ROM", 16);
1025 atapi_string(buf + 32, "001", 4);
1031 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1032 write_prdt(p, slot, cfis, buf, len);
1033 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1037 atapi_read_capacity(struct ahci_port *p, int slot, uint8_t *cfis)
1042 sectors = blockif_size(p->bctx) / 2048;
1043 be32enc(buf, sectors - 1);
1044 be32enc(buf + 4, 2048);
1045 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1046 write_prdt(p, slot, cfis, buf, sizeof(buf));
1047 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1051 atapi_read_toc(struct ahci_port *p, int slot, uint8_t *cfis)
1059 len = be16dec(acmd + 7);
1060 format = acmd[9] >> 6;
1066 uint8_t start_track, buf[20], *bp;
1068 msf = (acmd[1] >> 1) & 1;
1069 start_track = acmd[6];
1070 if (start_track > 1 && start_track != 0xaa) {
1072 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1074 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1075 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1076 ahci_write_fis_d2h(p, slot, cfis, tfd);
1082 if (start_track <= 1) {
1102 sectors = blockif_size(p->bctx) / blockif_sectsz(p->bctx);
1106 lba_to_msf(bp, sectors);
1109 be32enc(bp, sectors);
1113 be16enc(buf, size - 2);
1116 write_prdt(p, slot, cfis, buf, len);
1117 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1118 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1125 memset(buf, 0, sizeof(buf));
1129 if (len > sizeof(buf))
1131 write_prdt(p, slot, cfis, buf, len);
1132 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1133 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1140 uint8_t start_track, *bp, buf[50];
1142 msf = (acmd[1] >> 1) & 1;
1143 start_track = acmd[6];
1179 sectors = blockif_size(p->bctx) / blockif_sectsz(p->bctx);
1183 lba_to_msf(bp, sectors);
1186 be32enc(bp, sectors);
1209 be16enc(buf, size - 2);
1212 write_prdt(p, slot, cfis, buf, len);
1213 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1214 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1221 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1223 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1224 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1225 ahci_write_fis_d2h(p, slot, cfis, tfd);
1232 atapi_report_luns(struct ahci_port *p, int slot, uint8_t *cfis)
1236 memset(buf, 0, sizeof(buf));
1239 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1240 write_prdt(p, slot, cfis, buf, sizeof(buf));
1241 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1245 atapi_read(struct ahci_port *p, int slot, uint8_t *cfis,
1246 uint32_t done, int seek)
1248 struct ahci_ioreq *aior;
1249 struct ahci_cmd_hdr *hdr;
1250 struct ahci_prdt_entry *prdt;
1251 struct blockif_req *breq;
1252 struct pci_ahci_softc *sc;
1260 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
1261 prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
1264 lba = be32dec(acmd + 2);
1265 if (acmd[0] == READ_10)
1266 len = be16dec(acmd + 7);
1268 len = be32dec(acmd + 6);
1270 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1271 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1277 * Pull request off free list
1279 aior = STAILQ_FIRST(&p->iofhd);
1280 assert(aior != NULL);
1281 STAILQ_REMOVE_HEAD(&p->iofhd, io_flist);
1286 breq = &aior->io_req;
1287 breq->br_offset = lba + done;
1288 iovcnt = hdr->prdtl - seek;
1289 if (iovcnt > BLOCKIF_IOV_MAX) {
1290 aior->prdtl = iovcnt - BLOCKIF_IOV_MAX;
1291 iovcnt = BLOCKIF_IOV_MAX;
1294 breq->br_iovcnt = iovcnt;
1297 * Mark this command in-flight.
1299 p->pending |= 1 << slot;
1302 * Stuff request onto busy list
1304 TAILQ_INSERT_HEAD(&p->iobhd, aior, io_blist);
1307 * Build up the iovec based on the prdt
1309 for (i = 0; i < iovcnt; i++) {
1312 dbcsz = (prdt->dbc & DBCMASK) + 1;
1313 breq->br_iov[i].iov_base = paddr_guest2host(ahci_ctx(sc),
1315 breq->br_iov[i].iov_len = dbcsz;
1316 aior->done += dbcsz;
1319 err = blockif_read(p->bctx, breq);
1324 atapi_request_sense(struct ahci_port *p, int slot, uint8_t *cfis)
1332 if (len > sizeof(buf))
1334 memset(buf, 0, len);
1335 buf[0] = 0x70 | (1 << 7);
1336 buf[2] = p->sense_key;
1339 write_prdt(p, slot, cfis, buf, len);
1340 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1341 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1345 atapi_start_stop_unit(struct ahci_port *p, int slot, uint8_t *cfis)
1347 uint8_t *acmd = cfis + 0x40;
1350 switch (acmd[4] & 3) {
1354 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1355 tfd = ATA_S_READY | ATA_S_DSC;
1358 /* TODO eject media */
1359 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1360 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1362 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1365 ahci_write_fis_d2h(p, slot, cfis, tfd);
1369 atapi_mode_sense(struct ahci_port *p, int slot, uint8_t *cfis)
1377 len = be16dec(acmd + 7);
1379 code = acmd[2] & 0x3f;
1384 case MODEPAGE_RW_ERROR_RECOVERY:
1388 if (len > sizeof(buf))
1391 memset(buf, 0, sizeof(buf));
1392 be16enc(buf, 16 - 2);
1397 write_prdt(p, slot, cfis, buf, len);
1398 tfd = ATA_S_READY | ATA_S_DSC;
1401 case MODEPAGE_CD_CAPABILITIES:
1405 if (len > sizeof(buf))
1408 memset(buf, 0, sizeof(buf));
1409 be16enc(buf, 30 - 2);
1415 be16enc(&buf[18], 2);
1416 be16enc(&buf[20], 512);
1417 write_prdt(p, slot, cfis, buf, len);
1418 tfd = ATA_S_READY | ATA_S_DSC;
1427 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1429 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1434 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1436 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1439 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1440 ahci_write_fis_d2h(p, slot, cfis, tfd);
1444 atapi_get_event_status_notification(struct ahci_port *p, int slot,
1452 /* we don't support asynchronous operation */
1453 if (!(acmd[1] & 1)) {
1454 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1456 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1461 len = be16dec(acmd + 7);
1462 if (len > sizeof(buf))
1465 memset(buf, 0, sizeof(buf));
1466 be16enc(buf, 8 - 2);
1470 write_prdt(p, slot, cfis, buf, len);
1471 tfd = ATA_S_READY | ATA_S_DSC;
1473 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1474 ahci_write_fis_d2h(p, slot, cfis, tfd);
1478 handle_packet_cmd(struct ahci_port *p, int slot, uint8_t *cfis)
1488 for (i = 0; i < 16; i++)
1489 DPRINTF("%02x ", acmd[i]);
1495 case TEST_UNIT_READY:
1496 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1497 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1500 atapi_inquiry(p, slot, cfis);
1503 atapi_read_capacity(p, slot, cfis);
1507 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1508 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1511 atapi_read_toc(p, slot, cfis);
1514 atapi_report_luns(p, slot, cfis);
1518 atapi_read(p, slot, cfis, 0, 0);
1521 atapi_request_sense(p, slot, cfis);
1523 case START_STOP_UNIT:
1524 atapi_start_stop_unit(p, slot, cfis);
1527 atapi_mode_sense(p, slot, cfis);
1529 case GET_EVENT_STATUS_NOTIFICATION:
1530 atapi_get_event_status_notification(p, slot, cfis);
1533 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1534 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1536 ahci_write_fis_d2h(p, slot, cfis, (p->sense_key << 12) |
1537 ATA_S_READY | ATA_S_ERROR);
1543 ahci_handle_cmd(struct ahci_port *p, int slot, uint8_t *cfis)
1547 case ATA_ATA_IDENTIFY:
1548 handle_identify(p, slot, cfis);
1550 case ATA_SETFEATURES:
1553 case ATA_SF_ENAB_SATA_SF:
1555 case ATA_SATA_SF_AN:
1556 p->tfd = ATA_S_DSC | ATA_S_READY;
1559 p->tfd = ATA_S_ERROR | ATA_S_READY;
1560 p->tfd |= (ATA_ERROR_ABORT << 8);
1564 case ATA_SF_ENAB_WCACHE:
1565 case ATA_SF_DIS_WCACHE:
1566 case ATA_SF_ENAB_RCACHE:
1567 case ATA_SF_DIS_RCACHE:
1568 p->tfd = ATA_S_DSC | ATA_S_READY;
1570 case ATA_SF_SETXFER:
1572 switch (cfis[12] & 0xf8) {
1578 p->xfermode = (cfis[12] & 0x7);
1581 p->tfd = ATA_S_DSC | ATA_S_READY;
1585 p->tfd = ATA_S_ERROR | ATA_S_READY;
1586 p->tfd |= (ATA_ERROR_ABORT << 8);
1589 ahci_write_fis_d2h(p, slot, cfis, p->tfd);
1593 if (cfis[12] != 0 &&
1594 (cfis[12] > 128 || (cfis[12] & (cfis[12] - 1)))) {
1595 p->tfd = ATA_S_ERROR | ATA_S_READY;
1596 p->tfd |= (ATA_ERROR_ABORT << 8);
1598 p->mult_sectors = cfis[12];
1599 p->tfd = ATA_S_DSC | ATA_S_READY;
1601 ahci_write_fis_d2h(p, slot, cfis, p->tfd);
1609 case ATA_READ_MUL48:
1610 case ATA_WRITE_MUL48:
1613 case ATA_READ_DMA48:
1614 case ATA_WRITE_DMA48:
1615 case ATA_READ_FPDMA_QUEUED:
1616 case ATA_WRITE_FPDMA_QUEUED:
1617 ahci_handle_dma(p, slot, cfis, 0, 0);
1619 case ATA_FLUSHCACHE:
1620 case ATA_FLUSHCACHE48:
1621 ahci_handle_flush(p, slot, cfis);
1623 case ATA_DATA_SET_MANAGEMENT:
1624 if (cfis[11] == 0 && cfis[3] == ATA_DSM_TRIM &&
1625 cfis[13] == 0 && cfis[12] == 1) {
1626 ahci_handle_dsm_trim(p, slot, cfis, 0);
1629 ahci_write_fis_d2h(p, slot, cfis,
1630 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
1632 case ATA_SEND_FPDMA_QUEUED:
1633 if ((cfis[13] & 0x1f) == ATA_SFPDMA_DSM &&
1634 cfis[17] == 0 && cfis[16] == ATA_DSM_TRIM &&
1635 cfis[11] == 0 && cfis[13] == 1) {
1636 ahci_handle_dsm_trim(p, slot, cfis, 0);
1639 ahci_write_fis_d2h(p, slot, cfis,
1640 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
1642 case ATA_READ_LOG_EXT:
1643 case ATA_READ_LOG_DMA_EXT:
1644 ahci_handle_read_log(p, slot, cfis);
1647 ahci_write_fis_d2h(p, slot, cfis,
1648 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
1650 case ATA_STANDBY_CMD:
1651 case ATA_STANDBY_IMMEDIATE:
1653 case ATA_IDLE_IMMEDIATE:
1655 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1657 case ATA_ATAPI_IDENTIFY:
1658 handle_atapi_identify(p, slot, cfis);
1660 case ATA_PACKET_CMD:
1662 ahci_write_fis_d2h(p, slot, cfis,
1663 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
1665 handle_packet_cmd(p, slot, cfis);
1668 WPRINTF("Unsupported cmd:%02x\n", cfis[2]);
1669 ahci_write_fis_d2h(p, slot, cfis,
1670 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
1676 ahci_handle_slot(struct ahci_port *p, int slot)
1678 struct ahci_cmd_hdr *hdr;
1679 struct ahci_prdt_entry *prdt;
1680 struct pci_ahci_softc *sc;
1685 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
1686 cfl = (hdr->flags & 0x1f) * 4;
1687 cfis = paddr_guest2host(ahci_ctx(sc), hdr->ctba,
1688 0x80 + hdr->prdtl * sizeof(struct ahci_prdt_entry));
1689 prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
1693 for (i = 0; i < cfl; i++) {
1696 DPRINTF("%02x ", cfis[i]);
1700 for (i = 0; i < hdr->prdtl; i++) {
1701 DPRINTF("%d@%08"PRIx64"\n", prdt->dbc & 0x3fffff, prdt->dba);
1706 if (cfis[0] != FIS_TYPE_REGH2D) {
1707 WPRINTF("Not a H2D FIS:%02x\n", cfis[0]);
1711 if (cfis[1] & 0x80) {
1712 ahci_handle_cmd(p, slot, cfis);
1714 if (cfis[15] & (1 << 2))
1716 else if (p->reset) {
1720 p->ci &= ~(1 << slot);
1725 ahci_handle_port(struct ahci_port *p)
1729 if (!(p->cmd & AHCI_P_CMD_ST))
1733 * Search for any new commands to issue ignoring those that
1734 * are already in-flight.
1736 for (i = 0; (i < 32) && p->ci; i++) {
1737 if ((p->ci & (1 << i)) && !(p->pending & (1 << i))) {
1738 p->cmd &= ~AHCI_P_CMD_CCS_MASK;
1739 p->cmd |= i << AHCI_P_CMD_CCS_SHIFT;
1740 ahci_handle_slot(p, i);
1746 * blockif callback routine - this runs in the context of the blockif
1747 * i/o thread, so the mutex needs to be acquired.
1750 ata_ioreq_cb(struct blockif_req *br, int err)
1752 struct ahci_cmd_hdr *hdr;
1753 struct ahci_ioreq *aior;
1754 struct ahci_port *p;
1755 struct pci_ahci_softc *sc;
1758 int pending, slot, ncq, dsm;
1760 DPRINTF("%s %d\n", __func__, err);
1763 aior = br->br_param;
1767 pending = aior->prdtl;
1769 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
1771 if (cfis[2] == ATA_WRITE_FPDMA_QUEUED ||
1772 cfis[2] == ATA_READ_FPDMA_QUEUED ||
1773 cfis[2] == ATA_SEND_FPDMA_QUEUED)
1775 if (cfis[2] == ATA_DATA_SET_MANAGEMENT ||
1776 (cfis[2] == ATA_SEND_FPDMA_QUEUED &&
1777 (cfis[13] & 0x1f) == ATA_SFPDMA_DSM))
1780 pthread_mutex_lock(&sc->mtx);
1783 * Delete the blockif request from the busy list
1785 TAILQ_REMOVE(&p->iobhd, aior, io_blist);
1788 * Move the blockif request back to the free list
1790 STAILQ_INSERT_TAIL(&p->iofhd, aior, io_flist);
1793 hdr->prdbc = aior->done;
1796 if (aior->done != aior->len && !err) {
1797 ahci_handle_dsm_trim(p, slot, cfis, aior->done);
1801 if (pending && !err) {
1802 ahci_handle_dma(p, slot, cfis, aior->done,
1803 hdr->prdtl - pending);
1808 if (!err && aior->done == aior->len) {
1809 tfd = ATA_S_READY | ATA_S_DSC;
1811 tfd = (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR;
1815 ahci_write_fis_sdb(p, slot, cfis, tfd);
1817 ahci_write_fis_d2h(p, slot, cfis, tfd);
1820 * This command is now complete.
1822 p->pending &= ~(1 << slot);
1824 ahci_check_stopped(p);
1826 pthread_mutex_unlock(&sc->mtx);
1827 DPRINTF("%s exit\n", __func__);
1831 atapi_ioreq_cb(struct blockif_req *br, int err)
1833 struct ahci_cmd_hdr *hdr;
1834 struct ahci_ioreq *aior;
1835 struct ahci_port *p;
1836 struct pci_ahci_softc *sc;
1841 DPRINTF("%s %d\n", __func__, err);
1843 aior = br->br_param;
1847 pending = aior->prdtl;
1849 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + aior->slot * AHCI_CL_SIZE);
1851 pthread_mutex_lock(&sc->mtx);
1854 * Delete the blockif request from the busy list
1856 TAILQ_REMOVE(&p->iobhd, aior, io_blist);
1859 * Move the blockif request back to the free list
1861 STAILQ_INSERT_TAIL(&p->iofhd, aior, io_flist);
1864 hdr->prdbc = aior->done;
1866 if (pending && !err) {
1867 atapi_read(p, slot, cfis, aior->done, hdr->prdtl - pending);
1871 if (!err && aior->done == aior->len) {
1872 tfd = ATA_S_READY | ATA_S_DSC;
1874 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1876 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1879 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1880 ahci_write_fis_d2h(p, slot, cfis, tfd);
1883 * This command is now complete.
1885 p->pending &= ~(1 << slot);
1887 ahci_check_stopped(p);
1889 pthread_mutex_unlock(&sc->mtx);
1890 DPRINTF("%s exit\n", __func__);
1894 pci_ahci_ioreq_init(struct ahci_port *pr)
1896 struct ahci_ioreq *vr;
1899 pr->ioqsz = blockif_queuesz(pr->bctx);
1900 pr->ioreq = calloc(pr->ioqsz, sizeof(struct ahci_ioreq));
1901 STAILQ_INIT(&pr->iofhd);
1904 * Add all i/o request entries to the free queue
1906 for (i = 0; i < pr->ioqsz; i++) {
1910 vr->io_req.br_callback = ata_ioreq_cb;
1912 vr->io_req.br_callback = atapi_ioreq_cb;
1913 vr->io_req.br_param = vr;
1914 STAILQ_INSERT_TAIL(&pr->iofhd, vr, io_flist);
1917 TAILQ_INIT(&pr->iobhd);
/*
 * Handle a guest write to a per-port AHCI register.  The port number
 * and register offset within the port are decoded from the ABAR offset.
 * NOTE(review): the register-dispatch structure (switch/cases) is only
 * partially visible here; comments annotate the visible fragments.
 */
1921 pci_ahci_port_write(struct pci_ahci_softc *sc, uint64_t offset, uint64_t value)
1923 int port = (offset - AHCI_OFFSET) / AHCI_STEP;
1924 offset = (offset - AHCI_OFFSET) % AHCI_STEP;
1925 struct ahci_port *p = &sc->port[port];
1927 DPRINTF("pci_ahci_port %d: write offset 0x%"PRIx64" value 0x%"PRIx64"\n",
1928 port, offset, value);
/* PxIE: mask off reserved bits, then re-evaluate interrupt state. */
1947 p->ie = value & 0xFDC000FF;
1948 ahci_generate_intr(sc);
/* PxCMD handling: start/stop of the command-list engine (ST/CR). */
1954 if (!(value & AHCI_P_CMD_ST)) {
/* Starting: map the guest command list from PxCLBU:PxCLB into host VA. */
1959 p->cmd |= AHCI_P_CMD_CR;
1960 clb = (uint64_t)p->clbu << 32 | p->clb;
1961 p->cmd_lst = paddr_guest2host(ahci_ctx(sc), clb,
1962 AHCI_CL_SIZE * AHCI_MAX_SLOTS);
/* FIS-receive enable: map the guest received-FIS area from PxFBU:PxFB. */
1965 if (value & AHCI_P_CMD_FRE) {
1968 p->cmd |= AHCI_P_CMD_FR;
1969 fb = (uint64_t)p->fbu << 32 | p->fb;
1970 /* we don't support FBSCP, so rfis size is 256Bytes */
1971 p->rfis = paddr_guest2host(ahci_ctx(sc), fb, 256);
1973 p->cmd &= ~AHCI_P_CMD_FR;
/* Command-list override (CLO): self-clearing bit per the AHCI spec. */
1976 if (value & AHCI_P_CMD_CLO) {
1978 p->cmd &= ~AHCI_P_CMD_CLO;
/* Kick command processing after the PxCMD update. */
1981 ahci_handle_port(p);
1987 WPRINTF("pci_ahci_port: read only registers 0x%"PRIx64"\n", offset);
/* PxSCTL: a DET reset request is honored only while the port is stopped. */
1991 if (!(p->cmd & AHCI_P_CMD_ST)) {
1992 if (value & ATA_SC_DET_RESET)
2004 ahci_handle_port(p);
/*
 * Handle a guest write to a global AHCI (generic host control) register.
 * NOTE(review): register dispatch is only partially visible here.
 */
2014 pci_ahci_host_write(struct pci_ahci_softc *sc, uint64_t offset, uint64_t value)
2016 DPRINTF("pci_ahci_host: write offset 0x%"PRIx64" value 0x%"PRIx64"\n",
2024 DPRINTF("pci_ahci_host: read only registers 0x%"PRIx64"\n", offset);
/* GHC: HR (controller reset) takes priority; otherwise latch IE and
   re-evaluate the interrupt line. */
2027 if (value & AHCI_GHC_HR)
2029 else if (value & AHCI_GHC_IE) {
2030 sc->ghc |= AHCI_GHC_IE;
2031 ahci_generate_intr(sc);
/* IS write (interrupt acknowledge): re-evaluate the interrupt line. */
2036 ahci_generate_intr(sc);
/*
 * BAR write entry point for the AHCI memory BAR (ABAR, BAR 5).
 * Dispatches to the global-register or per-port handler based on the
 * offset, holding the softc mutex across the access.
 */
2044 pci_ahci_write(struct vmctx *ctx, int vcpu, struct pci_devinst *pi,
2045 int baridx, uint64_t offset, int size, uint64_t value)
2047 struct pci_ahci_softc *sc = pi->pi_arg;
/* Only the AHCI memory BAR (BAR 5) is ever registered for this device. */
2049 assert(baridx == 5);
2052 pthread_mutex_lock(&sc->mtx);
/* Offsets below AHCI_OFFSET are global registers; the next
   sc->ports * AHCI_STEP bytes are the per-port register banks. */
2054 if (offset < AHCI_OFFSET)
2055 pci_ahci_host_write(sc, offset, value);
2056 else if (offset < AHCI_OFFSET + sc->ports * AHCI_STEP)
2057 pci_ahci_port_write(sc, offset, value);
2059 WPRINTF("pci_ahci: unknown i/o write offset 0x%"PRIx64"\n", offset);
2061 pthread_mutex_unlock(&sc->mtx);
/*
 * Read a global AHCI register.  The global registers are laid out in the
 * softc as a contiguous array of 32-bit words starting at sc->cap, so
 * the offset indexes directly into that array.
 */
2065 pci_ahci_host_read(struct pci_ahci_softc *sc, uint64_t offset)
2081 uint32_t *p = &sc->cap;
2082 p += (offset - AHCI_CAP) / sizeof(uint32_t);
2090 DPRINTF("pci_ahci_host: read offset 0x%"PRIx64" value 0x%x\n",
/*
 * Read a per-port AHCI register.  Port and register offset are decoded
 * the same way as in pci_ahci_port_write(); the port's registers are a
 * contiguous array of 32-bit words starting at the clb field.
 */
2097 pci_ahci_port_read(struct pci_ahci_softc *sc, uint64_t offset)
2100 int port = (offset - AHCI_OFFSET) / AHCI_STEP;
2101 offset = (offset - AHCI_OFFSET) % AHCI_STEP;
2121 uint32_t *p= &sc->port[port].clb;
2122 p += (offset - AHCI_P_CLB) / sizeof(uint32_t);
2131 DPRINTF("pci_ahci_port %d: read offset 0x%"PRIx64" value 0x%x\n",
2132 port, offset, value);
/*
 * BAR read entry point for the AHCI memory BAR (ABAR, BAR 5).  Mirrors
 * pci_ahci_write(): dispatch by offset under the softc mutex and return
 * the register value.
 */
2138 pci_ahci_read(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int baridx,
2139 uint64_t offset, int size)
2141 struct pci_ahci_softc *sc = pi->pi_arg;
/* Only BAR 5 is registered for this emulation. */
2144 assert(baridx == 5);
2147 pthread_mutex_lock(&sc->mtx);
/* Global registers first, then the per-port banks. */
2149 if (offset < AHCI_OFFSET)
2150 value = pci_ahci_host_read(sc, offset);
2151 else if (offset < AHCI_OFFSET + sc->ports * AHCI_STEP)
2152 value = pci_ahci_port_read(sc, offset);
2155 WPRINTF("pci_ahci: unknown i/o read offset 0x%"PRIx64"\n", offset);
2158 pthread_mutex_unlock(&sc->mtx);
/*
 * Common initialization for the ahci-hd / ahci-cd emulations: open the
 * backing image on port 0, build the controller capability registers,
 * and program the device's PCI config space.  'atapi' selects ATAPI
 * (CD-ROM) vs. ATA (disk) behavior for port 0.
 * NOTE(review): only fragmentary lines are visible; error/return paths
 * are partially elided.
 */
2164 pci_ahci_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts, int atapi)
2166 char bident[sizeof("XX:X:X")];
2167 struct blockif_ctxt *bctxt;
2168 struct pci_ahci_softc *sc;
2176 fprintf(stderr, "pci_ahci: backing device required\n");
/* NOTE(review): debug stream opened at a fixed world-readable path —
   confirm this is intentional and not leftover debugging. */
2181 dbg = fopen("/tmp/log", "w+");
2184 sc = calloc(1, sizeof(struct pci_ahci_softc));
2187 sc->ports = MAX_PORTS;
2190 * Only use port 0 for a backing device. All other ports will be
2193 sc->port[0].atapi = atapi;
2196 * Attempt to open the backing image. Use the PCI
2197 * slot/func for the identifier string.
2199 snprintf(bident, sizeof(bident), "%d:%d", pi->pi_slot, pi->pi_func);
2200 bctxt = blockif_open(opts, bident);
2201 if (bctxt == NULL) {
2205 sc->port[0].bctx = bctxt;
2206 sc->port[0].pr_sc = sc;
2209 * Create an identifier for the backing file. Use parts of the
2210 * md5 sum of the filename
2213 MD5Update(&mdctx, opts, strlen(opts));
2214 MD5Final(digest, &mdctx);
/* Fixed-length "BHYVE-XXXX-XXXX-XXXX" serial from the MD5 of the path.
   NOTE(review): unbounded sprintf — verify ident[] is large enough. */
2215 sprintf(sc->port[0].ident, "BHYVE-%02X%02X-%02X%02X-%02X%02X",
2216 digest[0], digest[1], digest[2], digest[3], digest[4], digest[5]);
2219 * Allocate blockif request structures and add them
2222 pci_ahci_ioreq_init(&sc->port[0]);
2224 pthread_mutex_init(&sc->mtx, NULL);
2226 /* Intel ICH8 AHCI */
/* Command-slot count mirrors the blockif queue depth sized above. */
2227 slots = sc->port[0].ioqsz;
/* CAP: 64-bit addressing, NCQ, SNTF, staggered spin-up, etc.; ISS=0x3
   sets the interface speed field; the low bits hold ports-1. */
2231 sc->cap = AHCI_CAP_64BIT | AHCI_CAP_SNCQ | AHCI_CAP_SSNTF |
2232 AHCI_CAP_SMPS | AHCI_CAP_SSS | AHCI_CAP_SALP |
2233 AHCI_CAP_SAL | AHCI_CAP_SCLO | (0x3 << AHCI_CAP_ISS_SHIFT)|
2234 AHCI_CAP_PMD | AHCI_CAP_SSC | AHCI_CAP_PSC |
2235 (slots << AHCI_CAP_NCS_SHIFT) | AHCI_CAP_SXS | (sc->ports - 1);
2237 /* Only port 0 implemented */
2240 sc->cap2 = AHCI_CAP2_APST;
/* PCI identity: Intel ICH8 AHCI controller (8086:2821), SATA class,
   AHCI 1.0 programming interface, MSI-capable, ABAR in BAR 5. */
2243 pci_set_cfgdata16(pi, PCIR_DEVICE, 0x2821);
2244 pci_set_cfgdata16(pi, PCIR_VENDOR, 0x8086);
2245 pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_STORAGE);
2246 pci_set_cfgdata8(pi, PCIR_SUBCLASS, PCIS_STORAGE_SATA);
2247 pci_set_cfgdata8(pi, PCIR_PROGIF, PCIP_STORAGE_SATA_AHCI_1_0);
2248 pci_emul_add_msicap(pi, 1);
2249 pci_emul_alloc_bar(pi, 5, PCIBAR_MEM32,
2250 AHCI_OFFSET + sc->ports * AHCI_STEP);
2252 pci_lintr_request(pi);
/* Error path: release the backing image opened above. */
2256 blockif_close(sc->port[0].bctx);
/* Init entry for the "ahci-hd" emulation: plain ATA disk (atapi = 0). */
2264 pci_ahci_hd_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
2267 return (pci_ahci_init(ctx, pi, opts, 0));
/* Init entry for the "ahci-cd" emulation: ATAPI device (atapi = 1). */
2271 pci_ahci_atapi_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
2274 return (pci_ahci_init(ctx, pi, opts, 1));
2278 * Use separate emulation names to distinguish drive and atapi devices
/* PCI device-emulation descriptor for AHCI hard disks ("ahci-hd"). */
2280 struct pci_devemu pci_de_ahci_hd = {
2281 .pe_emu = "ahci-hd",
2282 .pe_init = pci_ahci_hd_init,
2283 .pe_barwrite = pci_ahci_write,
2284 .pe_barread = pci_ahci_read
/* Register the emulation so bhyve's PCI framework can find it by name. */
2286 PCI_EMUL_SET(pci_de_ahci_hd);
/* PCI device-emulation descriptor for AHCI ATAPI CD-ROMs ("ahci-cd"). */
2288 struct pci_devemu pci_de_ahci_cd = {
2289 .pe_emu = "ahci-cd",
2290 .pe_init = pci_ahci_atapi_init,
2291 .pe_barwrite = pci_ahci_write,
2292 .pe_barread = pci_ahci_read
/* Register the emulation so bhyve's PCI framework can find it by name. */
2294 PCI_EMUL_SET(pci_de_ahci_cd);