2 * Copyright (c) 2013 Zhixiang Yu <zcore@freebsd.org>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
32 #include <sys/param.h>
33 #include <sys/linker_set.h>
36 #include <sys/ioctl.h>
39 #include <sys/endian.h>
51 #include <pthread_np.h>
/* Controller geometry: emulates an Intel ICH8-class HBA. */
60 #define MAX_PORTS 6 /* Intel ICH8 AHCI supports 6 ports */
/* Device signatures reported in PxSIG after reset (AHCI spec 3.3.9). */
62 #define PxSIG_ATA 0x00000101 /* ATA drive */
63 #define PxSIG_ATAPI 0xeb140101 /* ATAPI drive */
/* SATA Frame Information Structure (FIS) type codes. */
66 FIS_TYPE_REGH2D = 0x27, /* Register FIS - host to device */
67 FIS_TYPE_REGD2H = 0x34, /* Register FIS - device to host */
68 FIS_TYPE_DMAACT = 0x39, /* DMA activate FIS - device to host */
69 FIS_TYPE_DMASETUP = 0x41, /* DMA setup FIS - bidirectional */
70 FIS_TYPE_DATA = 0x46, /* Data FIS - bidirectional */
71 FIS_TYPE_BIST = 0x58, /* BIST activate FIS - bidirectional */
72 FIS_TYPE_PIOSETUP = 0x5F, /* PIO setup FIS - device to host */
73 FIS_TYPE_SETDEVBITS = 0xA1, /* Set dev bits FIS - device to host */
/* SCSI/ATAPI command opcodes handled by the packet-command dispatcher. */
79 #define TEST_UNIT_READY 0x00
80 #define REQUEST_SENSE 0x03
82 #define START_STOP_UNIT 0x1B
83 #define PREVENT_ALLOW 0x1E
84 #define READ_CAPACITY 0x25
86 #define POSITION_TO_ELEMENT 0x2B
88 #define GET_EVENT_STATUS_NOTIFICATION 0x4A
89 #define MODE_SENSE_10 0x5A
90 #define REPORT_LUNS 0xA0
95 * SCSI mode page codes
97 #define MODEPAGE_RW_ERROR_RECOVERY 0x01
98 #define MODEPAGE_CD_CAPABILITIES 0x2A
/* ATA SET FEATURES subcommands for SATA feature enable/disable. */
103 #define ATA_SF_ENAB_SATA_SF 0x10
104 #define ATA_SATA_SF_AN 0x05
105 #define ATA_SF_DIS_SATA_SF 0x90
/* Debug/warning printf wrappers; DPRINTF compiles away when disabled. */
112 #define DPRINTF(format, arg...) do{fprintf(dbg, format, ##arg);fflush(dbg);}while(0)
114 #define DPRINTF(format, arg...)
116 #define WPRINTF(format, arg...) printf(format, ##arg)
/* Per-request state: links a blockif request to its owning port and queues. */
119 struct blockif_req io_req;
120 struct ahci_port *io_pr;
121 STAILQ_ENTRY(ahci_ioreq) io_flist;	/* free-list linkage */
122 TAILQ_ENTRY(ahci_ioreq) io_blist;	/* busy-list linkage */
/* Per-port state (fragment): backing block device and softc back-pointer. */
131 struct blockif_ctxt *bctx;
132 struct pci_ahci_softc *pr_sc;
/* Saved copy of the failing command FIS, served via READ LOG EXT page 0x10. */
140 uint8_t err_cfis[20];
/* Pool of pre-allocated i/o requests plus the free/busy queue heads. */
166 struct ahci_ioreq *ioreq;
168 STAILQ_HEAD(ahci_fhead, ahci_ioreq) iofhd;	/* free requests */
169 TAILQ_HEAD(ahci_bhead, ahci_ioreq) iobhd;	/* in-flight requests */
/* Guest-visible AHCI command-list header (one per command slot). */
172 struct ahci_cmd_hdr {
177 uint32_t reserved[4];
/* Physical Region Descriptor Table entry; dbc holds byte count minus one. */
180 struct ahci_prdt_entry {
183 #define DBCMASK 0x3fffff
/* Controller soft state: PCI instance plus per-port array. */
187 struct pci_ahci_softc {
188 struct pci_devinst *asc_pi;
203 struct ahci_port port[MAX_PORTS];
/* Shorthand for the VM context used for guest-physical address translation. */
205 #define ahci_ctx(sc) ((sc)->asc_pi->pi_vmctx)
/*
 * Convert a CD logical block address to MSF (minute/second/frame)
 * form: 75 frames per second, 60 seconds per minute.
 */
207 static inline void lba_to_msf(uint8_t *buf, int lba)
210 	buf[0] = (lba / 75) / 60;
211 	buf[1] = (lba / 75) % 60;
216  * generate HBA intr depending on whether or not ports within
217  * the controller have an interrupt pending.
220 ahci_generate_intr(struct pci_ahci_softc *sc)
222 	struct pci_devinst *pi;
/* Gather per-port pending interrupt status into the global IS register. */
227 	for (i = 0; i < sc->ports; i++) {
228 		struct ahci_port *pr;
234 	DPRINTF("%s %x\n", __func__, sc->is);
/* Only deliver an interrupt when GHC.IE (global interrupt enable) is set. */
236 	if (sc->is && (sc->ghc & AHCI_GHC_IE)) {
237 		if (pci_msi_enabled(pi)) {
239 			 * Generate an MSI interrupt on every edge
241 			pci_generate_msi(pi, 0);
242 		} else if (!sc->lintr) {
244 			 * Only generate a pin-based interrupt if one wasn't
248 			pci_lintr_assert(pi);
250 	} else if (sc->lintr) {
252 		 * No interrupts: deassert pin-based signal if it had
255 		pci_lintr_deassert(pi);
/*
 * Copy a received FIS of type 'ft' into the port's guest-mapped
 * receive-FIS area at the type-specific offset, then raise an interrupt.
 * Silently drops the FIS when no receive area is mapped or FIS receive
 * (PxCMD.FRE) is disabled.
 */
261 ahci_write_fis(struct ahci_port *p, enum sata_fis_type ft, uint8_t *fis)
263 	int offset, len, irq;
265 	if (p->rfis == NULL || !(p->cmd & AHCI_P_CMD_FRE))
/* Each FIS type lands at a fixed offset/length within the RFIS block. */
269 	case FIS_TYPE_REGD2H:
274 	case FIS_TYPE_SETDEVBITS:
279 	case FIS_TYPE_PIOSETUP:
285 		WPRINTF("unsupported fis type %d\n", ft);
288 	memcpy(p->rfis + offset, fis, len);
291 	ahci_generate_intr(p->pr_sc);
/* Post a minimal PIO Setup FIS to the guest's receive-FIS area. */
296 ahci_write_fis_piosetup(struct ahci_port *p)
300 	memset(fis, 0, sizeof(fis));
301 	fis[0] = FIS_TYPE_PIOSETUP;
302 	ahci_write_fis(p, FIS_TYPE_PIOSETUP, fis);
/*
 * Complete an NCQ command 'slot' with taskfile result 'tfd' by posting a
 * Set Device Bits FIS.  On error, latch the failing command FIS so the
 * guest can retrieve it via READ LOG EXT page 0x10, and raise a task-file
 * error interrupt.  Always clears the slot's SACT bit.
 */
306 ahci_write_fis_sdb(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t tfd)
311 	error = (tfd >> 8) & 0xff;
312 	memset(fis, 0, sizeof(fis));
313 	fis[0] = FIS_TYPE_SETDEVBITS;
317 	if (fis[2] & ATA_S_ERROR) {
318 		p->is |= AHCI_P_IX_TFE;
319 		p->err_cfis[0] = slot;
/* NOTE(review): masking with 0x77 drops status bits 3 and 7; the D2H error
 * path below stores tfd & 0xff — confirm 0x77 is intentional here. */
320 		p->err_cfis[2] = tfd & 0x77;
321 		p->err_cfis[3] = error;
322 		memcpy(&p->err_cfis[4], cfis + 4, 16);
/* SActive bitmap in the FIS payload identifies the completed NCQ tag. */
324 	*(uint32_t *)(fis + 4) = (1 << slot);
325 	p->sact &= ~(1 << slot);
328 	ahci_write_fis(p, FIS_TYPE_SETDEVBITS, fis);
/*
 * Complete a non-NCQ command 'slot' with taskfile result 'tfd' by posting
 * a Register D2H FIS.  On error, latch the failing command FIS for
 * READ LOG EXT (err_cfis[0] = 0x80 marks a non-queued command) and raise
 * a task-file error interrupt.  Always clears the slot's CI bit.
 */
332 ahci_write_fis_d2h(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t tfd)
337 	error = (tfd >> 8) & 0xff;
338 	memset(fis, 0, sizeof(fis));
339 	fis[0] = FIS_TYPE_REGD2H;
353 	if (fis[2] & ATA_S_ERROR) {
354 		p->is |= AHCI_P_IX_TFE;
355 		p->err_cfis[0] = 0x80;
356 		p->err_cfis[2] = tfd & 0xff;
357 		p->err_cfis[3] = error;
358 		memcpy(&p->err_cfis[4], cfis + 4, 16);
360 	p->ci &= ~(1 << slot);
362 	ahci_write_fis(p, FIS_TYPE_REGD2H, fis);
/* Post the initial Register D2H FIS a device sends after reset. */
366 ahci_write_reset_fis_d2h(struct ahci_port *p)
370 	memset(fis, 0, sizeof(fis));
371 	fis[0] = FIS_TYPE_REGD2H;
379 	ahci_write_fis(p, FIS_TYPE_REGD2H, fis);
383 ahci_check_stopped(struct ahci_port *p)
386 	 * If we are no longer processing the command list and nothing
387 	 * is in-flight, clear the running bit, the current command
388 	 * slot, the command issue and active bits.
390 	if (!(p->cmd & AHCI_P_CMD_ST)) {
391 		if (p->pending == 0) {
392 			p->cmd &= ~(AHCI_P_CMD_CR | AHCI_P_CMD_CCS_MASK);
/*
 * Stop command processing on a port: cancel every in-flight blockif
 * request, retire its slot state, and return the request to the free
 * list.  Caller must hold the softc mutex (asserted below).
 */
400 ahci_port_stop(struct ahci_port *p)
402 	struct ahci_ioreq *aior;
408 	assert(pthread_mutex_isowned_np(&p->pr_sc->mtx));
410 	TAILQ_FOREACH(aior, &p->iobhd, io_blist) {
412 		 * Try to cancel the outstanding blockif request.
414 		error = blockif_cancel(p->bctx, &aior->io_req);
/* NCQ commands are tracked in SACT; everything else in CI. */
420 		if (cfis[2] == ATA_WRITE_FPDMA_QUEUED ||
421 		    cfis[2] == ATA_READ_FPDMA_QUEUED ||
422 		    cfis[2] == ATA_SEND_FPDMA_QUEUED)
426 			p->sact &= ~(1 << slot);
428 			p->ci &= ~(1 << slot);
431 		 * This command is now done.
433 		p->pending &= ~(1 << slot);
436 		 * Delete the blockif request from the busy list
438 		TAILQ_REMOVE(&p->iobhd, aior, io_blist);
441 		 * Move the blockif request back to the free list
443 		STAILQ_INSERT_TAIL(&p->iofhd, aior, io_flist);
446 	ahci_check_stopped(p);
/*
 * Reset a single port to power-on defaults.  Without a backing device
 * the port reports "no device present"; with one it reports an active
 * PHY, negotiated speed, and the appropriate ATA/ATAPI signature,
 * ending with the post-reset D2H FIS.
 */
450 ahci_port_reset(struct ahci_port *pr)
454 	pr->xfermode = ATA_UDMA6;
455 	pr->mult_sectors = 128;
458 		pr->ssts = ATA_SS_DET_NO_DEVICE;
459 		pr->sig = 0xFFFFFFFF;
463 	pr->ssts = ATA_SS_DET_PHY_ONLINE | ATA_SS_IPM_ACTIVE;
/* Honor a speed limit programmed via PxSCTL, otherwise claim Gen3. */
464 	if (pr->sctl & ATA_SC_SPD_MASK)
465 		pr->ssts |= (pr->sctl & ATA_SC_SPD_MASK);
467 		pr->ssts |= ATA_SS_SPD_GEN3;
468 	pr->tfd = (1 << 8) | ATA_S_DSC | ATA_S_DMA;
471 		pr->tfd |= ATA_S_READY;
473 		pr->sig = PxSIG_ATAPI;
474 	ahci_write_reset_fis_d2h(pr);
/*
 * Full HBA reset: restore global registers to defaults, drop any
 * asserted legacy interrupt, and reset every port.  CPS (cold presence
 * state) is set only on ports with a backing device.
 */
478 ahci_reset(struct pci_ahci_softc *sc)
482 	sc->ghc = AHCI_GHC_AE;
486 		pci_lintr_deassert(sc->asc_pi);
490 	for (i = 0; i < sc->ports; i++) {
493 		sc->port[i].cmd = (AHCI_P_CMD_SUD | AHCI_P_CMD_POD);
494 		if (sc->port[i].bctx)
495 			sc->port[i].cmd |= AHCI_P_CMD_CPS;
496 		sc->port[i].sctl = 0;
497 		ahci_port_reset(&sc->port[i]);
/*
 * Copy a string into an ATA IDENTIFY field.  ATA strings are stored as
 * big-endian 16-bit words, hence the byte swap via 'i ^ 1'.
 */
502 ata_string(uint8_t *dest, const char *src, int len)
506 	for (i = 0; i < len; i++) {
508 			dest[i ^ 1] = *src++;
/* Copy a string into a SCSI INQUIRY field (plain byte order, space pad). */
515 atapi_string(uint8_t *dest, const char *src, int len)
519 	for (i = 0; i < len; i++) {
528  * Build up the iovec based on the PRDT, 'done' and 'len'.
531 ahci_build_iov(struct ahci_port *p, struct ahci_ioreq *aior,
532     struct ahci_prdt_entry *prdt, uint16_t prdtl)
534 	struct blockif_req *breq = &aior->io_req;
535 	int i, j, skip, todo, left, extra;
538 	/* Copy part of PRDT between 'done' and 'len' bytes into the iov. */
540 	left = aior->len - aior->done;
/* Walk the PRDT, mapping each guest buffer; bounded by BLOCKIF_IOV_MAX. */
542 	for (i = 0, j = 0; i < prdtl && j < BLOCKIF_IOV_MAX && left > 0;
544 		dbcsz = (prdt->dbc & DBCMASK) + 1;
545 		/* Skip already done part of the PRDT */
553 		breq->br_iov[j].iov_base = paddr_guest2host(ahci_ctx(p->pr_sc),
554 		    prdt->dba + skip, dbcsz);
555 		breq->br_iov[j].iov_len = dbcsz;
562 	/* If we got limited by IOV length, round I/O down to sector size. */
563 	if (j == BLOCKIF_IOV_MAX) {
564 		extra = todo % blockif_sectsz(p->bctx);
/* Trim whole trailing iov entries (and then a partial one) to drop 'extra'. */
568 		if (breq->br_iov[j - 1].iov_len > extra) {
569 			breq->br_iov[j - 1].iov_len -= extra;
572 			extra -= breq->br_iov[j - 1].iov_len;
578 	breq->br_resid = todo;
/* 'more' records that a continuation is needed when this chunk completes. */
580 	aior->more = (aior->done < aior->len && i < prdtl);
/*
 * Handle an ATA read/write command (PIO, multi, DMA, and NCQ variants).
 * Decodes the LBA and length from the command FIS layout appropriate to
 * the command, builds the scatter/gather list, and queues the request to
 * the blockif backend.  'done' is the byte offset already transferred by
 * a previous chunk of the same command (continuation path).
 */
584 ahci_handle_rw(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t done)
586 	struct ahci_ioreq *aior;
587 	struct blockif_req *breq;
588 	struct ahci_prdt_entry *prdt;
589 	struct ahci_cmd_hdr *hdr;
592 	int err, ncq, readop;
594 	prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
595 	hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
/* Classify direction: every write opcode clears 'readop'. */
599 	if (cfis[2] == ATA_WRITE || cfis[2] == ATA_WRITE48 ||
600 	    cfis[2] == ATA_WRITE_MUL || cfis[2] == ATA_WRITE_MUL48 ||
601 	    cfis[2] == ATA_WRITE_DMA || cfis[2] == ATA_WRITE_DMA48 ||
602 	    cfis[2] == ATA_WRITE_FPDMA_QUEUED)
/* NCQ FIS layout: 48-bit LBA; sector count lives in bytes 11 (hi) and 3 (lo). */
605 	if (cfis[2] == ATA_WRITE_FPDMA_QUEUED ||
606 	    cfis[2] == ATA_READ_FPDMA_QUEUED) {
607 		lba = ((uint64_t)cfis[10] << 40) |
608 		    ((uint64_t)cfis[9] << 32) |
609 		    ((uint64_t)cfis[8] << 24) |
610 		    ((uint64_t)cfis[6] << 16) |
611 		    ((uint64_t)cfis[5] << 8) |
613 		len = cfis[11] << 8 | cfis[3];
/* 48-bit non-NCQ layout: count is in bytes 13 (hi) and 12 (lo). */
617 	} else if (cfis[2] == ATA_READ48 || cfis[2] == ATA_WRITE48 ||
618 	    cfis[2] == ATA_READ_MUL48 || cfis[2] == ATA_WRITE_MUL48 ||
619 	    cfis[2] == ATA_READ_DMA48 || cfis[2] == ATA_WRITE_DMA48) {
620 		lba = ((uint64_t)cfis[10] << 40) |
621 		    ((uint64_t)cfis[9] << 32) |
622 		    ((uint64_t)cfis[8] << 24) |
623 		    ((uint64_t)cfis[6] << 16) |
624 		    ((uint64_t)cfis[5] << 8) |
626 		len = cfis[13] << 8 | cfis[12];
/* Legacy 28-bit CHS/LBA layout. */
630 		lba = ((cfis[7] & 0xf) << 24) | (cfis[6] << 16) |
631 		    (cfis[5] << 8) | cfis[4];
/* Convert sectors to bytes for the blockif layer. */
636 	lba *= blockif_sectsz(p->bctx);
637 	len *= blockif_sectsz(p->bctx);
639 	/* Pull request off free list */
640 	aior = STAILQ_FIRST(&p->iofhd);
641 	assert(aior != NULL);
642 	STAILQ_REMOVE_HEAD(&p->iofhd, io_flist);
648 	breq = &aior->io_req;
649 	breq->br_offset = lba + done;
650 	ahci_build_iov(p, aior, prdt, hdr->prdtl);
652 	/* Mark this command in-flight. */
653 	p->pending |= 1 << slot;
655 	/* Stuff request onto busy list. */
656 	TAILQ_INSERT_HEAD(&p->iobhd, aior, io_blist);
659 		err = blockif_read(p->bctx, breq);
661 		err = blockif_write(p->bctx, breq);
665 		p->ci &= ~(1 << slot);
/*
 * Handle FLUSH CACHE / FLUSH CACHE EXT by queueing an asynchronous
 * flush to the blockif backend; completion is reported via ata_ioreq_cb.
 */
669 ahci_handle_flush(struct ahci_port *p, int slot, uint8_t *cfis)
671 	struct ahci_ioreq *aior;
672 	struct blockif_req *breq;
676 	 * Pull request off free list
678 	aior = STAILQ_FIRST(&p->iofhd);
679 	assert(aior != NULL);
680 	STAILQ_REMOVE_HEAD(&p->iofhd, io_flist);
686 	breq = &aior->io_req;
689 	 * Mark this command in-flight.
691 	p->pending |= 1 << slot;
694 	 * Stuff request onto busy list
696 	TAILQ_INSERT_HEAD(&p->iobhd, aior, io_blist);
698 	err = blockif_flush(p->bctx, breq);
/*
 * Copy up to 'len' bytes of guest data described by the command's PRDT
 * into a host buffer (guest -> host direction).
 */
703 read_prdt(struct ahci_port *p, int slot, uint8_t *cfis,
706 	struct ahci_cmd_hdr *hdr;
707 	struct ahci_prdt_entry *prdt;
711 	hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
714 	prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
715 	for (i = 0; i < hdr->prdtl && len; i++) {
/* dbc stores byte count minus one. */
720 		dbcsz = (prdt->dbc & DBCMASK) + 1;
721 		ptr = paddr_guest2host(ahci_ctx(p->pr_sc), prdt->dba, dbcsz);
722 		sublen = len < dbcsz ? len : dbcsz;
723 		memcpy(to, ptr, sublen);
/*
 * Handle a DATA SET MANAGEMENT (TRIM) request, arriving either as the
 * classic DSM command or as SEND FPDMA QUEUED.  Parses one LBA-range
 * entry from the guest's TRIM payload and queues a blockif delete;
 * 'done' tracks progress across entries on the continuation path.
 */
731 ahci_handle_dsm_trim(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t done)
733 	struct ahci_ioreq *aior;
734 	struct blockif_req *breq;
/* Payload length (in 512-byte blocks) comes from different FIS bytes
 * depending on which command variant carried the TRIM. */
741 	if (cfis[2] == ATA_DATA_SET_MANAGEMENT) {
742 		len = (uint16_t)cfis[13] << 8 | cfis[12];
744 	} else { /* ATA_SEND_FPDMA_QUEUED */
745 		len = (uint16_t)cfis[11] << 8 | cfis[3];
748 	read_prdt(p, slot, cfis, buf, sizeof(buf));
/* Each 8-byte entry: 48-bit starting LBA + 16-bit sector count. */
752 	elba = ((uint64_t)entry[5] << 40) |
753 	    ((uint64_t)entry[4] << 32) |
754 	    ((uint64_t)entry[3] << 24) |
755 	    ((uint64_t)entry[2] << 16) |
756 	    ((uint64_t)entry[1] << 8) |
758 	elen = (uint16_t)entry[7] << 8 | entry[6];
/* A zero-length entry terminates the list: complete the command now. */
762 		ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
763 		p->pending &= ~(1 << slot);
764 		ahci_check_stopped(p);
771 	 * Pull request off free list
773 	aior = STAILQ_FIRST(&p->iofhd);
774 	assert(aior != NULL);
775 	STAILQ_REMOVE_HEAD(&p->iofhd, io_flist);
780 	aior->more = (len != done);
782 	breq = &aior->io_req;
783 	breq->br_offset = elba * blockif_sectsz(p->bctx);
784 	breq->br_resid = elen * blockif_sectsz(p->bctx);
787 	 * Mark this command in-flight.
789 	p->pending |= 1 << slot;
792 	 * Stuff request onto busy list
794 	TAILQ_INSERT_HEAD(&p->iobhd, aior, io_blist);
796 	err = blockif_delete(p->bctx, breq);
/*
 * Copy up to 'len' bytes from a host buffer into the guest buffers
 * described by the command's PRDT (host -> guest direction), recording
 * the byte count transferred in the command header's prdbc field.
 */
801 write_prdt(struct ahci_port *p, int slot, uint8_t *cfis,
804 	struct ahci_cmd_hdr *hdr;
805 	struct ahci_prdt_entry *prdt;
809 	hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
812 	prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
813 	for (i = 0; i < hdr->prdtl && len; i++) {
818 		dbcsz = (prdt->dbc & DBCMASK) + 1;
819 		ptr = paddr_guest2host(ahci_ctx(p->pr_sc), prdt->dba, dbcsz);
820 		sublen = len < dbcsz ? len : dbcsz;
821 		memcpy(ptr, from, sublen);
826 	hdr->prdbc = size - len;
/*
 * Fill in the trailing checksum byte so the buffer's bytes sum to zero
 * modulo 256 (required for IDENTIFY data and log pages).
 */
830 ahci_checksum(uint8_t *buf, int size)
835 	for (i = 0; i < size - 1; i++)
837 	buf[size - 1] = 0x100 - sum;
/*
 * Handle READ LOG EXT / READ LOG DMA EXT for log page 0x10 (NCQ error
 * log): return the saved error command FIS with a valid checksum.
 * Any other page, an ATAPI device, or a malformed request is aborted.
 */
841 ahci_handle_read_log(struct ahci_port *p, int slot, uint8_t *cfis)
843 	struct ahci_cmd_hdr *hdr;
846 	hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
847 	if (p->atapi || hdr->prdtl == 0 || cfis[4] != 0x10 ||
848 	    cfis[5] != 0 || cfis[9] != 0 || cfis[12] != 1 || cfis[13] != 0) {
849 		ahci_write_fis_d2h(p, slot, cfis,
850 		    (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
854 	memset(buf, 0, sizeof(buf));
855 	memcpy(buf, p->err_cfis, sizeof(p->err_cfis));
856 	ahci_checksum(buf, sizeof(buf));
/* Only the PIO variant gets a PIO Setup FIS before the data. */
858 	if (cfis[2] == ATA_READ_LOG_EXT)
859 		ahci_write_fis_piosetup(p);
860 	write_prdt(p, slot, cfis, (void *)buf, sizeof(buf));
861 	ahci_write_fis_d2h(p, slot, cfis, ATA_S_DSC | ATA_S_READY);
/*
 * Handle ATA IDENTIFY DEVICE: synthesize the 512-byte identify data from
 * the backing block device's geometry and capabilities (size, sector
 * sizes, TRIM support, transfer modes, NCQ/SATA features), checksum it,
 * and transfer it to the guest via the PRDT.  Aborted for ATAPI devices
 * or when no data buffer was supplied.
 */
865 handle_identify(struct ahci_port *p, int slot, uint8_t *cfis)
867 	struct ahci_cmd_hdr *hdr;
869 	hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
870 	if (p->atapi || hdr->prdtl == 0) {
871 		ahci_write_fis_d2h(p, slot, cfis,
872 		    (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
876 		int sectsz, psectsz, psectoff, candelete, ro;
/* Query the backing device once; all identify words derive from these. */
880 		ro = blockif_is_ro(p->bctx);
881 		candelete = blockif_candelete(p->bctx);
882 		sectsz = blockif_sectsz(p->bctx);
883 		sectors = blockif_size(p->bctx) / sectsz;
884 		blockif_chs(p->bctx, &cyl, &heads, &sech);
885 		blockif_psectsz(p->bctx, &psectsz, &psectoff);
886 		memset(buf, 0, sizeof(buf));
/* Words 10-46: serial, firmware revision, model (ATA big-endian strings). */
891 		ata_string((uint8_t *)(buf+10), p->ident, 20);
892 		ata_string((uint8_t *)(buf+23), "001", 8);
893 		ata_string((uint8_t *)(buf+27), "BHYVE SATA DISK", 40);
894 		buf[47] = (0x8000 | 128);
896 		buf[49] = (1 << 8 | 1 << 9 | 1 << 11);
898 		buf[53] = (1 << 1 | 1 << 2);
900 		buf[59] = (0x100 | p->mult_sectors);
/* Words 60-61: 28-bit LBA capacity (capped). */
901 		if (sectors <= 0x0fffffff) {
903 			buf[61] = (sectors >> 16);
909 		if (p->xfermode & ATA_WDMA0)
910 			buf[63] |= (1 << ((p->xfermode & 7) + 8));
918 		buf[76] = (ATA_SATA_GEN1 | ATA_SATA_GEN2 | ATA_SATA_GEN3 |
920 		buf[77] = (ATA_SUPPORT_RCVSND_FPDMA_QUEUED |
921 		    (p->ssts & ATA_SS_SPD_MASK) >> 3);
924 		buf[82] = (ATA_SUPPORT_POWERMGT | ATA_SUPPORT_WRITECACHE|
925 		    ATA_SUPPORT_LOOKAHEAD | ATA_SUPPORT_NOP);
926 		buf[83] = (ATA_SUPPORT_ADDRESS48 | ATA_SUPPORT_FLUSHCACHE |
927 		    ATA_SUPPORT_FLUSHCACHE48 | 1 << 14);
929 		buf[85] = (ATA_SUPPORT_POWERMGT | ATA_SUPPORT_WRITECACHE|
930 		    ATA_SUPPORT_LOOKAHEAD | ATA_SUPPORT_NOP);
931 		buf[86] = (ATA_SUPPORT_ADDRESS48 | ATA_SUPPORT_FLUSHCACHE |
932 		    ATA_SUPPORT_FLUSHCACHE48 | 1 << 15);
935 		if (p->xfermode & ATA_UDMA0)
936 			buf[88] |= (1 << ((p->xfermode & 7) + 8));
/* Words 100-103: full 48-bit LBA capacity. */
938 		buf[101] = (sectors >> 16);
939 		buf[102] = (sectors >> 32);
940 		buf[103] = (sectors >> 48);
/* Advertise TRIM only for writable devices that support delete. */
941 		if (candelete && !ro) {
942 			buf[69] |= ATA_SUPPORT_RZAT | ATA_SUPPORT_DRAT;
944 			buf[169] = ATA_SUPPORT_DSM_TRIM;
/* Words 106/209: physical/logical sector size relationship and alignment. */
948 		if (psectsz > sectsz) {
950 			buf[106] |= ffsl(psectsz / sectsz) - 1;
951 			buf[209] |= (psectoff / sectsz);
955 			buf[117] = sectsz / 2;
956 			buf[118] = ((sectsz / 2) >> 16);
958 		buf[119] = (ATA_SUPPORT_RWLOGDMAEXT | 1 << 14);
959 		buf[120] = (ATA_SUPPORT_RWLOGDMAEXT | 1 << 14);
962 		ahci_checksum((uint8_t *)buf, sizeof(buf));
963 		ahci_write_fis_piosetup(p);
964 		write_prdt(p, slot, cfis, (void *)buf, sizeof(buf));
965 		ahci_write_fis_d2h(p, slot, cfis, ATA_S_DSC | ATA_S_READY);
/*
 * Handle IDENTIFY PACKET DEVICE for the emulated DVD-ROM: build the
 * ATAPI identify data (device type, model strings, DMA/UDMA modes,
 * SATA capabilities), checksum it, and return it to the guest.
 * Aborted (in the elided branch) when the port is not ATAPI.
 */
970 handle_atapi_identify(struct ahci_port *p, int slot, uint8_t *cfis)
973 		ahci_write_fis_d2h(p, slot, cfis,
974 		    (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
978 		memset(buf, 0, sizeof(buf));
/* Word 0: ATAPI device, CD/DVD device type, removable, 12-byte packets. */
979 		buf[0] = (2 << 14 | 5 << 8 | 1 << 7 | 2 << 5);
980 		ata_string((uint8_t *)(buf+10), p->ident, 20);
981 		ata_string((uint8_t *)(buf+23), "001", 8);
982 		ata_string((uint8_t *)(buf+27), "BHYVE SATA DVD ROM", 40);
983 		buf[49] = (1 << 9 | 1 << 8);
984 		buf[50] = (1 << 14 | 1);
985 		buf[53] = (1 << 2 | 1 << 1);
988 		if (p->xfermode & ATA_WDMA0)
989 			buf[63] |= (1 << ((p->xfermode & 7) + 8));
995 		buf[76] = (ATA_SATA_GEN1 | ATA_SATA_GEN2 | ATA_SATA_GEN3);
996 		buf[77] = ((p->ssts & ATA_SS_SPD_MASK) >> 3);
999 		buf[82] = (ATA_SUPPORT_POWERMGT | ATA_SUPPORT_PACKET |
1000 		    ATA_SUPPORT_RESET | ATA_SUPPORT_NOP);
1001 		buf[83] = (1 << 14);
1002 		buf[84] = (1 << 14);
1003 		buf[85] = (ATA_SUPPORT_POWERMGT | ATA_SUPPORT_PACKET |
1004 		    ATA_SUPPORT_RESET | ATA_SUPPORT_NOP);
1005 		buf[87] = (1 << 14);
1007 		if (p->xfermode & ATA_UDMA0)
1008 			buf[88] |= (1 << ((p->xfermode & 7) + 8));
1011 		ahci_checksum((uint8_t *)buf, sizeof(buf));
1012 		ahci_write_fis_piosetup(p);
1013 		write_prdt(p, slot, cfis, (void *)buf, sizeof(buf));
1014 		ahci_write_fis_d2h(p, slot, cfis, ATA_S_DSC | ATA_S_READY);
/*
 * Handle SCSI INQUIRY: serve the standard inquiry data (vendor/product/
 * revision strings) or, for VPD requests, only the supported-pages page;
 * other VPD pages fail with ILLEGAL REQUEST sense.
 */
1019 atapi_inquiry(struct ahci_port *p, int slot, uint8_t *cfis)
1028 	if (acmd[1] & 1) {		/* VPD */
1029 		if (acmd[2] == 0) {	/* Supported VPD pages */
1037 			p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1039 			tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1040 			cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1041 			ahci_write_fis_d2h(p, slot, cfis, tfd);
1053 		atapi_string(buf + 8, "BHYVE", 8);
1054 		atapi_string(buf + 16, "BHYVE DVD-ROM", 16);
1055 		atapi_string(buf + 32, "001", 4);
1061 	cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1062 	write_prdt(p, slot, cfis, buf, len);
1063 	ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
/*
 * Handle SCSI READ CAPACITY(10): report the last LBA and a fixed
 * 2048-byte block size, both big-endian per SCSI convention.
 */
1067 atapi_read_capacity(struct ahci_port *p, int slot, uint8_t *cfis)
1072 	sectors = blockif_size(p->bctx) / 2048;
1073 	be32enc(buf, sectors - 1);
1074 	be32enc(buf + 4, 2048);
1075 	cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1076 	write_prdt(p, slot, cfis, buf, sizeof(buf));
1077 	ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
/*
 * Handle SCSI READ TOC for a single-track disc.  Supports the standard
 * TOC formats, synthesizing a one-track table whose lead-out address is
 * the medium size; addresses are returned in MSF or LBA form depending
 * on the request.  Invalid start tracks/formats get ILLEGAL REQUEST.
 */
1081 atapi_read_toc(struct ahci_port *p, int slot, uint8_t *cfis)
1089 	len = be16dec(acmd + 7);
1090 	format = acmd[9] >> 6;
1096 		uint8_t start_track, buf[20], *bp;
1098 		msf = (acmd[1] >> 1) & 1;
1099 		start_track = acmd[6];
/* Only track 1 and the 0xaa lead-out track exist on this disc. */
1100 		if (start_track > 1 && start_track != 0xaa) {
1102 			p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1104 			tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1105 			cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1106 			ahci_write_fis_d2h(p, slot, cfis, tfd);
1112 		if (start_track <= 1) {
1132 		sectors = blockif_size(p->bctx) / blockif_sectsz(p->bctx);
/* Lead-out address in requested form: MSF or big-endian LBA. */
1136 			lba_to_msf(bp, sectors);
1139 			be32enc(bp, sectors);
1143 		be16enc(buf, size - 2);
1146 		write_prdt(p, slot, cfis, buf, len);
1147 		cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1148 		ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1155 		memset(buf, 0, sizeof(buf));
/* Clamp the transfer to the response buffer. */
1159 		if (len > sizeof(buf))
1161 		write_prdt(p, slot, cfis, buf, len);
1162 		cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1163 		ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1170 		uint8_t start_track, *bp, buf[50];
1172 		msf = (acmd[1] >> 1) & 1;
1173 		start_track = acmd[6];
1209 		sectors = blockif_size(p->bctx) / blockif_sectsz(p->bctx);
1213 			lba_to_msf(bp, sectors);
1216 			be32enc(bp, sectors);
1239 		be16enc(buf, size - 2);
1242 		write_prdt(p, slot, cfis, buf, len);
1243 		cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1244 		ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
/* Unsupported TOC format. */
1251 		p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1253 		tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1254 		cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1255 		ahci_write_fis_d2h(p, slot, cfis, tfd);
/* Handle SCSI REPORT LUNS: return an all-zero list (single LUN 0). */
1262 atapi_report_luns(struct ahci_port *p, int slot, uint8_t *cfis)
1266 	memset(buf, 0, sizeof(buf));
1269 	cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1270 	write_prdt(p, slot, cfis, buf, sizeof(buf));
1271 	ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
/*
 * Handle SCSI READ(10)/READ(12): decode the big-endian LBA and transfer
 * length from the packet command, build the scatter/gather list, and
 * queue an asynchronous blockif read; 'done' carries continuation
 * progress.  A zero-length read completes immediately.
 */
1275 atapi_read(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t done)
1277 	struct ahci_ioreq *aior;
1278 	struct ahci_cmd_hdr *hdr;
1279 	struct ahci_prdt_entry *prdt;
1280 	struct blockif_req *breq;
1281 	struct pci_ahci_softc *sc;
1289 	hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
1290 	prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
1292 	lba = be32dec(acmd + 2);
/* READ(10) keeps a 16-bit count at byte 7; READ(12) a 32-bit one at byte 6. */
1293 	if (acmd[0] == READ_10)
1294 		len = be16dec(acmd + 7);
1296 		len = be32dec(acmd + 6);
1298 		cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1299 		ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1305 	 * Pull request off free list
1307 	aior = STAILQ_FIRST(&p->iofhd);
1308 	assert(aior != NULL);
1309 	STAILQ_REMOVE_HEAD(&p->iofhd, io_flist);
1314 	breq = &aior->io_req;
1315 	breq->br_offset = lba + done;
1316 	ahci_build_iov(p, aior, prdt, hdr->prdtl);
1318 	/* Mark this command in-flight. */
1319 	p->pending |= 1 << slot;
1321 	/* Stuff request onto busy list. */
1322 	TAILQ_INSERT_HEAD(&p->iobhd, aior, io_blist);
1324 	err = blockif_read(p->bctx, breq);
/*
 * Handle SCSI REQUEST SENSE: return fixed-format sense data built from
 * the port's latched sense key.
 */
1329 atapi_request_sense(struct ahci_port *p, int slot, uint8_t *cfis)
1337 	if (len > sizeof(buf))
1339 	memset(buf, 0, len);
/* 0x70 = current fixed-format sense; bit 7 = valid. */
1340 	buf[0] = 0x70 | (1 << 7);
1341 	buf[2] = p->sense_key;
1344 	write_prdt(p, slot, cfis, buf, len);
1345 	cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1346 	ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
/*
 * Handle SCSI START STOP UNIT.  Start/stop requests succeed as no-ops;
 * the eject request is unimplemented and reported as ILLEGAL REQUEST.
 */
1350 atapi_start_stop_unit(struct ahci_port *p, int slot, uint8_t *cfis)
1352 	uint8_t *acmd = cfis + 0x40;
1355 	switch (acmd[4] & 3) {
1359 		cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1360 		tfd = ATA_S_READY | ATA_S_DSC;
1363 		/* TODO eject media */
1364 		cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1365 		p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1367 		tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1370 	ahci_write_fis_d2h(p, slot, cfis, tfd);
/*
 * Handle SCSI MODE SENSE(10) for the two supported pages: R/W error
 * recovery and CD capabilities.  Unsupported pages return ILLEGAL
 * REQUEST sense.
 */
1374 atapi_mode_sense(struct ahci_port *p, int slot, uint8_t *cfis)
1382 	len = be16dec(acmd + 7);
1384 	code = acmd[2] & 0x3f;
1389 		case MODEPAGE_RW_ERROR_RECOVERY:
/* Clamp the transfer to the response buffer before filling it in. */
1393 			if (len > sizeof(buf))
1396 			memset(buf, 0, sizeof(buf));
1397 			be16enc(buf, 16 - 2);
1402 			write_prdt(p, slot, cfis, buf, len);
1403 			tfd = ATA_S_READY | ATA_S_DSC;
1406 		case MODEPAGE_CD_CAPABILITIES:
1410 			if (len > sizeof(buf))
1413 			memset(buf, 0, sizeof(buf));
1414 			be16enc(buf, 30 - 2);
/* Report 2 volume levels and a 512 KiB buffer size. */
1420 			be16enc(&buf[18], 2);
1421 			be16enc(&buf[20], 512);
1422 			write_prdt(p, slot, cfis, buf, len);
1423 			tfd = ATA_S_READY | ATA_S_DSC;
1432 			p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1434 			tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1439 		p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1441 		tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1444 	cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1445 	ahci_write_fis_d2h(p, slot, cfis, tfd);
/*
 * Handle GET EVENT STATUS NOTIFICATION.  Only the polled (immediate)
 * form is supported; asynchronous operation is rejected with ILLEGAL
 * REQUEST sense.  Returns an empty event header.
 */
1449 atapi_get_event_status_notification(struct ahci_port *p, int slot,
1457 	/* we don't support asynchronous operation */
1458 	if (!(acmd[1] & 1)) {
1459 		p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1461 		tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1466 		len = be16dec(acmd + 7);
1467 		if (len > sizeof(buf))
1470 		memset(buf, 0, sizeof(buf));
1471 		be16enc(buf, 8 - 2);
1475 		write_prdt(p, slot, cfis, buf, len);
1476 		tfd = ATA_S_READY | ATA_S_DSC;
1478 	cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1479 	ahci_write_fis_d2h(p, slot, cfis, tfd);
/*
 * Dispatch an ATAPI PACKET command: decode the embedded SCSI CDB at
 * cfis+0x40 and route it to the matching atapi_* handler.  Unknown
 * opcodes complete with ILLEGAL REQUEST sense.
 */
1483 handle_packet_cmd(struct ahci_port *p, int slot, uint8_t *cfis)
1493 	for (i = 0; i < 16; i++)
1494 		DPRINTF("%02x ", acmd[i]);
1500 	case TEST_UNIT_READY:
1501 		cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1502 		ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1505 		atapi_inquiry(p, slot, cfis);
1508 		atapi_read_capacity(p, slot, cfis);
/* PREVENT ALLOW et al. are accepted as no-ops. */
1512 		cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1513 		ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1516 		atapi_read_toc(p, slot, cfis);
1519 		atapi_report_luns(p, slot, cfis);
1523 		atapi_read(p, slot, cfis, 0);
1526 		atapi_request_sense(p, slot, cfis);
1528 	case START_STOP_UNIT:
1529 		atapi_start_stop_unit(p, slot, cfis);
1532 		atapi_mode_sense(p, slot, cfis);
1534 	case GET_EVENT_STATUS_NOTIFICATION:
1535 		atapi_get_event_status_notification(p, slot, cfis);
1538 		cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1539 		p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1541 		ahci_write_fis_d2h(p, slot, cfis, (p->sense_key << 12) |
1542 		    ATA_S_READY | ATA_S_ERROR);
/*
 * Top-level ATA command dispatcher for a command slot.  Handles
 * IDENTIFY, SET FEATURES, SET MULTI, read/write (all variants), cache
 * flush, TRIM, READ LOG EXT, power no-ops, and ATAPI PACKET; anything
 * else is aborted with a command-aborted error.
 */
1548 ahci_handle_cmd(struct ahci_port *p, int slot, uint8_t *cfis)
1552 	case ATA_ATA_IDENTIFY:
1553 		handle_identify(p, slot, cfis);
1555 	case ATA_SETFEATURES:
/* SATA-feature enable: only Asynchronous Notification is accepted. */
1558 		case ATA_SF_ENAB_SATA_SF:
1560 			case ATA_SATA_SF_AN:
1561 				p->tfd = ATA_S_DSC | ATA_S_READY;
1564 				p->tfd = ATA_S_ERROR | ATA_S_READY;
1565 				p->tfd |= (ATA_ERROR_ABORT << 8);
/* Cache-control subcommands are accepted as no-ops. */
1569 		case ATA_SF_ENAB_WCACHE:
1570 		case ATA_SF_DIS_WCACHE:
1571 		case ATA_SF_ENAB_RCACHE:
1572 		case ATA_SF_DIS_RCACHE:
1573 			p->tfd = ATA_S_DSC | ATA_S_READY;
1575 		case ATA_SF_SETXFER:
1577 			switch (cfis[12] & 0xf8) {
1583 				p->xfermode = (cfis[12] & 0x7);
1586 				p->tfd = ATA_S_DSC | ATA_S_READY;
1590 			p->tfd = ATA_S_ERROR | ATA_S_READY;
1591 			p->tfd |= (ATA_ERROR_ABORT << 8);
1594 		ahci_write_fis_d2h(p, slot, cfis, p->tfd);
/* SET MULTI: count must be 0 or a power of two no larger than 128. */
1598 		if (cfis[12] != 0 &&
1599 			(cfis[12] > 128 || (cfis[12] & (cfis[12] - 1)))) {
1600 			p->tfd = ATA_S_ERROR | ATA_S_READY;
1601 			p->tfd |= (ATA_ERROR_ABORT << 8);
1603 			p->mult_sectors = cfis[12];
1604 			p->tfd = ATA_S_DSC | ATA_S_READY;
1606 		ahci_write_fis_d2h(p, slot, cfis, p->tfd);
1614 	case ATA_READ_MUL48:
1615 	case ATA_WRITE_MUL48:
1618 	case ATA_READ_DMA48:
1619 	case ATA_WRITE_DMA48:
1620 	case ATA_READ_FPDMA_QUEUED:
1621 	case ATA_WRITE_FPDMA_QUEUED:
1622 		ahci_handle_rw(p, slot, cfis, 0);
1624 	case ATA_FLUSHCACHE:
1625 	case ATA_FLUSHCACHE48:
1626 		ahci_handle_flush(p, slot, cfis);
1628 	case ATA_DATA_SET_MANAGEMENT:
1629 		if (cfis[11] == 0 && cfis[3] == ATA_DSM_TRIM &&
1630 		    cfis[13] == 0 && cfis[12] == 1) {
1631 			ahci_handle_dsm_trim(p, slot, cfis, 0);
1634 		ahci_write_fis_d2h(p, slot, cfis,
1635 		    (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
1637 	case ATA_SEND_FPDMA_QUEUED:
/* NOTE(review): 'cfis[13] == 1' below contradicts the
 * '(cfis[13] & 0x1f) == ATA_SFPDMA_DSM' test two lines up; the NCQ
 * sector-count low byte is cfis[3] — this likely should be
 * 'cfis[3] == 1'.  Confirm against the ACS SEND FPDMA QUEUED layout. */
1638 		if ((cfis[13] & 0x1f) == ATA_SFPDMA_DSM &&
1639 		    cfis[17] == 0 && cfis[16] == ATA_DSM_TRIM &&
1640 		    cfis[11] == 0 && cfis[13] == 1) {
1641 			ahci_handle_dsm_trim(p, slot, cfis, 0);
1644 		ahci_write_fis_d2h(p, slot, cfis,
1645 		    (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
1647 	case ATA_READ_LOG_EXT:
1648 	case ATA_READ_LOG_DMA_EXT:
1649 		ahci_handle_read_log(p, slot, cfis);
1652 		ahci_write_fis_d2h(p, slot, cfis,
1653 		    (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
/* Power-management commands succeed as no-ops. */
1655 	case ATA_STANDBY_CMD:
1656 	case ATA_STANDBY_IMMEDIATE:
1658 	case ATA_IDLE_IMMEDIATE:
1660 		ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1662 	case ATA_ATAPI_IDENTIFY:
1663 		handle_atapi_identify(p, slot, cfis);
1665 	case ATA_PACKET_CMD:
/* PACKET is only valid on ATAPI ports. */
1667 			ahci_write_fis_d2h(p, slot, cfis,
1668 			    (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
1670 			handle_packet_cmd(p, slot, cfis);
1673 		WPRINTF("Unsupported cmd:%02x\n", cfis[2]);
1674 		ahci_write_fis_d2h(p, slot, cfis,
1675 		    (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
/*
 * Process one issued command slot: map the command table from guest
 * memory, validate the command FIS, and either run the ATA command or
 * handle a control FIS (soft-reset sequence in the elided branches).
 */
1681 ahci_handle_slot(struct ahci_port *p, int slot)
1683 	struct ahci_cmd_hdr *hdr;
1684 	struct ahci_prdt_entry *prdt;
1685 	struct pci_ahci_softc *sc;
1690 	hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
/* Command FIS length is stored in dwords in the header flags. */
1691 	cfl = (hdr->flags & 0x1f) * 4;
1692 	cfis = paddr_guest2host(ahci_ctx(sc), hdr->ctba,
1693 		    0x80 + hdr->prdtl * sizeof(struct ahci_prdt_entry));
1694 	prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
1698 	for (i = 0; i < cfl; i++) {
1701 		DPRINTF("%02x ", cfis[i]);
1705 	for (i = 0; i < hdr->prdtl; i++) {
1706 		DPRINTF("%d@%08"PRIx64"\n", prdt->dbc & 0x3fffff, prdt->dba);
1711 	if (cfis[0] != FIS_TYPE_REGH2D) {
1712 		WPRINTF("Not a H2D FIS:%02x\n", cfis[0]);
/* Bit 7 of byte 1 distinguishes a command FIS from a control FIS. */
1716 	if (cfis[1] & 0x80) {
1717 		ahci_handle_cmd(p, slot, cfis);
1719 		if (cfis[15] & (1 << 2))
1721 		else if (p->reset) {
1725 			p->ci &= ~(1 << slot);
/*
 * Scan the port's command-issue bitmap and start any newly issued
 * commands, updating the current-command-slot field in PxCMD as we go.
 * Does nothing unless command processing (PxCMD.ST) is enabled.
 */
1730 ahci_handle_port(struct ahci_port *p)
1734 	if (!(p->cmd & AHCI_P_CMD_ST))
1738 	 * Search for any new commands to issue ignoring those that
1739 	 * are already in-flight.
1741 	for (i = 0; (i < 32) && p->ci; i++) {
1742 		if ((p->ci & (1 << i)) && !(p->pending & (1 << i))) {
1743 			p->cmd &= ~AHCI_P_CMD_CCS_MASK;
1744 			p->cmd |= i << AHCI_P_CMD_CCS_SHIFT;
1745 			ahci_handle_slot(p, i);
1751  * blockif callback routine - this runs in the context of the blockif
1752  * i/o thread, so the mutex needs to be acquired.
1755 ata_ioreq_cb(struct blockif_req *br, int err)
1757 	struct ahci_cmd_hdr *hdr;
1758 	struct ahci_ioreq *aior;
1759 	struct ahci_port *p;
1760 	struct pci_ahci_softc *sc;
1765 	DPRINTF("%s %d\n", __func__, err);
1768 	aior = br->br_param;
1773 	hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
/* NCQ completions use a Set Device Bits FIS; DSM completions continue. */
1775 	if (cfis[2] == ATA_WRITE_FPDMA_QUEUED ||
1776 	    cfis[2] == ATA_READ_FPDMA_QUEUED ||
1777 	    cfis[2] == ATA_SEND_FPDMA_QUEUED)
1779 	if (cfis[2] == ATA_DATA_SET_MANAGEMENT ||
1780 	    (cfis[2] == ATA_SEND_FPDMA_QUEUED &&
1781 	     (cfis[13] & 0x1f) == ATA_SFPDMA_DSM))
1784 	pthread_mutex_lock(&sc->mtx);
1787 	 * Delete the blockif request from the busy list
1789 	TAILQ_REMOVE(&p->iobhd, aior, io_blist);
1792 	 * Move the blockif request back to the free list
1794 	STAILQ_INSERT_TAIL(&p->iofhd, aior, io_flist);
1797 	hdr->prdbc = aior->done;
/* Partial transfer: re-issue the next chunk instead of completing. */
1799 	if (!err && aior->more) {
1801 			ahci_handle_dsm_trim(p, slot, cfis, aior->done);
1803 			ahci_handle_rw(p, slot, cfis, aior->done);
1808 		tfd = ATA_S_READY | ATA_S_DSC;
1810 		tfd = (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR;
1812 		ahci_write_fis_sdb(p, slot, cfis, tfd);
1814 		ahci_write_fis_d2h(p, slot, cfis, tfd);
1817 	 * This command is now complete.
1819 	p->pending &= ~(1 << slot);
1821 	ahci_check_stopped(p);
1823 	pthread_mutex_unlock(&sc->mtx);
1824 	DPRINTF("%s exit\n", __func__);
/*
 * blockif completion callback for ATAPI reads; runs on the blockif i/o
 * thread, so the softc mutex is taken before touching port state.
 * Continues multi-chunk reads, otherwise completes the slot with either
 * success or ILLEGAL REQUEST sense on error.
 */
1828 atapi_ioreq_cb(struct blockif_req *br, int err)
1830 	struct ahci_cmd_hdr *hdr;
1831 	struct ahci_ioreq *aior;
1832 	struct ahci_port *p;
1833 	struct pci_ahci_softc *sc;
1838 	DPRINTF("%s %d\n", __func__, err);
1840 	aior = br->br_param;
1845 	hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + aior->slot * AHCI_CL_SIZE);
1847 	pthread_mutex_lock(&sc->mtx);
1850 	 * Delete the blockif request from the busy list
1852 	TAILQ_REMOVE(&p->iobhd, aior, io_blist);
1855 	 * Move the blockif request back to the free list
1857 	STAILQ_INSERT_TAIL(&p->iofhd, aior, io_flist);
1860 	hdr->prdbc = aior->done;
/* Partial transfer: issue the next chunk and return without completing. */
1862 	if (!err && aior->more) {
1863 		atapi_read(p, slot, cfis, aior->done);
1868 		tfd = ATA_S_READY | ATA_S_DSC;
1870 		p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1872 		tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1874 	cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1875 	ahci_write_fis_d2h(p, slot, cfis, tfd);
1878 	 * This command is now complete.
1880 	p->pending &= ~(1 << slot);
1882 	ahci_check_stopped(p);
1884 	pthread_mutex_unlock(&sc->mtx);
1885 	DPRINTF("%s exit\n", __func__);
/*
 * Allocate the port's i/o request pool, sized to the blockif queue
 * depth, wire up the appropriate completion callback (ATA vs ATAPI),
 * and place every request on the free list.
 */
1889 pci_ahci_ioreq_init(struct ahci_port *pr)
1891 	struct ahci_ioreq *vr;
1894 	pr->ioqsz = blockif_queuesz(pr->bctx);
1895 	pr->ioreq = calloc(pr->ioqsz, sizeof(struct ahci_ioreq));
1896 	STAILQ_INIT(&pr->iofhd);
1899 	 * Add all i/o request entries to the free queue
1901 	for (i = 0; i < pr->ioqsz; i++) {
1905 			vr->io_req.br_callback = ata_ioreq_cb;
1907 			vr->io_req.br_callback = atapi_ioreq_cb;
1908 		vr->io_req.br_param = vr;
1909 		STAILQ_INSERT_TAIL(&pr->iofhd, vr, io_flist);
1912 	TAILQ_INIT(&pr->iobhd);
1916 pci_ahci_port_write(struct pci_ahci_softc *sc, uint64_t offset, uint64_t value)
1918 int port = (offset - AHCI_OFFSET) / AHCI_STEP;
1919 offset = (offset - AHCI_OFFSET) % AHCI_STEP;
1920 struct ahci_port *p = &sc->port[port];
1922 DPRINTF("pci_ahci_port %d: write offset 0x%"PRIx64" value 0x%"PRIx64"\n",
1923 port, offset, value);
1942 p->ie = value & 0xFDC000FF;
1943 ahci_generate_intr(sc);
1947 p->cmd &= ~(AHCI_P_CMD_ST | AHCI_P_CMD_SUD | AHCI_P_CMD_POD |
1948 AHCI_P_CMD_CLO | AHCI_P_CMD_FRE | AHCI_P_CMD_APSTE |
1949 AHCI_P_CMD_ATAPI | AHCI_P_CMD_DLAE | AHCI_P_CMD_ALPE |
1950 AHCI_P_CMD_ASP | AHCI_P_CMD_ICC_MASK);
1951 p->cmd |= (AHCI_P_CMD_ST | AHCI_P_CMD_SUD | AHCI_P_CMD_POD |
1952 AHCI_P_CMD_CLO | AHCI_P_CMD_FRE | AHCI_P_CMD_APSTE |
1953 AHCI_P_CMD_ATAPI | AHCI_P_CMD_DLAE | AHCI_P_CMD_ALPE |
1954 AHCI_P_CMD_ASP | AHCI_P_CMD_ICC_MASK) & value;
1956 if (!(value & AHCI_P_CMD_ST)) {
1961 p->cmd |= AHCI_P_CMD_CR;
1962 clb = (uint64_t)p->clbu << 32 | p->clb;
1963 p->cmd_lst = paddr_guest2host(ahci_ctx(sc), clb,
1964 AHCI_CL_SIZE * AHCI_MAX_SLOTS);
1967 if (value & AHCI_P_CMD_FRE) {
1970 p->cmd |= AHCI_P_CMD_FR;
1971 fb = (uint64_t)p->fbu << 32 | p->fb;
1972 /* we don't support FBSCP, so rfis size is 256Bytes */
1973 p->rfis = paddr_guest2host(ahci_ctx(sc), fb, 256);
1975 p->cmd &= ~AHCI_P_CMD_FR;
1978 if (value & AHCI_P_CMD_CLO) {
1980 p->cmd &= ~AHCI_P_CMD_CLO;
1983 if (value & AHCI_P_CMD_ICC_MASK) {
1984 p->cmd &= ~AHCI_P_CMD_ICC_MASK;
1987 ahci_handle_port(p);
1993 WPRINTF("pci_ahci_port: read only registers 0x%"PRIx64"\n", offset);
1997 if (!(p->cmd & AHCI_P_CMD_ST)) {
1998 if (value & ATA_SC_DET_RESET)
2010 ahci_handle_port(p);
2020 pci_ahci_host_write(struct pci_ahci_softc *sc, uint64_t offset, uint64_t value)
2022 DPRINTF("pci_ahci_host: write offset 0x%"PRIx64" value 0x%"PRIx64"\n",
2030 DPRINTF("pci_ahci_host: read only registers 0x%"PRIx64"\n", offset);
2033 if (value & AHCI_GHC_HR)
2035 else if (value & AHCI_GHC_IE) {
2036 sc->ghc |= AHCI_GHC_IE;
2037 ahci_generate_intr(sc);
2042 ahci_generate_intr(sc);
2050 pci_ahci_write(struct vmctx *ctx, int vcpu, struct pci_devinst *pi,
2051 int baridx, uint64_t offset, int size, uint64_t value)
2053 struct pci_ahci_softc *sc = pi->pi_arg;
2055 assert(baridx == 5);
2058 pthread_mutex_lock(&sc->mtx);
2060 if (offset < AHCI_OFFSET)
2061 pci_ahci_host_write(sc, offset, value);
2062 else if (offset < AHCI_OFFSET + sc->ports * AHCI_STEP)
2063 pci_ahci_port_write(sc, offset, value);
2065 WPRINTF("pci_ahci: unknown i/o write offset 0x%"PRIx64"\n", offset);
2067 pthread_mutex_unlock(&sc->mtx);
2071 pci_ahci_host_read(struct pci_ahci_softc *sc, uint64_t offset)
2087 uint32_t *p = &sc->cap;
2088 p += (offset - AHCI_CAP) / sizeof(uint32_t);
2096 DPRINTF("pci_ahci_host: read offset 0x%"PRIx64" value 0x%x\n",
2103 pci_ahci_port_read(struct pci_ahci_softc *sc, uint64_t offset)
2106 int port = (offset - AHCI_OFFSET) / AHCI_STEP;
2107 offset = (offset - AHCI_OFFSET) % AHCI_STEP;
2127 uint32_t *p= &sc->port[port].clb;
2128 p += (offset - AHCI_P_CLB) / sizeof(uint32_t);
2137 DPRINTF("pci_ahci_port %d: read offset 0x%"PRIx64" value 0x%x\n",
2138 port, offset, value);
2144 pci_ahci_read(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int baridx,
2145 uint64_t offset, int size)
2147 struct pci_ahci_softc *sc = pi->pi_arg;
2150 assert(baridx == 5);
2153 pthread_mutex_lock(&sc->mtx);
2155 if (offset < AHCI_OFFSET)
2156 value = pci_ahci_host_read(sc, offset);
2157 else if (offset < AHCI_OFFSET + sc->ports * AHCI_STEP)
2158 value = pci_ahci_port_read(sc, offset);
2161 WPRINTF("pci_ahci: unknown i/o read offset 0x%"PRIx64"\n", offset);
2164 pthread_mutex_unlock(&sc->mtx);
2170 pci_ahci_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts, int atapi)
2172 char bident[sizeof("XX:X:X")];
2173 struct blockif_ctxt *bctxt;
2174 struct pci_ahci_softc *sc;
2182 fprintf(stderr, "pci_ahci: backing device required\n");
2187 dbg = fopen("/tmp/log", "w+");
2190 sc = calloc(1, sizeof(struct pci_ahci_softc));
2193 sc->ports = MAX_PORTS;
2196 * Only use port 0 for a backing device. All other ports will be
2199 sc->port[0].atapi = atapi;
2202 * Attempt to open the backing image. Use the PCI
2203 * slot/func for the identifier string.
2205 snprintf(bident, sizeof(bident), "%d:%d", pi->pi_slot, pi->pi_func);
2206 bctxt = blockif_open(opts, bident);
2207 if (bctxt == NULL) {
2211 sc->port[0].bctx = bctxt;
2212 sc->port[0].pr_sc = sc;
2215 * Create an identifier for the backing file. Use parts of the
2216 * md5 sum of the filename
2219 MD5Update(&mdctx, opts, strlen(opts));
2220 MD5Final(digest, &mdctx);
2221 sprintf(sc->port[0].ident, "BHYVE-%02X%02X-%02X%02X-%02X%02X",
2222 digest[0], digest[1], digest[2], digest[3], digest[4], digest[5]);
2225 * Allocate blockif request structures and add them
2228 pci_ahci_ioreq_init(&sc->port[0]);
2230 pthread_mutex_init(&sc->mtx, NULL);
2232 /* Intel ICH8 AHCI */
2233 slots = sc->port[0].ioqsz;
2237 sc->cap = AHCI_CAP_64BIT | AHCI_CAP_SNCQ | AHCI_CAP_SSNTF |
2238 AHCI_CAP_SMPS | AHCI_CAP_SSS | AHCI_CAP_SALP |
2239 AHCI_CAP_SAL | AHCI_CAP_SCLO | (0x3 << AHCI_CAP_ISS_SHIFT)|
2240 AHCI_CAP_PMD | AHCI_CAP_SSC | AHCI_CAP_PSC |
2241 (slots << AHCI_CAP_NCS_SHIFT) | AHCI_CAP_SXS | (sc->ports - 1);
2243 /* Only port 0 implemented */
2246 sc->cap2 = AHCI_CAP2_APST;
2249 pci_set_cfgdata16(pi, PCIR_DEVICE, 0x2821);
2250 pci_set_cfgdata16(pi, PCIR_VENDOR, 0x8086);
2251 pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_STORAGE);
2252 pci_set_cfgdata8(pi, PCIR_SUBCLASS, PCIS_STORAGE_SATA);
2253 pci_set_cfgdata8(pi, PCIR_PROGIF, PCIP_STORAGE_SATA_AHCI_1_0);
2254 pci_emul_add_msicap(pi, 1);
2255 pci_emul_alloc_bar(pi, 5, PCIBAR_MEM32,
2256 AHCI_OFFSET + sc->ports * AHCI_STEP);
2258 pci_lintr_request(pi);
2262 blockif_close(sc->port[0].bctx);
/* Entry point for the "ahci-hd" emulation: a non-ATAPI (hard disk) port. */
static int
pci_ahci_hd_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
{

	return (pci_ahci_init(ctx, pi, opts, 0));
}
/* Entry point for the "ahci-cd" emulation: an ATAPI (CD-ROM) port. */
static int
pci_ahci_atapi_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
{

	return (pci_ahci_init(ctx, pi, opts, 1));
}
2284 * Use separate emulation names to distinguish drive and atapi devices
2286 struct pci_devemu pci_de_ahci_hd = {
2287 .pe_emu = "ahci-hd",
2288 .pe_init = pci_ahci_hd_init,
2289 .pe_barwrite = pci_ahci_write,
2290 .pe_barread = pci_ahci_read
2292 PCI_EMUL_SET(pci_de_ahci_hd);
2294 struct pci_devemu pci_de_ahci_cd = {
2295 .pe_emu = "ahci-cd",
2296 .pe_init = pci_ahci_atapi_init,
2297 .pe_barwrite = pci_ahci_write,
2298 .pe_barread = pci_ahci_read
2300 PCI_EMUL_SET(pci_de_ahci_cd);