2 * Copyright (c) 2013 Zhixiang Yu <zcore@freebsd.org>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
32 #include <sys/param.h>
33 #include <sys/linker_set.h>
36 #include <sys/ioctl.h>
39 #include <sys/endian.h>
51 #include <pthread_np.h>
/* Controller geometry and per-port signature values (AHCI PxSIG). */
60 #define MAX_PORTS 6 /* Intel ICH8 AHCI supports 6 ports */
62 #define PxSIG_ATA 0x00000101 /* ATA drive */
63 #define PxSIG_ATAPI 0xeb140101 /* ATAPI drive */
/*
 * SATA FIS type codes (Serial ATA spec); used as the first byte of every
 * FIS written into the port's received-FIS area.
 */
66 FIS_TYPE_REGH2D = 0x27, /* Register FIS - host to device */
67 FIS_TYPE_REGD2H = 0x34, /* Register FIS - device to host */
68 FIS_TYPE_DMAACT = 0x39, /* DMA activate FIS - device to host */
69 FIS_TYPE_DMASETUP = 0x41, /* DMA setup FIS - bidirectional */
70 FIS_TYPE_DATA = 0x46, /* Data FIS - bidirectional */
71 FIS_TYPE_BIST = 0x58, /* BIST activate FIS - bidirectional */
72 FIS_TYPE_PIOSETUP = 0x5F, /* PIO setup FIS - device to host */
73 FIS_TYPE_SETDEVBITS = 0xA1, /* Set dev bits FIS - device to host */
/* SCSI/ATAPI operation codes understood by the packet-command emulation. */
79 #define TEST_UNIT_READY 0x00
80 #define REQUEST_SENSE 0x03
82 #define START_STOP_UNIT 0x1B
83 #define PREVENT_ALLOW 0x1E
84 #define READ_CAPACITY 0x25
86 #define POSITION_TO_ELEMENT 0x2B
88 #define GET_EVENT_STATUS_NOTIFICATION 0x4A
89 #define MODE_SENSE_10 0x5A
90 #define REPORT_LUNS 0xA0
95 * SCSI mode page codes
97 #define MODEPAGE_RW_ERROR_RECOVERY 0x01
98 #define MODEPAGE_CD_CAPABILITIES 0x2A
/* ATA SET FEATURES subcommand codes handled in ahci_handle_cmd(). */
103 #define ATA_SF_ENAB_SATA_SF 0x10
104 #define ATA_SATA_SF_AN 0x05
105 #define ATA_SF_DIS_SATA_SF 0x90
/* Debug/warning printf wrappers; DPRINTF compiles away unless debugging. */
112 #define DPRINTF(format, arg...) do{fprintf(dbg, format, ##arg);fflush(dbg);}while(0)
114 #define DPRINTF(format, arg...)
116 #define WPRINTF(format, arg...) printf(format, ##arg)
/*
 * Per-request state: one ahci_ioreq wraps one blockif request and sits on
 * either the port's free list (io_flist) or busy list (io_blist).
 */
119 struct blockif_req io_req;
120 struct ahci_port *io_pr;
121 STAILQ_ENTRY(ahci_ioreq) io_flist;
122 TAILQ_ENTRY(ahci_ioreq) io_blist;
/*
 * Per-port state (fragment of struct ahci_port): backing block device,
 * back-pointer to the softc, and the saved error CFIS reported via
 * READ LOG EXT page 0x10.
 */
131 struct blockif_ctxt *bctx;
132 struct pci_ahci_softc *pr_sc;
141 uint8_t err_cfis[20];
168 struct ahci_ioreq *ioreq;
170 STAILQ_HEAD(ahci_fhead, ahci_ioreq) iofhd;
171 TAILQ_HEAD(ahci_bhead, ahci_ioreq) iobhd;
/* AHCI command-list header; one per command slot in guest memory. */
174 struct ahci_cmd_hdr {
179 uint32_t reserved[4];
/* Physical region descriptor table entry; dbc low 22 bits = byte count - 1. */
182 struct ahci_prdt_entry {
185 #define DBCMASK 0x3fffff
/* Device softc: PCI instance plus one ahci_port per supported port. */
189 struct pci_ahci_softc {
190 struct pci_devinst *asc_pi;
205 struct ahci_port port[MAX_PORTS];
207 #define ahci_ctx(sc) ((sc)->asc_pi->pi_vmctx)
209 static void ahci_handle_port(struct ahci_port *p);
/*
 * Convert a CD logical block address to minute/second/frame form
 * (75 frames per second, 60 seconds per minute) for READ TOC replies.
 * buf[2] (frame) is assigned on an elided line.
 */
211 static inline void lba_to_msf(uint8_t *buf, int lba)
214 buf[0] = (lba / 75) / 60;
215 buf[1] = (lba / 75) % 60;
220 * generate HBA intr depending on whether or not ports within
221 * the controller have an interrupt pending.
224 ahci_generate_intr(struct pci_ahci_softc *sc)
226 struct pci_devinst *pi;
/* Recompute the controller-level IS register from every port's state. */
231 for (i = 0; i < sc->ports; i++) {
232 struct ahci_port *pr;
238 DPRINTF("%s %x\n", __func__, sc->is);
/* Only signal the guest if interrupts are globally enabled (GHC.IE). */
240 if (sc->is && (sc->ghc & AHCI_GHC_IE)) {
241 if (pci_msi_enabled(pi)) {
243 * Generate an MSI interrupt on every edge
245 pci_generate_msi(pi, 0);
246 } else if (!sc->lintr) {
248 * Only generate a pin-based interrupt if one wasn't
252 pci_lintr_assert(pi);
254 } else if (sc->lintr) {
256 * No interrupts: deassert pin-based signal if it had
259 pci_lintr_deassert(pi);
/*
 * Copy a FIS into the port's received-FIS area at the offset mandated by
 * its type, set the matching PxIS bit when the FIS requests an interrupt,
 * and retrigger interrupt generation.  No-op unless FIS receive is enabled.
 */
265 ahci_write_fis(struct ahci_port *p, enum sata_fis_type ft, uint8_t *fis)
267 int offset, len, irq;
269 if (p->rfis == NULL || !(p->cmd & AHCI_P_CMD_FRE))
273 case FIS_TYPE_REGD2H:
/* Bit 6 of byte 1 is the FIS "interrupt" bit for all three types. */
276 irq = (fis[1] & (1 << 6)) ? AHCI_P_IX_DHR : 0;
278 case FIS_TYPE_SETDEVBITS:
281 irq = (fis[1] & (1 << 6)) ? AHCI_P_IX_SDB : 0;
283 case FIS_TYPE_PIOSETUP:
286 irq = (fis[1] & (1 << 6)) ? AHCI_P_IX_PS : 0;
289 WPRINTF("unsupported fis type %d\n", ft);
/* Status byte carries ERR -> also raise a task-file-error interrupt. */
292 if (fis[2] & ATA_S_ERROR) {
294 irq |= AHCI_P_IX_TFE;
296 memcpy(p->rfis + offset, fis, len);
299 ahci_generate_intr(p->pr_sc);
/* Emit a (mostly empty) PIO Setup FIS; used before PIO data transfers. */
304 ahci_write_fis_piosetup(struct ahci_port *p)
308 memset(fis, 0, sizeof(fis));
309 fis[0] = FIS_TYPE_PIOSETUP;
310 ahci_write_fis(p, FIS_TYPE_PIOSETUP, fis);
/*
 * Emit a Set Device Bits FIS completing NCQ slot 'slot' with task-file
 * value 'tfd'.  On error, latch the failing command into err_cfis for
 * later retrieval via READ LOG EXT.
 */
314 ahci_write_fis_sdb(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t tfd)
319 error = (tfd >> 8) & 0xff;
321 memset(fis, 0, sizeof(fis));
322 fis[0] = FIS_TYPE_SETDEVBITS;
326 if (fis[2] & ATA_S_ERROR) {
327 p->err_cfis[0] = slot;
328 p->err_cfis[2] = tfd;
329 p->err_cfis[3] = error;
330 memcpy(&p->err_cfis[4], cfis + 4, 16);
/* SActive bitmap in bytes 4-7 acknowledges this slot's completion. */
332 *(uint32_t *)(fis + 4) = (1 << slot);
333 p->sact &= ~(1 << slot);
337 ahci_write_fis(p, FIS_TYPE_SETDEVBITS, fis);
/*
 * Emit a Register D2H FIS completing non-NCQ slot 'slot'; on error the
 * failing CFIS is latched with marker 0x80 (non-queued) in err_cfis[0].
 */
341 ahci_write_fis_d2h(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t tfd)
346 error = (tfd >> 8) & 0xff;
347 memset(fis, 0, sizeof(fis));
348 fis[0] = FIS_TYPE_REGD2H;
362 if (fis[2] & ATA_S_ERROR) {
363 p->err_cfis[0] = 0x80;
364 p->err_cfis[2] = tfd & 0xff;
365 p->err_cfis[3] = error;
366 memcpy(&p->err_cfis[4], cfis + 4, 16);
368 p->ci &= ~(1 << slot);
370 ahci_write_fis(p, FIS_TYPE_REGD2H, fis);
/*
 * NCQ acceptance: clear BSY without interrupt so the guest may queue
 * further commands; actual completion arrives later via an SDB FIS.
 */
374 ahci_write_fis_d2h_ncq(struct ahci_port *p, int slot)
378 p->tfd = ATA_S_READY | ATA_S_DSC;
379 memset(fis, 0, sizeof(fis));
380 fis[0] = FIS_TYPE_REGD2H;
381 fis[1] = 0; /* No interrupt */
382 fis[2] = p->tfd; /* Status */
383 fis[3] = 0; /* No error */
384 p->ci &= ~(1 << slot);
385 ahci_write_fis(p, FIS_TYPE_REGD2H, fis);
/* Initial D2H FIS after a port reset, carrying the device signature. */
389 ahci_write_reset_fis_d2h(struct ahci_port *p)
393 memset(fis, 0, sizeof(fis));
394 fis[0] = FIS_TYPE_REGD2H;
402 ahci_write_fis(p, FIS_TYPE_REGD2H, fis);
406 ahci_check_stopped(struct ahci_port *p)
409 * If we are no longer processing the command list and nothing
410 * is in-flight, clear the running bit, the current command
411 * slot, the command issue and active bits.
413 if (!(p->cmd & AHCI_P_CMD_ST)) {
414 if (p->pending == 0) {
416 p->cmd &= ~(AHCI_P_CMD_CR | AHCI_P_CMD_CCS_MASK);
/*
 * Stop the port: cancel every in-flight blockif request, clear the
 * corresponding SACT/CI/pending bits, and recycle the ioreqs onto the
 * free list.  Caller must hold the softc mutex (asserted below).
 */
425 ahci_port_stop(struct ahci_port *p)
427 struct ahci_ioreq *aior;
433 assert(pthread_mutex_isowned_np(&p->pr_sc->mtx));
435 TAILQ_FOREACH(aior, &p->iobhd, io_blist) {
437 * Try to cancel the outstanding blockif request.
439 error = blockif_cancel(p->bctx, &aior->io_req);
/* NCQ commands are tracked in SACT; everything else in CI. */
445 if (cfis[2] == ATA_WRITE_FPDMA_QUEUED ||
446 cfis[2] == ATA_READ_FPDMA_QUEUED ||
447 cfis[2] == ATA_SEND_FPDMA_QUEUED)
451 p->sact &= ~(1 << slot);
453 p->ci &= ~(1 << slot);
456 * This command is now done.
458 p->pending &= ~(1 << slot);
461 * Delete the blockif request from the busy list
463 TAILQ_REMOVE(&p->iobhd, aior, io_blist);
466 * Move the blockif request back to the free list
468 STAILQ_INSERT_TAIL(&p->iofhd, aior, io_flist);
471 ahci_check_stopped(p);
/*
 * Reset one port to power-on defaults: UDMA6, 128-sector multiple I/O,
 * and either "no device" SSTS or an online PHY plus the ATA/ATAPI
 * signature, finishing with the post-reset D2H FIS.
 */
475 ahci_port_reset(struct ahci_port *pr)
479 pr->xfermode = ATA_UDMA6;
480 pr->mult_sectors = 128;
483 pr->ssts = ATA_SS_DET_NO_DEVICE;
484 pr->sig = 0xFFFFFFFF;
488 pr->ssts = ATA_SS_DET_PHY_ONLINE | ATA_SS_IPM_ACTIVE;
/* Honor a guest-requested speed cap from SCTL, else advertise Gen3. */
489 if (pr->sctl & ATA_SC_SPD_MASK)
490 pr->ssts |= (pr->sctl & ATA_SC_SPD_MASK);
492 pr->ssts |= ATA_SS_SPD_GEN3;
493 pr->tfd = (1 << 8) | ATA_S_DSC | ATA_S_DMA;
496 pr->tfd |= ATA_S_READY;
498 pr->sig = PxSIG_ATAPI;
499 ahci_write_reset_fis_d2h(pr);
/* Controller-level reset: AHCI-enable GHC, drop INTx, reset each port. */
503 ahci_reset(struct pci_ahci_softc *sc)
507 sc->ghc = AHCI_GHC_AE;
511 pci_lintr_deassert(sc->asc_pi);
515 for (i = 0; i < sc->ports; i++) {
518 sc->port[i].cmd = (AHCI_P_CMD_SUD | AHCI_P_CMD_POD);
519 if (sc->port[i].bctx)
520 sc->port[i].cmd |= AHCI_P_CMD_CPS;
521 sc->port[i].sctl = 0;
522 ahci_port_reset(&sc->port[i]);
/*
 * Copy an identification string in ATA IDENTIFY format: bytes are swapped
 * pairwise (dest[i ^ 1]) because IDENTIFY words are little-endian.
 */
527 ata_string(uint8_t *dest, const char *src, int len)
531 for (i = 0; i < len; i++) {
533 dest[i ^ 1] = *src++;
/* ATAPI (SCSI INQUIRY) strings are plain byte order — no swapping. */
540 atapi_string(uint8_t *dest, const char *src, int len)
544 for (i = 0; i < len; i++) {
553 * Build up the iovec based on the PRDT, 'done' and 'len'.
556 ahci_build_iov(struct ahci_port *p, struct ahci_ioreq *aior,
557 struct ahci_prdt_entry *prdt, uint16_t prdtl)
559 struct blockif_req *breq = &aior->io_req;
560 int i, j, skip, todo, left, extra;
563 /* Copy part of PRDT between 'done' and 'len' bytes into the iov. */
565 left = aior->len - aior->done;
567 for (i = 0, j = 0; i < prdtl && j < BLOCKIF_IOV_MAX && left > 0;
569 dbcsz = (prdt->dbc & DBCMASK) + 1;
570 /* Skip already done part of the PRDT */
/* Map the guest-physical PRDT segment into host address space. */
578 breq->br_iov[j].iov_base = paddr_guest2host(ahci_ctx(p->pr_sc),
579 prdt->dba + skip, dbcsz);
580 breq->br_iov[j].iov_len = dbcsz;
587 /* If we got limited by IOV length, round I/O down to sector size. */
588 if (j == BLOCKIF_IOV_MAX) {
589 extra = todo % blockif_sectsz(p->bctx);
/* Trim the tail iov entries until the partial sector is removed. */
593 if (breq->br_iov[j - 1].iov_len > extra) {
594 breq->br_iov[j - 1].iov_len -= extra;
597 extra -= breq->br_iov[j - 1].iov_len;
603 breq->br_resid = todo;
/* 'more' => another pass is needed after this chunk completes. */
605 aior->more = (aior->done < aior->len && i < prdtl);
/*
 * Dispatch an ATA read/write (PIO, DMA, 48-bit and NCQ variants) to the
 * backing blockif.  'done' is the byte count already transferred by a
 * previous pass; the callback re-enters here until the request is finished.
 */
609 ahci_handle_rw(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t done)
611 struct ahci_ioreq *aior;
612 struct blockif_req *breq;
613 struct ahci_prdt_entry *prdt;
614 struct ahci_cmd_hdr *hdr;
617 int err, first, ncq, readop;
619 prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
620 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
/* Classify direction: any of the WRITE opcodes clears 'readop'. */
625 if (cfis[2] == ATA_WRITE || cfis[2] == ATA_WRITE48 ||
626 cfis[2] == ATA_WRITE_MUL || cfis[2] == ATA_WRITE_MUL48 ||
627 cfis[2] == ATA_WRITE_DMA || cfis[2] == ATA_WRITE_DMA48 ||
628 cfis[2] == ATA_WRITE_FPDMA_QUEUED)
/* NCQ: 48-bit LBA across cfis[4..10], count in cfis[11]:cfis[3]. */
631 if (cfis[2] == ATA_WRITE_FPDMA_QUEUED ||
632 cfis[2] == ATA_READ_FPDMA_QUEUED) {
633 lba = ((uint64_t)cfis[10] << 40) |
634 ((uint64_t)cfis[9] << 32) |
635 ((uint64_t)cfis[8] << 24) |
636 ((uint64_t)cfis[6] << 16) |
637 ((uint64_t)cfis[5] << 8) |
639 len = cfis[11] << 8 | cfis[3];
/* 48-bit non-NCQ: count in cfis[13]:cfis[12]. */
643 } else if (cfis[2] == ATA_READ48 || cfis[2] == ATA_WRITE48 ||
644 cfis[2] == ATA_READ_MUL48 || cfis[2] == ATA_WRITE_MUL48 ||
645 cfis[2] == ATA_READ_DMA48 || cfis[2] == ATA_WRITE_DMA48) {
646 lba = ((uint64_t)cfis[10] << 40) |
647 ((uint64_t)cfis[9] << 32) |
648 ((uint64_t)cfis[8] << 24) |
649 ((uint64_t)cfis[6] << 16) |
650 ((uint64_t)cfis[5] << 8) |
652 len = cfis[13] << 8 | cfis[12];
/* 28-bit LBA command: 4 low bits of the device register are LBA[27:24]. */
656 lba = ((cfis[7] & 0xf) << 24) | (cfis[6] << 16) |
657 (cfis[5] << 8) | cfis[4];
/* Convert sectors to bytes for blockif. */
662 lba *= blockif_sectsz(p->bctx);
663 len *= blockif_sectsz(p->bctx);
665 /* Pull request off free list */
666 aior = STAILQ_FIRST(&p->iofhd);
667 assert(aior != NULL);
668 STAILQ_REMOVE_HEAD(&p->iofhd, io_flist);
674 breq = &aior->io_req;
675 breq->br_offset = lba + done;
676 ahci_build_iov(p, aior, prdt, hdr->prdtl);
678 /* Mark this command in-flight. */
679 p->pending |= 1 << slot;
681 /* Stuff request onto busy list. */
682 TAILQ_INSERT_HEAD(&p->iobhd, aior, io_blist);
/* First pass of an NCQ command: release BSY immediately. */
685 ahci_write_fis_d2h_ncq(p, slot);
688 err = blockif_read(p->bctx, breq);
690 err = blockif_write(p->bctx, breq);
/*
 * FLUSH CACHE / FLUSH CACHE EXT: issue an asynchronous blockif flush;
 * completion is reported from ata_ioreq_cb().
 */
695 ahci_handle_flush(struct ahci_port *p, int slot, uint8_t *cfis)
697 struct ahci_ioreq *aior;
698 struct blockif_req *breq;
702 * Pull request off free list
704 aior = STAILQ_FIRST(&p->iofhd);
705 assert(aior != NULL);
706 STAILQ_REMOVE_HEAD(&p->iofhd, io_flist);
712 breq = &aior->io_req;
715 * Mark this command in-flight.
717 p->pending |= 1 << slot;
720 * Stuff request onto busy list
722 TAILQ_INSERT_HEAD(&p->iobhd, aior, io_blist);
724 err = blockif_flush(p->bctx, breq);
/*
 * Copy up to 'len' bytes of guest data described by the slot's PRDT into
 * a host buffer ('to' presumably — signature partially elided).
 */
729 read_prdt(struct ahci_port *p, int slot, uint8_t *cfis,
732 struct ahci_cmd_hdr *hdr;
733 struct ahci_prdt_entry *prdt;
737 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
740 prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
741 for (i = 0; i < hdr->prdtl && len; i++) {
746 dbcsz = (prdt->dbc & DBCMASK) + 1;
747 ptr = paddr_guest2host(ahci_ctx(p->pr_sc), prdt->dba, dbcsz);
748 sublen = len < dbcsz ? len : dbcsz;
749 memcpy(to, ptr, sublen);
/*
 * DATA SET MANAGEMENT / SEND FPDMA QUEUED TRIM: parse one LBA-range entry
 * per pass from the guest's TRIM payload and issue a blockif_delete();
 * the callback re-enters with an updated 'done' until all entries are
 * consumed (entry == 0 terminates early with success).
 */
757 ahci_handle_dsm_trim(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t done)
759 struct ahci_ioreq *aior;
760 struct blockif_req *breq;
/* Count field location differs between the DSM and NCQ encodings. */
768 if (cfis[2] == ATA_DATA_SET_MANAGEMENT) {
769 len = (uint16_t)cfis[13] << 8 | cfis[12];
772 } else { /* ATA_SEND_FPDMA_QUEUED */
773 len = (uint16_t)cfis[11] << 8 | cfis[3];
777 read_prdt(p, slot, cfis, buf, sizeof(buf));
/* TRIM range entry: 48-bit LBA in bytes 0-5, 16-bit length in 6-7. */
781 elba = ((uint64_t)entry[5] << 40) |
782 ((uint64_t)entry[4] << 32) |
783 ((uint64_t)entry[3] << 24) |
784 ((uint64_t)entry[2] << 16) |
785 ((uint64_t)entry[1] << 8) |
787 elen = (uint16_t)entry[7] << 8 | entry[6];
/* Zero-length entry terminates the list: complete successfully. */
791 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
792 p->pending &= ~(1 << slot);
793 ahci_check_stopped(p);
802 * Pull request off free list
804 aior = STAILQ_FIRST(&p->iofhd);
805 assert(aior != NULL);
806 STAILQ_REMOVE_HEAD(&p->iofhd, io_flist);
811 aior->more = (len != done);
813 breq = &aior->io_req;
814 breq->br_offset = elba * blockif_sectsz(p->bctx);
815 breq->br_resid = elen * blockif_sectsz(p->bctx);
818 * Mark this command in-flight.
820 p->pending |= 1 << slot;
823 * Stuff request onto busy list
825 TAILQ_INSERT_HEAD(&p->iobhd, aior, io_blist);
828 ahci_write_fis_d2h_ncq(p, slot);
830 err = blockif_delete(p->bctx, breq);
/*
 * Copy up to 'len' bytes from a host buffer ('from') into the guest
 * memory described by the slot's PRDT; records bytes written in prdbc.
 */
835 write_prdt(struct ahci_port *p, int slot, uint8_t *cfis,
838 struct ahci_cmd_hdr *hdr;
839 struct ahci_prdt_entry *prdt;
843 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
846 prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
847 for (i = 0; i < hdr->prdtl && len; i++) {
852 dbcsz = (prdt->dbc & DBCMASK) + 1;
853 ptr = paddr_guest2host(ahci_ctx(p->pr_sc), prdt->dba, dbcsz);
854 sublen = len < dbcsz ? len : dbcsz;
855 memcpy(ptr, from, sublen);
860 hdr->prdbc = size - len;
/*
 * Make a buffer self-checksumming: the last byte is set so that the sum
 * of all bytes mod 256 is zero (ATA log-page checksum rule).
 */
864 ahci_checksum(uint8_t *buf, int size)
869 for (i = 0; i < size - 1; i++)
871 buf[size - 1] = 0x100 - sum;
/*
 * READ LOG EXT / READ LOG DMA EXT: only log page 0x10 (NCQ error log),
 * one sector, is supported; anything else — or an ATAPI port — aborts.
 */
875 ahci_handle_read_log(struct ahci_port *p, int slot, uint8_t *cfis)
877 struct ahci_cmd_hdr *hdr;
880 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
881 if (p->atapi || hdr->prdtl == 0 || cfis[4] != 0x10 ||
882 cfis[5] != 0 || cfis[9] != 0 || cfis[12] != 1 || cfis[13] != 0) {
883 ahci_write_fis_d2h(p, slot, cfis,
884 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
/* Return the latched error CFIS, padded and checksummed. */
888 memset(buf, 0, sizeof(buf));
889 memcpy(buf, p->err_cfis, sizeof(p->err_cfis));
890 ahci_checksum(buf, sizeof(buf));
/* PIO variant needs a PIO Setup FIS first; the DMA variant does not. */
892 if (cfis[2] == ATA_READ_LOG_EXT)
893 ahci_write_fis_piosetup(p);
894 write_prdt(p, slot, cfis, (void *)buf, sizeof(buf));
895 ahci_write_fis_d2h(p, slot, cfis, ATA_S_DSC | ATA_S_READY);
/*
 * IDENTIFY DEVICE: synthesize a 512-byte identify page from blockif
 * geometry and capability flags.  Word indices below follow ATA8-ACS.
 * Aborts on ATAPI ports or when no PRDT was provided.
 */
899 handle_identify(struct ahci_port *p, int slot, uint8_t *cfis)
901 struct ahci_cmd_hdr *hdr;
903 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
904 if (p->atapi || hdr->prdtl == 0) {
905 ahci_write_fis_d2h(p, slot, cfis,
906 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
910 int sectsz, psectsz, psectoff, candelete, ro;
914 ro = blockif_is_ro(p->bctx);
915 candelete = blockif_candelete(p->bctx);
916 sectsz = blockif_sectsz(p->bctx);
917 sectors = blockif_size(p->bctx) / sectsz;
918 blockif_chs(p->bctx, &cyl, &heads, &sech);
919 blockif_psectsz(p->bctx, &psectsz, &psectoff);
920 memset(buf, 0, sizeof(buf));
/* Words 10-46: serial, firmware revision, model (byte-swapped). */
925 ata_string((uint8_t *)(buf+10), p->ident, 20);
926 ata_string((uint8_t *)(buf+23), "001", 8);
927 ata_string((uint8_t *)(buf+27), "BHYVE SATA DISK", 40);
928 buf[47] = (0x8000 | 128);
930 buf[49] = (1 << 8 | 1 << 9 | 1 << 11);
932 buf[53] = (1 << 1 | 1 << 2);
934 buf[59] = (0x100 | p->mult_sectors);
/* Word 60-61: 28-bit capacity, clamped at 0x0fffffff sectors. */
935 if (sectors <= 0x0fffffff) {
937 buf[61] = (sectors >> 16);
943 if (p->xfermode & ATA_WDMA0)
944 buf[63] |= (1 << ((p->xfermode & 7) + 8));
/* Words 76-77: SATA capabilities and current negotiated speed. */
952 buf[76] = (ATA_SATA_GEN1 | ATA_SATA_GEN2 | ATA_SATA_GEN3 |
954 buf[77] = (ATA_SUPPORT_RCVSND_FPDMA_QUEUED |
955 (p->ssts & ATA_SS_SPD_MASK) >> 3);
958 buf[82] = (ATA_SUPPORT_POWERMGT | ATA_SUPPORT_WRITECACHE|
959 ATA_SUPPORT_LOOKAHEAD | ATA_SUPPORT_NOP);
960 buf[83] = (ATA_SUPPORT_ADDRESS48 | ATA_SUPPORT_FLUSHCACHE |
961 ATA_SUPPORT_FLUSHCACHE48 | 1 << 14);
963 buf[85] = (ATA_SUPPORT_POWERMGT | ATA_SUPPORT_WRITECACHE|
964 ATA_SUPPORT_LOOKAHEAD | ATA_SUPPORT_NOP);
965 buf[86] = (ATA_SUPPORT_ADDRESS48 | ATA_SUPPORT_FLUSHCACHE |
966 ATA_SUPPORT_FLUSHCACHE48 | 1 << 15);
969 if (p->xfermode & ATA_UDMA0)
970 buf[88] |= (1 << ((p->xfermode & 7) + 8));
/* Words 100-103: 48-bit capacity. */
972 buf[101] = (sectors >> 16);
973 buf[102] = (sectors >> 32);
974 buf[103] = (sectors >> 48);
/* Advertise TRIM only on writable media that supports deletion. */
975 if (candelete && !ro) {
976 buf[69] |= ATA_SUPPORT_RZAT | ATA_SUPPORT_DRAT;
978 buf[169] = ATA_SUPPORT_DSM_TRIM;
/* Word 106/209: logical-to-physical sector geometry and alignment. */
982 if (psectsz > sectsz) {
984 buf[106] |= ffsl(psectsz / sectsz) - 1;
985 buf[209] |= (psectoff / sectsz);
989 buf[117] = sectsz / 2;
990 buf[118] = ((sectsz / 2) >> 16);
992 buf[119] = (ATA_SUPPORT_RWLOGDMAEXT | 1 << 14);
993 buf[120] = (ATA_SUPPORT_RWLOGDMAEXT | 1 << 14);
996 ahci_checksum((uint8_t *)buf, sizeof(buf));
997 ahci_write_fis_piosetup(p);
998 write_prdt(p, slot, cfis, (void *)buf, sizeof(buf));
999 ahci_write_fis_d2h(p, slot, cfis, ATA_S_DSC | ATA_S_READY);
/*
 * IDENTIFY PACKET DEVICE: synthesize the ATAPI identify page for the
 * emulated DVD-ROM (the abort path below presumably fires on non-ATAPI
 * ports — the guarding condition is on an elided line).
 */
1004 handle_atapi_identify(struct ahci_port *p, int slot, uint8_t *cfis)
1007 ahci_write_fis_d2h(p, slot, cfis,
1008 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
1012 memset(buf, 0, sizeof(buf));
/* Word 0: ATAPI device, CD/DVD device type, removable, 50us DRQ. */
1013 buf[0] = (2 << 14 | 5 << 8 | 1 << 7 | 2 << 5);
1014 ata_string((uint8_t *)(buf+10), p->ident, 20);
1015 ata_string((uint8_t *)(buf+23), "001", 8);
1016 ata_string((uint8_t *)(buf+27), "BHYVE SATA DVD ROM", 40);
1017 buf[49] = (1 << 9 | 1 << 8);
1018 buf[50] = (1 << 14 | 1);
1019 buf[53] = (1 << 2 | 1 << 1);
1022 if (p->xfermode & ATA_WDMA0)
1023 buf[63] |= (1 << ((p->xfermode & 7) + 8));
1029 buf[76] = (ATA_SATA_GEN1 | ATA_SATA_GEN2 | ATA_SATA_GEN3);
1030 buf[77] = ((p->ssts & ATA_SS_SPD_MASK) >> 3);
1033 buf[82] = (ATA_SUPPORT_POWERMGT | ATA_SUPPORT_PACKET |
1034 ATA_SUPPORT_RESET | ATA_SUPPORT_NOP);
1035 buf[83] = (1 << 14);
1036 buf[84] = (1 << 14);
1037 buf[85] = (ATA_SUPPORT_POWERMGT | ATA_SUPPORT_PACKET |
1038 ATA_SUPPORT_RESET | ATA_SUPPORT_NOP);
1039 buf[87] = (1 << 14);
1041 if (p->xfermode & ATA_UDMA0)
1042 buf[88] |= (1 << ((p->xfermode & 7) + 8));
1045 ahci_checksum((uint8_t *)buf, sizeof(buf));
1046 ahci_write_fis_piosetup(p);
1047 write_prdt(p, slot, cfis, (void *)buf, sizeof(buf));
1048 ahci_write_fis_d2h(p, slot, cfis, ATA_S_DSC | ATA_S_READY);
/*
 * SCSI INQUIRY: standard data plus minimal VPD support; unsupported VPD
 * pages raise ILLEGAL REQUEST sense.
 */
1053 atapi_inquiry(struct ahci_port *p, int slot, uint8_t *cfis)
1062 if (acmd[1] & 1) { /* VPD */
1063 if (acmd[2] == 0) { /* Supported VPD pages */
/* Unsupported VPD page: report CHECK CONDITION via the task file. */
1071 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1073 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1074 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1075 ahci_write_fis_d2h(p, slot, cfis, tfd);
/* Standard INQUIRY: vendor/product/revision strings. */
1087 atapi_string(buf + 8, "BHYVE", 8);
1088 atapi_string(buf + 16, "BHYVE DVD-ROM", 16);
1089 atapi_string(buf + 32, "001", 4);
1095 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1096 write_prdt(p, slot, cfis, buf, len);
1097 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
/*
 * READ CAPACITY(10): last-LBA and 2048-byte block size, big-endian,
 * assuming standard CD/DVD 2KB sectors.
 */
1101 atapi_read_capacity(struct ahci_port *p, int slot, uint8_t *cfis)
1106 sectors = blockif_size(p->bctx) / 2048;
1107 be32enc(buf, sectors - 1);
1108 be32enc(buf + 4, 2048);
1109 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1110 write_prdt(p, slot, cfis, buf, sizeof(buf));
1111 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
/*
 * READ TOC/PMA/ATIP: supports formats 0 (TOC), 1 (session info, visible
 * here as the small zeroed buffer) and 2 (raw TOC); other formats raise
 * ILLEGAL REQUEST sense.  A single data track starting at LBA 0 is
 * reported, with MSF or LBA addressing per the command's MSF bit.
 */
1115 atapi_read_toc(struct ahci_port *p, int slot, uint8_t *cfis)
1123 len = be16dec(acmd + 7);
1124 format = acmd[9] >> 6;
/* Format 0: classic table of contents. */
1130 uint8_t start_track, buf[20], *bp;
1132 msf = (acmd[1] >> 1) & 1;
1133 start_track = acmd[6];
/* Only track 1 or the lead-out (0xaa) are valid start points. */
1134 if (start_track > 1 && start_track != 0xaa) {
1136 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1138 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1139 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1140 ahci_write_fis_d2h(p, slot, cfis, tfd);
1146 if (start_track <= 1) {
/* Lead-out entry: total media size in MSF or LBA form. */
1166 sectors = blockif_size(p->bctx) / blockif_sectsz(p->bctx);
1170 lba_to_msf(bp, sectors);
1173 be32enc(bp, sectors);
1177 be16enc(buf, size - 2);
1180 write_prdt(p, slot, cfis, buf, len);
1181 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1182 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
/* Format 1: session info — returned zeroed (single session). */
1189 memset(buf, 0, sizeof(buf));
1193 if (len > sizeof(buf))
1195 write_prdt(p, slot, cfis, buf, len);
1196 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1197 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
/* Format 2: raw TOC with full Q-subchannel entries. */
1204 uint8_t start_track, *bp, buf[50];
1206 msf = (acmd[1] >> 1) & 1;
1207 start_track = acmd[6];
1243 sectors = blockif_size(p->bctx) / blockif_sectsz(p->bctx);
1247 lba_to_msf(bp, sectors);
1250 be32enc(bp, sectors);
1273 be16enc(buf, size - 2);
1276 write_prdt(p, slot, cfis, buf, len);
1277 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1278 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
/* Unknown format: ILLEGAL REQUEST. */
1285 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1287 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1288 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1289 ahci_write_fis_d2h(p, slot, cfis, tfd);
/* REPORT LUNS: a single LUN 0, returned as a zeroed list. */
1296 atapi_report_luns(struct ahci_port *p, int slot, uint8_t *cfis)
1300 memset(buf, 0, sizeof(buf));
1303 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1304 write_prdt(p, slot, cfis, buf, sizeof(buf));
1305 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
/*
 * SCSI READ(10)/READ(12): translate to a blockif read.  'done' carries the
 * bytes transferred by a previous pass (resumed from atapi_ioreq_cb).
 * A zero-length read completes immediately with GOOD status.
 */
1309 atapi_read(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t done)
1311 struct ahci_ioreq *aior;
1312 struct ahci_cmd_hdr *hdr;
1313 struct ahci_prdt_entry *prdt;
1314 struct blockif_req *breq;
1315 struct pci_ahci_softc *sc;
1323 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
1324 prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
/* LBA is big-endian at CDB bytes 2-5; length differs for READ(10)/(12). */
1326 lba = be32dec(acmd + 2);
1327 if (acmd[0] == READ_10)
1328 len = be16dec(acmd + 7);
1330 len = be32dec(acmd + 6);
1332 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1333 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1339 * Pull request off free list
1341 aior = STAILQ_FIRST(&p->iofhd);
1342 assert(aior != NULL);
1343 STAILQ_REMOVE_HEAD(&p->iofhd, io_flist);
1348 breq = &aior->io_req;
1349 breq->br_offset = lba + done;
1350 ahci_build_iov(p, aior, prdt, hdr->prdtl);
1352 /* Mark this command in-flight. */
1353 p->pending |= 1 << slot;
1355 /* Stuff request onto busy list. */
1356 TAILQ_INSERT_HEAD(&p->iobhd, aior, io_blist);
1358 err = blockif_read(p->bctx, breq);
/*
 * REQUEST SENSE: return fixed-format sense data (0x70, valid bit set)
 * carrying the port's latched sense key.
 */
1363 atapi_request_sense(struct ahci_port *p, int slot, uint8_t *cfis)
1371 if (len > sizeof(buf))
1373 memset(buf, 0, len)
1374 buf[0] = 0x70 | (1 << 7);
1375 buf[2] = p->sense_key;
1378 write_prdt(p, slot, cfis, buf, len);
1379 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1380 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
/*
 * START STOP UNIT: start/stop are accepted as no-ops; eject (LoEj) is
 * unimplemented and reported as ILLEGAL REQUEST.
 */
1384 atapi_start_stop_unit(struct ahci_port *p, int slot, uint8_t *cfis)
1386 uint8_t *acmd = cfis + 0x40;
1389 switch (acmd[4] & 3) {
1393 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1394 tfd = ATA_S_READY | ATA_S_DSC;
1397 /* TODO eject media */
1398 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1399 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1401 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1404 ahci_write_fis_d2h(p, slot, cfis, tfd);
/*
 * MODE SENSE(10): supports the R/W error-recovery and CD capabilities
 * pages; every other page code raises ILLEGAL REQUEST sense.
 */
1408 atapi_mode_sense(struct ahci_port *p, int slot, uint8_t *cfis)
1416 len = be16dec(acmd + 7);
1418 code = acmd[2] & 0x3f;
1423 case MODEPAGE_RW_ERROR_RECOVERY:
1427 if (len > sizeof(buf))
1430 memset(buf, 0, sizeof(buf));
/* Mode-data length field excludes its own two bytes. */
1431 be16enc(buf, 16 - 2);
1436 write_prdt(p, slot, cfis, buf, len);
1437 tfd = ATA_S_READY | ATA_S_DSC;
1440 case MODEPAGE_CD_CAPABILITIES:
1444 if (len > sizeof(buf))
1447 memset(buf, 0, sizeof(buf));
1448 be16enc(buf, 30 - 2);
/* Obsolete speed fields: 2 discrete speeds, 512 KB/s max. */
1454 be16enc(&buf[18], 2);
1455 be16enc(&buf[20], 512);
1456 write_prdt(p, slot, cfis, buf, len);
1457 tfd = ATA_S_READY | ATA_S_DSC;
1466 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1468 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1473 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1475 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1478 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1479 ahci_write_fis_d2h(p, slot, cfis, tfd);
/*
 * GET EVENT STATUS NOTIFICATION: only polled mode (Immed bit set) is
 * supported; asynchronous operation raises ILLEGAL REQUEST sense.
 */
1483 atapi_get_event_status_notification(struct ahci_port *p, int slot,
1491 /* we don't support asynchronous operation */
1492 if (!(acmd[1] & 1)) {
1493 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1495 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1500 len = be16dec(acmd + 7);
1501 if (len > sizeof(buf))
1504 memset(buf, 0, sizeof(buf));
1505 be16enc(buf, 8 - 2);
1509 write_prdt(p, slot, cfis, buf, len);
1510 tfd = ATA_S_READY | ATA_S_DSC;
1512 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1513 ahci_write_fis_d2h(p, slot, cfis, tfd);
/*
 * Dispatch an ATAPI PACKET command: the 16-byte SCSI CDB lives at
 * cfis+0x40.  Unsupported opcodes return ILLEGAL REQUEST sense.
 */
1517 handle_packet_cmd(struct ahci_port *p, int slot, uint8_t *cfis)
1527 for (i = 0; i < 16; i++)
1528 DPRINTF("%02x ", acmd[i]);
1534 case TEST_UNIT_READY:
1535 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1536 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1539 atapi_inquiry(p, slot, cfis);
1542 atapi_read_capacity(p, slot, cfis);
/* PREVENT ALLOW / similar: accepted as a no-op. */
1546 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1547 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1550 atapi_read_toc(p, slot, cfis);
1553 atapi_report_luns(p, slot, cfis);
1557 atapi_read(p, slot, cfis, 0);
1560 atapi_request_sense(p, slot, cfis);
1562 case START_STOP_UNIT:
1563 atapi_start_stop_unit(p, slot, cfis);
1566 atapi_mode_sense(p, slot, cfis);
1568 case GET_EVENT_STATUS_NOTIFICATION:
1569 atapi_get_event_status_notification(p, slot, cfis);
1572 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1573 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1575 ahci_write_fis_d2h(p, slot, cfis, (p->sense_key << 12) |
1576 ATA_S_READY | ATA_S_ERROR);
/*
 * Top-level ATA command dispatcher for one command slot.  BSY is set on
 * entry and cleared when the completion FIS is written (synchronously for
 * register-only commands, from the blockif callback for data commands).
 */
1582 ahci_handle_cmd(struct ahci_port *p, int slot, uint8_t *cfis)
1585 p->tfd |= ATA_S_BUSY;
1587 case ATA_ATA_IDENTIFY:
1588 handle_identify(p, slot, cfis);
1590 case ATA_SETFEATURES:
/* SET FEATURES: only a small set of subcommands are accepted. */
1593 case ATA_SF_ENAB_SATA_SF:
1595 case ATA_SATA_SF_AN:
1596 p->tfd = ATA_S_DSC | ATA_S_READY;
1599 p->tfd = ATA_S_ERROR | ATA_S_READY;
1600 p->tfd |= (ATA_ERROR_ABORT << 8);
1604 case ATA_SF_ENAB_WCACHE:
1605 case ATA_SF_DIS_WCACHE:
1606 case ATA_SF_ENAB_RCACHE:
1607 case ATA_SF_DIS_RCACHE:
1608 p->tfd = ATA_S_DSC | ATA_S_READY;
1610 case ATA_SF_SETXFER:
1612 switch (cfis[12] & 0xf8) {
1618 p->xfermode = (cfis[12] & 0x7);
1621 p->tfd = ATA_S_DSC | ATA_S_READY;
1625 p->tfd = ATA_S_ERROR | ATA_S_READY;
1626 p->tfd |= (ATA_ERROR_ABORT << 8);
1629 ahci_write_fis_d2h(p, slot, cfis, p->tfd);
/* SET MULTIPLE: count must be 0 or a power of two <= 128. */
1633 if (cfis[12] != 0 &&
1634 (cfis[12] > 128 || (cfis[12] & (cfis[12] - 1)))) {
1635 p->tfd = ATA_S_ERROR | ATA_S_READY;
1636 p->tfd |= (ATA_ERROR_ABORT << 8);
1638 p->mult_sectors = cfis[12];
1639 p->tfd = ATA_S_DSC | ATA_S_READY;
1641 ahci_write_fis_d2h(p, slot, cfis, p->tfd);
1649 case ATA_READ_MUL48:
1650 case ATA_WRITE_MUL48:
1653 case ATA_READ_DMA48:
1654 case ATA_WRITE_DMA48:
1655 case ATA_READ_FPDMA_QUEUED:
1656 case ATA_WRITE_FPDMA_QUEUED:
1657 ahci_handle_rw(p, slot, cfis, 0);
1659 case ATA_FLUSHCACHE:
1660 case ATA_FLUSHCACHE48:
1661 ahci_handle_flush(p, slot, cfis);
1663 case ATA_DATA_SET_MANAGEMENT:
1664 if (cfis[11] == 0 && cfis[3] == ATA_DSM_TRIM &&
1665 cfis[13] == 0 && cfis[12] == 1) {
1666 ahci_handle_dsm_trim(p, slot, cfis, 0);
1669 ahci_write_fis_d2h(p, slot, cfis,
1670 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
1672 case ATA_SEND_FPDMA_QUEUED:
1673 if ((cfis[13] & 0x1f) == ATA_SFPDMA_DSM &&
1674 cfis[17] == 0 && cfis[16] == ATA_DSM_TRIM &&
/*
 * NOTE(review): BUG — the first clause already requires
 * (cfis[13] & 0x1f) == ATA_SFPDMA_DSM (0x00), so "cfis[13] == 1"
 * below can never be true and the queued-TRIM path is unreachable.
 * The NCQ sector count lives in cfis[11]:cfis[3] (see
 * ahci_handle_dsm_trim), so this was almost certainly intended to
 * be "cfis[3] == 1".  Confirm against upstream before changing.
 */
1675 cfis[11] == 0 && cfis[13] == 1) {
1676 ahci_handle_dsm_trim(p, slot, cfis, 0);
1679 ahci_write_fis_d2h(p, slot, cfis,
1680 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
1682 case ATA_READ_LOG_EXT:
1683 case ATA_READ_LOG_DMA_EXT:
1684 ahci_handle_read_log(p, slot, cfis);
1686 case ATA_SECURITY_FREEZE_LOCK:
1689 ahci_write_fis_d2h(p, slot, cfis,
1690 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
1692 case ATA_CHECK_POWER_MODE:
1693 cfis[12] = 0xff; /* always on */
1694 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1696 case ATA_STANDBY_CMD:
1697 case ATA_STANDBY_IMMEDIATE:
1699 case ATA_IDLE_IMMEDIATE:
1701 case ATA_READ_VERIFY:
1702 case ATA_READ_VERIFY48:
1703 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1705 case ATA_ATAPI_IDENTIFY:
1706 handle_atapi_identify(p, slot, cfis);
1708 case ATA_PACKET_CMD:
/* PACKET on a non-ATAPI port aborts; otherwise hand off the CDB. */
1710 ahci_write_fis_d2h(p, slot, cfis,
1711 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
1713 handle_packet_cmd(p, slot, cfis);
1716 WPRINTF("Unsupported cmd:%02x\n", cfis[2]);
1717 ahci_write_fis_d2h(p, slot, cfis,
1718 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
/*
 * Process one issued command slot: map the command table (CFIS + PRDT)
 * from guest memory, validate the FIS type, and dispatch.  A FIS with the
 * C bit set is a command; otherwise it may be a control FIS (SRST).
 */
1724 ahci_handle_slot(struct ahci_port *p, int slot)
1726 struct ahci_cmd_hdr *hdr;
1727 struct ahci_prdt_entry *prdt;
1728 struct pci_ahci_softc *sc;
1733 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
/* Command FIS length is in dwords (low 5 bits of the header flags). */
1734 cfl = (hdr->flags & 0x1f) * 4;
1735 cfis = paddr_guest2host(ahci_ctx(sc), hdr->ctba,
1736 0x80 + hdr->prdtl * sizeof(struct ahci_prdt_entry));
1737 prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
1741 for (i = 0; i < cfl; i++) {
1744 DPRINTF("%02x ", cfis[i]);
1748 for (i = 0; i < hdr->prdtl; i++) {
1749 DPRINTF("%d@%08"PRIx64"\n", prdt->dbc & 0x3fffff, prdt->dba);
1754 if (cfis[0] != FIS_TYPE_REGH2D) {
1755 WPRINTF("Not a H2D FIS:%02x\n", cfis[0]);
/* Bit 7 of byte 1 = C bit: this H2D FIS carries a command. */
1759 if (cfis[1] & 0x80) {
1760 ahci_handle_cmd(p, slot, cfis);
/* Control FIS: bit 2 of the control byte is SRST. */
1762 if (cfis[15] & (1 << 2))
1764 else if (p->reset) {
1768 p->ci &= ~(1 << slot);
/*
 * Scan the command-issue register round-robin from the current command
 * slot (ccs) and start every newly issued, not-yet-pending slot.
 */
1773 ahci_handle_port(struct ahci_port *p)
1776 if (!(p->cmd & AHCI_P_CMD_ST))
1780 * Search for any new commands to issue ignoring those that
1781 * are already in-flight. Stop if device is busy or in error.
1783 for (; (p->ci & ~p->pending) != 0; p->ccs = ((p->ccs + 1) & 31)) {
1784 if ((p->tfd & (ATA_S_BUSY | ATA_S_DRQ)) != 0)
1786 if (p->waitforclear)
1788 if ((p->ci & ~p->pending & (1 << p->ccs)) != 0) {
/* Expose the slot being processed via PxCMD.CCS, per AHCI. */
1789 p->cmd &= ~AHCI_P_CMD_CCS_MASK;
1790 p->cmd |= p->ccs << AHCI_P_CMD_CCS_SHIFT;
1791 ahci_handle_slot(p, p->ccs);
1797 * blockif callback routine - this runs in the context of the blockif
1798 * i/o thread, so the mutex needs to be acquired.
1801 ata_ioreq_cb(struct blockif_req *br, int err)
1803 struct ahci_cmd_hdr *hdr;
1804 struct ahci_ioreq *aior;
1805 struct ahci_port *p;
1806 struct pci_ahci_softc *sc;
1811 DPRINTF("%s %d\n", __func__, err);
1814 aior = br->br_param;
1819 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
/* NCQ commands complete via an SDB FIS; DSM continues chunk-by-chunk. */
1821 if (cfis[2] == ATA_WRITE_FPDMA_QUEUED ||
1822 cfis[2] == ATA_READ_FPDMA_QUEUED ||
1823 cfis[2] == ATA_SEND_FPDMA_QUEUED)
1825 if (cfis[2] == ATA_DATA_SET_MANAGEMENT ||
1826 (cfis[2] == ATA_SEND_FPDMA_QUEUED &&
1827 (cfis[13] & 0x1f) == ATA_SFPDMA_DSM))
1830 pthread_mutex_lock(&sc->mtx);
1833 * Delete the blockif request from the busy list
1835 TAILQ_REMOVE(&p->iobhd, aior, io_blist);
1838 * Move the blockif request back to the free list
1840 STAILQ_INSERT_TAIL(&p->iofhd, aior, io_flist);
1843 hdr->prdbc = aior->done;
/* More work remaining and no error: issue the next chunk. */
1845 if (!err && aior->more) {
1847 ahci_handle_dsm_trim(p, slot, cfis, aior->done);
1849 ahci_handle_rw(p, slot, cfis, aior->done);
1854 tfd = ATA_S_READY | ATA_S_DSC;
1856 tfd = (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR;
1858 ahci_write_fis_sdb(p, slot, cfis, tfd);
1860 ahci_write_fis_d2h(p, slot, cfis, tfd);
1863 * This command is now complete.
1865 p->pending &= ~(1 << slot);
1867 ahci_check_stopped(p);
1868 ahci_handle_port(p);
1870 pthread_mutex_unlock(&sc->mtx);
1871 DPRINTF("%s exit\n", __func__);
/*
 * ATAPI variant of the blockif completion callback: resumes multi-pass
 * reads, otherwise completes the PACKET command with GOOD status or
 * ILLEGAL REQUEST sense on error.  Runs on the blockif i/o thread.
 */
1875 atapi_ioreq_cb(struct blockif_req *br, int err)
1877 struct ahci_cmd_hdr *hdr;
1878 struct ahci_ioreq *aior;
1879 struct ahci_port *p;
1880 struct pci_ahci_softc *sc;
1885 DPRINTF("%s %d\n", __func__, err);
1887 aior = br->br_param;
1892 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + aior->slot * AHCI_CL_SIZE);
1894 pthread_mutex_lock(&sc->mtx);
1897 * Delete the blockif request from the busy list
1899 TAILQ_REMOVE(&p->iobhd, aior, io_blist);
1902 * Move the blockif request back to the free list
1904 STAILQ_INSERT_TAIL(&p->iofhd, aior, io_flist);
1907 hdr->prdbc = aior->done;
1909 if (!err && aior->more) {
1910 atapi_read(p, slot, cfis, aior->done);
1915 tfd = ATA_S_READY | ATA_S_DSC;
1917 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1919 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1921 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1922 ahci_write_fis_d2h(p, slot, cfis, tfd);
1925 * This command is now complete.
1927 p->pending &= ~(1 << slot);
1929 ahci_check_stopped(p);
1930 ahci_handle_port(p);
1932 pthread_mutex_unlock(&sc->mtx);
1933 DPRINTF("%s exit\n", __func__);
/*
 * Allocate the port's pool of i/o request structures, sized to the
 * blockif queue depth, and place them all on the free list with their
 * completion callback (ATA vs ATAPI) pre-wired.  NOTE(review): the
 * calloc() result is not checked on any visible line — an allocation
 * failure would fault in the loop below; confirm against the elided
 * lines.
 */
1937 pci_ahci_ioreq_init(struct ahci_port *pr)
1939 struct ahci_ioreq *vr;
1942 pr->ioqsz = blockif_queuesz(pr->bctx);
1943 pr->ioreq = calloc(pr->ioqsz, sizeof(struct ahci_ioreq));
1944 STAILQ_INIT(&pr->iofhd);
1947 * Add all i/o request entries to the free queue
1949 for (i = 0; i < pr->ioqsz; i++) {
/* Callback choice depends on the port's device type; the selecting
 * condition (pr->atapi, presumably) is elided here. */
1953 vr->io_req.br_callback = ata_ioreq_cb;
1955 vr->io_req.br_callback = atapi_ioreq_cb;
/* br_param lets the callback recover this request structure. */
1956 vr->io_req.br_param = vr;
1957 STAILQ_INSERT_TAIL(&pr->iofhd, vr, io_flist);
/* Busy list starts empty; requests move free -> busy -> free. */
1960 TAILQ_INIT(&pr->iobhd);
/*
 * Handle a guest write to a per-port AHCI register.  The port index and
 * register offset are decoded from the BAR offset.  Called with the
 * softc mutex held (taken in pci_ahci_write).  NOTE(review): the switch
 * statement and its case labels are elided in this view — the register
 * each fragment belongs to is inferred from the fields touched and
 * should be confirmed against the full source.
 */
1964 pci_ahci_port_write(struct pci_ahci_softc *sc, uint64_t offset, uint64_t value)
1966 int port = (offset - AHCI_OFFSET) / AHCI_STEP;
1967 offset = (offset - AHCI_OFFSET) % AHCI_STEP;
1968 struct ahci_port *p = &sc->port[port];
1970 DPRINTF("pci_ahci_port %d: write offset 0x%"PRIx64" value 0x%"PRIx64"\n",
1971 port, offset, value);
/* Presumably the PxIE case: store the enable mask (reserved bits
 * cleared) and re-evaluate interrupt state. */
1990 p->ie = value & 0xFDC000FF;
1991 ahci_generate_intr(sc);
/* Presumably the PxCMD case: replace the software-writable bits with
 * the guest's value, preserving the read-only/status bits. */
1995 p->cmd &= ~(AHCI_P_CMD_ST | AHCI_P_CMD_SUD | AHCI_P_CMD_POD |
1996 AHCI_P_CMD_CLO | AHCI_P_CMD_FRE | AHCI_P_CMD_APSTE |
1997 AHCI_P_CMD_ATAPI | AHCI_P_CMD_DLAE | AHCI_P_CMD_ALPE |
1998 AHCI_P_CMD_ASP | AHCI_P_CMD_ICC_MASK);
1999 p->cmd |= (AHCI_P_CMD_ST | AHCI_P_CMD_SUD | AHCI_P_CMD_POD |
2000 AHCI_P_CMD_CLO | AHCI_P_CMD_FRE | AHCI_P_CMD_APSTE |
2001 AHCI_P_CMD_ATAPI | AHCI_P_CMD_DLAE | AHCI_P_CMD_ALPE |
2002 AHCI_P_CMD_ASP | AHCI_P_CMD_ICC_MASK) & value;
/* ST cleared -> stop command processing (body elided); ST set ->
 * reflect CR and map the guest command list into host memory. */
2004 if (!(value & AHCI_P_CMD_ST)) {
2009 p->cmd |= AHCI_P_CMD_CR;
2010 clb = (uint64_t)p->clbu << 32 | p->clb;
2011 p->cmd_lst = paddr_guest2host(ahci_ctx(sc), clb,
2012 AHCI_CL_SIZE * AHCI_MAX_SLOTS);
/* FRE set -> reflect FR and map the received-FIS area; cleared -> stop
 * FIS receive. */
2015 if (value & AHCI_P_CMD_FRE) {
2018 p->cmd |= AHCI_P_CMD_FR;
2019 fb = (uint64_t)p->fbu << 32 | p->fb;
2020 /* we don't support FBSCP, so rfis size is 256 bytes */
2021 p->rfis = paddr_guest2host(ahci_ctx(sc), fb, 256);
2023 p->cmd &= ~AHCI_P_CMD_FR;
/* Command List Override: clear BSY/DRQ, then self-clear the CLO bit. */
2026 if (value & AHCI_P_CMD_CLO) {
2027 p->tfd &= ~(ATA_S_BUSY | ATA_S_DRQ);
2028 p->cmd &= ~AHCI_P_CMD_CLO;
/* Interface Communication Control transitions complete immediately. */
2031 if (value & AHCI_P_CMD_ICC_MASK) {
2032 p->cmd &= ~AHCI_P_CMD_ICC_MASK;
/* Re-scan the port for newly issued command slots. */
2035 ahci_handle_port(p);
/* Writes to read-only port registers are logged and ignored. */
2041 WPRINTF("pci_ahci_port: read only registers 0x%"PRIx64"\n", offset);
/* Presumably the PxSCTL case: DET-based reset only allowed while the
 * port is stopped (case label elided). */
2045 if (!(p->cmd & AHCI_P_CMD_ST)) {
2046 if (value & ATA_SC_DET_RESET)
2058 ahci_handle_port(p);
/*
 * Handle a guest write to a global (HBA-level) AHCI register.  Only GHC
 * is writable on the visible lines; the switch/case structure and the
 * reset path are elided in this view.
 */
2068 pci_ahci_host_write(struct pci_ahci_softc *sc, uint64_t offset, uint64_t value)
2070 DPRINTF("pci_ahci_host: write offset 0x%"PRIx64" value 0x%"PRIx64"\n",
/* Writes to read-only global registers are logged and ignored. */
2078 DPRINTF("pci_ahci_host: read only registers 0x%"PRIx64"\n", offset);
/* GHC: HR requests a full HBA reset (handler elided); IE enables global
 * interrupt delivery and re-evaluates pending interrupt state. */
2081 if (value & AHCI_GHC_HR)
2083 else if (value & AHCI_GHC_IE) {
2084 sc->ghc |= AHCI_GHC_IE;
2085 ahci_generate_intr(sc);
/* Presumably the IS (interrupt status) write-1-to-clear path — the
 * preceding case label/store is elided, TODO confirm. */
2090 ahci_generate_intr(sc);
/*
 * BAR 5 (ABAR) write entry point registered with the PCI emulation.
 * Dispatches to the global-register or per-port handler by offset,
 * serialized under the softc mutex.  Only aligned 32-bit accesses are
 * accepted.
 */
2098 pci_ahci_write(struct vmctx *ctx, int vcpu, struct pci_devinst *pi,
2099 int baridx, uint64_t offset, int size, uint64_t value)
2101 struct pci_ahci_softc *sc = pi->pi_arg;
2103 assert(baridx == 5);
2104 assert((offset % 4) == 0 && size == 4);
2106 pthread_mutex_lock(&sc->mtx);
/* Offsets below AHCI_OFFSET are global registers; the next
 * sc->ports * AHCI_STEP bytes are the per-port register banks. */
2108 if (offset < AHCI_OFFSET)
2109 pci_ahci_host_write(sc, offset, value);
2110 else if (offset < AHCI_OFFSET + sc->ports * AHCI_STEP)
2111 pci_ahci_port_write(sc, offset, value);
2113 WPRINTF("pci_ahci: unknown i/o write offset 0x%"PRIx64"\n", offset);
2115 pthread_mutex_unlock(&sc->mtx);
/*
 * Read a global AHCI register.  The global registers are kept as
 * consecutive uint32_t fields in the softc starting at sc->cap, so a
 * valid offset indexes directly into that array; the case labels that
 * validate the offset are elided in this view.
 */
2119 pci_ahci_host_read(struct pci_ahci_softc *sc, uint64_t offset)
2135 uint32_t *p = &sc->cap;
2136 p += (offset - AHCI_CAP) / sizeof(uint32_t);
2144 DPRINTF("pci_ahci_host: read offset 0x%"PRIx64" value 0x%x\n",
/*
 * Read a per-port AHCI register.  The port index and register offset
 * are decoded from the BAR offset; like the global registers, a port's
 * registers are consecutive uint32_t fields starting at clb, so the
 * offset indexes into that array.  Validation case labels are elided in
 * this view.
 */
2151 pci_ahci_port_read(struct pci_ahci_softc *sc, uint64_t offset)
2154 int port = (offset - AHCI_OFFSET) / AHCI_STEP;
2155 offset = (offset - AHCI_OFFSET) % AHCI_STEP;
2175 uint32_t *p= &sc->port[port].clb;
2176 p += (offset - AHCI_P_CLB) / sizeof(uint32_t);
2185 DPRINTF("pci_ahci_port %d: read offset 0x%"PRIx64" value 0x%x\n",
2186 port, offset, value);
/*
 * BAR 5 (ABAR) read entry point.  Sub-dword (1/2-byte) reads are
 * supported by reading the containing aligned dword and shifting the
 * requested bytes down into the low bits of the result.
 */
2192 pci_ahci_read(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int baridx,
2193 uint64_t regoff, int size)
2195 struct pci_ahci_softc *sc = pi->pi_arg;
2199 assert(baridx == 5);
2200 assert(size == 1 || size == 2 || size == 4);
2201 assert((regoff & (size - 1)) == 0);
2203 pthread_mutex_lock(&sc->mtx);
2205 offset = regoff & ~0x3; /* round down to a multiple of 4 bytes */
2206 if (offset < AHCI_OFFSET)
2207 value = pci_ahci_host_read(sc, offset);
2208 else if (offset < AHCI_OFFSET + sc->ports * AHCI_STEP)
2209 value = pci_ahci_port_read(sc, offset);
2212 WPRINTF("pci_ahci: unknown i/o read offset 0x%"PRIx64"\n",
/* Shift the addressed byte(s) of the dword into the low-order bits. */
2215 value >>= 8 * (regoff & 0x3);
2217 pthread_mutex_unlock(&sc->mtx);
/*
 * Common device-initialization path for both ahci-hd and ahci-cd.
 * Opens the backing image given in 'opts', attaches it to port 0
 * (atapi selects disk vs. CD semantics), builds the HBA capability
 * registers, and programs the PCI config space / BAR.  NOTE(review):
 * error-handling lines, MD5Init, and the final return are elided in
 * this view.
 */
2223 pci_ahci_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts, int atapi)
2225 char bident[sizeof("XX:X:X")];
2226 struct blockif_ctxt *bctxt;
2227 struct pci_ahci_softc *sc;
2235 fprintf(stderr, "pci_ahci: backing device required\n");
/* NOTE(review): debug log opened at a fixed world-readable path; the
 * guarding #ifdef (AHCI_DEBUG, presumably) is elided — confirm. */
2240 dbg = fopen("/tmp/log", "w+");
2243 sc = calloc(1, sizeof(struct pci_ahci_softc));
2246 sc->ports = MAX_PORTS;
2249 * Only use port 0 for a backing device. All other ports will be
2252 sc->port[0].atapi = atapi;
2255 * Attempt to open the backing image. Use the PCI
2256 * slot/func for the identifier string.
2258 snprintf(bident, sizeof(bident), "%d:%d", pi->pi_slot, pi->pi_func);
2259 bctxt = blockif_open(opts, bident);
2260 if (bctxt == NULL) {
2264 sc->port[0].bctx = bctxt;
2265 sc->port[0].pr_sc = sc;
2268 * Create an identifier for the backing file. Use parts of the
2269 * md5 sum of the filename
/* NOTE(review): sprintf into the fixed-size ident buffer; the format
 * yields a fixed-length string so it cannot overflow as written, but
 * snprintf would make that explicit — consider. */
2272 MD5Update(&mdctx, opts, strlen(opts));
2273 MD5Final(digest, &mdctx);
2274 sprintf(sc->port[0].ident, "BHYVE-%02X%02X-%02X%02X-%02X%02X",
2275 digest[0], digest[1], digest[2], digest[3], digest[4], digest[5]);
2278 * Allocate blockif request structures and add them
2281 pci_ahci_ioreq_init(&sc->port[0]);
2283 pthread_mutex_init(&sc->mtx, NULL);
2285 /* Intel ICH8 AHCI */
/* Advertise one fewer command slot than the blockif queue depth can
 * absorb?  The clamping lines (if any) are elided — confirm. */
2286 slots = sc->port[0].ioqsz;
2290 sc->cap = AHCI_CAP_64BIT | AHCI_CAP_SNCQ | AHCI_CAP_SSNTF |
2291 AHCI_CAP_SMPS | AHCI_CAP_SSS | AHCI_CAP_SALP |
2292 AHCI_CAP_SAL | AHCI_CAP_SCLO | (0x3 << AHCI_CAP_ISS_SHIFT)|
2293 AHCI_CAP_PMD | AHCI_CAP_SSC | AHCI_CAP_PSC |
2294 (slots << AHCI_CAP_NCS_SHIFT) | AHCI_CAP_SXS | (sc->ports - 1);
2296 /* Only port 0 implemented */
2299 sc->cap2 = AHCI_CAP2_APST;
/* Identify as an Intel ICH8 AHCI controller (8086:2821), SATA class,
 * AHCI 1.0 programming interface, with MSI and BAR 5 (ABAR). */
2302 pci_set_cfgdata16(pi, PCIR_DEVICE, 0x2821);
2303 pci_set_cfgdata16(pi, PCIR_VENDOR, 0x8086);
2304 pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_STORAGE);
2305 pci_set_cfgdata8(pi, PCIR_SUBCLASS, PCIS_STORAGE_SATA);
2306 pci_set_cfgdata8(pi, PCIR_PROGIF, PCIP_STORAGE_SATA_AHCI_1_0);
2307 pci_emul_add_msicap(pi, 1);
2308 pci_emul_alloc_bar(pi, 5, PCIBAR_MEM32,
2309 AHCI_OFFSET + sc->ports * AHCI_STEP);
2311 pci_lintr_request(pi);
/* Error-exit cleanup: close the backing image if it was opened. */
2315 if (sc->port[0].bctx != NULL)
2316 blockif_close(sc->port[0].bctx);
/* ahci-hd entry point: initialize with ATAPI disabled (hard disk). */
2324 pci_ahci_hd_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
2327 return (pci_ahci_init(ctx, pi, opts, 0));
/* ahci-cd entry point: initialize with ATAPI enabled (CD-ROM). */
2331 pci_ahci_atapi_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
2334 return (pci_ahci_init(ctx, pi, opts, 1));
2338 * Use separate emulation names to distinguish drive and atapi devices
/* Emulated-device registration for the AHCI hard-disk flavor. */
2340 struct pci_devemu pci_de_ahci_hd = {
2341 .pe_emu = "ahci-hd",
2342 .pe_init = pci_ahci_hd_init,
2343 .pe_barwrite = pci_ahci_write,
2344 .pe_barread = pci_ahci_read
2346 PCI_EMUL_SET(pci_de_ahci_hd);
/* Emulated-device registration for the AHCI CD-ROM (ATAPI) flavor. */
2348 struct pci_devemu pci_de_ahci_cd = {
2349 .pe_emu = "ahci-cd",
2350 .pe_init = pci_ahci_atapi_init,
2351 .pe_barwrite = pci_ahci_write,
2352 .pe_barread = pci_ahci_read
2354 PCI_EMUL_SET(pci_de_ahci_cd);