2 * Copyright (c) 2013 Zhixiang Yu <zcore@freebsd.org>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
32 #include <sys/param.h>
33 #include <sys/linker_set.h>
36 #include <sys/ioctl.h>
39 #include <sys/endian.h>
51 #include <pthread_np.h>
/*
 * Controller-wide constants and bookkeeping structures for the emulated
 * AHCI HBA.  NOTE(review): this is a chunked view; several definitions
 * below are shown only partially.
 */
60 #define MAX_PORTS 6 /* Intel ICH8 AHCI supports 6 ports */
/* Values reported in PxSIG to tell the guest what kind of device is attached. */
62 #define PxSIG_ATA 0x00000101 /* ATA drive */
63 #define PxSIG_ATAPI 0xeb140101 /* ATAPI drive */
/* SATA FIS type codes (values from the Serial ATA specification). */
66 FIS_TYPE_REGH2D = 0x27, /* Register FIS - host to device */
67 FIS_TYPE_REGD2H = 0x34, /* Register FIS - device to host */
68 FIS_TYPE_DMAACT = 0x39, /* DMA activate FIS - device to host */
69 FIS_TYPE_DMASETUP = 0x41, /* DMA setup FIS - bidirectional */
70 FIS_TYPE_DATA = 0x46, /* Data FIS - bidirectional */
71 FIS_TYPE_BIST = 0x58, /* BIST activate FIS - bidirectional */
72 FIS_TYPE_PIOSETUP = 0x5F, /* PIO setup FIS - device to host */
73 FIS_TYPE_SETDEVBITS = 0xA1, /* Set dev bits FIS - device to host */
/* ATAPI (SCSI MMC) command opcodes handled by handle_packet_cmd(). */
79 #define TEST_UNIT_READY 0x00
80 #define REQUEST_SENSE 0x03
82 #define START_STOP_UNIT 0x1B
83 #define PREVENT_ALLOW 0x1E
84 #define READ_CAPACITY 0x25
86 #define POSITION_TO_ELEMENT 0x2B
88 #define GET_EVENT_STATUS_NOTIFICATION 0x4A
89 #define MODE_SENSE_10 0x5A
90 #define REPORT_LUNS 0xA0
95 * SCSI mode page codes
97 #define MODEPAGE_RW_ERROR_RECOVERY 0x01
98 #define MODEPAGE_CD_CAPABILITIES 0x2A
/* ATA SET FEATURES subcommand values used in ahci_handle_cmd(). */
103 #define ATA_SF_ENAB_SATA_SF 0x10
104 #define ATA_SATA_SF_AN 0x05
105 #define ATA_SF_DIS_SATA_SF 0x90
/* Debug/warning macros; the two DPRINTF variants are presumably selected
 * by a debug #ifdef that is not visible in this chunk. */
112 #define DPRINTF(format, arg...) do{fprintf(dbg, format, ##arg);fflush(dbg);}while(0)
114 #define DPRINTF(format, arg...)
116 #define WPRINTF(format, arg...) printf(format, ##arg)
/* Per-request state: links the blockif request back to its port, and
 * queue linkage for the free (io_flist) and busy (io_blist) lists. */
119 struct blockif_req io_req;
120 struct ahci_port *io_pr;
121 STAILQ_ENTRY(ahci_ioreq) io_flist;
122 TAILQ_ENTRY(ahci_ioreq) io_blist;
/* Per-port state (partial view): backing block device, owning softc,
 * latched error FIS, i/o request pool and free/busy queues. */
131 struct blockif_ctxt *bctx;
132 struct pci_ahci_softc *pr_sc;
140 uint8_t err_cfis[20];
166 struct ahci_ioreq *ioreq;
168 STAILQ_HEAD(ahci_fhead, ahci_ioreq) iofhd;
169 TAILQ_HEAD(ahci_bhead, ahci_ioreq) iobhd;
/* Guest-visible AHCI command list header and PRD table entry. */
172 struct ahci_cmd_hdr {
177 uint32_t reserved[4];
180 struct ahci_prdt_entry {
183 #define DBCMASK 0x3fffff
187 struct pci_ahci_softc {
188 struct pci_devinst *asc_pi;
203 struct ahci_port port[MAX_PORTS];
/* Shorthand for the VM context owning this device instance. */
205 #define ahci_ctx(sc) ((sc)->asc_pi->pi_vmctx)
/*
 * Convert a CD logical block address to MSF (minute/second/frame) form:
 * 75 frames per second, 60 seconds per minute.  Only the minute and
 * second bytes are visible here; the frame byte is set in elided lines.
 */
207 static inline void lba_to_msf(uint8_t *buf, int lba)
210 buf[0] = (lba / 75) / 60;
211 buf[1] = (lba / 75) % 60;
216 * generate HBA intr depending on whether or not ports within
217 * the controller have an interrupt pending.
220 ahci_generate_intr(struct pci_ahci_softc *sc)
222 struct pci_devinst *pi;
/* Scan every port to recompute the controller-level interrupt state. */
227 for (i = 0; i < sc->ports; i++) {
228 struct ahci_port *pr;
234 DPRINTF("%s %x\n", __func__, sc->is);
/* Deliver via MSI when the guest enabled it, else via the INTx pin. */
236 if (sc->is && (sc->ghc & AHCI_GHC_IE)) {
237 if (pci_msi_enabled(pi)) {
239 * Generate an MSI interrupt on every edge
241 pci_generate_msi(pi, 0);
242 } else if (!sc->lintr) {
244 * Only generate a pin-based interrupt if one wasn't
248 pci_lintr_assert(pi);
250 } else if (sc->lintr) {
252 * No interrupts: deassert pin-based signal if it had
255 pci_lintr_deassert(pi);
/*
 * Copy a device-to-host FIS of type 'ft' into the port's received-FIS
 * area and raise a port interrupt.  The FIS is dropped when no rfis
 * buffer is mapped or FIS receive (AHCI_P_CMD_FRE) is disabled.
 */
261 ahci_write_fis(struct ahci_port *p, enum sata_fis_type ft, uint8_t *fis)
263 int offset, len, irq;
265 if (p->rfis == NULL || !(p->cmd & AHCI_P_CMD_FRE))
/* Each FIS type selects its own offset/length/irq (set in the elided
 * case bodies) within the received-FIS structure. */
269 case FIS_TYPE_REGD2H:
274 case FIS_TYPE_SETDEVBITS:
279 case FIS_TYPE_PIOSETUP:
285 WPRINTF("unsupported fis type %d\n", ft);
288 memcpy(p->rfis + offset, fis, len);
291 ahci_generate_intr(p->pr_sc);
/*
 * Post a minimal PIO Setup FIS (only the type byte is populated here)
 * to the port's received-FIS area.
 */
296 ahci_write_fis_piosetup(struct ahci_port *p)
300 memset(fis, 0, sizeof(fis));
301 fis[0] = FIS_TYPE_PIOSETUP;
302 ahci_write_fis(p, FIS_TYPE_PIOSETUP, fis);
/*
 * Post a Set Device Bits FIS for the completed NCQ command in 'slot',
 * clearing its bit in SActive.  On error, latch the failing command's
 * task-file state into err_cfis and flag a task-file-error interrupt.
 */
306 ahci_write_fis_sdb(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t tfd)
311 error = (tfd >> 8) & 0xff;
312 memset(fis, 0, sizeof(fis));
313 fis[0] = FIS_TYPE_SETDEVBITS;
317 if (fis[2] & ATA_S_ERROR) {
318 p->is |= AHCI_P_IX_TFE;
/* err_cfis[0] records the failing NCQ slot; status is masked with 0x77. */
319 p->err_cfis[0] = slot;
320 p->err_cfis[2] = tfd & 0x77;
321 p->err_cfis[3] = error;
322 memcpy(&p->err_cfis[4], cfis + 4, 16);
/* Bytes 4-7 of an SDB FIS carry the SActive completion bits. */
324 *(uint32_t *)(fis + 4) = (1 << slot);
325 p->sact &= ~(1 << slot);
328 ahci_write_fis(p, FIS_TYPE_SETDEVBITS, fis);
/*
 * Post a Register D2H FIS reflecting the task-file result 'tfd' for the
 * command in 'slot', clearing its bit in the command-issue register.
 * On error, latch the task-file into err_cfis (0x80 marks a non-NCQ
 * error) and raise a task-file-error interrupt.
 */
332 ahci_write_fis_d2h(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t tfd)
337 error = (tfd >> 8) & 0xff;
338 memset(fis, 0, sizeof(fis));
339 fis[0] = FIS_TYPE_REGD2H;
353 if (fis[2] & ATA_S_ERROR) {
354 p->is |= AHCI_P_IX_TFE;
355 p->err_cfis[0] = 0x80;
356 p->err_cfis[2] = tfd & 0xff;
357 p->err_cfis[3] = error;
358 memcpy(&p->err_cfis[4], cfis + 4, 16);
360 p->ci &= ~(1 << slot);
362 ahci_write_fis(p, FIS_TYPE_REGD2H, fis);
/*
 * Post the initial Register D2H FIS that a device sends after reset
 * (signature FIS).  Field contents beyond the type byte are elided.
 */
366 ahci_write_reset_fis_d2h(struct ahci_port *p)
370 memset(fis, 0, sizeof(fis));
371 fis[0] = FIS_TYPE_REGD2H;
379 ahci_write_fis(p, FIS_TYPE_REGD2H, fis);
383 ahci_check_stopped(struct ahci_port *p)
386 * If we are no longer processing the command list and nothing
387 * is in-flight, clear the running bit, the current command
388 * slot, the command issue and active bits.
390 if (!(p->cmd & AHCI_P_CMD_ST)) {
391 if (p->pending == 0) {
392 p->cmd &= ~(AHCI_P_CMD_CR | AHCI_P_CMD_CCS_MASK);
/*
 * Stop a port: cancel every in-flight blockif request on the busy list,
 * clear the corresponding SActive/CI/pending bits, and recycle the
 * request structures onto the free list.  Caller must hold the softc
 * mutex (asserted below).
 */
400 ahci_port_stop(struct ahci_port *p)
402 struct ahci_ioreq *aior;
408 assert(pthread_mutex_isowned_np(&p->pr_sc->mtx));
410 TAILQ_FOREACH(aior, &p->iobhd, io_blist) {
412 * Try to cancel the outstanding blockif request.
414 error = blockif_cancel(p->bctx, &aior->io_req);
/* NCQ commands are tracked in sact, others in ci.  NOTE(review):
 * 'cfis' and 'slot' come from elided lines (presumably aior fields). */
420 if (cfis[2] == ATA_WRITE_FPDMA_QUEUED ||
421 cfis[2] == ATA_READ_FPDMA_QUEUED)
425 p->sact &= ~(1 << slot);
427 p->ci &= ~(1 << slot);
430 * This command is now done.
432 p->pending &= ~(1 << slot);
435 * Delete the blockif request from the busy list
437 TAILQ_REMOVE(&p->iobhd, aior, io_blist);
440 * Move the blockif request back to the free list
442 STAILQ_INSERT_TAIL(&p->iofhd, aior, io_flist);
445 ahci_check_stopped(p);
/*
 * Reset a port to power-on defaults.  Without a backing device the port
 * reports "no device"; otherwise it reports PHY online, a negotiated
 * speed (from SControl if constrained, else Gen3), and posts the reset
 * signature D2H FIS.
 */
449 ahci_port_reset(struct ahci_port *pr)
453 pr->xfermode = ATA_UDMA6;
454 pr->mult_sectors = 128;
457 pr->ssts = ATA_SS_DET_NO_DEVICE;
458 pr->sig = 0xFFFFFFFF;
462 pr->ssts = ATA_SS_DET_PHY_ONLINE | ATA_SS_IPM_ACTIVE;
463 if (pr->sctl & ATA_SC_SPD_MASK)
464 pr->ssts |= (pr->sctl & ATA_SC_SPD_MASK);
466 pr->ssts |= ATA_SS_SPD_GEN3;
/* Busy+DRQ-clear task file with DSC/DMA set; READY added for ATA disks. */
467 pr->tfd = (1 << 8) | ATA_S_DSC | ATA_S_DMA;
470 pr->tfd |= ATA_S_READY;
472 pr->sig = PxSIG_ATAPI;
473 ahci_write_reset_fis_d2h(pr);
/*
 * Full HBA reset: restore global registers (AHCI-enable stays set),
 * drop any asserted INTx, and reset every port with SControl cleared.
 */
477 ahci_reset(struct pci_ahci_softc *sc)
481 sc->ghc = AHCI_GHC_AE;
485 pci_lintr_deassert(sc->asc_pi);
489 for (i = 0; i < sc->ports; i++) {
492 sc->port[i].sctl = 0;
493 ahci_port_reset(&sc->port[i]);
/*
 * Copy a string into an ATA IDENTIFY string field.  ATA strings are
 * stored as big-endian 16-bit words, hence the XOR-1 byte swap.
 */
498 ata_string(uint8_t *dest, const char *src, int len)
502 for (i = 0; i < len; i++) {
504 dest[i ^ 1] = *src++;
/*
 * Copy a string into an ATAPI/SCSI inquiry field (plain byte order;
 * padding behavior is in elided lines).
 */
511 atapi_string(uint8_t *dest, const char *src, int len)
515 for (i = 0; i < len; i++) {
524 * Build up the iovec based on the PRDT, 'done' and 'len'.
527 ahci_build_iov(struct ahci_port *p, struct ahci_ioreq *aior,
528 struct ahci_prdt_entry *prdt, uint16_t prdtl)
530 struct blockif_req *breq = &aior->io_req;
531 int i, j, skip, todo, left, extra;
534 /* Copy part of PRDT between 'done' and 'len' bytes into the iov. */
536 left = aior->len - aior->done;
538 for (i = 0, j = 0; i < prdtl && j < BLOCKIF_IOV_MAX && left > 0;
/* PRDT byte count field is zero-based; +1 gives the real length. */
540 dbcsz = (prdt->dbc & DBCMASK) + 1;
541 /* Skip already done part of the PRDT */
549 breq->br_iov[j].iov_base = paddr_guest2host(ahci_ctx(p->pr_sc),
550 prdt->dba + skip, dbcsz);
551 breq->br_iov[j].iov_len = dbcsz;
558 /* If we got limited by IOV length, round I/O down to sector size. */
559 if (j == BLOCKIF_IOV_MAX) {
560 extra = todo % blockif_sectsz(p->bctx);
/* Trim the trailing iov entries so 'todo' is sector aligned. */
564 if (breq->br_iov[j - 1].iov_len > extra) {
565 breq->br_iov[j - 1].iov_len -= extra;
568 extra -= breq->br_iov[j - 1].iov_len;
574 breq->br_resid = todo;
/* Remember whether a follow-up request is needed to finish the command. */
576 aior->more = (aior->done < aior->len && i < prdtl);
/*
 * Start (or continue, when 'done' > 0) an ATA read/write command.
 * Decodes LBA and length from the command FIS for NCQ, LBA48 and LBA28
 * variants, builds an iovec from the PRDT, and queues the request on
 * the backing blockif.  Completion is delivered via ata_ioreq_cb().
 */
580 ahci_handle_rw(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t done)
582 struct ahci_ioreq *aior;
583 struct blockif_req *breq;
584 struct ahci_prdt_entry *prdt;
585 struct ahci_cmd_hdr *hdr;
588 int err, ncq, readop;
/* The PRDT immediately follows the 0x80-byte command FIS area. */
590 prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
591 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
/* Any WRITE opcode variant flips the default read direction. */
595 if (cfis[2] == ATA_WRITE || cfis[2] == ATA_WRITE48 ||
596 cfis[2] == ATA_WRITE_MUL || cfis[2] == ATA_WRITE_MUL48 ||
597 cfis[2] == ATA_WRITE_DMA || cfis[2] == ATA_WRITE_DMA48 ||
598 cfis[2] == ATA_WRITE_FPDMA_QUEUED)
/* NCQ: 48-bit LBA in bytes 4-10, sector count in bytes 11/3. */
601 if (cfis[2] == ATA_WRITE_FPDMA_QUEUED ||
602 cfis[2] == ATA_READ_FPDMA_QUEUED) {
603 lba = ((uint64_t)cfis[10] << 40) |
604 ((uint64_t)cfis[9] << 32) |
605 ((uint64_t)cfis[8] << 24) |
606 ((uint64_t)cfis[6] << 16) |
607 ((uint64_t)cfis[5] << 8) |
609 len = cfis[11] << 8 | cfis[3];
/* LBA48: same LBA layout, sector count in bytes 13/12. */
613 } else if (cfis[2] == ATA_READ48 || cfis[2] == ATA_WRITE48 ||
614 cfis[2] == ATA_READ_MUL48 || cfis[2] == ATA_WRITE_MUL48 ||
615 cfis[2] == ATA_READ_DMA48 || cfis[2] == ATA_WRITE_DMA48) {
616 lba = ((uint64_t)cfis[10] << 40) |
617 ((uint64_t)cfis[9] << 32) |
618 ((uint64_t)cfis[8] << 24) |
619 ((uint64_t)cfis[6] << 16) |
620 ((uint64_t)cfis[5] << 8) |
622 len = cfis[13] << 8 | cfis[12];
/* LBA28: 28-bit LBA from bytes 7(low nibble)/6/5/4. */
626 lba = ((cfis[7] & 0xf) << 24) | (cfis[6] << 16) |
627 (cfis[5] << 8) | cfis[4];
/* Convert sector-based LBA/length to byte offsets for blockif. */
632 lba *= blockif_sectsz(p->bctx);
633 len *= blockif_sectsz(p->bctx);
635 /* Pull request off free list */
636 aior = STAILQ_FIRST(&p->iofhd);
637 assert(aior != NULL);
638 STAILQ_REMOVE_HEAD(&p->iofhd, io_flist);
644 breq = &aior->io_req;
645 breq->br_offset = lba + done;
646 ahci_build_iov(p, aior, prdt, hdr->prdtl);
648 /* Mark this command in-flight. */
649 p->pending |= 1 << slot;
651 /* Stuff request onto busy list. */
652 TAILQ_INSERT_HEAD(&p->iobhd, aior, io_blist);
655 err = blockif_read(p->bctx, breq);
657 err = blockif_write(p->bctx, breq);
/* NOTE(review): ci-clearing here is in an elided context (likely NCQ). */
661 p->ci &= ~(1 << slot);
/*
 * Handle ATA FLUSH CACHE / FLUSH CACHE EXT: queue an asynchronous flush
 * on the backing blockif; completion arrives via ata_ioreq_cb().
 */
665 ahci_handle_flush(struct ahci_port *p, int slot, uint8_t *cfis)
667 struct ahci_ioreq *aior;
668 struct blockif_req *breq;
672 * Pull request off free list
674 aior = STAILQ_FIRST(&p->iofhd);
675 assert(aior != NULL);
676 STAILQ_REMOVE_HEAD(&p->iofhd, io_flist);
682 breq = &aior->io_req;
685 * Mark this command in-flight.
687 p->pending |= 1 << slot;
690 * Stuff request onto busy list
692 TAILQ_INSERT_HEAD(&p->iobhd, aior, io_blist);
694 err = blockif_flush(p->bctx, breq);
/*
 * Copy up to 'len' bytes of guest data described by the command's PRDT
 * into a host buffer (guest -> host direction).
 */
699 read_prdt(struct ahci_port *p, int slot, uint8_t *cfis,
702 struct ahci_cmd_hdr *hdr;
703 struct ahci_prdt_entry *prdt;
707 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
710 prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
711 for (i = 0; i < hdr->prdtl && len; i++) {
/* dbc is zero-based; copy min(len, entry size) from each entry. */
716 dbcsz = (prdt->dbc & DBCMASK) + 1;
717 ptr = paddr_guest2host(ahci_ctx(p->pr_sc), prdt->dba, dbcsz);
718 sublen = len < dbcsz ? len : dbcsz;
719 memcpy(to, ptr, sublen);
/*
 * Handle a DATA SET MANAGEMENT (TRIM) command, either the plain form or
 * the SEND FPDMA QUEUED wrapper.  Reads TRIM range entries from the
 * PRDT and issues one blockif_delete() per entry; atapi/ata_ioreq_cb
 * re-enters here (with 'done' advanced) to process the next entry.
 */
727 ahci_handle_dsm_trim(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t done)
729 struct ahci_ioreq *aior;
730 struct blockif_req *breq;
/* Sector count lives in different FIS bytes for the two variants. */
737 if (cfis[2] == ATA_DATA_SET_MANAGEMENT) {
738 len = (uint16_t)cfis[13] << 8 | cfis[12];
740 } else { /* ATA_SEND_FPDMA_QUEUED */
741 len = (uint16_t)cfis[11] << 8 | cfis[3];
744 read_prdt(p, slot, cfis, buf, sizeof(buf));
/* Each 8-byte entry: 48-bit starting LBA + 16-bit sector count. */
748 elba = ((uint64_t)entry[5] << 40) |
749 ((uint64_t)entry[4] << 32) |
750 ((uint64_t)entry[3] << 24) |
751 ((uint64_t)entry[2] << 16) |
752 ((uint64_t)entry[1] << 8) |
754 elen = (uint16_t)entry[7] << 8 | entry[6];
/* A zero-length entry terminates processing with success. */
758 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
759 p->pending &= ~(1 << slot);
760 ahci_check_stopped(p);
767 * Pull request off free list
769 aior = STAILQ_FIRST(&p->iofhd);
770 assert(aior != NULL);
771 STAILQ_REMOVE_HEAD(&p->iofhd, io_flist);
776 aior->more = (len != done);
778 breq = &aior->io_req;
779 breq->br_offset = elba * blockif_sectsz(p->bctx);
780 breq->br_resid = elen * blockif_sectsz(p->bctx);
783 * Mark this command in-flight.
785 p->pending |= 1 << slot;
788 * Stuff request onto busy list
790 TAILQ_INSERT_HEAD(&p->iobhd, aior, io_blist);
792 err = blockif_delete(p->bctx, breq);
/*
 * Copy up to 'len' bytes from a host buffer into the guest memory
 * described by the command's PRDT (host -> guest direction), and record
 * the number of bytes actually transferred in the command header.
 */
797 write_prdt(struct ahci_port *p, int slot, uint8_t *cfis,
800 struct ahci_cmd_hdr *hdr;
801 struct ahci_prdt_entry *prdt;
805 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
808 prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
809 for (i = 0; i < hdr->prdtl && len; i++) {
814 dbcsz = (prdt->dbc & DBCMASK) + 1;
815 ptr = paddr_guest2host(ahci_ctx(p->pr_sc), prdt->dba, dbcsz);
816 sublen = len < dbcsz ? len : dbcsz;
817 memcpy(ptr, from, sublen);
/* prdbc = bytes consumed from the source buffer. */
822 hdr->prdbc = size - len;
/*
 * Store a two's-complement checksum in the final byte so the whole
 * buffer sums to zero modulo 256 (ATA log page / IDENTIFY convention).
 */
826 ahci_checksum(uint8_t *buf, int size)
831 for (i = 0; i < size - 1; i++)
833 buf[size - 1] = 0x100 - sum;
/*
 * Handle READ LOG EXT / READ LOG DMA EXT for log page 0x10 (NCQ error
 * log): return the latched err_cfis contents.  Any other page, ATAPI
 * device, or empty PRDT is aborted.
 */
837 ahci_handle_read_log(struct ahci_port *p, int slot, uint8_t *cfis)
839 struct ahci_cmd_hdr *hdr;
842 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
843 if (p->atapi || hdr->prdtl == 0 || cfis[4] != 0x10 ||
844 cfis[5] != 0 || cfis[9] != 0 || cfis[12] != 1 || cfis[13] != 0) {
845 ahci_write_fis_d2h(p, slot, cfis,
846 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
850 memset(buf, 0, sizeof(buf));
851 memcpy(buf, p->err_cfis, sizeof(p->err_cfis));
852 ahci_checksum(buf, sizeof(buf));
/* PIO variant gets a PIO Setup FIS first; DMA variant does not. */
854 if (cfis[2] == ATA_READ_LOG_EXT)
855 ahci_write_fis_piosetup(p);
856 write_prdt(p, slot, cfis, (void *)buf, sizeof(buf));
857 ahci_write_fis_d2h(p, slot, cfis, ATA_S_DSC | ATA_S_READY);
/*
 * Handle ATA IDENTIFY DEVICE for the disk personality: build the
 * 512-byte (256-word) identify data from the backing blockif geometry
 * and capabilities, then return it via PIO.  Aborted for ATAPI devices
 * or when the command carries no PRDT.
 */
861 handle_identify(struct ahci_port *p, int slot, uint8_t *cfis)
863 struct ahci_cmd_hdr *hdr;
865 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
866 if (p->atapi || hdr->prdtl == 0) {
867 ahci_write_fis_d2h(p, slot, cfis,
868 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
872 int sectsz, psectsz, psectoff, candelete, ro;
/* Query the backing store for size, geometry and feature support. */
876 ro = blockif_is_ro(p->bctx);
877 candelete = blockif_candelete(p->bctx);
878 sectsz = blockif_sectsz(p->bctx);
879 sectors = blockif_size(p->bctx) / sectsz;
880 blockif_chs(p->bctx, &cyl, &heads, &sech);
881 blockif_psectsz(p->bctx, &psectsz, &psectoff);
882 memset(buf, 0, sizeof(buf));
/* Identity strings: serial (per-port ident), firmware rev, model. */
887 ata_string((uint8_t *)(buf+10), p->ident, 20);
888 ata_string((uint8_t *)(buf+23), "001", 8);
889 ata_string((uint8_t *)(buf+27), "BHYVE SATA DISK", 40);
890 buf[47] = (0x8000 | 128);
892 buf[49] = (1 << 8 | 1 << 9 | 1 << 11);
894 buf[53] = (1 << 1 | 1 << 2);
896 buf[59] = (0x100 | p->mult_sectors);
/* Words 60-61: 28-bit LBA capacity (capped at 0x0fffffff sectors). */
897 if (sectors <= 0x0fffffff) {
899 buf[61] = (sectors >> 16);
905 if (p->xfermode & ATA_WDMA0)
906 buf[63] |= (1 << ((p->xfermode & 7) + 8));
/* Words 76-77: SATA capabilities and negotiated speed. */
914 buf[76] = (ATA_SATA_GEN1 | ATA_SATA_GEN2 | ATA_SATA_GEN3 |
916 buf[77] = (ATA_SUPPORT_RCVSND_FPDMA_QUEUED |
917 (p->ssts & ATA_SS_SPD_MASK) >> 3);
/* Words 82-87: command set supported/enabled bits. */
920 buf[82] = (ATA_SUPPORT_POWERMGT | ATA_SUPPORT_WRITECACHE|
921 ATA_SUPPORT_LOOKAHEAD | ATA_SUPPORT_NOP);
922 buf[83] = (ATA_SUPPORT_ADDRESS48 | ATA_SUPPORT_FLUSHCACHE |
923 ATA_SUPPORT_FLUSHCACHE48 | 1 << 14);
925 buf[85] = (ATA_SUPPORT_POWERMGT | ATA_SUPPORT_WRITECACHE|
926 ATA_SUPPORT_LOOKAHEAD | ATA_SUPPORT_NOP);
927 buf[86] = (ATA_SUPPORT_ADDRESS48 | ATA_SUPPORT_FLUSHCACHE |
928 ATA_SUPPORT_FLUSHCACHE48 | 1 << 15);
931 if (p->xfermode & ATA_UDMA0)
932 buf[88] |= (1 << ((p->xfermode & 7) + 8));
933 buf[93] = (1 | 1 <<14);
/* Words 100-103: 48-bit LBA capacity. */
935 buf[101] = (sectors >> 16);
936 buf[102] = (sectors >> 32);
937 buf[103] = (sectors >> 48);
/* Advertise TRIM only for writable media that support deletion. */
938 if (candelete && !ro) {
939 buf[69] |= ATA_SUPPORT_RZAT | ATA_SUPPORT_DRAT;
941 buf[169] = ATA_SUPPORT_DSM_TRIM;
/* Word 106/209: physical-to-logical sector size ratio and alignment. */
945 if (psectsz > sectsz) {
947 buf[106] |= ffsl(psectsz / sectsz) - 1;
948 buf[209] |= (psectoff / sectsz);
/* Words 117-118: logical sector size in words (for non-512 sectors). */
952 buf[117] = sectsz / 2;
953 buf[118] = ((sectsz / 2) >> 16);
955 buf[119] = (ATA_SUPPORT_RWLOGDMAEXT | 1 << 14);
956 buf[120] = (ATA_SUPPORT_RWLOGDMAEXT | 1 << 14);
959 ahci_checksum((uint8_t *)buf, sizeof(buf));
960 ahci_write_fis_piosetup(p);
961 write_prdt(p, slot, cfis, (void *)buf, sizeof(buf));
962 ahci_write_fis_d2h(p, slot, cfis, ATA_S_DSC | ATA_S_READY);
/*
 * Handle IDENTIFY PACKET DEVICE for the ATAPI (DVD-ROM) personality:
 * build the identify-packet data and return it via PIO.  The abort path
 * below is presumably guarded by a non-ATAPI check in elided lines.
 */
967 handle_atapi_identify(struct ahci_port *p, int slot, uint8_t *cfis)
970 ahci_write_fis_d2h(p, slot, cfis,
971 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
975 memset(buf, 0, sizeof(buf));
/* Word 0: ATAPI device, CD/DVD type, removable, 12-byte packets. */
976 buf[0] = (2 << 14 | 5 << 8 | 1 << 7 | 2 << 5);
977 ata_string((uint8_t *)(buf+10), p->ident, 20);
978 ata_string((uint8_t *)(buf+23), "001", 8);
979 ata_string((uint8_t *)(buf+27), "BHYVE SATA DVD ROM", 40);
980 buf[49] = (1 << 9 | 1 << 8);
981 buf[50] = (1 << 14 | 1);
982 buf[53] = (1 << 2 | 1 << 1);
985 if (p->xfermode & ATA_WDMA0)
986 buf[63] |= (1 << ((p->xfermode & 7) + 8));
/* SATA capability words, as in handle_identify() but without NCQ. */
992 buf[76] = (ATA_SATA_GEN1 | ATA_SATA_GEN2 | ATA_SATA_GEN3);
993 buf[77] = ((p->ssts & ATA_SS_SPD_MASK) >> 3);
996 buf[82] = (ATA_SUPPORT_POWERMGT | ATA_SUPPORT_PACKET |
997 ATA_SUPPORT_RESET | ATA_SUPPORT_NOP);
1000 buf[85] = (ATA_SUPPORT_POWERMGT | ATA_SUPPORT_PACKET |
1001 ATA_SUPPORT_RESET | ATA_SUPPORT_NOP);
1002 buf[87] = (1 << 14);
1004 if (p->xfermode & ATA_UDMA0)
1005 buf[88] |= (1 << ((p->xfermode & 7) + 8));
1008 ahci_checksum((uint8_t *)buf, sizeof(buf));
1009 ahci_write_fis_piosetup(p);
1010 write_prdt(p, slot, cfis, (void *)buf, sizeof(buf));
1011 ahci_write_fis_d2h(p, slot, cfis, ATA_S_DSC | ATA_S_READY);
/*
 * SCSI INQUIRY: return standard inquiry data identifying a bhyve
 * DVD-ROM.  VPD requests are rejected (beyond the supported-pages page,
 * whose handling is elided) with ILLEGAL REQUEST sense.
 */
1016 atapi_inquiry(struct ahci_port *p, int slot, uint8_t *cfis)
1025 if (acmd[1] & 1) { /* VPD */
1026 if (acmd[2] == 0) { /* Supported VPD pages */
1034 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1036 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1037 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1038 ahci_write_fis_d2h(p, slot, cfis, tfd);
/* Standard inquiry: vendor / product / revision strings. */
1050 atapi_string(buf + 8, "BHYVE", 8);
1051 atapi_string(buf + 16, "BHYVE DVD-ROM", 16);
1052 atapi_string(buf + 32, "001", 4);
1058 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1059 write_prdt(p, slot, cfis, buf, len);
1060 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
/*
 * SCSI READ CAPACITY(10): report the last LBA and a 2048-byte CD/DVD
 * block size, big-endian as the SCSI spec requires.
 */
1064 atapi_read_capacity(struct ahci_port *p, int slot, uint8_t *cfis)
1069 sectors = blockif_size(p->bctx) / 2048;
1070 be32enc(buf, sectors - 1);
1071 be32enc(buf + 4, 2048);
1072 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1073 write_prdt(p, slot, cfis, buf, sizeof(buf));
1074 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
/*
 * SCSI READ TOC: synthesize a single-track table of contents for the
 * media.  Dispatches on the format field (acmd[9] >> 6); lead-out
 * addresses come from the media size, in MSF or LBA form as requested.
 * Unsupported formats get ILLEGAL REQUEST sense.
 */
1078 atapi_read_toc(struct ahci_port *p, int slot, uint8_t *cfis)
1086 len = be16dec(acmd + 7);
1087 format = acmd[9] >> 6;
/* Format 0: formatted TOC. */
1093 uint8_t start_track, buf[20], *bp;
1095 msf = (acmd[1] >> 1) & 1;
1096 start_track = acmd[6];
/* Only track 1 or the lead-out (0xaa) exist on this one-track medium. */
1097 if (start_track > 1 && start_track != 0xaa) {
1099 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1101 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1102 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1103 ahci_write_fis_d2h(p, slot, cfis, tfd);
1109 if (start_track <= 1) {
/* Lead-out entry: address is the total sector count. */
1129 sectors = blockif_size(p->bctx) / blockif_sectsz(p->bctx);
1133 lba_to_msf(bp, sectors);
1136 be32enc(bp, sectors);
1140 be16enc(buf, size - 2);
1143 write_prdt(p, slot, cfis, buf, len);
1144 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1145 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
/* Format 1: session info (mostly zeroed, truncated to request length). */
1152 memset(buf, 0, sizeof(buf));
1156 if (len > sizeof(buf))
1158 write_prdt(p, slot, cfis, buf, len);
1159 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1160 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
/* Format 2: full/raw TOC. */
1167 uint8_t start_track, *bp, buf[50];
1169 msf = (acmd[1] >> 1) & 1;
1170 start_track = acmd[6];
1206 sectors = blockif_size(p->bctx) / blockif_sectsz(p->bctx);
1210 lba_to_msf(bp, sectors);
1213 be32enc(bp, sectors);
1236 be16enc(buf, size - 2);
1239 write_prdt(p, slot, cfis, buf, len);
1240 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1241 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
/* Unknown format: ILLEGAL REQUEST. */
1248 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1250 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1251 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1252 ahci_write_fis_d2h(p, slot, cfis, tfd);
/*
 * SCSI REPORT LUNS: return a mostly-zero LUN list (single LUN 0; the
 * list-length field is set in elided lines).
 */
1259 atapi_report_luns(struct ahci_port *p, int slot, uint8_t *cfis)
1263 memset(buf, 0, sizeof(buf));
1266 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1267 write_prdt(p, slot, cfis, buf, sizeof(buf));
1268 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
/*
 * Start (or continue, when 'done' > 0) a SCSI READ(10)/READ(12):
 * decode LBA/length from the packet command, build an iovec from the
 * PRDT and queue an asynchronous blockif read.  A zero-length read
 * completes immediately with success.
 */
1272 atapi_read(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t done)
1274 struct ahci_ioreq *aior;
1275 struct ahci_cmd_hdr *hdr;
1276 struct ahci_prdt_entry *prdt;
1277 struct blockif_req *breq;
1278 struct pci_ahci_softc *sc;
1286 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
1287 prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
/* READ(10) carries a 16-bit length; READ(12) a 32-bit length. */
1289 lba = be32dec(acmd + 2);
1290 if (acmd[0] == READ_10)
1291 len = be16dec(acmd + 7);
1293 len = be32dec(acmd + 6);
1295 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1296 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1302 * Pull request off free list
1304 aior = STAILQ_FIRST(&p->iofhd);
1305 assert(aior != NULL);
1306 STAILQ_REMOVE_HEAD(&p->iofhd, io_flist);
1311 breq = &aior->io_req;
1312 breq->br_offset = lba + done;
1313 ahci_build_iov(p, aior, prdt, hdr->prdtl);
1315 /* Mark this command in-flight. */
1316 p->pending |= 1 << slot;
1318 /* Stuff request onto busy list. */
1319 TAILQ_INSERT_HEAD(&p->iobhd, aior, io_blist);
1321 err = blockif_read(p->bctx, breq);
/*
 * SCSI REQUEST SENSE: return fixed-format sense data (0x70, valid bit
 * set) carrying the port's latched sense key.
 */
1326 atapi_request_sense(struct ahci_port *p, int slot, uint8_t *cfis)
1334 if (len > sizeof(buf))
1336 memset(buf, 0, len)
1337 buf[0] = 0x70 | (1 << 7);
1338 buf[2] = p->sense_key;
1341 write_prdt(p, slot, cfis, buf, len);
1342 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1343 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
/*
 * SCSI START STOP UNIT: start/stop requests succeed trivially; the
 * eject request (LOEJ) is unimplemented and answered with ILLEGAL
 * REQUEST sense.
 */
1347 atapi_start_stop_unit(struct ahci_port *p, int slot, uint8_t *cfis)
1349 uint8_t *acmd = cfis + 0x40;
1352 switch (acmd[4] & 3) {
1356 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1357 tfd = ATA_S_READY | ATA_S_DSC;
1360 /* TODO eject media */
1361 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1362 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1364 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1367 ahci_write_fis_d2h(p, slot, cfis, tfd);
/*
 * SCSI MODE SENSE(10): serve the R/W error-recovery page (16 bytes)
 * and the CD capabilities page (30 bytes); all other page codes get
 * ILLEGAL REQUEST sense.
 */
1371 atapi_mode_sense(struct ahci_port *p, int slot, uint8_t *cfis)
1379 len = be16dec(acmd + 7);
1381 code = acmd[2] & 0x3f;
1386 case MODEPAGE_RW_ERROR_RECOVERY:
1390 if (len > sizeof(buf))
1393 memset(buf, 0, sizeof(buf));
/* Mode data length field excludes its own two bytes. */
1394 be16enc(buf, 16 - 2);
1399 write_prdt(p, slot, cfis, buf, len);
1400 tfd = ATA_S_READY | ATA_S_DSC;
1403 case MODEPAGE_CD_CAPABILITIES:
1407 if (len > sizeof(buf))
1410 memset(buf, 0, sizeof(buf));
1411 be16enc(buf, 30 - 2);
/* Report 2 volume levels and a 512 KB buffer size. */
1417 be16enc(&buf[18], 2);
1418 be16enc(&buf[20], 512);
1419 write_prdt(p, slot, cfis, buf, len);
1420 tfd = ATA_S_READY | ATA_S_DSC;
1429 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1431 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1436 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1438 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1441 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1442 ahci_write_fis_d2h(p, slot, cfis, tfd);
/*
 * SCSI GET EVENT STATUS NOTIFICATION: only the polled (immediate) mode
 * is supported; asynchronous operation is rejected with ILLEGAL REQUEST
 * sense.  The polled reply is a mostly-empty 8-byte event header.
 */
1446 atapi_get_event_status_notification(struct ahci_port *p, int slot,
1454 /* we don't support asynchronous operation */
1455 if (!(acmd[1] & 1)) {
1456 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1458 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1463 len = be16dec(acmd + 7);
1464 if (len > sizeof(buf))
1467 memset(buf, 0, sizeof(buf));
1468 be16enc(buf, 8 - 2);
1472 write_prdt(p, slot, cfis, buf, len);
1473 tfd = ATA_S_READY | ATA_S_DSC;
1475 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1476 ahci_write_fis_d2h(p, slot, cfis, tfd);
/*
 * Dispatch an ATAPI PACKET command (12-byte SCSI CDB at cfis+0x40) to
 * the per-opcode handlers above.  Unknown opcodes are completed with
 * ILLEGAL REQUEST sense.
 */
1480 handle_packet_cmd(struct ahci_port *p, int slot, uint8_t *cfis)
1490 for (i = 0; i < 16; i++)
1491 DPRINTF("%02x ", acmd[i]);
1497 case TEST_UNIT_READY:
1498 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1499 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1502 atapi_inquiry(p, slot, cfis);
1505 atapi_read_capacity(p, slot, cfis);
/* PREVENT_ALLOW (and similar no-ops) simply succeed. */
1509 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1510 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1513 atapi_read_toc(p, slot, cfis);
1516 atapi_report_luns(p, slot, cfis);
1520 atapi_read(p, slot, cfis, 0);
1523 atapi_request_sense(p, slot, cfis);
1525 case START_STOP_UNIT:
1526 atapi_start_stop_unit(p, slot, cfis);
1529 atapi_mode_sense(p, slot, cfis);
1531 case GET_EVENT_STATUS_NOTIFICATION:
1532 atapi_get_event_status_notification(p, slot, cfis);
1535 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1536 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1538 ahci_write_fis_d2h(p, slot, cfis, (p->sense_key << 12) |
1539 ATA_S_READY | ATA_S_ERROR);
/*
 * Top-level ATA command dispatcher: switch on the command byte
 * (cfis[2]) and route to the identify / set-features / read-write /
 * flush / TRIM / log / packet handlers.  Unsupported commands are
 * aborted with a command-abort D2H FIS.
 */
1545 ahci_handle_cmd(struct ahci_port *p, int slot, uint8_t *cfis)
1549 case ATA_ATA_IDENTIFY:
1550 handle_identify(p, slot, cfis);
1552 case ATA_SETFEATURES:
/* SATA feature enable: only async notification (AN) is accepted. */
1555 case ATA_SF_ENAB_SATA_SF:
1557 case ATA_SATA_SF_AN:
1558 p->tfd = ATA_S_DSC | ATA_S_READY;
1561 p->tfd = ATA_S_ERROR | ATA_S_READY;
1562 p->tfd |= (ATA_ERROR_ABORT << 8);
/* Cache control features are accepted as no-ops. */
1566 case ATA_SF_ENAB_WCACHE:
1567 case ATA_SF_DIS_WCACHE:
1568 case ATA_SF_ENAB_RCACHE:
1569 case ATA_SF_DIS_RCACHE:
1570 p->tfd = ATA_S_DSC | ATA_S_READY;
1572 case ATA_SF_SETXFER:
/* Record the requested transfer mode (mode number in low 3 bits). */
1574 switch (cfis[12] & 0xf8) {
1580 p->xfermode = (cfis[12] & 0x7);
1583 p->tfd = ATA_S_DSC | ATA_S_READY;
1587 p->tfd = ATA_S_ERROR | ATA_S_READY;
1588 p->tfd |= (ATA_ERROR_ABORT << 8);
1591 ahci_write_fis_d2h(p, slot, cfis, p->tfd);
/* SET MULTIPLE: count must be 0 or a power of two <= 128. */
1595 if (cfis[12] != 0 &&
1596 (cfis[12] > 128 || (cfis[12] & (cfis[12] - 1)))) {
1597 p->tfd = ATA_S_ERROR | ATA_S_READY;
1598 p->tfd |= (ATA_ERROR_ABORT << 8);
1600 p->mult_sectors = cfis[12];
1601 p->tfd = ATA_S_DSC | ATA_S_READY;
1603 ahci_write_fis_d2h(p, slot, cfis, p->tfd);
1611 case ATA_READ_MUL48:
1612 case ATA_WRITE_MUL48:
1615 case ATA_READ_DMA48:
1616 case ATA_WRITE_DMA48:
1617 case ATA_READ_FPDMA_QUEUED:
1618 case ATA_WRITE_FPDMA_QUEUED:
1619 ahci_handle_rw(p, slot, cfis, 0);
1621 case ATA_FLUSHCACHE:
1622 case ATA_FLUSHCACHE48:
1623 ahci_handle_flush(p, slot, cfis);
1625 case ATA_DATA_SET_MANAGEMENT:
1626 if (cfis[11] == 0 && cfis[3] == ATA_DSM_TRIM &&
1627 cfis[13] == 0 && cfis[12] == 1) {
1628 ahci_handle_dsm_trim(p, slot, cfis, 0);
1631 ahci_write_fis_d2h(p, slot, cfis,
1632 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
1634 case ATA_SEND_FPDMA_QUEUED:
/* NOTE(review): cfis[13] is tested twice here — masked against the
 * subcommand above and compared to 1 below.  Both can only hold if
 * ATA_SFPDMA_DSM == 1; the sector-count check likely intends cfis[3].
 * Verify against the SEND FPDMA QUEUED FIS layout before changing. */
1635 if ((cfis[13] & 0x1f) == ATA_SFPDMA_DSM &&
1636 cfis[17] == 0 && cfis[16] == ATA_DSM_TRIM &&
1637 cfis[11] == 0 && cfis[13] == 1) {
1638 ahci_handle_dsm_trim(p, slot, cfis, 0);
1641 ahci_write_fis_d2h(p, slot, cfis,
1642 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
1644 case ATA_READ_LOG_EXT:
1645 case ATA_READ_LOG_DMA_EXT:
1646 ahci_handle_read_log(p, slot, cfis);
1649 ahci_write_fis_d2h(p, slot, cfis,
1650 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
/* Power-management commands are accepted as no-ops. */
1652 case ATA_STANDBY_CMD:
1653 case ATA_STANDBY_IMMEDIATE:
1655 case ATA_IDLE_IMMEDIATE:
1657 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1659 case ATA_ATAPI_IDENTIFY:
1660 handle_atapi_identify(p, slot, cfis);
1662 case ATA_PACKET_CMD:
/* PACKET is only valid on ATAPI ports (guard is in elided lines). */
1664 ahci_write_fis_d2h(p, slot, cfis,
1665 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
1667 handle_packet_cmd(p, slot, cfis);
1670 WPRINTF("Unsupported cmd:%02x\n", cfis[2]);
1671 ahci_write_fis_d2h(p, slot, cfis,
1672 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
/*
 * Process one command slot: map the command table (FIS + PRDT) from
 * guest memory, validate it is a host-to-device register FIS, and
 * dispatch it when the C (command) bit is set; otherwise handle the
 * soft-reset control path.
 */
1678 ahci_handle_slot(struct ahci_port *p, int slot)
1680 struct ahci_cmd_hdr *hdr;
1681 struct ahci_prdt_entry *prdt;
1682 struct pci_ahci_softc *sc;
1687 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
/* Command FIS length field is in dwords; convert to bytes. */
1688 cfl = (hdr->flags & 0x1f) * 4;
1689 cfis = paddr_guest2host(ahci_ctx(sc), hdr->ctba,
1690 0x80 + hdr->prdtl * sizeof(struct ahci_prdt_entry));
1691 prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
1695 for (i = 0; i < cfl; i++) {
1698 DPRINTF("%02x ", cfis[i]);
1702 for (i = 0; i < hdr->prdtl; i++) {
1703 DPRINTF("%d@%08"PRIx64"\n", prdt->dbc & 0x3fffff, prdt->dba);
1708 if (cfis[0] != FIS_TYPE_REGH2D) {
1709 WPRINTF("Not a H2D FIS:%02x\n", cfis[0]);
/* Bit 7 of byte 1 distinguishes a command FIS from a control FIS. */
1713 if (cfis[1] & 0x80) {
1714 ahci_handle_cmd(p, slot, cfis);
/* Control FIS: SRST bit in the device-control byte starts a reset. */
1716 if (cfis[15] & (1 << 2))
1718 else if (p->reset) {
1722 p->ci &= ~(1 << slot);
1727 ahci_handle_port(struct ahci_port *p)
/* Nothing to do unless the guest has started command processing. */
1731 if (!(p->cmd & AHCI_P_CMD_ST))
1735 * Search for any new commands to issue ignoring those that
1736 * are already in-flight.
1738 for (i = 0; (i < 32) && p->ci; i++) {
1739 if ((p->ci & (1 << i)) && !(p->pending & (1 << i))) {
/* Publish the current command slot in PxCMD.CCS before dispatch. */
1740 p->cmd &= ~AHCI_P_CMD_CCS_MASK;
1741 p->cmd |= i << AHCI_P_CMD_CCS_SHIFT;
1742 ahci_handle_slot(p, i);
1748 * blockif callback routine - this runs in the context of the blockif
1749 * i/o thread, so the mutex needs to be acquired.
1752 ata_ioreq_cb(struct blockif_req *br, int err)
1754 struct ahci_cmd_hdr *hdr;
1755 struct ahci_ioreq *aior;
1756 struct ahci_port *p;
1757 struct pci_ahci_softc *sc;
1762 DPRINTF("%s %d\n", __func__, err);
1765 aior = br->br_param;
1770 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
/* Classify the originating command: NCQ completion and/or TRIM. */
1772 if (cfis[2] == ATA_WRITE_FPDMA_QUEUED ||
1773 cfis[2] == ATA_READ_FPDMA_QUEUED ||
1774 cfis[2] == ATA_SEND_FPDMA_QUEUED)
1776 if (cfis[2] == ATA_DATA_SET_MANAGEMENT ||
1777 (cfis[2] == ATA_SEND_FPDMA_QUEUED &&
1778 (cfis[13] & 0x1f) == ATA_SFPDMA_DSM))
1781 pthread_mutex_lock(&sc->mtx);
1784 * Delete the blockif request from the busy list
1786 TAILQ_REMOVE(&p->iobhd, aior, io_blist);
1789 * Move the blockif request back to the free list
1791 STAILQ_INSERT_TAIL(&p->iofhd, aior, io_flist);
/* Report transferred byte count back through the command header. */
1794 hdr->prdbc = aior->done;
/* Multi-part command: issue the next chunk instead of completing. */
1796 if (!err && aior->more) {
1798 ahci_handle_dsm_trim(p, slot, cfis, aior->done);
1800 ahci_handle_rw(p, slot, cfis, aior->done);
1805 tfd = ATA_S_READY | ATA_S_DSC;
1807 tfd = (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR;
/* NCQ commands complete with an SDB FIS, others with a D2H FIS. */
1809 ahci_write_fis_sdb(p, slot, cfis, tfd);
1811 ahci_write_fis_d2h(p, slot, cfis, tfd);
1814 * This command is now complete.
1816 p->pending &= ~(1 << slot);
1818 ahci_check_stopped(p);
1820 pthread_mutex_unlock(&sc->mtx);
1821 DPRINTF("%s exit\n", __func__);
/*
 * blockif completion callback for ATAPI reads; runs on the blockif
 * i/o thread, so the softc mutex is taken before touching port state.
 * Continues a partially-transferred read or completes the command with
 * success or MEDIUM/ILLEGAL REQUEST sense on error.
 */
1825 atapi_ioreq_cb(struct blockif_req *br, int err)
1827 struct ahci_cmd_hdr *hdr;
1828 struct ahci_ioreq *aior;
1829 struct ahci_port *p;
1830 struct pci_ahci_softc *sc;
1835 DPRINTF("%s %d\n", __func__, err);
1837 aior = br->br_param;
1842 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + aior->slot * AHCI_CL_SIZE);
1844 pthread_mutex_lock(&sc->mtx);
1847 * Delete the blockif request from the busy list
1849 TAILQ_REMOVE(&p->iobhd, aior, io_blist);
1852 * Move the blockif request back to the free list
1854 STAILQ_INSERT_TAIL(&p->iofhd, aior, io_flist);
/* Report transferred byte count back through the command header. */
1857 hdr->prdbc = aior->done;
/* Multi-part read: issue the next chunk instead of completing. */
1859 if (!err && aior->more) {
1860 atapi_read(p, slot, cfis, aior->done);
1865 tfd = ATA_S_READY | ATA_S_DSC;
1867 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1869 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1871 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1872 ahci_write_fis_d2h(p, slot, cfis, tfd);
1875 * This command is now complete.
1877 p->pending &= ~(1 << slot);
1879 ahci_check_stopped(p);
1881 pthread_mutex_unlock(&sc->mtx);
1882 DPRINTF("%s exit\n", __func__);
/*
 * Allocate the port's i/o request pool (sized to the blockif queue
 * depth), wire each request's completion callback to the ATA or ATAPI
 * handler, and place them all on the free list.
 */
1886 pci_ahci_ioreq_init(struct ahci_port *pr)
1888 struct ahci_ioreq *vr;
1891 pr->ioqsz = blockif_queuesz(pr->bctx);
1892 pr->ioreq = calloc(pr->ioqsz, sizeof(struct ahci_ioreq));
1893 STAILQ_INIT(&pr->iofhd);
1896 * Add all i/o request entries to the free queue
1898 for (i = 0; i < pr->ioqsz; i++) {
1902 vr->io_req.br_callback = ata_ioreq_cb;
1904 vr->io_req.br_callback = atapi_ioreq_cb;
1905 vr->io_req.br_param = vr;
1906 STAILQ_INSERT_TAIL(&pr->iofhd, vr, io_flist);
1909 TAILQ_INIT(&pr->iobhd);
/*
 * pci_ahci_port_write - handle a guest write to a per-port AHCI register.
 * 'offset' arrives relative to the BAR and is decomposed into a port
 * index plus a register offset within that port's register block.
 * The dispatching switch and its case labels are elided in this excerpt;
 * the visible fragments cover PxIE, PxCMD and PxSCTL handling and the
 * read-only-register warning.  Caller holds sc->mtx (see pci_ahci_write).
 */
1913 pci_ahci_port_write(struct pci_ahci_softc *sc, uint64_t offset, uint64_t value)
1915 int port = (offset - AHCI_OFFSET) / AHCI_STEP;
1916 offset = (offset - AHCI_OFFSET) % AHCI_STEP;
1917 struct ahci_port *p = &sc->port[port];
1919 DPRINTF("pci_ahci_port %d: write offset 0x%"PRIx64" value 0x%"PRIx64"\n",
1920 port, offset, value);
/* PxIE: mask off reserved interrupt-enable bits, then re-evaluate intrs. */
1939 p->ie = value & 0xFDC000FF;
1940 ahci_generate_intr(sc);
/* PxCMD: the ST-clear (stop) arm is elided; when ST is set, the command
 * list engine is marked running and its guest memory is (re)mapped. */
1946 if (!(value & AHCI_P_CMD_ST)) {
1951 p->cmd |= AHCI_P_CMD_CR;
1952 clb = (uint64_t)p->clbu << 32 | p->clb;
/* Map the guest command list (AHCI_MAX_SLOTS headers) into host space. */
1953 p->cmd_lst = paddr_guest2host(ahci_ctx(sc), clb,
1954 AHCI_CL_SIZE * AHCI_MAX_SLOTS);
/* FRE: enable/disable FIS receive and map the guest's received-FIS area. */
1957 if (value & AHCI_P_CMD_FRE) {
1960 p->cmd |= AHCI_P_CMD_FR;
1961 fb = (uint64_t)p->fbu << 32 | p->fb;
1962 /* we don't support FBSCP, so rfis size is 256Bytes */
1963 p->rfis = paddr_guest2host(ahci_ctx(sc), fb, 256);
1965 p->cmd &= ~AHCI_P_CMD_FR;
/* CLO (command list override) is write-only in effect: self-clears. */
1968 if (value & AHCI_P_CMD_CLO) {
1970 p->cmd &= ~AHCI_P_CMD_CLO;
/* Kick the port in case commands were queued while it was stopped. */
1973 ahci_handle_port(p);
1979 WPRINTF("pci_ahci_port: read only registers 0x%"PRIx64"\n", offset);
/* PxSCTL: a COMRESET request via DET is only honored while the port is
 * not started (ST clear) -- presumably per AHCI; verify full source. */
1983 if (!(p->cmd & AHCI_P_CMD_ST)) {
1984 if (value & ATA_SC_DET_RESET)
1996 ahci_handle_port(p);
/*
 * pci_ahci_host_write - handle a guest write to a global (HBA-level)
 * AHCI register.  Register dispatch is elided in this excerpt; visible
 * fragments cover the read-only warning and GHC handling (HBA reset via
 * AHCI_GHC_HR, global interrupt enable via AHCI_GHC_IE).  Caller holds
 * sc->mtx.
 */
2006 pci_ahci_host_write(struct pci_ahci_softc *sc, uint64_t offset, uint64_t value)
2008 DPRINTF("pci_ahci_host: write offset 0x%"PRIx64" value 0x%"PRIx64"\n",
2016 DPRINTF("pci_ahci_host: read only registers 0x%"PRIx64"\n", offset);
/* GHC_HR requests a full HBA reset (reset body elided here). */
2019 if (value & AHCI_GHC_HR)
/* GHC_IE enables global interrupt delivery; re-evaluate pending intrs.
 * NOTE(review): the path clearing IE is not visible -- confirm it. */
2021 else if (value & AHCI_GHC_IE) {
2022 sc->ghc |= AHCI_GHC_IE;
2023 ahci_generate_intr(sc);
2028 ahci_generate_intr(sc);
/*
 * pci_ahci_write - BAR write entry point registered with the PCI
 * emulation layer.  Takes the softc mutex and routes the access: global
 * HBA registers below AHCI_OFFSET, per-port registers up to
 * ports * AHCI_STEP beyond that; anything else is logged and dropped.
 */
2036 pci_ahci_write(struct vmctx *ctx, int vcpu, struct pci_devinst *pi,
2037 int baridx, uint64_t offset, int size, uint64_t value)
2039 struct pci_ahci_softc *sc = pi->pi_arg;
/* Only BAR 5 (ABAR) is allocated for this device; see pci_ahci_init. */
2041 assert(baridx == 5);
2044 pthread_mutex_lock(&sc->mtx);
2046 if (offset < AHCI_OFFSET)
2047 pci_ahci_host_write(sc, offset, value);
2048 else if (offset < AHCI_OFFSET + sc->ports * AHCI_STEP)
2049 pci_ahci_port_write(sc, offset, value);
2051 WPRINTF("pci_ahci: unknown i/o write offset 0x%"PRIx64"\n", offset);
2053 pthread_mutex_unlock(&sc->mtx);
/*
 * pci_ahci_host_read - return the value of a global HBA register.
 * Registers from AHCI_CAP onward are read by indexing off &sc->cap as a
 * flat uint32_t array -- this relies on the softc laying out those
 * register fields contiguously and in register order (NOTE(review):
 * layout dependency; confirm against the struct definition).
 */
2057 pci_ahci_host_read(struct pci_ahci_softc *sc, uint64_t offset)
2073 uint32_t *p = &sc->cap;
2074 p += (offset - AHCI_CAP) / sizeof(uint32_t);
2082 DPRINTF("pci_ahci_host: read offset 0x%"PRIx64" value 0x%x\n",
/*
 * pci_ahci_port_read - return the value of a per-port register.
 * Decomposes 'offset' into port index and register offset, then indexes
 * off &port->clb as a flat uint32_t array, mirroring the layout trick
 * in pci_ahci_host_read (NOTE(review): same struct-layout dependency).
 */
2089 pci_ahci_port_read(struct pci_ahci_softc *sc, uint64_t offset)
2092 int port = (offset - AHCI_OFFSET) / AHCI_STEP;
2093 offset = (offset - AHCI_OFFSET) % AHCI_STEP;
2113 uint32_t *p= &sc->port[port].clb;
2114 p += (offset - AHCI_P_CLB) / sizeof(uint32_t);
2123 DPRINTF("pci_ahci_port %d: read offset 0x%"PRIx64" value 0x%x\n",
2124 port, offset, value);
/*
 * pci_ahci_read - BAR read entry point registered with the PCI
 * emulation layer.  Mirrors pci_ahci_write: takes the softc mutex and
 * dispatches to the host- or port-level reader by offset; unknown
 * offsets are logged (the returned value for that case is elided here).
 */
2130 pci_ahci_read(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int baridx,
2131 uint64_t offset, int size)
2133 struct pci_ahci_softc *sc = pi->pi_arg;
/* Only BAR 5 (ABAR) is allocated for this device; see pci_ahci_init. */
2136 assert(baridx == 5);
2139 pthread_mutex_lock(&sc->mtx);
2141 if (offset < AHCI_OFFSET)
2142 value = pci_ahci_host_read(sc, offset);
2143 else if (offset < AHCI_OFFSET + sc->ports * AHCI_STEP)
2144 value = pci_ahci_port_read(sc, offset);
2147 WPRINTF("pci_ahci: unknown i/o read offset 0x%"PRIx64"\n", offset);
2150 pthread_mutex_unlock(&sc->mtx);
/*
 * pci_ahci_init - common constructor for the ahci-hd / ahci-cd device
 * models.  'opts' names the backing image (required); 'atapi' selects
 * ATAPI (CD) vs. ATA (disk) behavior for port 0.  Sets up the softc,
 * opens the block backend, builds the AHCI capability registers and the
 * PCI config space (Intel ICH8 AHCI identity), and allocates BAR 5.
 */
2156 pci_ahci_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts, int atapi)
2158 char bident[sizeof("XX:X:X")];
2159 struct blockif_ctxt *bctxt;
2160 struct pci_ahci_softc *sc;
/* A backing device is mandatory; bail out early without one. */
2168 fprintf(stderr, "pci_ahci: backing device required\n");
/* NOTE(review): debug log opened at a fixed world-readable path; the
 * fopen result is not checked in the visible lines. */
2173 dbg = fopen("/tmp/log", "w+");
/* NOTE(review): calloc result unchecked in the visible lines. */
2176 sc = calloc(1, sizeof(struct pci_ahci_softc));
2179 sc->ports = MAX_PORTS;
2182 * Only use port 0 for a backing device. All other ports will be
2185 sc->port[0].atapi = atapi;
2188 * Attempt to open the backing image. Use the PCI
2189 * slot/func for the identifier string.
2191 snprintf(bident, sizeof(bident), "%d:%d", pi->pi_slot, pi->pi_func);
2192 bctxt = blockif_open(opts, bident);
2193 if (bctxt == NULL) {
2197 sc->port[0].bctx = bctxt;
2198 sc->port[0].pr_sc = sc;
2201 * Create an identifier for the backing file. Use parts of the
2202 * md5 sum of the filename
2205 MD5Update(&mdctx, opts, strlen(opts));
2206 MD5Final(digest, &mdctx);
/* NOTE(review): unbounded sprintf into ident ("BHYVE-xxxx-xxxx-xxxx" is
 * 20 chars + NUL) -- confirm the ident buffer is large enough, or
 * switch to snprintf in the full source. */
2207 sprintf(sc->port[0].ident, "BHYVE-%02X%02X-%02X%02X-%02X%02X",
2208 digest[0], digest[1], digest[2], digest[3], digest[4], digest[5]);
2211 * Allocate blockif request structures and add them
2214 pci_ahci_ioreq_init(&sc->port[0]);
2216 pthread_mutex_init(&sc->mtx, NULL);
2218 /* Intel ICH8 AHCI */
/* Advertise as many command slots as the backend queue supports. */
2219 slots = sc->port[0].ioqsz;
2223 sc->cap = AHCI_CAP_64BIT | AHCI_CAP_SNCQ | AHCI_CAP_SSNTF |
2224 AHCI_CAP_SMPS | AHCI_CAP_SSS | AHCI_CAP_SALP |
2225 AHCI_CAP_SAL | AHCI_CAP_SCLO | (0x3 << AHCI_CAP_ISS_SHIFT)|
2226 AHCI_CAP_PMD | AHCI_CAP_SSC | AHCI_CAP_PSC |
2227 (slots << AHCI_CAP_NCS_SHIFT) | AHCI_CAP_SXS | (sc->ports - 1);
2229 /* Only port 0 implemented */
2232 sc->cap2 = AHCI_CAP2_APST;
/* PCI identity: Intel ICH8 AHCI controller (8086:2821), SATA/AHCI class. */
2235 pci_set_cfgdata16(pi, PCIR_DEVICE, 0x2821);
2236 pci_set_cfgdata16(pi, PCIR_VENDOR, 0x8086);
2237 pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_STORAGE);
2238 pci_set_cfgdata8(pi, PCIR_SUBCLASS, PCIS_STORAGE_SATA);
2239 pci_set_cfgdata8(pi, PCIR_PROGIF, PCIP_STORAGE_SATA_AHCI_1_0);
2240 pci_emul_add_msicap(pi, 1);
/* BAR 5 (ABAR) covers the global registers plus one block per port. */
2241 pci_emul_alloc_bar(pi, 5, PCIBAR_MEM32,
2242 AHCI_OFFSET + sc->ports * AHCI_STEP);
2244 pci_lintr_request(pi);
/* Error-unwind path: release the backend on failure. */
2248 blockif_close(sc->port[0].bctx);
/* pci_ahci_hd_init - "ahci-hd" constructor: plain ATA disk (atapi = 0). */
2256 pci_ahci_hd_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
2259 return (pci_ahci_init(ctx, pi, opts, 0));
/* pci_ahci_atapi_init - "ahci-cd" constructor: ATAPI device (atapi = 1). */
2263 pci_ahci_atapi_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
2266 return (pci_ahci_init(ctx, pi, opts, 1));
2270 * Use separate emulation names to distinguish drive and atapi devices
/* Emulation descriptor for "-s N,ahci-hd,<image>": disk flavor. */
2272 struct pci_devemu pci_de_ahci_hd = {
2273 .pe_emu = "ahci-hd",
2274 .pe_init = pci_ahci_hd_init,
2275 .pe_barwrite = pci_ahci_write,
2276 .pe_barread = pci_ahci_read
/* Register with the PCI emulation linker set so bhyve can find it. */
2278 PCI_EMUL_SET(pci_de_ahci_hd);
/* Emulation descriptor for "-s N,ahci-cd,<image>": ATAPI (CD) flavor. */
2280 struct pci_devemu pci_de_ahci_cd = {
2281 .pe_emu = "ahci-cd",
2282 .pe_init = pci_ahci_atapi_init,
2283 .pe_barwrite = pci_ahci_write,
2284 .pe_barread = pci_ahci_read
/* Register with the PCI emulation linker set so bhyve can find it. */
2286 PCI_EMUL_SET(pci_de_ahci_cd);