2 * Copyright (c) 2013 Zhixiang Yu <zcore@freebsd.org>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
32 #include <sys/param.h>
33 #include <sys/linker_set.h>
36 #include <sys/ioctl.h>
39 #include <sys/endian.h>
51 #include <pthread_np.h>
59 #define MAX_PORTS 6 /* Intel ICH8 AHCI supports 6 ports */
61 #define PxSIG_ATA 0x00000101 /* ATA drive */
62 #define PxSIG_ATAPI 0xeb140101 /* ATAPI drive */
65 FIS_TYPE_REGH2D = 0x27, /* Register FIS - host to device */
66 FIS_TYPE_REGD2H = 0x34, /* Register FIS - device to host */
67 FIS_TYPE_DMAACT = 0x39, /* DMA activate FIS - device to host */
68 FIS_TYPE_DMASETUP = 0x41, /* DMA setup FIS - bidirectional */
69 FIS_TYPE_DATA = 0x46, /* Data FIS - bidirectional */
70 FIS_TYPE_BIST = 0x58, /* BIST activate FIS - bidirectional */
71 FIS_TYPE_PIOSETUP = 0x5F, /* PIO setup FIS - device to host */
72 FIS_TYPE_SETDEVBITS = 0xA1, /* Set dev bits FIS - device to host */
78 #define TEST_UNIT_READY 0x00
79 #define REQUEST_SENSE 0x03
81 #define START_STOP_UNIT 0x1B
82 #define PREVENT_ALLOW 0x1E
83 #define READ_CAPACITY 0x25
85 #define POSITION_TO_ELEMENT 0x2B
87 #define GET_EVENT_STATUS_NOTIFICATION 0x4A
88 #define MODE_SENSE_10 0x5A
89 #define REPORT_LUNS 0xA0
94 * SCSI mode page codes
96 #define MODEPAGE_RW_ERROR_RECOVERY 0x01
97 #define MODEPAGE_CD_CAPABILITIES 0x2A
102 #define ATA_SF_ENAB_SATA_SF 0x10
103 #define ATA_SATA_SF_AN 0x05
104 #define ATA_SF_DIS_SATA_SF 0x90
111 #define DPRINTF(format, arg...) do{fprintf(dbg, format, ##arg);fflush(dbg);}while(0)
113 #define DPRINTF(format, arg...)
115 #define WPRINTF(format, arg...) printf(format, ##arg)
118 struct blockif_req io_req;
119 struct ahci_port *io_pr;
120 STAILQ_ENTRY(ahci_ioreq) io_flist;
121 TAILQ_ENTRY(ahci_ioreq) io_blist;
130 struct blockif_ctxt *bctx;
131 struct pci_ahci_softc *pr_sc;
138 uint8_t err_cfis[20];
164 struct ahci_ioreq *ioreq;
166 STAILQ_HEAD(ahci_fhead, ahci_ioreq) iofhd;
167 TAILQ_HEAD(ahci_bhead, ahci_ioreq) iobhd;
170 struct ahci_cmd_hdr {
175 uint32_t reserved[4];
178 struct ahci_prdt_entry {
181 #define DBCMASK 0x3fffff
185 struct pci_ahci_softc {
186 struct pci_devinst *asc_pi;
201 struct ahci_port port[MAX_PORTS];
203 #define ahci_ctx(sc) ((sc)->asc_pi->pi_vmctx)
/*
 * Convert a logical block address to the Minute:Second:Frame form used
 * in CD TOC entries.  Audio CDs have 75 frames per second, and LBA 0
 * corresponds to MSF 00:02:00 (the mandatory 2-second / 150-frame
 * lead-in offset).
 */
static inline void
lba_to_msf(uint8_t *buf, int lba)
{
	lba += 150;			/* MSF 00:02:00 == LBA 0 */
	buf[0] = (lba / 75) / 60;	/* minutes */
	buf[1] = (lba / 75) % 60;	/* seconds */
	buf[2] = lba % 75;		/* frames */
}
214 * generate HBA intr depending on whether or not ports within
215 * the controller have an interrupt pending.
218 ahci_generate_intr(struct pci_ahci_softc *sc)
220 struct pci_devinst *pi;
225 for (i = 0; i < sc->ports; i++) {
226 struct ahci_port *pr;
232 DPRINTF("%s %x\n", __func__, sc->is);
234 if (sc->is && (sc->ghc & AHCI_GHC_IE)) {
235 if (pci_msi_enabled(pi)) {
237 * Generate an MSI interrupt on every edge
239 pci_generate_msi(pi, 0);
240 } else if (!sc->lintr) {
242 * Only generate a pin-based interrupt if one wasn't
246 pci_lintr_assert(pi);
248 } else if (sc->lintr) {
250 * No interrupts: deassert pin-based signal if it had
253 pci_lintr_deassert(pi);
259 ahci_write_fis(struct ahci_port *p, enum sata_fis_type ft, uint8_t *fis)
261 int offset, len, irq;
263 if (p->rfis == NULL || !(p->cmd & AHCI_P_CMD_FRE))
267 case FIS_TYPE_REGD2H:
272 case FIS_TYPE_SETDEVBITS:
277 case FIS_TYPE_PIOSETUP:
283 WPRINTF("unsupported fis type %d\n", ft);
286 memcpy(p->rfis + offset, fis, len);
289 ahci_generate_intr(p->pr_sc);
294 ahci_write_fis_piosetup(struct ahci_port *p)
298 memset(fis, 0, sizeof(fis));
299 fis[0] = FIS_TYPE_PIOSETUP;
300 ahci_write_fis(p, FIS_TYPE_PIOSETUP, fis);
304 ahci_write_fis_sdb(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t tfd)
309 error = (tfd >> 8) & 0xff;
310 memset(fis, 0, sizeof(fis));
311 fis[0] = FIS_TYPE_SETDEVBITS;
315 if (fis[2] & ATA_S_ERROR) {
316 p->is |= AHCI_P_IX_TFE;
317 p->err_cfis[0] = slot;
318 p->err_cfis[2] = tfd & 0x77;
319 p->err_cfis[3] = error;
320 memcpy(&p->err_cfis[4], cfis + 4, 16);
322 *(uint32_t *)(fis + 4) = (1 << slot);
323 p->sact &= ~(1 << slot);
326 ahci_write_fis(p, FIS_TYPE_SETDEVBITS, fis);
330 ahci_write_fis_d2h(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t tfd)
335 error = (tfd >> 8) & 0xff;
336 memset(fis, 0, sizeof(fis));
337 fis[0] = FIS_TYPE_REGD2H;
351 if (fis[2] & ATA_S_ERROR) {
352 p->is |= AHCI_P_IX_TFE;
353 p->err_cfis[0] = 0x80;
354 p->err_cfis[2] = tfd & 0xff;
355 p->err_cfis[3] = error;
356 memcpy(&p->err_cfis[4], cfis + 4, 16);
358 p->ci &= ~(1 << slot);
360 ahci_write_fis(p, FIS_TYPE_REGD2H, fis);
364 ahci_write_reset_fis_d2h(struct ahci_port *p)
368 memset(fis, 0, sizeof(fis));
369 fis[0] = FIS_TYPE_REGD2H;
377 ahci_write_fis(p, FIS_TYPE_REGD2H, fis);
381 ahci_check_stopped(struct ahci_port *p)
384 * If we are no longer processing the command list and nothing
385 * is in-flight, clear the running bit, the current command
386 * slot, the command issue and active bits.
388 if (!(p->cmd & AHCI_P_CMD_ST)) {
389 if (p->pending == 0) {
390 p->cmd &= ~(AHCI_P_CMD_CR | AHCI_P_CMD_CCS_MASK);
398 ahci_port_stop(struct ahci_port *p)
400 struct ahci_ioreq *aior;
406 assert(pthread_mutex_isowned_np(&p->pr_sc->mtx));
408 TAILQ_FOREACH(aior, &p->iobhd, io_blist) {
410 * Try to cancel the outstanding blockif request.
412 error = blockif_cancel(p->bctx, &aior->io_req);
418 if (cfis[2] == ATA_WRITE_FPDMA_QUEUED ||
419 cfis[2] == ATA_READ_FPDMA_QUEUED)
423 p->sact &= ~(1 << slot);
425 p->ci &= ~(1 << slot);
428 * This command is now done.
430 p->pending &= ~(1 << slot);
433 * Delete the blockif request from the busy list
435 TAILQ_REMOVE(&p->iobhd, aior, io_blist);
438 * Move the blockif request back to the free list
440 STAILQ_INSERT_TAIL(&p->iofhd, aior, io_flist);
443 ahci_check_stopped(p);
447 ahci_port_reset(struct ahci_port *pr)
451 pr->xfermode = ATA_UDMA6;
452 pr->mult_sectors = 128;
455 pr->ssts = ATA_SS_DET_NO_DEVICE;
456 pr->sig = 0xFFFFFFFF;
460 pr->ssts = ATA_SS_DET_PHY_ONLINE | ATA_SS_IPM_ACTIVE;
461 if (pr->sctl & ATA_SC_SPD_MASK)
462 pr->ssts |= (pr->sctl & ATA_SC_SPD_MASK);
464 pr->ssts |= ATA_SS_SPD_GEN3;
465 pr->tfd = (1 << 8) | ATA_S_DSC | ATA_S_DMA;
468 pr->tfd |= ATA_S_READY;
470 pr->sig = PxSIG_ATAPI;
471 ahci_write_reset_fis_d2h(pr);
475 ahci_reset(struct pci_ahci_softc *sc)
479 sc->ghc = AHCI_GHC_AE;
483 pci_lintr_deassert(sc->asc_pi);
487 for (i = 0; i < sc->ports; i++) {
490 sc->port[i].sctl = 0;
491 ahci_port_reset(&sc->port[i]);
/*
 * Copy a string into an ATA IDENTIFY data buffer.  IDENTIFY strings are
 * stored as sequences of 16-bit words with the characters of each pair
 * swapped (hence the `i ^ 1` index).  Output is space-padded to 'len';
 * the source is not consumed past its terminating NUL.
 */
static void
ata_string(uint8_t *dest, const char *src, int len)
{
	int i;

	for (i = 0; i < len; i++) {
		if (*src)
			dest[i ^ 1] = *src++;
		else
			dest[i ^ 1] = ' ';
	}
}
/*
 * Copy a string into an ATAPI INQUIRY data buffer: plain byte order
 * (unlike ata_string's word swap), space-padded to 'len'.
 */
static void
atapi_string(uint8_t *dest, const char *src, int len)
{
	int i;

	for (i = 0; i < len; i++) {
		if (*src)
			dest[i] = *src++;
		else
			dest[i] = ' ';
	}
}
522 ahci_handle_dma(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t done,
525 struct ahci_ioreq *aior;
526 struct blockif_req *breq;
527 struct pci_ahci_softc *sc;
528 struct ahci_prdt_entry *prdt;
529 struct ahci_cmd_hdr *hdr;
532 int i, err, iovcnt, ncq, readop;
535 prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
536 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
541 if (cfis[2] == ATA_WRITE || cfis[2] == ATA_WRITE48 ||
542 cfis[2] == ATA_WRITE_MUL || cfis[2] == ATA_WRITE_MUL48 ||
543 cfis[2] == ATA_WRITE_DMA || cfis[2] == ATA_WRITE_DMA48 ||
544 cfis[2] == ATA_WRITE_FPDMA_QUEUED)
547 if (cfis[2] == ATA_WRITE_FPDMA_QUEUED ||
548 cfis[2] == ATA_READ_FPDMA_QUEUED) {
549 lba = ((uint64_t)cfis[10] << 40) |
550 ((uint64_t)cfis[9] << 32) |
551 ((uint64_t)cfis[8] << 24) |
552 ((uint64_t)cfis[6] << 16) |
553 ((uint64_t)cfis[5] << 8) |
555 len = cfis[11] << 8 | cfis[3];
559 } else if (cfis[2] == ATA_READ48 || cfis[2] == ATA_WRITE48 ||
560 cfis[2] == ATA_READ_MUL48 || cfis[2] == ATA_WRITE_MUL48 ||
561 cfis[2] == ATA_READ_DMA48 || cfis[2] == ATA_WRITE_DMA48) {
562 lba = ((uint64_t)cfis[10] << 40) |
563 ((uint64_t)cfis[9] << 32) |
564 ((uint64_t)cfis[8] << 24) |
565 ((uint64_t)cfis[6] << 16) |
566 ((uint64_t)cfis[5] << 8) |
568 len = cfis[13] << 8 | cfis[12];
572 lba = ((cfis[7] & 0xf) << 24) | (cfis[6] << 16) |
573 (cfis[5] << 8) | cfis[4];
578 lba *= blockif_sectsz(p->bctx);
579 len *= blockif_sectsz(p->bctx);
582 * Pull request off free list
584 aior = STAILQ_FIRST(&p->iofhd);
585 assert(aior != NULL);
586 STAILQ_REMOVE_HEAD(&p->iofhd, io_flist);
591 breq = &aior->io_req;
592 breq->br_offset = lba + done;
593 iovcnt = hdr->prdtl - seek;
594 if (iovcnt > BLOCKIF_IOV_MAX) {
595 aior->prdtl = iovcnt - BLOCKIF_IOV_MAX;
596 iovcnt = BLOCKIF_IOV_MAX;
599 breq->br_iovcnt = iovcnt;
602 * Mark this command in-flight.
604 p->pending |= 1 << slot;
607 * Stuff request onto busy list
609 TAILQ_INSERT_HEAD(&p->iobhd, aior, io_blist);
612 * Build up the iovec based on the prdt
614 for (i = 0; i < iovcnt; i++) {
617 dbcsz = (prdt->dbc & DBCMASK) + 1;
618 breq->br_iov[i].iov_base = paddr_guest2host(ahci_ctx(sc),
620 breq->br_iov[i].iov_len = dbcsz;
625 err = blockif_read(p->bctx, breq);
627 err = blockif_write(p->bctx, breq);
631 p->ci &= ~(1 << slot);
635 ahci_handle_flush(struct ahci_port *p, int slot, uint8_t *cfis)
637 struct ahci_ioreq *aior;
638 struct blockif_req *breq;
642 * Pull request off free list
644 aior = STAILQ_FIRST(&p->iofhd);
645 assert(aior != NULL);
646 STAILQ_REMOVE_HEAD(&p->iofhd, io_flist);
652 breq = &aior->io_req;
655 * Mark this command in-flight.
657 p->pending |= 1 << slot;
660 * Stuff request onto busy list
662 TAILQ_INSERT_HEAD(&p->iobhd, aior, io_blist);
664 err = blockif_flush(p->bctx, breq);
669 read_prdt(struct ahci_port *p, int slot, uint8_t *cfis,
672 struct ahci_cmd_hdr *hdr;
673 struct ahci_prdt_entry *prdt;
677 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
680 prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
681 for (i = 0; i < hdr->prdtl && len; i++) {
686 dbcsz = (prdt->dbc & DBCMASK) + 1;
687 ptr = paddr_guest2host(ahci_ctx(p->pr_sc), prdt->dba, dbcsz);
688 sublen = len < dbcsz ? len : dbcsz;
689 memcpy(to, ptr, sublen);
697 ahci_handle_dsm_trim(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t done)
699 struct ahci_ioreq *aior;
700 struct blockif_req *breq;
707 if (cfis[2] == ATA_DATA_SET_MANAGEMENT) {
708 len = (uint16_t)cfis[13] << 8 | cfis[12];
710 } else { /* ATA_SEND_FPDMA_QUEUED */
711 len = (uint16_t)cfis[11] << 8 | cfis[3];
714 read_prdt(p, slot, cfis, buf, sizeof(buf));
718 elba = ((uint64_t)entry[5] << 40) |
719 ((uint64_t)entry[4] << 32) |
720 ((uint64_t)entry[3] << 24) |
721 ((uint64_t)entry[2] << 16) |
722 ((uint64_t)entry[1] << 8) |
724 elen = (uint16_t)entry[7] << 8 | entry[6];
728 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
729 p->pending &= ~(1 << slot);
730 ahci_check_stopped(p);
737 * Pull request off free list
739 aior = STAILQ_FIRST(&p->iofhd);
740 assert(aior != NULL);
741 STAILQ_REMOVE_HEAD(&p->iofhd, io_flist);
748 breq = &aior->io_req;
749 breq->br_offset = elba * blockif_sectsz(p->bctx);
751 breq->br_iov[0].iov_len = elen * blockif_sectsz(p->bctx);
754 * Mark this command in-flight.
756 p->pending |= 1 << slot;
759 * Stuff request onto busy list
761 TAILQ_INSERT_HEAD(&p->iobhd, aior, io_blist);
763 err = blockif_delete(p->bctx, breq);
768 write_prdt(struct ahci_port *p, int slot, uint8_t *cfis,
771 struct ahci_cmd_hdr *hdr;
772 struct ahci_prdt_entry *prdt;
776 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
779 prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
780 for (i = 0; i < hdr->prdtl && len; i++) {
785 dbcsz = (prdt->dbc & DBCMASK) + 1;
786 ptr = paddr_guest2host(ahci_ctx(p->pr_sc), prdt->dba, dbcsz);
787 sublen = len < dbcsz ? len : dbcsz;
788 memcpy(ptr, from, sublen);
793 hdr->prdbc = size - len;
797 ahci_handle_read_log(struct ahci_port *p, int slot, uint8_t *cfis)
799 struct ahci_cmd_hdr *hdr;
802 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
803 if (p->atapi || hdr->prdtl == 0 || cfis[4] != 0x10 ||
804 cfis[5] != 0 || cfis[9] != 0 || cfis[12] != 1 || cfis[13] != 0) {
805 ahci_write_fis_d2h(p, slot, cfis,
806 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
810 memset(buf, 0, sizeof(buf));
811 memcpy(buf, p->err_cfis, sizeof(p->err_cfis));
813 if (cfis[2] == ATA_READ_LOG_EXT)
814 ahci_write_fis_piosetup(p);
815 write_prdt(p, slot, cfis, (void *)buf, sizeof(buf));
816 ahci_write_fis_d2h(p, slot, cfis, ATA_S_DSC | ATA_S_READY);
820 handle_identify(struct ahci_port *p, int slot, uint8_t *cfis)
822 struct ahci_cmd_hdr *hdr;
824 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
825 if (p->atapi || hdr->prdtl == 0) {
826 ahci_write_fis_d2h(p, slot, cfis,
827 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
831 int sectsz, psectsz, psectoff, candelete, ro;
835 ro = blockif_is_ro(p->bctx);
836 candelete = blockif_candelete(p->bctx);
837 sectsz = blockif_sectsz(p->bctx);
838 sectors = blockif_size(p->bctx) / sectsz;
839 blockif_chs(p->bctx, &cyl, &heads, &sech);
840 blockif_psectsz(p->bctx, &psectsz, &psectoff);
841 memset(buf, 0, sizeof(buf));
846 /* TODO emulate different serial? */
847 ata_string((uint8_t *)(buf+10), "123456", 20);
848 ata_string((uint8_t *)(buf+23), "001", 8);
849 ata_string((uint8_t *)(buf+27), "BHYVE SATA DISK", 40);
850 buf[47] = (0x8000 | 128);
852 buf[49] = (1 << 8 | 1 << 9 | 1 << 11);
854 buf[53] = (1 << 1 | 1 << 2);
856 buf[59] = (0x100 | p->mult_sectors);
857 if (sectors <= 0x0fffffff) {
859 buf[61] = (sectors >> 16);
865 if (p->xfermode & ATA_WDMA0)
866 buf[63] |= (1 << ((p->xfermode & 7) + 8));
874 buf[76] = (ATA_SATA_GEN1 | ATA_SATA_GEN2 | ATA_SATA_GEN3 |
876 buf[77] = (ATA_SUPPORT_RCVSND_FPDMA_QUEUED |
877 (p->ssts & ATA_SS_SPD_MASK) >> 3);
880 buf[82] = (ATA_SUPPORT_POWERMGT | ATA_SUPPORT_WRITECACHE|
881 ATA_SUPPORT_LOOKAHEAD | ATA_SUPPORT_NOP);
882 buf[83] = (ATA_SUPPORT_ADDRESS48 | ATA_SUPPORT_FLUSHCACHE |
883 ATA_SUPPORT_FLUSHCACHE48 | 1 << 14);
885 buf[85] = (ATA_SUPPORT_POWERMGT | ATA_SUPPORT_WRITECACHE|
886 ATA_SUPPORT_LOOKAHEAD | ATA_SUPPORT_NOP);
887 buf[86] = (ATA_SUPPORT_ADDRESS48 | ATA_SUPPORT_FLUSHCACHE |
888 ATA_SUPPORT_FLUSHCACHE48 | 1 << 15);
891 if (p->xfermode & ATA_UDMA0)
892 buf[88] |= (1 << ((p->xfermode & 7) + 8));
893 buf[93] = (1 | 1 <<14);
895 buf[101] = (sectors >> 16);
896 buf[102] = (sectors >> 32);
897 buf[103] = (sectors >> 48);
898 if (candelete && !ro) {
899 buf[69] |= ATA_SUPPORT_RZAT | ATA_SUPPORT_DRAT;
901 buf[169] = ATA_SUPPORT_DSM_TRIM;
905 if (psectsz > sectsz) {
907 buf[106] |= ffsl(psectsz / sectsz) - 1;
908 buf[209] |= (psectoff / sectsz);
912 buf[117] = sectsz / 2;
913 buf[118] = ((sectsz / 2) >> 16);
915 buf[119] = (ATA_SUPPORT_RWLOGDMAEXT | 1 << 14);
916 buf[120] = (ATA_SUPPORT_RWLOGDMAEXT | 1 << 14);
918 ahci_write_fis_piosetup(p);
919 write_prdt(p, slot, cfis, (void *)buf, sizeof(buf));
920 ahci_write_fis_d2h(p, slot, cfis, ATA_S_DSC | ATA_S_READY);
925 handle_atapi_identify(struct ahci_port *p, int slot, uint8_t *cfis)
928 ahci_write_fis_d2h(p, slot, cfis,
929 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
933 memset(buf, 0, sizeof(buf));
934 buf[0] = (2 << 14 | 5 << 8 | 1 << 7 | 2 << 5);
935 /* TODO emulate different serial? */
936 ata_string((uint8_t *)(buf+10), "123456", 20);
937 ata_string((uint8_t *)(buf+23), "001", 8);
938 ata_string((uint8_t *)(buf+27), "BHYVE SATA DVD ROM", 40);
939 buf[49] = (1 << 9 | 1 << 8);
940 buf[50] = (1 << 14 | 1);
941 buf[53] = (1 << 2 | 1 << 1);
944 if (p->xfermode & ATA_WDMA0)
945 buf[63] |= (1 << ((p->xfermode & 7) + 8));
951 buf[76] = (ATA_SATA_GEN1 | ATA_SATA_GEN2 | ATA_SATA_GEN3);
952 buf[77] = ((p->ssts & ATA_SS_SPD_MASK) >> 3);
955 buf[82] = (ATA_SUPPORT_POWERMGT | ATA_SUPPORT_PACKET |
956 ATA_SUPPORT_RESET | ATA_SUPPORT_NOP);
959 buf[85] = (ATA_SUPPORT_POWERMGT | ATA_SUPPORT_PACKET |
960 ATA_SUPPORT_RESET | ATA_SUPPORT_NOP);
963 if (p->xfermode & ATA_UDMA0)
964 buf[88] |= (1 << ((p->xfermode & 7) + 8));
966 ahci_write_fis_piosetup(p);
967 write_prdt(p, slot, cfis, (void *)buf, sizeof(buf));
968 ahci_write_fis_d2h(p, slot, cfis, ATA_S_DSC | ATA_S_READY);
973 atapi_inquiry(struct ahci_port *p, int slot, uint8_t *cfis)
982 if (acmd[1] & 1) { /* VPD */
983 if (acmd[2] == 0) { /* Supported VPD pages */
991 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
993 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
994 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
995 ahci_write_fis_d2h(p, slot, cfis, tfd);
1007 atapi_string(buf + 8, "BHYVE", 8);
1008 atapi_string(buf + 16, "BHYVE DVD-ROM", 16);
1009 atapi_string(buf + 32, "001", 4);
1015 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1016 write_prdt(p, slot, cfis, buf, len);
1017 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1021 atapi_read_capacity(struct ahci_port *p, int slot, uint8_t *cfis)
1026 sectors = blockif_size(p->bctx) / 2048;
1027 be32enc(buf, sectors - 1);
1028 be32enc(buf + 4, 2048);
1029 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1030 write_prdt(p, slot, cfis, buf, sizeof(buf));
1031 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1035 atapi_read_toc(struct ahci_port *p, int slot, uint8_t *cfis)
1043 len = be16dec(acmd + 7);
1044 format = acmd[9] >> 6;
1050 uint8_t start_track, buf[20], *bp;
1052 msf = (acmd[1] >> 1) & 1;
1053 start_track = acmd[6];
1054 if (start_track > 1 && start_track != 0xaa) {
1056 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1058 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1059 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1060 ahci_write_fis_d2h(p, slot, cfis, tfd);
1066 if (start_track <= 1) {
1086 sectors = blockif_size(p->bctx) / blockif_sectsz(p->bctx);
1090 lba_to_msf(bp, sectors);
1093 be32enc(bp, sectors);
1097 be16enc(buf, size - 2);
1100 write_prdt(p, slot, cfis, buf, len);
1101 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1102 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1109 memset(buf, 0, sizeof(buf));
1113 if (len > sizeof(buf))
1115 write_prdt(p, slot, cfis, buf, len);
1116 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1117 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1124 uint8_t start_track, *bp, buf[50];
1126 msf = (acmd[1] >> 1) & 1;
1127 start_track = acmd[6];
1163 sectors = blockif_size(p->bctx) / blockif_sectsz(p->bctx);
1167 lba_to_msf(bp, sectors);
1170 be32enc(bp, sectors);
1193 be16enc(buf, size - 2);
1196 write_prdt(p, slot, cfis, buf, len);
1197 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1198 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1205 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1207 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1208 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1209 ahci_write_fis_d2h(p, slot, cfis, tfd);
1216 atapi_report_luns(struct ahci_port *p, int slot, uint8_t *cfis)
1220 memset(buf, 0, sizeof(buf));
1223 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1224 write_prdt(p, slot, cfis, buf, sizeof(buf));
1225 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1229 atapi_read(struct ahci_port *p, int slot, uint8_t *cfis,
1230 uint32_t done, int seek)
1232 struct ahci_ioreq *aior;
1233 struct ahci_cmd_hdr *hdr;
1234 struct ahci_prdt_entry *prdt;
1235 struct blockif_req *breq;
1236 struct pci_ahci_softc *sc;
1244 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
1245 prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
1248 lba = be32dec(acmd + 2);
1249 if (acmd[0] == READ_10)
1250 len = be16dec(acmd + 7);
1252 len = be32dec(acmd + 6);
1254 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1255 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1261 * Pull request off free list
1263 aior = STAILQ_FIRST(&p->iofhd);
1264 assert(aior != NULL);
1265 STAILQ_REMOVE_HEAD(&p->iofhd, io_flist);
1270 breq = &aior->io_req;
1271 breq->br_offset = lba + done;
1272 iovcnt = hdr->prdtl - seek;
1273 if (iovcnt > BLOCKIF_IOV_MAX) {
1274 aior->prdtl = iovcnt - BLOCKIF_IOV_MAX;
1275 iovcnt = BLOCKIF_IOV_MAX;
1278 breq->br_iovcnt = iovcnt;
1281 * Mark this command in-flight.
1283 p->pending |= 1 << slot;
1286 * Stuff request onto busy list
1288 TAILQ_INSERT_HEAD(&p->iobhd, aior, io_blist);
1291 * Build up the iovec based on the prdt
1293 for (i = 0; i < iovcnt; i++) {
1296 dbcsz = (prdt->dbc & DBCMASK) + 1;
1297 breq->br_iov[i].iov_base = paddr_guest2host(ahci_ctx(sc),
1299 breq->br_iov[i].iov_len = dbcsz;
1300 aior->done += dbcsz;
1303 err = blockif_read(p->bctx, breq);
1308 atapi_request_sense(struct ahci_port *p, int slot, uint8_t *cfis)
1316 if (len > sizeof(buf))
1318 memset(buf, 0, len);
1319 buf[0] = 0x70 | (1 << 7);
1320 buf[2] = p->sense_key;
1323 write_prdt(p, slot, cfis, buf, len);
1324 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1325 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1329 atapi_start_stop_unit(struct ahci_port *p, int slot, uint8_t *cfis)
1331 uint8_t *acmd = cfis + 0x40;
1334 switch (acmd[4] & 3) {
1338 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1339 tfd = ATA_S_READY | ATA_S_DSC;
1342 /* TODO eject media */
1343 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1344 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1346 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1349 ahci_write_fis_d2h(p, slot, cfis, tfd);
1353 atapi_mode_sense(struct ahci_port *p, int slot, uint8_t *cfis)
1361 len = be16dec(acmd + 7);
1363 code = acmd[2] & 0x3f;
1368 case MODEPAGE_RW_ERROR_RECOVERY:
1372 if (len > sizeof(buf))
1375 memset(buf, 0, sizeof(buf));
1376 be16enc(buf, 16 - 2);
1381 write_prdt(p, slot, cfis, buf, len);
1382 tfd = ATA_S_READY | ATA_S_DSC;
1385 case MODEPAGE_CD_CAPABILITIES:
1389 if (len > sizeof(buf))
1392 memset(buf, 0, sizeof(buf));
1393 be16enc(buf, 30 - 2);
1399 be16enc(&buf[18], 2);
1400 be16enc(&buf[20], 512);
1401 write_prdt(p, slot, cfis, buf, len);
1402 tfd = ATA_S_READY | ATA_S_DSC;
1411 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1413 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1418 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1420 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1423 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1424 ahci_write_fis_d2h(p, slot, cfis, tfd);
1428 atapi_get_event_status_notification(struct ahci_port *p, int slot,
1436 /* we don't support asynchronous operation */
1437 if (!(acmd[1] & 1)) {
1438 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1440 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1445 len = be16dec(acmd + 7);
1446 if (len > sizeof(buf))
1449 memset(buf, 0, sizeof(buf));
1450 be16enc(buf, 8 - 2);
1454 write_prdt(p, slot, cfis, buf, len);
1455 tfd = ATA_S_READY | ATA_S_DSC;
1457 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1458 ahci_write_fis_d2h(p, slot, cfis, tfd);
1462 handle_packet_cmd(struct ahci_port *p, int slot, uint8_t *cfis)
1472 for (i = 0; i < 16; i++)
1473 DPRINTF("%02x ", acmd[i]);
1479 case TEST_UNIT_READY:
1480 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1481 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1484 atapi_inquiry(p, slot, cfis);
1487 atapi_read_capacity(p, slot, cfis);
1491 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1492 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1495 atapi_read_toc(p, slot, cfis);
1498 atapi_report_luns(p, slot, cfis);
1502 atapi_read(p, slot, cfis, 0, 0);
1505 atapi_request_sense(p, slot, cfis);
1507 case START_STOP_UNIT:
1508 atapi_start_stop_unit(p, slot, cfis);
1511 atapi_mode_sense(p, slot, cfis);
1513 case GET_EVENT_STATUS_NOTIFICATION:
1514 atapi_get_event_status_notification(p, slot, cfis);
1517 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1518 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1520 ahci_write_fis_d2h(p, slot, cfis, (p->sense_key << 12) |
1521 ATA_S_READY | ATA_S_ERROR);
1527 ahci_handle_cmd(struct ahci_port *p, int slot, uint8_t *cfis)
1531 case ATA_ATA_IDENTIFY:
1532 handle_identify(p, slot, cfis);
1534 case ATA_SETFEATURES:
1537 case ATA_SF_ENAB_SATA_SF:
1539 case ATA_SATA_SF_AN:
1540 p->tfd = ATA_S_DSC | ATA_S_READY;
1543 p->tfd = ATA_S_ERROR | ATA_S_READY;
1544 p->tfd |= (ATA_ERROR_ABORT << 8);
1548 case ATA_SF_ENAB_WCACHE:
1549 case ATA_SF_DIS_WCACHE:
1550 case ATA_SF_ENAB_RCACHE:
1551 case ATA_SF_DIS_RCACHE:
1552 p->tfd = ATA_S_DSC | ATA_S_READY;
1554 case ATA_SF_SETXFER:
1556 switch (cfis[12] & 0xf8) {
1562 p->xfermode = (cfis[12] & 0x7);
1565 p->tfd = ATA_S_DSC | ATA_S_READY;
1569 p->tfd = ATA_S_ERROR | ATA_S_READY;
1570 p->tfd |= (ATA_ERROR_ABORT << 8);
1573 ahci_write_fis_d2h(p, slot, cfis, p->tfd);
1577 if (cfis[12] != 0 &&
1578 (cfis[12] > 128 || (cfis[12] & (cfis[12] - 1)))) {
1579 p->tfd = ATA_S_ERROR | ATA_S_READY;
1580 p->tfd |= (ATA_ERROR_ABORT << 8);
1582 p->mult_sectors = cfis[12];
1583 p->tfd = ATA_S_DSC | ATA_S_READY;
1585 ahci_write_fis_d2h(p, slot, cfis, p->tfd);
1593 case ATA_READ_MUL48:
1594 case ATA_WRITE_MUL48:
1597 case ATA_READ_DMA48:
1598 case ATA_WRITE_DMA48:
1599 case ATA_READ_FPDMA_QUEUED:
1600 case ATA_WRITE_FPDMA_QUEUED:
1601 ahci_handle_dma(p, slot, cfis, 0, 0);
1603 case ATA_FLUSHCACHE:
1604 case ATA_FLUSHCACHE48:
1605 ahci_handle_flush(p, slot, cfis);
1607 case ATA_DATA_SET_MANAGEMENT:
1608 if (cfis[11] == 0 && cfis[3] == ATA_DSM_TRIM &&
1609 cfis[13] == 0 && cfis[12] == 1) {
1610 ahci_handle_dsm_trim(p, slot, cfis, 0);
1613 ahci_write_fis_d2h(p, slot, cfis,
1614 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
1616 case ATA_SEND_FPDMA_QUEUED:
1617 if ((cfis[13] & 0x1f) == ATA_SFPDMA_DSM &&
1618 cfis[17] == 0 && cfis[16] == ATA_DSM_TRIM &&
1619 cfis[11] == 0 && cfis[13] == 1) {
1620 ahci_handle_dsm_trim(p, slot, cfis, 0);
1623 ahci_write_fis_d2h(p, slot, cfis,
1624 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
1626 case ATA_READ_LOG_EXT:
1627 case ATA_READ_LOG_DMA_EXT:
1628 ahci_handle_read_log(p, slot, cfis);
1631 ahci_write_fis_d2h(p, slot, cfis,
1632 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
1634 case ATA_STANDBY_CMD:
1635 case ATA_STANDBY_IMMEDIATE:
1637 case ATA_IDLE_IMMEDIATE:
1639 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1641 case ATA_ATAPI_IDENTIFY:
1642 handle_atapi_identify(p, slot, cfis);
1644 case ATA_PACKET_CMD:
1646 ahci_write_fis_d2h(p, slot, cfis,
1647 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
1649 handle_packet_cmd(p, slot, cfis);
1652 WPRINTF("Unsupported cmd:%02x\n", cfis[2]);
1653 ahci_write_fis_d2h(p, slot, cfis,
1654 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
1660 ahci_handle_slot(struct ahci_port *p, int slot)
1662 struct ahci_cmd_hdr *hdr;
1663 struct ahci_prdt_entry *prdt;
1664 struct pci_ahci_softc *sc;
1669 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
1670 cfl = (hdr->flags & 0x1f) * 4;
1671 cfis = paddr_guest2host(ahci_ctx(sc), hdr->ctba,
1672 0x80 + hdr->prdtl * sizeof(struct ahci_prdt_entry));
1673 prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
1677 for (i = 0; i < cfl; i++) {
1680 DPRINTF("%02x ", cfis[i]);
1684 for (i = 0; i < hdr->prdtl; i++) {
1685 DPRINTF("%d@%08"PRIx64"\n", prdt->dbc & 0x3fffff, prdt->dba);
1690 if (cfis[0] != FIS_TYPE_REGH2D) {
1691 WPRINTF("Not a H2D FIS:%02x\n", cfis[0]);
1695 if (cfis[1] & 0x80) {
1696 ahci_handle_cmd(p, slot, cfis);
1698 if (cfis[15] & (1 << 2))
1700 else if (p->reset) {
1704 p->ci &= ~(1 << slot);
1709 ahci_handle_port(struct ahci_port *p)
1713 if (!(p->cmd & AHCI_P_CMD_ST))
1717 * Search for any new commands to issue ignoring those that
1718 * are already in-flight.
1720 for (i = 0; (i < 32) && p->ci; i++) {
1721 if ((p->ci & (1 << i)) && !(p->pending & (1 << i))) {
1722 p->cmd &= ~AHCI_P_CMD_CCS_MASK;
1723 p->cmd |= i << AHCI_P_CMD_CCS_SHIFT;
1724 ahci_handle_slot(p, i);
1730 * blockif callback routine - this runs in the context of the blockif
1731 * i/o thread, so the mutex needs to be acquired.
1734 ata_ioreq_cb(struct blockif_req *br, int err)
1736 struct ahci_cmd_hdr *hdr;
1737 struct ahci_ioreq *aior;
1738 struct ahci_port *p;
1739 struct pci_ahci_softc *sc;
1742 int pending, slot, ncq, dsm;
1744 DPRINTF("%s %d\n", __func__, err);
1747 aior = br->br_param;
1751 pending = aior->prdtl;
1753 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
1755 if (cfis[2] == ATA_WRITE_FPDMA_QUEUED ||
1756 cfis[2] == ATA_READ_FPDMA_QUEUED ||
1757 cfis[2] == ATA_SEND_FPDMA_QUEUED)
1759 if (cfis[2] == ATA_DATA_SET_MANAGEMENT ||
1760 (cfis[2] == ATA_SEND_FPDMA_QUEUED &&
1761 (cfis[13] & 0x1f) == ATA_SFPDMA_DSM))
1764 pthread_mutex_lock(&sc->mtx);
1767 * Delete the blockif request from the busy list
1769 TAILQ_REMOVE(&p->iobhd, aior, io_blist);
1772 * Move the blockif request back to the free list
1774 STAILQ_INSERT_TAIL(&p->iofhd, aior, io_flist);
1777 hdr->prdbc = aior->done;
1780 if (aior->done != aior->len && !err) {
1781 ahci_handle_dsm_trim(p, slot, cfis, aior->done);
1785 if (pending && !err) {
1786 ahci_handle_dma(p, slot, cfis, aior->done,
1787 hdr->prdtl - pending);
1792 if (!err && aior->done == aior->len) {
1793 tfd = ATA_S_READY | ATA_S_DSC;
1795 tfd = (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR;
1799 ahci_write_fis_sdb(p, slot, cfis, tfd);
1801 ahci_write_fis_d2h(p, slot, cfis, tfd);
1804 * This command is now complete.
1806 p->pending &= ~(1 << slot);
1808 ahci_check_stopped(p);
1810 pthread_mutex_unlock(&sc->mtx);
1811 DPRINTF("%s exit\n", __func__);
1815 atapi_ioreq_cb(struct blockif_req *br, int err)
1817 struct ahci_cmd_hdr *hdr;
1818 struct ahci_ioreq *aior;
1819 struct ahci_port *p;
1820 struct pci_ahci_softc *sc;
1825 DPRINTF("%s %d\n", __func__, err);
1827 aior = br->br_param;
1831 pending = aior->prdtl;
1833 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + aior->slot * AHCI_CL_SIZE);
1835 pthread_mutex_lock(&sc->mtx);
1838 * Delete the blockif request from the busy list
1840 TAILQ_REMOVE(&p->iobhd, aior, io_blist);
1843 * Move the blockif request back to the free list
1845 STAILQ_INSERT_TAIL(&p->iofhd, aior, io_flist);
1848 hdr->prdbc = aior->done;
1850 if (pending && !err) {
1851 atapi_read(p, slot, cfis, aior->done, hdr->prdtl - pending);
1855 if (!err && aior->done == aior->len) {
1856 tfd = ATA_S_READY | ATA_S_DSC;
1858 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1860 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1863 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1864 ahci_write_fis_d2h(p, slot, cfis, tfd);
1867 * This command is now complete.
1869 p->pending &= ~(1 << slot);
1871 ahci_check_stopped(p);
1873 pthread_mutex_unlock(&sc->mtx);
1874 DPRINTF("%s exit\n", __func__);
1878 pci_ahci_ioreq_init(struct ahci_port *pr)
1880 struct ahci_ioreq *vr;
1883 pr->ioqsz = blockif_queuesz(pr->bctx);
1884 pr->ioreq = calloc(pr->ioqsz, sizeof(struct ahci_ioreq));
1885 STAILQ_INIT(&pr->iofhd);
1888 * Add all i/o request entries to the free queue
1890 for (i = 0; i < pr->ioqsz; i++) {
1894 vr->io_req.br_callback = ata_ioreq_cb;
1896 vr->io_req.br_callback = atapi_ioreq_cb;
1897 vr->io_req.br_param = vr;
1898 STAILQ_INSERT_TAIL(&pr->iofhd, vr, io_flist);
1901 TAILQ_INIT(&pr->iobhd);
/*
 * pci_ahci_port_write - handle a guest write to a per-port AHCI register.
 * Decodes the port index and register offset from the BAR offset, then
 * dispatches (switch skeleton elided in this view) to the per-register
 * handling: PxIE, PxCMD (ST/FRE/CLO), read-only registers, and PxSCTL.
 * Caller holds sc->mtx (taken in pci_ahci_write).
 */
1905 pci_ahci_port_write(struct pci_ahci_softc *sc, uint64_t offset, uint64_t value)
1907 int port = (offset - AHCI_OFFSET) / AHCI_STEP;
1908 offset = (offset - AHCI_OFFSET) % AHCI_STEP;
1909 struct ahci_port *p = &sc->port[port];
1911 DPRINTF("pci_ahci_port %d: write offset 0x%"PRIx64" value 0x%"PRIx64"\n",
1912 port, offset, value);
/* PxIE: mask keeps only the interrupt-enable bits defined writable */
1931 p->ie = value & 0xFDC000FF;
1932 ahci_generate_intr(sc); /* a newly enabled interrupt may fire now */
/* PxCMD: ST transition - elided code sits between these lines */
1938 if (!(value & AHCI_P_CMD_ST)) {
1943 p->cmd |= AHCI_P_CMD_CR; /* command list running */
1944 clb = (uint64_t)p->clbu << 32 | p->clb;
/* map the guest command-list into host VA for direct access */
1945 p->cmd_lst = paddr_guest2host(ahci_ctx(sc), clb,
1946 AHCI_CL_SIZE * AHCI_MAX_SLOTS);
1949 if (value & AHCI_P_CMD_FRE) {
1952 p->cmd |= AHCI_P_CMD_FR; /* FIS receive running */
1953 fb = (uint64_t)p->fbu << 32 | p->fb;
1954 /* we don't support FBSCP, so rfis size is 256Bytes */
1955 p->rfis = paddr_guest2host(ahci_ctx(sc), fb, 256);
1957 p->cmd &= ~AHCI_P_CMD_FR;
1960 if (value & AHCI_P_CMD_CLO) {
/* command-list override is self-clearing per the AHCI spec */
1962 p->cmd &= ~AHCI_P_CMD_CLO;
1965 ahci_handle_port(p); /* kick processing of any issued commands */
1971 WPRINTF("pci_ahci_port: read only registers 0x%"PRIx64"\n", offset);
/* PxSCTL: only honored while the port is stopped (ST clear) */
1975 if (!(p->cmd & AHCI_P_CMD_ST)) {
1976 if (value & ATA_SC_DET_RESET)
1988 ahci_handle_port(p);
/*
 * pci_ahci_host_write - handle a guest write to a global (HBA-level) AHCI
 * register. Visible handling: read-only registers are logged and ignored;
 * GHC accepts controller reset (HR) and interrupt enable (IE); the final
 * visible path (presumably the IS register write-1-to-clear - elided)
 * re-evaluates interrupt state. Caller holds sc->mtx.
 */
1998 pci_ahci_host_write(struct pci_ahci_softc *sc, uint64_t offset, uint64_t value)
2000 DPRINTF("pci_ahci_host: write offset 0x%"PRIx64" value 0x%"PRIx64"\n",
2008 DPRINTF("pci_ahci_host: read only registers 0x%"PRIx64"\n", offset);
2011 if (value & AHCI_GHC_HR) /* HBA reset requested (handler elided) */
2013 else if (value & AHCI_GHC_IE) {
2014 sc->ghc |= AHCI_GHC_IE;
2015 ahci_generate_intr(sc); /* pending interrupts may now assert */
2020 ahci_generate_intr(sc);
/*
 * pci_ahci_write - BAR write entry point registered with the PCI emulation
 * layer. The AHCI memory BAR is index 5 (ABAR); writes below AHCI_OFFSET
 * go to the global HBA registers, writes within the per-port window go to
 * the port registers, anything else is logged. All register access is
 * serialized under sc->mtx.
 */
2028 pci_ahci_write(struct vmctx *ctx, int vcpu, struct pci_devinst *pi,
2029 int baridx, uint64_t offset, int size, uint64_t value)
2031 struct pci_ahci_softc *sc = pi->pi_arg;
2033 assert(baridx == 5); /* only the ABAR (BAR 5) is allocated */
2036 pthread_mutex_lock(&sc->mtx);
2038 if (offset < AHCI_OFFSET)
2039 pci_ahci_host_write(sc, offset, value);
2040 else if (offset < AHCI_OFFSET + sc->ports * AHCI_STEP)
2041 pci_ahci_port_write(sc, offset, value);
2043 WPRINTF("pci_ahci: unknown i/o write offset 0x%"PRIx64"\n", offset);
2045 pthread_mutex_unlock(&sc->mtx);
/*
 * pci_ahci_host_read - read a global HBA register. The visible fallback
 * path computes a pointer into the softc's register block by offsetting
 * from &sc->cap - this relies on cap..(last global register) being laid
 * out contiguously as uint32_t fields in struct pci_ahci_softc (fields
 * not visible here; assumption to confirm against the struct definition).
 */
2049 pci_ahci_host_read(struct pci_ahci_softc *sc, uint64_t offset)
2065 uint32_t *p = &sc->cap;
2066 p += (offset - AHCI_CAP) / sizeof(uint32_t);
2074 DPRINTF("pci_ahci_host: read offset 0x%"PRIx64" value 0x%x\n",
/*
 * pci_ahci_port_read - read a per-port AHCI register. Decodes port index
 * and register offset, then (like the host read) indexes into the port's
 * register fields by offsetting from &port->clb - assumes clb..last port
 * register are contiguous uint32_t fields in struct ahci_port (struct not
 * visible here; confirm against its definition).
 */
2081 pci_ahci_port_read(struct pci_ahci_softc *sc, uint64_t offset)
2084 int port = (offset - AHCI_OFFSET) / AHCI_STEP;
2085 offset = (offset - AHCI_OFFSET) % AHCI_STEP;
2105 uint32_t *p= &sc->port[port].clb;
2106 p += (offset - AHCI_P_CLB) / sizeof(uint32_t);
2115 DPRINTF("pci_ahci_port %d: read offset 0x%"PRIx64" value 0x%x\n",
2116 port, offset, value);
/*
 * pci_ahci_read - BAR read entry point registered with the PCI emulation
 * layer. Mirrors pci_ahci_write: BAR 5 only, global vs. per-port register
 * dispatch by offset, serialized under sc->mtx. Unknown offsets are
 * logged (the value returned in that case is set by elided code).
 */
2122 pci_ahci_read(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int baridx,
2123 uint64_t offset, int size)
2125 struct pci_ahci_softc *sc = pi->pi_arg;
2128 assert(baridx == 5); /* only the ABAR (BAR 5) is allocated */
2131 pthread_mutex_lock(&sc->mtx);
2133 if (offset < AHCI_OFFSET)
2134 value = pci_ahci_host_read(sc, offset);
2135 else if (offset < AHCI_OFFSET + sc->ports * AHCI_STEP)
2136 value = pci_ahci_port_read(sc, offset);
2139 WPRINTF("pci_ahci: unknown i/o read offset 0x%"PRIx64"\n", offset);
2142 pthread_mutex_unlock(&sc->mtx);
/*
 * pci_ahci_init - common init for the ahci-hd / ahci-cd device models.
 * Opens the backing image on port 0 (atapi selects ATAPI vs. ATA
 * personality), sets up the request pool and mutex, programs the AHCI
 * capability registers to mimic an Intel ICH8 HBA, fills in PCI config
 * space, and allocates the memory BAR sized to cover all port register
 * windows.
 * NOTE(review): two issues visible in this elided chunk -
 *   1. the calloc() of sc is not checked before use;
 *   2. the fopen("/tmp/log") looks like leftover debug scaffolding: a
 *      fixed, world-writable path and no error check. Confirm whether
 *      'dbg' is still needed before shipping.
 */
2148 pci_ahci_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts, int atapi)
2150 char bident[sizeof("XX:X:X")];
2151 struct blockif_ctxt *bctxt;
2152 struct pci_ahci_softc *sc;
2158 fprintf(stderr, "pci_ahci: backing device required\n");
2163 dbg = fopen("/tmp/log", "w+"); /* NOTE(review): debug artifact? */
2166 sc = calloc(1, sizeof(struct pci_ahci_softc));
2169 sc->ports = MAX_PORTS;
2172 * Only use port 0 for a backing device. All other ports will be
2175 sc->port[0].atapi = atapi;
2178 * Attempt to open the backing image. Use the PCI
2179 * slot/func for the identifier string.
2181 snprintf(bident, sizeof(bident), "%d:%d", pi->pi_slot, pi->pi_func);
2182 bctxt = blockif_open(opts, bident);
2183 if (bctxt == NULL) {
2187 sc->port[0].bctx = bctxt;
2188 sc->port[0].pr_sc = sc; /* back-pointer used by completion callbacks */
2191 * Allocate blockif request structures and add them
2194 pci_ahci_ioreq_init(&sc->port[0]);
2196 pthread_mutex_init(&sc->mtx, NULL);
2198 /* Intel ICH8 AHCI */
2199 slots = sc->port[0].ioqsz; /* advertise as many slots as the queue */
/* CAP: 64-bit DMA, NCQ, SNotification, Gen3 speed, port count, etc. */
2203 sc->cap = AHCI_CAP_64BIT | AHCI_CAP_SNCQ | AHCI_CAP_SSNTF |
2204 AHCI_CAP_SMPS | AHCI_CAP_SSS | AHCI_CAP_SALP |
2205 AHCI_CAP_SAL | AHCI_CAP_SCLO | (0x3 << AHCI_CAP_ISS_SHIFT)|
2206 AHCI_CAP_PMD | AHCI_CAP_SSC | AHCI_CAP_PSC |
2207 (slots << AHCI_CAP_NCS_SHIFT) | AHCI_CAP_SXS | (sc->ports - 1);
2209 /* Only port 0 implemented */
2212 sc->cap2 = AHCI_CAP2_APST;
/* PCI identity: Intel 82801HR/HO/HH (ICH8) SATA AHCI controller */
2215 pci_set_cfgdata16(pi, PCIR_DEVICE, 0x2821);
2216 pci_set_cfgdata16(pi, PCIR_VENDOR, 0x8086);
2217 pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_STORAGE);
2218 pci_set_cfgdata8(pi, PCIR_SUBCLASS, PCIS_STORAGE_SATA);
2219 pci_set_cfgdata8(pi, PCIR_PROGIF, PCIP_STORAGE_SATA_AHCI_1_0);
2220 pci_emul_add_msicap(pi, 1);
/* ABAR must cover the global block plus one window per port */
2221 pci_emul_alloc_bar(pi, 5, PCIBAR_MEM32,
2222 AHCI_OFFSET + sc->ports * AHCI_STEP);
2224 pci_lintr_request(pi);
/* error path: undo the blockif open (label elided in this view) */
2228 blockif_close(sc->port[0].bctx);
/* pci_ahci_hd_init - "ahci-hd" init: common init with ATA personality. */
2236 pci_ahci_hd_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
2239 return (pci_ahci_init(ctx, pi, opts, 0));
/* pci_ahci_atapi_init - "ahci-cd" init: common init with ATAPI personality. */
2243 pci_ahci_atapi_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
2246 return (pci_ahci_init(ctx, pi, opts, 1));
2250 * Use separate emulation names to distinguish drive and atapi devices
/* Device model for "-s <slot>,ahci-hd,<image>": ATA hard disk. */
2252 struct pci_devemu pci_de_ahci_hd = {
2253 .pe_emu = "ahci-hd",
2254 .pe_init = pci_ahci_hd_init,
2255 .pe_barwrite = pci_ahci_write,
2256 .pe_barread = pci_ahci_read
/* register with the device-emulation linker set */
2258 PCI_EMUL_SET(pci_de_ahci_hd);
/* Device model for "-s <slot>,ahci-cd,<image>": ATAPI CD-ROM. */
2260 struct pci_devemu pci_de_ahci_cd = {
2261 .pe_emu = "ahci-cd",
2262 .pe_init = pci_ahci_atapi_init,
2263 .pe_barwrite = pci_ahci_write,
2264 .pe_barread = pci_ahci_read
/* register with the device-emulation linker set */
2266 PCI_EMUL_SET(pci_de_ahci_cd);