2 * Copyright (c) 2013 Zhixiang Yu <zcore@freebsd.org>
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
32 #include <sys/param.h>
33 #include <sys/linker_set.h>
36 #include <sys/ioctl.h>
39 #include <sys/endian.h>
/* Controller limits and PxSIG signature values reported for attached drives. */
58 #define MAX_PORTS 6 /* Intel ICH8 AHCI supports 6 ports */
60 #define PxSIG_ATA 0x00000101 /* ATA drive */
61 #define PxSIG_ATAPI 0xeb140101 /* ATAPI drive */
/*
 * SATA FIS type codes (fragment of an enum; the enum header is not
 * visible in this elided listing).
 */
64 FIS_TYPE_REGH2D = 0x27, /* Register FIS - host to device */
65 FIS_TYPE_REGD2H = 0x34, /* Register FIS - device to host */
66 FIS_TYPE_DMAACT = 0x39, /* DMA activate FIS - device to host */
67 FIS_TYPE_DMASETUP = 0x41, /* DMA setup FIS - bidirectional */
68 FIS_TYPE_DATA = 0x46, /* Data FIS - bidirectional */
69 FIS_TYPE_BIST = 0x58, /* BIST activate FIS - bidirectional */
70 FIS_TYPE_PIOSETUP = 0x5F, /* PIO setup FIS - device to host */
71 FIS_TYPE_SETDEVBITS = 0xA1, /* Set dev bits FIS - device to host */
/* SCSI/ATAPI opcodes handled by the packet-command emulation below. */
77 #define TEST_UNIT_READY 0x00
78 #define REQUEST_SENSE 0x03
80 #define START_STOP_UNIT 0x1B
81 #define PREVENT_ALLOW 0x1E
82 #define READ_CAPACITY 0x25
84 #define POSITION_TO_ELEMENT 0x2B
86 #define GET_EVENT_STATUS_NOTIFICATION 0x4A
87 #define MODE_SENSE_10 0x5A
92 * SCSI mode page codes
94 #define MODEPAGE_RW_ERROR_RECOVERY 0x01
95 #define MODEPAGE_CD_CAPABILITIES 0x2A
/*
 * Debug/warning printf macros; DPRINTF has a debug-enabled variant that
 * writes to the `dbg' stream and a no-op variant (presumably selected by
 * a #ifdef not visible here -- TODO confirm).
 */
102 #define DPRINTF(format, arg...) do{fprintf(dbg, format, ##arg);fflush(dbg);}while(0)
104 #define DPRINTF(format, arg...)
106 #define WPRINTF(format, arg...) printf(format, ##arg)
/*
 * Per-request state: wraps a blockif request, back-pointer to the owning
 * port, and free-list linkage (struct fragments; several members are
 * elided from this view).
 */
109 struct blockif_req io_req;
110 struct ahci_port *io_pr;
111 STAILQ_ENTRY(ahci_ioreq) io_list;
/* Per-port state fragment: backing block device and softc back-pointer. */
120 struct blockif_ctxt *bctx;
121 struct pci_ahci_softc *pr_sc;
/* Pool of ahci_ioreq entries and the free-list head they are kept on. */
152 struct ahci_ioreq *ioreq;
154 STAILQ_HEAD(ahci_fhead, ahci_ioreq) iofhd;
/* Guest-visible AHCI command-list header (fields elided). */
157 struct ahci_cmd_hdr {
162 uint32_t reserved[4];
/* Physical region descriptor table entry; dbc holds byte count - 1. */
165 struct ahci_prdt_entry {
168 #define DBCMASK 0x3fffff
/* Controller soft state: PCI instance plus one ahci_port per port. */
172 struct pci_ahci_softc {
173 struct pci_devinst *asc_pi;
187 struct ahci_port port[MAX_PORTS];
189 #define ahci_ctx(sc) ((sc)->asc_pi->pi_vmctx)
/*
 * Convert a CD logical block address to MSF (minute/second/frame) form:
 * 75 frames per second, 60 seconds per minute.  The frame byte (buf[2])
 * is not visible in this elided listing.
 */
191 static inline void lba_to_msf(uint8_t *buf, int lba)
194 buf[0] = (lba / 75) / 60;
195 buf[1] = (lba / 75) % 60;
200 * generate HBA intr depending on whether or not ports within
201 * the controller have an interrupt pending.
204 ahci_generate_intr(struct pci_ahci_softc *sc)
/* Scan every port; presumably folds per-port pending bits into sc->is. */
208 for (i = 0; i < sc->ports; i++) {
209 struct ahci_port *pr;
215 DPRINTF("%s %x\n", __func__, sc->is);
/* Deliver MSI only when some IS bit is set and global IE is enabled. */
217 if (sc->is && (sc->ghc & AHCI_GHC_IE))
218 pci_generate_msi(sc->asc_pi, 0);
/*
 * Copy a completed FIS of type `ft' into the port's received-FIS area
 * at the offset architected for that type, then raise the interrupt.
 */
222 ahci_write_fis(struct ahci_port *p, enum sata_fis_type ft, uint8_t *fis)
224 int offset, len, irq;
/* Nothing to deliver if FIS receive is not enabled or no rfis mapping. */
226 if (p->rfis == NULL || !(p->cmd & AHCI_P_CMD_FRE))
/* Per-type offset/len selection (case bodies elided from this view). */
230 case FIS_TYPE_REGD2H:
235 case FIS_TYPE_SETDEVBITS:
240 case FIS_TYPE_PIOSETUP:
246 WPRINTF("unsupported fis type %d\n", ft);
249 memcpy(p->rfis + offset, fis, len);
252 ahci_generate_intr(p->pr_sc);
/*
 * Build and deliver a Set Device Bits FIS for a completed NCQ slot.
 * tfd carries status in the low byte and error in bits 8-15.
 */
257 ahci_write_fis_sdb(struct ahci_port *p, int slot, uint32_t tfd)
262 error = (tfd >> 8) & 0xff;
263 memset(fis, 0, sizeof(fis))
266 *(uint32_t *)(fis + 4) = (1 << slot);
/* Task-file error interrupt when the status byte has ERR set. */
267 if (fis[2] & ATA_S_ERROR)
268 p->is |= AHCI_P_IX_TFE;
270 ahci_write_fis(p, FIS_TYPE_SETDEVBITS, fis);
/*
 * Build and deliver a Register D2H FIS for a completed command slot,
 * echoing LBA/count fields from the command FIS (those copies are
 * elided here).  Clears the slot's CI bit on completion.
 */
274 ahci_write_fis_d2h(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t tfd)
279 error = (tfd >> 8) & 0xff;
280 memset(fis, 0, sizeof(fis));
281 fis[0] = FIS_TYPE_REGD2H;
/* Task-file error interrupt when the status byte has ERR set. */
295 if (fis[2] & ATA_S_ERROR)
296 p->is |= AHCI_P_IX_TFE;
298 p->ci &= ~(1 << slot);
299 ahci_write_fis(p, FIS_TYPE_REGD2H, fis);
/*
 * Deliver the initial D2H register FIS that follows a device reset
 * (signature/status fields are elided from this view).
 */
303 ahci_write_reset_fis_d2h(struct ahci_port *p)
307 memset(fis, 0, sizeof(fis));
308 fis[0] = FIS_TYPE_REGD2H;
316 ahci_write_fis(p, FIS_TYPE_REGD2H, fis);
/*
 * Reset one port to power-on defaults.  With no backing device the
 * port reports "no device"; otherwise PHY online at Gen2 and the
 * ATA/ATAPI signature, followed by the reset D2H FIS.
 */
320 ahci_port_reset(struct ahci_port *pr)
325 pr->xfermode = ATA_UDMA6;
326 pr->mult_sectors = 128;
/* No backing blockif: nothing attached on this port. */
329 pr->ssts = ATA_SS_DET_NO_DEVICE;
330 pr->sig = 0xFFFFFFFF;
334 pr->ssts = ATA_SS_DET_PHY_ONLINE | ATA_SS_SPD_GEN2 |
336 pr->tfd = (1 << 8) | ATA_S_DSC | ATA_S_DMA;
339 pr->tfd |= ATA_S_READY;
341 pr->sig = PxSIG_ATAPI;
342 ahci_write_reset_fis_d2h(pr);
/* Controller (HBA) reset: AHCI-enable stays set, then reset every port. */
346 ahci_reset(struct pci_ahci_softc *sc)
350 sc->ghc = AHCI_GHC_AE;
352 for (i = 0; i < sc->ports; i++) {
355 ahci_port_reset(&sc->port[i]);
/*
 * Copy an ASCII string into an ATA IDENTIFY string field, which stores
 * characters byte-swapped within each 16-bit word (hence the i ^ 1).
 * Padding of the remainder is elided from this view.
 */
360 ata_string(uint8_t *dest, const char *src, int len)
364 for (i = 0; i < len; i++) {
366 dest[i ^ 1] = *src++;
/*
 * Copy an ASCII string into a SCSI INQUIRY field (no byte swapping;
 * the copy/padding body is elided from this view).
 */
373 atapi_string(uint8_t *dest, const char *src, int len)
377 for (i = 0; i < len; i++) {
/*
 * Start (or continue, via `done'/`seek') an ATA DMA read or write.
 * Decodes the LBA/length from the command FIS according to the command
 * flavor (28-bit, 48-bit, or NCQ), builds an iovec from the guest PRDT,
 * and queues the request on the blockif.  Requests larger than
 * BLOCKIF_IOV_MAX PRDT entries are split; the remainder is recorded in
 * aior->prdtl and resumed from the completion callback.
 */
386 ahci_handle_dma(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t done,
389 struct ahci_ioreq *aior;
390 struct blockif_req *breq;
391 struct pci_ahci_softc *sc;
392 struct ahci_prdt_entry *prdt;
393 struct ahci_cmd_hdr *hdr;
396 int i, err, iovcnt, ncq, readop;
/* PRDT immediately follows the 0x80-byte command FIS area. */
399 prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
400 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
/* Direction: writes are the three WRITE variants, everything else reads. */
405 if (cfis[2] == ATA_WRITE_DMA || cfis[2] == ATA_WRITE_DMA48 ||
406 cfis[2] == ATA_WRITE_FPDMA_QUEUED)
/* NCQ commands: 48-bit LBA spread over FIS bytes, count in bytes 3/11. */
409 if (cfis[2] == ATA_WRITE_FPDMA_QUEUED ||
410 cfis[2] == ATA_READ_FPDMA_QUEUED) {
411 lba = ((uint64_t)cfis[10] << 40) |
412 ((uint64_t)cfis[9] << 32) |
413 ((uint64_t)cfis[8] << 24) |
414 ((uint64_t)cfis[6] << 16) |
415 ((uint64_t)cfis[5] << 8) |
417 len = cfis[11] << 8 | cfis[3];
/* 48-bit (non-queued) DMA: same LBA layout, count in bytes 12/13. */
421 } else if (cfis[2] == ATA_READ_DMA48 || cfis[2] == ATA_WRITE_DMA48) {
422 lba = ((uint64_t)cfis[10] << 40) |
423 ((uint64_t)cfis[9] << 32) |
424 ((uint64_t)cfis[8] << 24) |
425 ((uint64_t)cfis[6] << 16) |
426 ((uint64_t)cfis[5] << 8) |
428 len = cfis[13] << 8 | cfis[12];
/* 28-bit LBA: low nibble of the device byte plus bytes 4-6. */
432 lba = ((cfis[7] & 0xf) << 24) | (cfis[6] << 16) |
433 (cfis[5] << 8) | cfis[4];
/* Convert sector units to byte units for blockif. */
438 lba *= blockif_sectsz(p->bctx);
439 len *= blockif_sectsz(p->bctx);
442 * Pull request off free list
444 aior = STAILQ_FIRST(&p->iofhd);
445 assert(aior != NULL);
446 STAILQ_REMOVE_HEAD(&p->iofhd, io_list);
451 breq = &aior->io_req;
/* `done' carries bytes already transferred on a continued request. */
452 breq->br_offset = lba + done;
453 iovcnt = hdr->prdtl - seek;
/* Clamp to the blockif iovec limit; remember how many PRDs remain. */
454 if (iovcnt > BLOCKIF_IOV_MAX) {
455 aior->prdtl = iovcnt - BLOCKIF_IOV_MAX;
456 iovcnt = BLOCKIF_IOV_MAX;
459 breq->br_iovcnt = iovcnt;
462 * Build up the iovec based on the prdt
464 for (i = 0; i < iovcnt; i++) {
467 dbcsz = (prdt->dbc & DBCMASK) + 1;
468 breq->br_iov[i].iov_base = paddr_guest2host(ahci_ctx(sc),
470 breq->br_iov[i].iov_len = dbcsz;
475 err = blockif_read(p->bctx, breq);
477 err = blockif_write(p->bctx, breq);
/* For a fully-issued NCQ command the CI bit clears at issue time. */
480 if (!aior->prdtl && ncq)
481 p->ci &= ~(1 << slot);
/*
 * Queue an ATA FLUSH CACHE as an asynchronous blockif flush; completion
 * is reported from the blockif callback.
 */
485 ahci_handle_flush(struct ahci_port *p, int slot, uint8_t *cfis)
487 struct ahci_ioreq *aior;
488 struct blockif_req *breq;
492 * Pull request off free list
494 aior = STAILQ_FIRST(&p->iofhd);
495 assert(aior != NULL);
496 STAILQ_REMOVE_HEAD(&p->iofhd, io_list);
500 breq = &aior->io_req;
502 err = blockif_flush(p->bctx, breq);
/*
 * Scatter up to `size' bytes from a host buffer into the guest memory
 * regions described by the slot's PRDT, recording the transferred byte
 * count in the command header's prdbc field.
 */
507 write_prdt(struct ahci_port *p, int slot, uint8_t *cfis,
510 struct ahci_cmd_hdr *hdr;
511 struct ahci_prdt_entry *prdt;
515 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
518 prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
519 for (i = 0; i < hdr->prdtl && len; i++) {
523 dbcsz = (prdt->dbc & DBCMASK) + 1;
524 ptr = paddr_guest2host(ahci_ctx(p->pr_sc), prdt->dba, dbcsz);
/* NOTE(review): copies dbcsz even when remaining len is smaller --
 * presumably clamped by an elided line; verify against full source. */
525 memcpy(ptr, from, dbcsz);
530 hdr->prdbc = size - len;
/*
 * ATA IDENTIFY DEVICE: synthesize the 512-byte identify data for the
 * emulated disk and copy it to the guest via the PRDT.  Aborts with a
 * task-file error for ATAPI ports or when no PRDT entries are given.
 */
534 handle_identify(struct ahci_port *p, int slot, uint8_t *cfis)
536 struct ahci_cmd_hdr *hdr;
538 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
539 if (p->atapi || hdr->prdtl == 0) {
540 p->tfd = (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR;
541 p->is |= AHCI_P_IX_TFE;
546 sectors = blockif_size(p->bctx) / blockif_sectsz(p->bctx);
547 memset(buf, 0, sizeof(buf));
549 /* TODO emulate different serial? */
550 ata_string((uint8_t *)(buf+10), "123456", 20);
551 ata_string((uint8_t *)(buf+23), "001", 8);
552 ata_string((uint8_t *)(buf+27), "BHYVE SATA DISK", 40);
/* Word 47: READ/WRITE MULTIPLE supports up to 128 sectors. */
553 buf[47] = (0x8000 | 128);
555 buf[49] = (1 << 8 | 1 << 9 | 1 << 11);
557 buf[53] = (1 << 1 | 1 << 2);
/* Word 59: current multiple sector setting, valid bit set. */
559 buf[59] = (0x100 | p->mult_sectors);
561 buf[61] = (sectors >> 16);
/* Words 63/88: advertise the currently selected WDMA/UDMA mode. */
563 if (p->xfermode & ATA_WDMA0)
564 buf[63] |= (1 << ((p->xfermode & 7) + 8));
571 buf[76] = (1 << 8 | 1 << 2);
574 buf[82] = (1 << 5 | 1 << 14);
575 buf[83] = (1 << 10 | 1 << 12 | 1 << 13 | 1 << 14);
577 buf[85] = (1 << 5 | 1 << 14);
578 buf[86] = (1 << 10 | 1 << 12 | 1 << 13);
581 if (p->xfermode & ATA_UDMA0)
582 buf[88] |= (1 << ((p->xfermode & 7) + 8));
583 buf[93] = (1 | 1 <<14);
/* Words 100-103: 48-bit total sector count. */
585 buf[101] = (sectors >> 16);
586 buf[102] = (sectors >> 32);
587 buf[103] = (sectors >> 48);
588 write_prdt(p, slot, cfis, (void *)buf, sizeof(buf));
589 p->tfd = ATA_S_DSC | ATA_S_READY;
590 p->is |= AHCI_P_IX_DP;
592 p->ci &= ~(1 << slot);
593 ahci_generate_intr(p->pr_sc);
/*
 * ATA IDENTIFY PACKET DEVICE: synthesize identify data for the emulated
 * DVD-ROM.  The non-ATAPI abort path sets a task-file error (its guard
 * condition is elided from this view).
 */
597 handle_atapi_identify(struct ahci_port *p, int slot, uint8_t *cfis)
600 p->tfd = (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR;
601 p->is |= AHCI_P_IX_TFE;
605 memset(buf, 0, sizeof(buf));
/* Word 0: ATAPI device, CD/DVD class, removable, 12-byte packets. */
606 buf[0] = (2 << 14 | 5 << 8 | 1 << 7 | 2 << 5);
607 /* TODO emulate different serial? */
608 ata_string((uint8_t *)(buf+10), "123456", 20);
609 ata_string((uint8_t *)(buf+23), "001", 8);
610 ata_string((uint8_t *)(buf+27), "BHYVE SATA DVD ROM", 40);
611 buf[49] = (1 << 9 | 1 << 8);
612 buf[50] = (1 << 14 | 1);
613 buf[53] = (1 << 2 | 1 << 1);
621 buf[76] = (1 << 2 | 1 << 1);
623 buf[80] = (0x1f << 4);
629 buf[88] = (1 << 14 | 0x7f);
630 write_prdt(p, slot, cfis, (void *)buf, sizeof(buf));
631 p->tfd = ATA_S_DSC | ATA_S_READY;
632 p->is |= AHCI_P_IX_DHR;
634 p->ci &= ~(1 << slot);
635 ahci_generate_intr(p->pr_sc);
/*
 * SCSI INQUIRY: return fixed vendor/product/revision identification
 * for the emulated DVD-ROM (standard-data header bytes are elided).
 */
639 atapi_inquiry(struct ahci_port *p, int slot, uint8_t *cfis)
655 atapi_string(buf + 8, "BHYVE", 8);
656 atapi_string(buf + 16, "BHYVE DVD-ROM", 16);
657 atapi_string(buf + 32, "001", 4);
/* Interrupt byte: command complete, data direction device->host. */
662 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
663 write_prdt(p, slot, cfis, buf, len);
664 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
/*
 * SCSI READ CAPACITY(10): report last LBA and a fixed 2048-byte CD
 * sector size, both big-endian.
 */
668 atapi_read_capacity(struct ahci_port *p, int slot, uint8_t *cfis)
673 sectors = blockif_size(p->bctx) / 2048;
/* READ CAPACITY returns the LAST addressable block, hence - 1. */
674 be32enc(buf, sectors - 1);
675 be32enc(buf + 4, 2048);
676 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
677 write_prdt(p, slot, cfis, buf, sizeof(buf));
678 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
/*
 * SCSI READ TOC: synthesize a single-track table of contents for the
 * emulated disc.  `format' (top bits of byte 9) selects the TOC flavor;
 * the visible cases cover the standard TOC and a fuller variant, in
 * LBA or MSF addressing per the MSF bit of byte 1.
 */
682 atapi_read_toc(struct ahci_port *p, int slot, uint8_t *cfis)
690 len = be16dec(acmd + 7);
691 format = acmd[9] >> 6;
/* Format 0: formatted TOC. */
697 uint8_t start_track, buf[20], *bp;
699 msf = (acmd[1] >> 1) & 1;
700 start_track = acmd[6];
/* Only track 1 and the lead-out (0xaa) exist on this synthetic disc. */
701 if (start_track > 1 && start_track != 0xaa) {
703 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
705 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
706 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
707 ahci_write_fis_d2h(p, slot, cfis, tfd);
713 if (start_track <= 1) {
/* Lead-out address: total sector count, in MSF or LBA form. */
733 sectors = blockif_size(p->bctx) / blockif_sectsz(p->bctx);
737 lba_to_msf(bp, sectors);
740 be32enc(bp, sectors);
744 be16enc(buf, size - 2);
747 write_prdt(p, slot, cfis, buf, len);
748 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
749 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
/* Another TOC format case (header elided): zeroed response. */
756 memset(buf, 0, sizeof(buf));
760 if (len > sizeof(buf))
762 write_prdt(p, slot, cfis, buf, len);
763 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
764 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
/* Full/raw TOC case: track descriptors built in buf (mostly elided). */
771 uint8_t start_track, *bp, buf[50];
773 msf = (acmd[1] >> 1) & 1;
774 start_track = acmd[6];
810 sectors = blockif_size(p->bctx) / blockif_sectsz(p->bctx);
814 lba_to_msf(bp, sectors);
817 be32enc(bp, sectors);
840 be16enc(buf, size - 2);
843 write_prdt(p, slot, cfis, buf, len);
844 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
845 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
/* Unsupported format: fail with ILLEGAL REQUEST sense. */
852 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
854 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
855 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
856 ahci_write_fis_d2h(p, slot, cfis, tfd);
/*
 * SCSI READ(10)/READ(12): start or continue (via `done'/`seek') an
 * asynchronous read from the backing image.  Zero-length reads complete
 * immediately; otherwise an iovec is built from the PRDT exactly as in
 * ahci_handle_dma, splitting at BLOCKIF_IOV_MAX entries.
 */
863 atapi_read(struct ahci_port *p, int slot, uint8_t *cfis,
864 uint32_t done, int seek)
866 struct ahci_ioreq *aior;
867 struct ahci_cmd_hdr *hdr;
868 struct ahci_prdt_entry *prdt;
869 struct blockif_req *breq;
870 struct pci_ahci_softc *sc;
878 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
879 prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
/* LBA at CDB bytes 2-5; length at 7-8 (READ 10) or 6-9 (READ 12). */
882 lba = be32dec(acmd + 2);
883 if (acmd[0] == READ_10)
884 len = be16dec(acmd + 7);
886 len = be32dec(acmd + 6);
/* Zero-length transfer: complete successfully with no data phase. */
888 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
889 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
895 * Pull request off free list
897 aior = STAILQ_FIRST(&p->iofhd);
898 assert(aior != NULL);
899 STAILQ_REMOVE_HEAD(&p->iofhd, io_list);
904 breq = &aior->io_req;
905 breq->br_offset = lba + done;
906 iovcnt = hdr->prdtl - seek;
907 if (iovcnt > BLOCKIF_IOV_MAX) {
908 aior->prdtl = iovcnt - BLOCKIF_IOV_MAX;
909 iovcnt = BLOCKIF_IOV_MAX;
912 breq->br_iovcnt = iovcnt;
915 * Build up the iovec based on the prdt
917 for (i = 0; i < iovcnt; i++) {
920 dbcsz = (prdt->dbc & DBCMASK) + 1;
921 breq->br_iov[i].iov_base = paddr_guest2host(ahci_ctx(sc),
923 breq->br_iov[i].iov_len = dbcsz;
927 err = blockif_read(p->bctx, breq);
/*
 * SCSI REQUEST SENSE: return fixed-format sense data built from the
 * port's stored sense_key (ASC bytes elided from this view).
 */
932 atapi_request_sense(struct ahci_port *p, int slot, uint8_t *cfis)
940 if (len > sizeof(buf))
/* 0x70 = current fixed sense; bit 7 = valid. */
943 buf[0] = 0x70 | (1 << 7);
944 buf[2] = p->sense_key;
947 write_prdt(p, slot, cfis, buf, len);
948 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
949 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
/*
 * SCSI START STOP UNIT: start/stop succeed trivially; eject is not
 * implemented and fails with ILLEGAL REQUEST.
 */
953 atapi_start_stop_unit(struct ahci_port *p, int slot, uint8_t *cfis)
955 uint8_t *acmd = cfis + 0x40;
/* Byte 4 low bits: 0=stop, 1=start, 2=eject, 3=load. */
958 switch (acmd[4] & 3) {
962 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
963 tfd = ATA_S_READY | ATA_S_DSC;
966 /* TODO eject media */
967 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
968 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
970 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
973 ahci_write_fis_d2h(p, slot, cfis, tfd);
/*
 * SCSI MODE SENSE(10): supports the R/W error-recovery page (16 bytes)
 * and the CD capabilities page (30 bytes); any other page code fails
 * with ILLEGAL REQUEST sense.
 */
977 atapi_mode_sense(struct ahci_port *p, int slot, uint8_t *cfis)
985 len = be16dec(acmd + 7);
987 code = acmd[2] & 0x3f;
992 case MODEPAGE_RW_ERROR_RECOVERY:
996 if (len > sizeof(buf))
999 memset(buf, 0, sizeof(buf));
/* Mode data length excludes its own 2-byte field. */
1000 be16enc(buf, 16 - 2);
1005 write_prdt(p, slot, cfis, buf, len);
1006 tfd = ATA_S_READY | ATA_S_DSC;
1009 case MODEPAGE_CD_CAPABILITIES:
1013 if (len > sizeof(buf))
1016 memset(buf, 0, sizeof(buf));
1017 be16enc(buf, 30 - 2);
/* Capabilities page: 2 volume levels, 512 KB buffer size. */
1023 be16enc(&buf[18], 2);
1024 be16enc(&buf[20], 512);
1025 write_prdt(p, slot, cfis, buf, len);
1026 tfd = ATA_S_READY | ATA_S_DSC;
/* Unsupported page / bad request: ILLEGAL REQUEST sense. */
1035 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1037 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1042 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1044 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1047 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1048 ahci_write_fis_d2h(p, slot, cfis, tfd);
/*
 * SCSI GET EVENT STATUS NOTIFICATION: only polled operation is
 * supported; an asynchronous request (polled bit clear) fails with
 * ILLEGAL REQUEST.  The polled reply is an 8-byte "no event" record.
 */
1052 atapi_get_event_status_notification(struct ahci_port *p, int slot,
1060 /* we don't support asynchronous operation */
1061 if (!(acmd[1] & 1)) {
1062 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1064 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1069 len = be16dec(acmd + 7);
1070 if (len > sizeof(buf))
1073 memset(buf, 0, sizeof(buf));
1074 be16enc(buf, 8 - 2);
1078 write_prdt(p, slot, cfis, buf, len);
1079 tfd = ATA_S_READY | ATA_S_DSC;
1081 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1082 ahci_write_fis_d2h(p, slot, cfis, tfd);
/*
 * Dispatch an ATAPI PACKET command: the 16-byte CDB lives at cfis+0x40
 * and is routed to the atapi_* handler for its opcode.  Unknown opcodes
 * complete with ILLEGAL REQUEST sense.
 */
1086 handle_packet_cmd(struct ahci_port *p, int slot, uint8_t *cfis)
1096 for (i = 0; i < 16; i++)
1097 DPRINTF("%02x ", acmd[i]);
1103 case TEST_UNIT_READY:
1104 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1105 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1108 atapi_inquiry(p, slot, cfis);
1111 atapi_read_capacity(p, slot, cfis);
/* PREVENT/ALLOW and similar no-ops succeed immediately. */
1115 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1116 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1119 atapi_read_toc(p, slot, cfis);
1123 atapi_read(p, slot, cfis, 0, 0);
1126 atapi_request_sense(p, slot, cfis);
1128 case START_STOP_UNIT:
1129 atapi_start_stop_unit(p, slot, cfis);
1132 atapi_mode_sense(p, slot, cfis);
1134 case GET_EVENT_STATUS_NOTIFICATION:
1135 atapi_get_event_status_notification(p, slot, cfis);
/* Unsupported opcode: record and report ILLEGAL REQUEST. */
1138 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1139 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1141 ahci_write_fis_d2h(p, slot, cfis, (p->sense_key << 12) |
1142 ATA_S_READY | ATA_S_ERROR);
/*
 * Top-level ATA command dispatcher for a slot's command FIS (opcode in
 * cfis[2]).  Handles identify, SET FEATURES, DMA/NCQ I/O, cache flush,
 * power commands, and ATAPI PACKET; anything else aborts with a
 * task-file error.
 */
1148 ahci_handle_cmd(struct ahci_port *p, int slot, uint8_t *cfis)
1152 case ATA_ATA_IDENTIFY:
1153 handle_identify(p, slot, cfis);
1155 case ATA_SETFEATURES:
/* Cache-control subfeatures are accepted as no-ops. */
1158 case ATA_SF_ENAB_WCACHE:
1159 case ATA_SF_DIS_WCACHE:
1160 case ATA_SF_ENAB_RCACHE:
1161 case ATA_SF_DIS_RCACHE:
1162 p->tfd = ATA_S_DSC | ATA_S_READY;
1164 case ATA_SF_SETXFER:
/* cfis[12] high bits select the mode class, low 3 bits the level. */
1166 switch (cfis[12] & 0xf8) {
1172 p->xfermode = (cfis[12] & 0x7);
1175 p->tfd = ATA_S_DSC | ATA_S_READY;
/* Unknown feature: abort. */
1179 p->tfd = ATA_S_ERROR | ATA_S_READY;
1180 p->tfd |= (ATA_ERROR_ABORT << 8);
1183 p->is |= AHCI_P_IX_DP;
1184 p->ci &= ~(1 << slot);
1185 ahci_generate_intr(p->pr_sc);
/* SET MULTIPLE: count must be 0 or a power of two <= 128. */
1189 if (cfis[12] != 0 &&
1190 (cfis[12] > 128 || (cfis[12] & (cfis[12] - 1)))) {
1191 p->tfd = ATA_S_ERROR | ATA_S_READY;
1192 p->tfd |= (ATA_ERROR_ABORT << 8);
1194 p->mult_sectors = cfis[12];
1195 p->tfd = ATA_S_DSC | ATA_S_READY;
1197 p->is |= AHCI_P_IX_DP;
1198 p->ci &= ~(1 << slot);
1199 ahci_generate_intr(p->pr_sc);
1203 case ATA_READ_DMA48:
1204 case ATA_WRITE_DMA48:
1205 case ATA_READ_FPDMA_QUEUED:
1206 case ATA_WRITE_FPDMA_QUEUED:
1207 ahci_handle_dma(p, slot, cfis, 0, 0);
1209 case ATA_FLUSHCACHE:
1210 case ATA_FLUSHCACHE48:
1211 ahci_handle_flush(p, slot, cfis);
/* Power-management commands complete immediately. */
1213 case ATA_STANDBY_CMD:
1216 case ATA_STANDBY_IMMEDIATE:
1217 case ATA_IDLE_IMMEDIATE:
1219 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1221 case ATA_ATAPI_IDENTIFY:
1222 handle_atapi_identify(p, slot, cfis);
/* PACKET only valid on ATAPI ports (the guard is elided here). */
1224 case ATA_PACKET_CMD:
1226 p->tfd = (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR;
1227 p->is |= AHCI_P_IX_TFE;
1228 p->ci &= ~(1 << slot);
1229 ahci_generate_intr(p->pr_sc);
1231 handle_packet_cmd(p, slot, cfis);
1234 WPRINTF("Unsupported cmd:%02x\n", cfis[2]);
1235 p->tfd = (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR;
1236 p->is |= AHCI_P_IX_TFE;
1237 p->ci &= ~(1 << slot);
1238 ahci_generate_intr(p->pr_sc);
/*
 * Process one issued command slot: map the command table (FIS plus
 * PRDT) from guest memory, validate that it is a H2D register FIS,
 * and either execute the command (C bit set) or handle a control/
 * soft-reset FIS.
 */
1244 ahci_handle_slot(struct ahci_port *p, int slot)
1246 struct ahci_cmd_hdr *hdr;
1247 struct ahci_prdt_entry *prdt;
1248 struct pci_ahci_softc *sc;
1253 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
/* Command FIS length is in dwords in the header flags. */
1254 cfl = (hdr->flags & 0x1f) * 4;
1255 cfis = paddr_guest2host(ahci_ctx(sc), hdr->ctba,
1256 0x80 + hdr->prdtl * sizeof(struct ahci_prdt_entry));
1257 prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
1261 for (i = 0; i < cfl; i++) {
1264 DPRINTF("%02x ", cfis[i]);
1268 for (i = 0; i < hdr->prdtl; i++) {
1269 DPRINTF("%d@%08"PRIx64"\n", prdt->dbc & 0x3fffff, prdt->dba);
1274 if (cfis[0] != FIS_TYPE_REGH2D) {
1275 WPRINTF("Not a H2D FIS:%02x\n", cfis[0]);
/* C bit (cfis[1] bit 7): command FIS vs. device-control FIS. */
1279 if (cfis[1] & 0x80) {
1280 ahci_handle_cmd(p, slot, cfis);
/* Control FIS: SRST bit in cfis[15] starts/ends a soft reset. */
1282 if (cfis[15] & (1 << 2))
1284 else if (p->reset) {
1288 p->ci &= ~(1 << slot);
/*
 * Run all pending command slots on a started port (PxCMD.ST set);
 * each set bit in PxCI corresponds to an issued slot.
 */
1293 ahci_handle_port(struct ahci_port *p)
1297 if (!(p->cmd & AHCI_P_CMD_ST))
1300 for (i = 0; (i < 32) && p->ci; i++) {
1301 if (p->ci & (1 << i))
1302 ahci_handle_slot(p, i);
1307 * blockif callback routine - this runs in the context of the blockif
1308 * i/o thread, so the mutex needs to be acquired.
1311 ata_ioreq_cb(struct blockif_req *br, int err)
1313 struct ahci_cmd_hdr *hdr;
1314 struct ahci_ioreq *aior;
1315 struct ahci_port *p;
1316 struct pci_ahci_softc *sc;
1319 int pending, slot, ncq;
1321 DPRINTF("%s %d\n", __func__, err);
1324 aior = br->br_param;
1328 pending = aior->prdtl;
1330 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
/* NCQ completions are signalled via an SDB FIS rather than D2H. */
1332 if (cfis[2] == ATA_WRITE_FPDMA_QUEUED ||
1333 cfis[2] == ATA_READ_FPDMA_QUEUED)
1336 pthread_mutex_lock(&sc->mtx);
1339 * Move the blockif request back to the free list
1341 STAILQ_INSERT_TAIL(&p->iofhd, aior, io_list);
/* More PRDT entries remain: reissue the next chunk of the transfer. */
1343 if (pending && !err) {
1344 ahci_handle_dma(p, slot, cfis, aior->done,
1345 hdr->prdtl - pending);
1349 if (!err && aior->done == aior->len) {
1350 tfd = ATA_S_READY | ATA_S_DSC;
1354 hdr->prdbc = aior->len;
/* Error path: abort status plus a SERR slot bit. */
1356 tfd = (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR;
1359 p->serr |= (1 << slot);
1363 p->sact &= ~(1 << slot);
1364 ahci_write_fis_sdb(p, slot, tfd);
1366 ahci_write_fis_d2h(p, slot, cfis, tfd);
1369 pthread_mutex_unlock(&sc->mtx);
1370 DPRINTF("%s exit\n", __func__);
/*
 * blockif completion callback for ATAPI reads; runs on the blockif
 * i/o thread, so the softc mutex is taken before touching port state.
 * Continues a split transfer, or completes the command with either
 * good status or ILLEGAL REQUEST sense on error.
 */
1374 atapi_ioreq_cb(struct blockif_req *br, int err)
1376 struct ahci_cmd_hdr *hdr;
1377 struct ahci_ioreq *aior;
1378 struct ahci_port *p;
1379 struct pci_ahci_softc *sc;
1384 DPRINTF("%s %d\n", __func__, err);
1386 aior = br->br_param;
1390 pending = aior->prdtl;
1392 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + aior->slot * AHCI_CL_SIZE);
1394 pthread_mutex_lock(&sc->mtx);
1397 * Move the blockif request back to the free list
1399 STAILQ_INSERT_TAIL(&p->iofhd, aior, io_list);
/* More PRDT entries remain: issue the next chunk of the read. */
1401 if (pending && !err) {
1402 atapi_read(p, slot, cfis, aior->done, hdr->prdtl - pending);
1406 if (!err && aior->done == aior->len) {
1407 tfd = ATA_S_READY | ATA_S_DSC;
1408 hdr->prdbc = aior->len;
1410 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1412 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1416 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1417 ahci_write_fis_d2h(p, slot, cfis, tfd);
1420 pthread_mutex_unlock(&sc->mtx);
1421 DPRINTF("%s exit\n", __func__);
/*
 * Allocate the port's pool of i/o request structures (sized to the
 * blockif queue depth) and place them all on the free list, wiring the
 * appropriate completion callback for ATA vs. ATAPI ports.
 * NOTE(review): calloc result is not visibly checked -- confirm against
 * the full source.
 */
1425 pci_ahci_ioreq_init(struct ahci_port *pr)
1427 struct ahci_ioreq *vr;
1430 pr->ioqsz = blockif_queuesz(pr->bctx);
1431 pr->ioreq = calloc(pr->ioqsz, sizeof(struct ahci_ioreq));
1432 STAILQ_INIT(&pr->iofhd);
1435 * Add all i/o request entries to the free queue
1437 for (i = 0; i < pr->ioqsz; i++) {
1441 vr->io_req.br_callback = ata_ioreq_cb;
1443 vr->io_req.br_callback = atapi_ioreq_cb;
1444 vr->io_req.br_param = vr;
1445 STAILQ_INSERT_TAIL(&pr->iofhd, vr, io_list);
/*
 * MMIO write handler for per-port registers.  Decodes the port index
 * and register offset, then handles IE, CMD (start/stop the command
 * list engine, enable/disable FIS receive, CLO), read-only registers,
 * and SCTL-triggered resets.  Called with sc->mtx held.
 */
1450 pci_ahci_port_write(struct pci_ahci_softc *sc, uint64_t offset, uint64_t value)
1452 int port = (offset - AHCI_OFFSET) / AHCI_STEP;
1453 offset = (offset - AHCI_OFFSET) % AHCI_STEP;
1454 struct ahci_port *p = &sc->port[port];
1456 DPRINTF("pci_ahci_port %d: write offset 0x%"PRIx64" value 0x%"PRIx64"\n",
1457 port, offset, value);
/* PxIE: only the architected interrupt-enable bits are writable. */
1476 p->ie = value & 0xFDC000FF;
1477 ahci_generate_intr(sc);
/* PxCMD.ST clear stops the command-list engine... */
1483 if (!(value & AHCI_P_CMD_ST)) {
1484 p->cmd &= ~(AHCI_P_CMD_CR | AHCI_P_CMD_CCS_MASK);
/* ...ST set maps the guest command list and starts it. */
1490 p->cmd |= AHCI_P_CMD_CR;
1491 clb = (uint64_t)p->clbu << 32 | p->clb;
1492 p->cmd_lst = paddr_guest2host(ahci_ctx(sc), clb,
1493 AHCI_CL_SIZE * AHCI_MAX_SLOTS);
/* FRE: map (or unmap) the received-FIS area. */
1496 if (value & AHCI_P_CMD_FRE) {
1499 p->cmd |= AHCI_P_CMD_FR;
1500 fb = (uint64_t)p->fbu << 32 | p->fb;
1501 /* we don't support FBSCP, so rfis size is 256Bytes */
1502 p->rfis = paddr_guest2host(ahci_ctx(sc), fb, 256);
1504 p->cmd &= ~AHCI_P_CMD_FR;
/* CLO is self-clearing: acknowledge and clear immediately. */
1507 if (value & AHCI_P_CMD_CLO) {
1509 p->cmd &= ~AHCI_P_CMD_CLO;
1512 ahci_handle_port(p);
1518 WPRINTF("pci_ahci_port: read only registers 0x%"PRIx64"\n", offset);
/* PxSCTL DET reset is only honored while the port is stopped. */
1521 if (!(p->cmd & AHCI_P_CMD_ST)) {
1522 if (value & ATA_SC_DET_RESET)
1535 ahci_handle_port(p);
/*
 * MMIO write handler for global HBA registers: GHC.HR resets the
 * controller, GHC.IE enables interrupts; CAP/PI/VS etc. are read-only.
 * Called with sc->mtx held.
 */
1545 pci_ahci_host_write(struct pci_ahci_softc *sc, uint64_t offset, uint64_t value)
1547 DPRINTF("pci_ahci_host: write offset 0x%"PRIx64" value 0x%"PRIx64"\n",
1555 DPRINTF("pci_ahci_host: read only registers 0x%"PRIx64"\n", offset);
1558 if (value & AHCI_GHC_HR)
1560 else if (value & AHCI_GHC_IE) {
1561 sc->ghc |= AHCI_GHC_IE;
1562 ahci_generate_intr(sc);
/* Presumably the IS write-1-to-clear path -- elided; verify. */
1567 ahci_generate_intr(sc);
/*
 * BAR 5 (ABAR) write entry point: takes the softc mutex and routes the
 * access to the global or per-port register handler by offset.
 */
1575 pci_ahci_write(struct vmctx *ctx, int vcpu, struct pci_devinst *pi,
1576 int baridx, uint64_t offset, int size, uint64_t value)
1578 struct pci_ahci_softc *sc = pi->pi_arg;
1580 assert(baridx == 5);
1583 pthread_mutex_lock(&sc->mtx);
1585 if (offset < AHCI_OFFSET)
1586 pci_ahci_host_write(sc, offset, value);
1587 else if (offset < AHCI_OFFSET + sc->ports * AHCI_STEP)
1588 pci_ahci_port_write(sc, offset, value);
1590 WPRINTF("pci_ahci: unknown i/o write offset 0x%"PRIx64"\n", offset);
1592 pthread_mutex_unlock(&sc->mtx);
/*
 * Global HBA register read: registers from CAP onward are laid out
 * contiguously in the softc, so the value is fetched by pointer
 * arithmetic from &sc->cap.
 */
1596 pci_ahci_host_read(struct pci_ahci_softc *sc, uint64_t offset)
1612 uint32_t *p = &sc->cap;
1613 p += (offset - AHCI_CAP) / sizeof(uint32_t);
1621 DPRINTF("pci_ahci_host: read offset 0x%"PRIx64" value 0x%x\n",
/*
 * Per-port register read: like the host read, port registers from CLB
 * onward are contiguous in struct ahci_port, fetched by offset from
 * &port->clb.
 */
1628 pci_ahci_port_read(struct pci_ahci_softc *sc, uint64_t offset)
1631 int port = (offset - AHCI_OFFSET) / AHCI_STEP;
1632 offset = (offset - AHCI_OFFSET) % AHCI_STEP;
1652 uint32_t *p= &sc->port[port].clb;
1653 p += (offset - AHCI_P_CLB) / sizeof(uint32_t);
1662 DPRINTF("pci_ahci_port %d: read offset 0x%"PRIx64" value 0x%x\n",
1663 port, offset, value);
/*
 * BAR 5 (ABAR) read entry point: takes the softc mutex and routes the
 * access to the global or per-port register reader by offset.
 */
1669 pci_ahci_read(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int baridx,
1670 uint64_t offset, int size)
1672 struct pci_ahci_softc *sc = pi->pi_arg;
1675 assert(baridx == 5);
1678 pthread_mutex_lock(&sc->mtx);
1680 if (offset < AHCI_OFFSET)
1681 value = pci_ahci_host_read(sc, offset);
1682 else if (offset < AHCI_OFFSET + sc->ports * AHCI_STEP)
1683 value = pci_ahci_port_read(sc, offset);
1686 WPRINTF("pci_ahci: unknown i/o read offset 0x%"PRIx64"\n", offset);
1689 pthread_mutex_unlock(&sc->mtx);
/*
 * Common device-model initializer (atapi selects disk vs. CD).  Opens
 * the backing image on port 0 only, allocates the i/o request pool,
 * programs CAP/PI/config space to mimic an Intel ICH8 AHCI, and maps
 * BAR 5.  Returns 0 on success (error paths are mostly elided).
 */
1695 pci_ahci_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts, int atapi)
1697 char bident[sizeof("XX:X:X")];
1698 struct blockif_ctxt *bctxt;
1699 struct pci_ahci_softc *sc;
1705 fprintf(stderr, "pci_ahci: backing device required\n");
/* NOTE(review): debug log opened unconditionally at a fixed path. */
1710 dbg = fopen("/tmp/log", "w+");
/* NOTE(review): malloc result not visibly checked -- confirm. */
1713 sc = malloc(sizeof(struct pci_ahci_softc));
1714 memset(sc, 0, sizeof(struct pci_ahci_softc));
1717 sc->ports = MAX_PORTS;
1720 * Only use port 0 for a backing device. All other ports will be
1723 sc->port[0].atapi = atapi;
1726 * Attempt to open the backing image. Use the PCI
1727 * slot/func for the identifier string.
1729 snprintf(bident, sizeof(bident), "%d:%d", pi->pi_slot, pi->pi_func);
1730 bctxt = blockif_open(opts, bident);
1731 if (bctxt == NULL) {
1735 sc->port[0].bctx = bctxt;
1736 sc->port[0].pr_sc = sc;
1739 * Allocate blockif request structures and add them
1742 pci_ahci_ioreq_init(&sc->port[0]);
1744 pthread_mutex_init(&sc->mtx, NULL);
1746 /* Intel ICH8 AHCI */
1747 slots = sc->port[0].ioqsz;
1751 sc->cap = AHCI_CAP_64BIT | AHCI_CAP_SNCQ | AHCI_CAP_SSNTF |
1752 AHCI_CAP_SMPS | AHCI_CAP_SSS | AHCI_CAP_SALP |
1753 AHCI_CAP_SAL | AHCI_CAP_SCLO | (0x3 << AHCI_CAP_ISS_SHIFT)|
1754 AHCI_CAP_PMD | AHCI_CAP_SSC | AHCI_CAP_PSC |
1755 (slots << AHCI_CAP_NCS_SHIFT) | AHCI_CAP_SXS | (sc->ports - 1);
1757 /* Only port 0 implemented */
1760 sc->cap2 = AHCI_CAP2_APST;
/* PCI config: Intel ICH8 AHCI device/vendor IDs, SATA AHCI class. */
1763 pci_set_cfgdata16(pi, PCIR_DEVICE, 0x2821);
1764 pci_set_cfgdata16(pi, PCIR_VENDOR, 0x8086);
1765 pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_STORAGE);
1766 pci_set_cfgdata8(pi, PCIR_SUBCLASS, PCIS_STORAGE_SATA);
1767 pci_set_cfgdata8(pi, PCIR_PROGIF, PCIP_STORAGE_SATA_AHCI_1_0);
1768 pci_emul_add_msicap(pi, 1);
1769 pci_emul_alloc_bar(pi, 5, PCIBAR_MEM32,
1770 AHCI_OFFSET + sc->ports * AHCI_STEP);
/* Error unwind: close the backing image. */
1774 blockif_close(sc->port[0].bctx);
/* Hard-disk flavor: common init with atapi = 0. */
1782 pci_ahci_hd_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
1785 return (pci_ahci_init(ctx, pi, opts, 0));
/* CD-ROM flavor: common init with atapi = 1. */
1789 pci_ahci_atapi_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
1792 return (pci_ahci_init(ctx, pi, opts, 1));
1796 * Use separate emulation names to distinguish drive and atapi devices
1798 struct pci_devemu pci_de_ahci_hd = {
1799 .pe_emu = "ahci-hd",
1800 .pe_init = pci_ahci_hd_init,
1801 .pe_barwrite = pci_ahci_write,
1802 .pe_barread = pci_ahci_read
1804 PCI_EMUL_SET(pci_de_ahci_hd);
/* The two variants share the same BAR handlers, differing only in init. */
1806 struct pci_devemu pci_de_ahci_cd = {
1807 .pe_emu = "ahci-cd",
1808 .pe_init = pci_ahci_atapi_init,
1809 .pe_barwrite = pci_ahci_write,
1810 .pe_barread = pci_ahci_read
1812 PCI_EMUL_SET(pci_de_ahci_cd);